[Core] Cleanup engine pause/sleep logic (#34528)
Signed-off-by: Nick Hill <nickhill123@gmail.com>
@@ -3,8 +3,10 @@
 
 import asyncio
 import os
+import time
 from contextlib import ExitStack
 from dataclasses import dataclass
+from typing import Any
 
 import pytest
 
@@ -187,24 +189,33 @@ async def test_load(
 # =============================================================================
 # DP Pause/Resume Tests
 # =============================================================================
+# When expert_parallel=False: uses non-MoE model (DP replicas as separate engines).
+# When expert_parallel=True: uses MoE model + EP (DPEngineCoreProc, sync pause path).
 
 DP_PAUSE_MODEL = "hmellor/tiny-random-LlamaForCausalLM"
+DP_PAUSE_MODEL_MOE = "ibm-research/PowerMoE-3b"
DP_PAUSE_PROMPT = "This is a test of data parallel pause"
 
 
+def _get_dp_pause_engine_args(expert_parallel: bool) -> AsyncEngineArgs:
+    """Engine args for DP pause tests: MoE+EP when expert_parallel else small Llama."""
+    model = DP_PAUSE_MODEL_MOE if expert_parallel else DP_PAUSE_MODEL
+    return AsyncEngineArgs(
+        model=model,
+        enforce_eager=True,
+        tensor_parallel_size=int(os.getenv("TP_SIZE", 1)),
+        data_parallel_size=DP_SIZE,
+        data_parallel_backend="mp",
+        enable_expert_parallel=expert_parallel,
+    )
+
+
 @pytest.mark.asyncio
-async def test_dp_pause_resume_basic():
+@pytest.mark.parametrize("expert_parallel", [False, True])
+async def test_dp_pause_resume_basic(expert_parallel: bool):
     """Pausing from the client (one call) pauses all DP ranks; resume clears it."""
     if current_platform.is_rocm():
         pytest.skip("DP pause tests use mp backend only")
     with ExitStack() as after:
-        engine_args = AsyncEngineArgs(
-            model=DP_PAUSE_MODEL,
-            enforce_eager=True,
-            tensor_parallel_size=int(os.getenv("TP_SIZE", 1)),
-            data_parallel_size=DP_SIZE,
-            data_parallel_backend="mp",
-        )
+        engine_args = _get_dp_pause_engine_args(expert_parallel)
         engine = AsyncLLM.from_engine_args(engine_args)
         after.callback(engine.shutdown)
 
@@ -226,18 +237,11 @@ async def test_dp_pause_resume_basic():
 
 
 @pytest.mark.asyncio
-async def test_dp_pause_abort():
+@pytest.mark.parametrize("expert_parallel", [False, True])
+async def test_dp_pause_abort(expert_parallel: bool):
     """Pause with abort from one client aborts in-flight requests on all DP ranks."""
     if current_platform.is_rocm():
         pytest.skip("DP pause tests use mp backend only")
     with ExitStack() as after:
-        engine_args = AsyncEngineArgs(
-            model=DP_PAUSE_MODEL,
-            enforce_eager=True,
-            tensor_parallel_size=int(os.getenv("TP_SIZE", 1)),
-            data_parallel_size=DP_SIZE,
-            data_parallel_backend="mp",
-        )
+        engine_args = _get_dp_pause_engine_args(expert_parallel)
         engine = AsyncLLM.from_engine_args(engine_args)
         after.callback(engine.shutdown)
 
@@ -286,41 +290,111 @@ async def test_dp_pause_abort():
 
 
 @pytest.mark.asyncio
-async def test_dp_pause_keep_then_resume():
-    """Pause with keep queues new requests; resume allows them to run."""
-    if current_platform.is_rocm():
-        pytest.skip("DP pause tests use mp backend only")
+@pytest.mark.parametrize("expert_parallel", [False, True])
+async def test_dp_pause_keep_then_resume(expert_parallel: bool):
+    """Start generation, pause after a few tokens (keep mode), resume; verify gap."""
+
+    pause_duration = 2.0
+    min_tokens_before_pause = 3
+
     with ExitStack() as after:
-        engine_args = AsyncEngineArgs(
-            model=DP_PAUSE_MODEL,
-            enforce_eager=True,
-            tensor_parallel_size=int(os.getenv("TP_SIZE", 1)),
-            data_parallel_size=DP_SIZE,
-            data_parallel_backend="mp",
-        )
+        engine_args = _get_dp_pause_engine_args(expert_parallel)
         engine = AsyncLLM.from_engine_args(engine_args)
         after.callback(engine.shutdown)
 
-        await engine.pause_generation(mode="keep")
-        assert await engine.is_paused()
+        sampling_params = SamplingParams(max_tokens=15, ignore_eos=True)
+        token_times: list[tuple[int, float]] = []
+        pause_token_idx = 0
 
-        request_done = asyncio.Event()
-
-        async def gen():
-            async for out in engine.generate(
-                request_id="queued-keep",
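+        # Track (token_count, timestamp) per streamed output so the pause gap can be measured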
+        async def generator_task():
+            nonlocal pause_token_idx
+            out = None
+            async for output in engine.generate(
+                request_id="keep-resume-req",
                 prompt=DP_PAUSE_PROMPT,
-                sampling_params=SamplingParams(max_tokens=5),
+                sampling_params=sampling_params,
             ):
-                pass
-            request_done.set()
+                token_count = len(output.outputs[0].token_ids)
+                token_times.append((token_count, time.monotonic()))
+                out = output
+            return out
 
-        task = asyncio.create_task(gen())
-        await asyncio.sleep(0.2)
-        assert not request_done.is_set()
+        async def controller_task():
+            nonlocal pause_token_idx
+            while len(token_times) < min_tokens_before_pause:
+                await asyncio.sleep(0.01)
+            await engine.pause_generation(mode="keep")
+            await asyncio.sleep(pause_duration)
+            pause_token_idx = len(token_times)
+            await engine.resume_generation()
 
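+        # Run the generator and the pause/resume controller concurrently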
+        gen_task = asyncio.create_task(generator_task())
+        ctrl_task = asyncio.create_task(controller_task())
+        final_output, _ = await asyncio.gather(gen_task, ctrl_task)
+
+        assert final_output is not None and final_output.finished
+        assert await engine.is_paused() is False
+        assert pause_token_idx >= min_tokens_before_pause
+        if pause_token_idx > 0 and pause_token_idx < len(token_times):
+            pause_gap = (
+                token_times[pause_token_idx][1] - token_times[pause_token_idx - 1][1]
+            )
+            assert pause_gap >= pause_duration * 0.8, (
+                f"Expected gap ~{pause_duration}s after pause, got {pause_gap:.3f}s"
+            )
+
+
+@pytest.mark.asyncio
+async def test_dp_pause_keep_race_staggered_engines():
+    """Race: send pause(keep) to engine 0, then add two requests,
+    then pause(keep) to engine 1. Ensures no deadlock when pause
+    requests are staggered and requests arrive in between."""
+    if DP_SIZE != 2:
+        pytest.skip("test_dp_pause_keep_race_staggered_engines requires DP_SIZE=2")
+
+    with ExitStack() as after:
+        engine_args = _get_dp_pause_engine_args(expert_parallel=True)
+        engine = AsyncLLM.from_engine_args(engine_args)
+        after.callback(engine.shutdown)
+
+        client = engine.engine_core
+
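+        # Intercept the client's utility RPC so the pause(keep) broadcast
+        # reaches the two engines staggered instead of together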
+        original_call_utility = client.call_utility_async
+        mid_pause_tasks: list[asyncio.Task] = []
+
+        async def staggered_pause_keep(method: str, *args) -> Any:
+            if method != "pause_scheduler" or not args or args[0] != "keep":
+                return await original_call_utility(method, *args)
+            # Send pause(keep) to engine 0 first
+            await client._call_utility_async(
+                method, *args, engine=client.core_engines[0]
+            )
+            # In the middle: send two requests (race window)
+            sp = SamplingParams(max_tokens=5, ignore_eos=True)
+
+            async def consume_gen(req_id: str) -> None:
+                async for _ in engine.generate(
+                    request_id=req_id,
+                    prompt=DP_PAUSE_PROMPT,
+                    sampling_params=sp,
+                ):
+                    pass
+
+            t1 = asyncio.create_task(consume_gen("race-1"))
+            t2 = asyncio.create_task(consume_gen("race-2"))
+            mid_pause_tasks.extend([t1, t2])
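+            # Let the racing requests arrive while only engine 0 is paused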
+            await asyncio.sleep(3)
+            # Then send pause(keep) to engine 1
+            result = await client._call_utility_async(
+                method, *args, engine=client.core_engines[1]
+            )
+            return result
+
+        client.call_utility_async = staggered_pause_keep
+
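+        # This pause goes through staggered_pause_keep above, opening the race window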
+        await engine.pause_generation(mode="keep")
+        assert await engine.is_paused()
+        await engine.resume_generation()
-        final = await asyncio.wait_for(task, timeout=10.0)
-        assert final.finished
+        assert not await engine.is_paused()
+        # Let the two requests we sent mid-pause complete
+        await asyncio.gather(*mid_pause_tasks)