add(v1): RequestStatesStats to RequestOutput (#24947)

Signed-off-by: huijjj <huijong.jeong@squeezebits.com>
This commit is contained in:
HUIJONG JEONG
2025-10-03 17:56:25 +09:00
committed by GitHub
parent eb0fa43868
commit 3e70e3d4d5
3 changed files with 24 additions and 11 deletions

View File

@@ -86,3 +86,16 @@ def test_max_model_len():
# It can be less if generation finishes due to other reasons (e.g., EOS)
# before reaching the absolute model length limit.
assert num_total_tokens <= max_model_len
def test_log_stats():
    """When stat logging is enabled, every RequestOutput must carry metrics."""
    # Low GPU-memory fraction and eager mode keep the engine cheap to spin up.
    engine = LLM(
        model=MODEL_NAME,
        disable_log_stats=False,
        gpu_memory_utilization=0.10,
        enforce_eager=True,  # reduce test time
    )
    results = engine.generate(PROMPTS, sampling_params=None)
    # disable_log_stats is False, so no output may have a missing metrics field.
    for result in results:
        assert result.metrics is not None