2025-02-02 14:58:18 -05:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
2025-06-03 11:20:17 -07:00
|
|
|
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
2025-02-02 14:58:18 -05:00
|
|
|
|
2024-04-03 21:52:18 -07:00
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
from vllm.entrypoints.llm import LLM
|
|
|
|
|
from vllm.sampling_params import SamplingParams
|
|
|
|
|
|
|
|
|
|
|
2025-02-22 19:19:45 -08:00
|
|
|
@pytest.mark.parametrize("model", ["distilbert/distilgpt2"])
def test_computed_prefix_blocks(model: str):
    """Check the ``detokenize`` flag on :class:`SamplingParams`.

    Runs the same prompt twice — first with ``detokenize=False``, then with
    ``detokenize=True`` — and asserts that only the detokenized run produces
    output text, while both runs yield identical ``token_ids``.
    """
    question = (
        "You are a helpful assistant. How do I build a car from cardboard and "
        "paper clips? Is there an easy to follow video tutorial available "
        "online for free?"
    )

    engine = LLM(model=model)

    # First pass: detokenization disabled, so no text should be produced.
    params = SamplingParams(max_tokens=10, temperature=0.0, detokenize=False)
    raw_completion = engine.generate(question, params)[0].outputs[0]

    # Second pass: flip detokenization on for the same params object.
    params.detokenize = True
    text_completion = engine.generate(question, params)[0].outputs[0]

    # Only the detokenized run carries text; the sampled token ids must match
    # because sampling is greedy (temperature=0.0) and the prompt is identical.
    assert raw_completion.text == ""
    assert text_completion.text != ""
    assert raw_completion.token_ids == text_completion.token_ids
|