[Misc] Enable Paligemma's PrefixLM attention mask computation (#31725)

Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
This commit is contained in:
Isotr0py
2026-01-06 03:31:49 +08:00
committed by GitHub
parent d8e38d4939
commit 51e38a8e30
2 changed files with 1 addition and 7 deletions

View File

@@ -121,10 +121,6 @@ VLM_TEST_SETTINGS = {
),
auto_cls=AutoModelForImageTextToText,
vllm_output_post_proc=model_utils.paligemma_vllm_to_hf_output,
dtype="bfloat16",
marks=[
pytest.mark.skip(reason="vLLM does not support PrefixLM attention mask")
],
),
"qwen2_5_vl": VLMTestInfo(
models=["Qwen/Qwen2.5-VL-3B-Instruct"],