(bugfix): Fixed encode in LLM entrypoint for IOProcessor plugin prompts (#34618)

Signed-off-by: Christian Pinto <christian.pinto@ibm.com>
Christian Pinto
2026-02-16 15:33:55 +00:00
committed by GitHub
parent 03a8770a6d
commit 6930becd45
3 changed files with 17 additions and 5 deletions

@@ -120,13 +120,15 @@ async def test_prithvi_mae_plugin_online(
 def test_prithvi_mae_plugin_offline(
     vllm_runner, model_name: str, image_url: str | dict, plugin: str, expected_hash: str
 ):
-    img_prompt = dict(
+    img_data = dict(
         data=image_url,
         data_format="url",
         image_format="tiff",
         out_data_format="b64_json",
     )
+    prompt = dict(data=img_data)
     with vllm_runner(
         model_name,
         runner="pooling",
@@ -139,7 +141,7 @@ def test_prithvi_mae_plugin_offline(
         io_processor_plugin=plugin,
         default_torch_num_threads=1,
     ) as llm_runner:
-        pooler_output = llm_runner.get_llm().encode(img_prompt, pooling_task="plugin")
+        pooler_output = llm_runner.get_llm().encode(prompt, pooling_task="plugin")
         output = pooler_output[0].outputs
         # verify the output is formatted as expected for this plugin
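
In plain terms, the updated test no longer passes the raw img_data dict (previously named img_prompt) straight to encode; it wraps the plugin payload under an outer "data" key first, which is what the fixed entrypoint expects for IO processor plugin prompts. Below is a minimal sketch of the corrected offline usage, assuming vllm_runner simply forwards its keyword arguments to vllm.LLM; the model name, plugin name, and image URL are placeholders, not values from this commit.

    from vllm import LLM

    # Placeholder model and plugin names; the test parametrizes these.
    llm = LLM(
        model="<pooling-model-with-io-processor>",
        runner="pooling",
        io_processor_plugin="<io-processor-plugin-name>",
    )

    # Plugin-specific payload, mirroring the test's img_data dict.
    img_data = dict(
        data="https://example.com/image.tiff",  # placeholder image URL
        data_format="url",
        image_format="tiff",
        out_data_format="b64_json",
    )

    # The change exercised by the test: nest the payload under "data"
    # before calling encode with the "plugin" pooling task.
    prompt = dict(data=img_data)
    pooler_output = llm.encode(prompt, pooling_task="plugin")
    output = pooler_output[0].outputs  # plugin-formatted output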