[Frontend] Use new Renderer for Completions and Tokenize API (#32863)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Authored by Cyrus Leung on 2026-01-31 20:51:15 +08:00; committed by GitHub
parent 8980001c93
commit f0a1c8453a
64 changed files with 2116 additions and 2003 deletions


@@ -96,7 +96,7 @@ def test_gemma_multimodal(
         dtype="bfloat16",
     ) as vllm_model:
         llm = vllm_model.get_llm()
-        prompts = llm.preprocess_chat(messages)
+        prompts = llm._preprocess_chat([messages])
         result = llm.classify(prompts)
         assert result[0].outputs.probs[0] > 0.95
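
As the hunk above shows, the public `preprocess_chat` helper has been replaced by a private, batched `_preprocess_chat` that takes a list of conversations. A minimal sketch of the call-site migration, written as if inside the test's `with vllm_runner(...) as vllm_model:` block (the message payload here is illustrative):

```python
# Illustrative single conversation in the OpenAI-style chat format.
messages = [{"role": "user", "content": "Classify this input."}]

llm = vllm_model.get_llm()

# Before: prompts = llm.preprocess_chat(messages)
# After: the helper is private and batched, so a single conversation
# must be wrapped in a list of conversations.
prompts = llm._preprocess_chat([messages])
result = llm.classify(prompts)
```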


@@ -29,7 +29,8 @@ def test_smaller_truncation_size(
         model_name, runner="pooling", max_model_len=max_model_len
     ) as vllm_model:
         vllm_output = vllm_model.llm.embed(
-            input_str, truncate_prompt_tokens=truncate_prompt_tokens
+            input_str,
+            tokenization_kwargs=dict(truncate_prompt_tokens=truncate_prompt_tokens),
         )
         prompt_tokens = vllm_output[0].prompt_token_ids
@@ -44,7 +45,8 @@ def test_max_truncation_size(vllm_runner, model_name=MODEL_NAME, input_str=input
         model_name, runner="pooling", max_model_len=max_model_len
     ) as vllm_model:
         vllm_output = vllm_model.llm.embed(
-            input_str, truncate_prompt_tokens=truncate_prompt_tokens
+            input_str,
+            tokenization_kwargs=dict(truncate_prompt_tokens=truncate_prompt_tokens),
         )
         prompt_tokens = vllm_output[0].prompt_token_ids
@@ -64,7 +66,8 @@ def test_bigger_truncation_size(
         ) as vllm_model,
     ):
         llm_output = vllm_model.llm.embed(
-            input_str, truncate_prompt_tokens=truncate_prompt_tokens
+            input_str,
+            tokenization_kwargs=dict(truncate_prompt_tokens=truncate_prompt_tokens),
         )
         assert (
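
All three hunks apply the same migration: `truncate_prompt_tokens` is no longer passed as a top-level keyword of `LLM.embed` and instead travels inside `tokenization_kwargs`. A minimal before/after sketch; the model name, input, and truncation size here are illustrative:

```python
from vllm import LLM

# Illustrative embedding model, input, and truncation size.
llm = LLM(model="intfloat/e5-small", runner="pooling")
input_str = "Some long document text to be embedded. " * 50
truncate_prompt_tokens = 128

# Before this commit:
#   outputs = llm.embed(input_str, truncate_prompt_tokens=truncate_prompt_tokens)
# After, tokenizer-level options are grouped under tokenization_kwargs:
outputs = llm.embed(
    input_str,
    tokenization_kwargs=dict(truncate_prompt_tokens=truncate_prompt_tokens),
)

# The truncation shows up in the prompt token IDs attached to the output.
assert len(outputs[0].prompt_token_ids) <= truncate_prompt_tokens
```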


@@ -187,7 +187,10 @@ def mteb_test_embed_models(
         head_dtype = model_config.head_dtype
         # Test embedding_size, isnan and whether to use normalize
-        vllm_outputs = vllm_model.embed(example_prompts, truncate_prompt_tokens=-1)
+        vllm_outputs = vllm_model.embed(
+            example_prompts,
+            tokenization_kwargs=dict(truncate_prompt_tokens=-1),
+        )
         outputs_tensor = torch.tensor(vllm_outputs)
         assert not torch.any(torch.isnan(outputs_tensor))
         embedding_size = model_config.embedding_size


@@ -79,9 +79,9 @@ class VllmMtebCrossEncoder(MtebCrossEncoderMixin):
         outputs = self.llm.score(
             queries,
             corpus,
-            truncate_prompt_tokens=-1,
             use_tqdm=False,
             chat_template=self.chat_template,
+            tokenization_kwargs={"truncate_prompt_tokens": -1},
         )
         scores = np.array(outputs)
         scores = scores[np.argsort(r)]
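
`LLM.score` gets the same treatment: `truncate_prompt_tokens=-1`, which in vLLM means truncating each prompt to the model's maximum length, simply moves into the `tokenization_kwargs` dict. A standalone sketch of the new call shape, assuming a cross-encoder reranker (the model name and text pairs are illustrative):

```python
from vllm import LLM

# Illustrative cross-encoder; any vLLM-supported reranker follows the same shape.
llm = LLM(model="BAAI/bge-reranker-v2-m3", runner="pooling")

queries = ["What is the capital of France?"]
corpus = ["Paris is the capital and largest city of France."]

outputs = llm.score(
    queries,
    corpus,
    use_tqdm=False,
    # -1 truncates each prompt to the model's maximum length.
    tokenization_kwargs={"truncate_prompt_tokens": -1},
)
scores = [output.outputs.score for output in outputs]
```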