[Bugfix] Replace PoolingParams.normalize with use_activation (#32243)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung
Date: 2026-01-13 18:45:42 +08:00
Committed by: simon-mo
Parent: 11b6af5280
Commit: 0aa8c40552

21 changed files with 68 additions and 70 deletions
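
This rename makes the post-pooling activation switch uniform across pooling models: for embedding models the "activation" is L2 normalization, while for cross-encoder scoring models it is a sigmoid over the logits. A minimal before/after sketch of the renamed parameter (the model name, task flag, and prompt are illustrative assumptions, not part of this commit):

```python
from vllm import LLM, PoolingParams

llm = LLM(model="BAAI/bge-base-en-v1.5", task="embed")  # placeholder model

# Before this commit: PoolingParams(normalize=False)
# After this commit, the same switch is spelled use_activation:
params = PoolingParams(use_activation=False)  # skip L2 normalization

outputs = llm.embed(["Hello, world!"], pooling_params=params)
print(outputs[0].outputs.embedding[:4])
```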

View File

@@ -53,7 +53,9 @@ def test_token_embed(llm: LLM):
 def test_pooling_params(llm: LLM):
     def get_outputs(normalize):
         outputs = llm.embed(
-            prompts, pooling_params=PoolingParams(normalize=normalize), use_tqdm=False
+            prompts,
+            pooling_params=PoolingParams(use_activation=normalize),
+            use_tqdm=False,
         )
         return torch.tensor([x.outputs.embedding for x in outputs])
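
For embedding runs, use_activation controls whether the pooled vector is L2-normalized, which is the invariant this test exercises. A minimal sketch of that relationship, reusing the hunk's get_outputs helper (the helper and its normalize argument name come from the diff; the tolerance mirrors the test's style):

```python
import torch
import torch.nn.functional as F

# Embeddings with the activation enabled should equal the L2-normalized
# version of the raw pooled embeddings.
w_activation = get_outputs(normalize=True)
wo_activation = get_outputs(normalize=False)
assert torch.allclose(
    w_activation, F.normalize(wo_activation, p=2, dim=-1), atol=1e-2
)
```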

View File

@@ -216,7 +216,7 @@ def server_with_chunked_processing():
         "512",  # Set smaller max_model_len to trigger chunking mechanism
         "--pooler-config",
         (
-            '{"pooling_type": "MEAN", "normalize": true, '
+            '{"pooling_type": "MEAN", "use_activation": true, '
             '"enable_chunked_processing": true, "max_embed_len": 10000}'
         ),
         "--gpu-memory-utilization",

View File

@@ -236,17 +236,14 @@ class TestModel:
                     "use_activation": use_activation,
                 },
             )
-            if response.status_code != 200:
-                return response
             outputs = response.json()
             return torch.tensor([x["score"] for x in outputs["data"]])
 
-        if model["is_cross_encoder"]:
-            default = get_outputs(use_activation=None)
-            w_activation = get_outputs(use_activation=True)
-            wo_activation = get_outputs(use_activation=False)
+        default = get_outputs(use_activation=None)
+        w_activation = get_outputs(use_activation=True)
+        wo_activation = get_outputs(use_activation=False)
 
+        if model["is_cross_encoder"]:
             assert torch.allclose(default, w_activation, atol=1e-2), (
                 "Default should use activation."
             )
@@ -256,9 +253,3 @@ class TestModel:
             assert torch.allclose(F.sigmoid(wo_activation), w_activation, atol=1e-2), (
                 "w_activation should be close to activation(wo_activation)."
             )
-        else:
-            get_outputs(use_activation=None)
-            # The activation parameter only works for the is_cross_encoder model
-            response = get_outputs(use_activation=True)
-            assert response.status_code == 400
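
Previously the score endpoint rejected use_activation with HTTP 400 for non-cross-encoder models; after this change the parameter is accepted for all pooling models, so the outputs are fetched unconditionally and only the sigmoid check stays gated on is_cross_encoder. A hedged sketch of the relationship the test asserts, against a local server (URL, model name, and texts are placeholders):

```python
import requests
import torch

def get_scores(use_activation):
    # Payload mirrors the test above; /score is the server's scoring route.
    response = requests.post(
        "http://localhost:8000/score",
        json={
            "model": "BAAI/bge-reranker-base",
            "text_1": "What is the capital of France?",
            "text_2": ["Paris is the capital of France."],
            "use_activation": use_activation,
        },
    )
    response.raise_for_status()  # no 400 for non-cross-encoders anymore
    return torch.tensor([x["score"] for x in response.json()["data"]])

raw = get_scores(use_activation=False)
activated = get_scores(use_activation=True)
# For a cross-encoder, the activation is a sigmoid over the raw logits.
assert torch.allclose(torch.sigmoid(raw), activated, atol=1e-2)
```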