Support logit_bias in v1 Sampler (#13079)
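For readers unfamiliar with the feature: `logit_bias` maps token IDs to additive adjustments applied to the model's raw logits before sampling, so a positive bias makes a token more likely and a negative bias less likely. A minimal sketch of the idea, using illustrative names rather than the actual v1 Sampler code:

```python
import torch
from typing import Dict, List, Optional


def apply_logit_bias(
    logits: torch.Tensor,  # shape [num_reqs, vocab_size]
    logit_bias: List[Optional[Dict[int, float]]],  # one entry per request
) -> torch.Tensor:
    """Add each request's per-token bias to its row of logits."""
    for row, bias in enumerate(logit_bias):
        if bias:  # None (or an empty dict) means no bias for this request
            for token_id, value in bias.items():
                logits[row, token_id] += value
    return logits
```

The diff below extends the expected-metadata test helper so this new field is exercised end to end.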
@@ -45,9 +45,11 @@ def _remove_requests(


 def _construct_expected_sampling_metadata(
-        reqs: List[CachedRequestState], req_ids_retained: Set[int],
-        req_id_index_in_input_batch: Dict[str, int],
-        device: torch.device) -> SamplingMetadata:
+    reqs: List[CachedRequestState],
+    req_ids_retained: Set[int],
+    req_id_index_in_input_batch: Dict[str, int],
+    device: torch.device,
+) -> SamplingMetadata:
     """
     Constructs and returns the expected SamplingMetadata for this
     batch.
@@ -63,6 +65,7 @@ def _construct_expected_sampling_metadata(
     temperature = [0.0 for _ in range(num_reqs)]
     stop_token_ids: List[Set[int]] = [set() for _ in range(num_reqs)]
     min_tokens = [0 for _ in range(num_reqs)]
+    logit_bias = [None] * num_reqs
     for req in reqs:
         if req.req_id not in req_ids_retained:
             continue
@@ -71,20 +74,21 @@ def _construct_expected_sampling_metadata(
         prompt_token_ids[index_in_input_batch] = req.prompt_token_ids
         presence_penalties[
            index_in_input_batch] = req.sampling_params.presence_penalty
-        frequency_penalties[
-            index_in_input_batch] = req.sampling_params.frequency_penalty
-        repetition_penalties[
-            index_in_input_batch] = req.sampling_params.repetition_penalty
+        frequency_penalties[index_in_input_batch] = (
+            req.sampling_params.frequency_penalty)
+        repetition_penalties[index_in_input_batch] = (
+            req.sampling_params.repetition_penalty)
         top_k[index_in_input_batch] = req.sampling_params.top_k
         top_p[index_in_input_batch] = req.sampling_params.top_p
         temperature[index_in_input_batch] = req.sampling_params.temperature
         stop_token_ids[
            index_in_input_batch] = req.sampling_params.all_stop_token_ids
         min_tokens[index_in_input_batch] = req.sampling_params.min_tokens
+        logit_bias[index_in_input_batch] = req.sampling_params.logit_bias

     return SamplingMetadata(
-        temperature=torch.tensor(temperature, dtype=torch.float, device=device),
+        temperature=torch.tensor(temperature, dtype=torch.float,
+                                 device=device),
         all_greedy=False,
         all_random=True,
         top_p=torch.tensor(top_p, dtype=torch.float, device=device),
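Note that, unlike the temperature and penalty fields, the expected `logit_bias` stays a plain Python list rather than a tensor: each request contributes at most a small sparse dict, so a dense per-vocab tensor would be wasteful. A hypothetical, reduced view of the shape being tracked:

```python
from typing import Dict, List, Optional

# One Optional[Dict[token_id, bias]] per request slot in the batch.
logit_bias_example: List[Optional[Dict[int, float]]] = [
    None,                # request 0 sets no bias
    {42: 2.5},           # request 1 boosts token 42
    {0: -3.0, 7: 1.0},   # request 2 biases two tokens
]
```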
@@ -93,41 +97,45 @@ def _construct_expected_sampling_metadata(
         no_top_k=all(x == 0 for x in top_k),
         generators={},
         max_num_logprobs=0,
-        prompt_token_ids= make_tensor_with_pad(
+        prompt_token_ids=make_tensor_with_pad(
             prompt_token_ids,
             pad=VOCAB_SIZE,
             device=torch.device(device),
             dtype=torch.int64,
         ),
-        frequency_penalties=torch.tensor(
-            frequency_penalties, dtype=torch.float,
-            device=device),
-        presence_penalties=torch.tensor(
-            presence_penalties, dtype=torch.float,
-            device=device),
-        repetition_penalties=torch.tensor(
-            repetition_penalties, dtype=torch.float,
-            device=device),
+        frequency_penalties=torch.tensor(frequency_penalties,
+                                         dtype=torch.float,
+                                         device=device),
+        presence_penalties=torch.tensor(presence_penalties,
+                                        dtype=torch.float,
+                                        device=device),
+        repetition_penalties=torch.tensor(repetition_penalties,
+                                          dtype=torch.float,
+                                          device=device),
         output_token_ids=output_token_ids,
         min_tokens=min_tokens,
         stop_token_ids=stop_token_ids,
-        no_penalties=(all(x ==0 for x in presence_penalties) and \
-                      all(x ==0 for x in frequency_penalties) and \
-                      all(x ==1 for x in repetition_penalties))
+        no_penalties=(all(x == 0 for x in presence_penalties)
+                      and all(x == 0 for x in frequency_penalties)
+                      and all(x == 1 for x in repetition_penalties)),
+        logit_bias=logit_bias,
     )


 def _create_sampling_params():
-    return SamplingParams(top_k=np.random.randint(1, 10),
-                          top_p=np.random.uniform(0.0, 1.0),
-                          presence_penalty=np.random.uniform(-2.0, 2.0),
-                          repetition_penalty=np.random.uniform(0.0, 2.0),
-                          frequency_penalty=np.random.uniform(-2.0, 2.0),
-                          min_tokens=np.random.randint(1, 10),
-                          stop_token_ids=[
-                              np.random.randint(0, VOCAB_SIZE)
-                              for _ in range(np.random.randint(10))
-                          ])
+    return SamplingParams(
+        top_k=np.random.randint(1, 10),
+        top_p=np.random.uniform(0.0, 1.0),
+        presence_penalty=np.random.uniform(-2.0, 2.0),
+        repetition_penalty=np.random.uniform(0.0, 2.0),
+        frequency_penalty=np.random.uniform(-2.0, 2.0),
+        min_tokens=np.random.randint(1, 10),
+        stop_token_ids=[
+            np.random.randint(0, VOCAB_SIZE)
+            for _ in range(np.random.randint(10))
+        ],
+        logit_bias={0: np.random.uniform(-3.0, 3.0)},
+    )


 def _construct_cached_request_state(req_id_suffix: int):
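The randomized `logit_bias={0: np.random.uniform(-3.0, 3.0)}` above mirrors what a user would pass through `SamplingParams`. A small usage sketch (the token ID and bias value are arbitrary examples, not part of this diff):

```python
from vllm import SamplingParams

# Bias token 0 downward at every sampling step; values are illustrative.
params = SamplingParams(
    temperature=0.8,
    logit_bias={0: -3.0},
)
```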
@@ -139,16 +147,18 @@ def _construct_cached_request_state(req_id_suffix: int):
         np.random.randint(0, VOCAB_SIZE)
         for _ in range(np.random.randint(0, NUM_OUTPUT_TOKENS))
     ]
-    return CachedRequestState(req_id=f"req_id_{req_id_suffix}",
-                              prompt_token_ids=prompt_token_ids,
-                              prompt=None,
-                              sampling_params=_create_sampling_params(),
-                              mm_inputs=[],
-                              mm_positions=[],
-                              block_ids=[],
-                              generator=None,
-                              num_computed_tokens=len(output_token_ids),
-                              output_token_ids=output_token_ids)
+    return CachedRequestState(
+        req_id=f"req_id_{req_id_suffix}",
+        prompt_token_ids=prompt_token_ids,
+        prompt=None,
+        sampling_params=_create_sampling_params(),
+        mm_inputs=[],
+        mm_positions=[],
+        block_ids=[],
+        generator=None,
+        num_computed_tokens=len(output_token_ids),
+        output_token_ids=output_token_ids,
+    )


 @pytest.mark.parametrize("device", CUDA_DEVICES)
@@ -163,12 +173,14 @@ def test_sampling_metadata_in_input_batch(device: str, batch_size: int):
     output of `make_sampling_metadata` is then compared against the expected
     results to ensure correctness.
     """
-    input_batch: InputBatch = InputBatch(max_num_reqs=batch_size,
-                                         max_model_len=1024,
-                                         max_num_blocks_per_req=10,
-                                         device=torch.device(device),
-                                         pin_memory=is_pin_memory_available(),
-                                         vocab_size=1024)
+    input_batch: InputBatch = InputBatch(
+        max_num_reqs=batch_size,
+        max_model_len=1024,
+        max_num_blocks_per_req=10,
+        device=torch.device(device),
+        pin_memory=is_pin_memory_available(),
+        vocab_size=1024,
+    )
     reqs: List[CachedRequestState] = []
     req_id_reqs = {}
     req_id_output_token_ids = {}
@@ -206,21 +218,27 @@ def test_sampling_metadata_in_input_batch(device: str, batch_size: int):
                           sampling_metadata.top_p)
     assert torch.allclose(expected_sampling_metadata.top_k,
                           sampling_metadata.top_k)
-    assert torch.allclose(expected_sampling_metadata.frequency_penalties,
-                          sampling_metadata.frequency_penalties)
-    assert torch.allclose(expected_sampling_metadata.presence_penalties,
-                          sampling_metadata.presence_penalties)
-    assert torch.allclose(expected_sampling_metadata.repetition_penalties,
-                          sampling_metadata.repetition_penalties)
+    assert torch.allclose(
+        expected_sampling_metadata.frequency_penalties,
+        sampling_metadata.frequency_penalties,
+    )
+    assert torch.allclose(
+        expected_sampling_metadata.presence_penalties,
+        sampling_metadata.presence_penalties,
+    )
+    assert torch.allclose(
+        expected_sampling_metadata.repetition_penalties,
+        sampling_metadata.repetition_penalties,
+    )
     assert torch.allclose(expected_sampling_metadata.prompt_token_ids,
                           sampling_metadata.prompt_token_ids)
     assert (expected_sampling_metadata.output_token_ids ==
             sampling_metadata.output_token_ids)
-    assert (
-        expected_sampling_metadata.min_tokens == sampling_metadata.min_tokens)
-    assert (expected_sampling_metadata.stop_token_ids ==
-            sampling_metadata.stop_token_ids)
-    assert (expected_sampling_metadata.no_penalties ==
-            sampling_metadata.no_penalties)
-    assert (expected_sampling_metadata.no_top_p == sampling_metadata.no_top_p)
-    assert (expected_sampling_metadata.no_top_k == sampling_metadata.no_top_k)
+    assert expected_sampling_metadata.min_tokens == sampling_metadata.min_tokens
+    assert expected_sampling_metadata.stop_token_ids == \
+        sampling_metadata.stop_token_ids
+    assert expected_sampling_metadata.no_penalties == \
+        sampling_metadata.no_penalties
+    assert expected_sampling_metadata.no_top_p == sampling_metadata.no_top_p
+    assert expected_sampling_metadata.no_top_k == sampling_metadata.no_top_k
+    assert expected_sampling_metadata.logit_bias == sampling_metadata.logit_bias
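One detail worth noting in the assertions above: the tensor-valued fields need `torch.allclose`, but the new `logit_bias` check can use plain `==` because the field is a list of optional dicts, which Python compares element-wise by value. A tiny self-contained illustration:

```python
# List/dict equality is by value, not identity, so == suffices here.
expected = [None, {0: -1.5}, {7: 2.0}]
actual = [None, {0: -1.5}, {7: 2.0}]
assert expected == actual
```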