[Core] Make encoder-decoder inputs a nested structure to be more composable (#9604)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
@@ -3,6 +3,7 @@ from typing import Any, Dict, Generator, List, Optional

 import pytest
 from transformers import AutoTokenizer

+from vllm.inputs import token_inputs
 from vllm.sequence import Logprob, SamplingParams, Sequence, SequenceGroup
 from vllm.transformers_utils.detokenizer import (Detokenizer,
                                                  detokenize_incrementally)
@@ -169,10 +170,7 @@ def create_sequence(prompt_token_ids=None):
     prompt_token_ids = prompt_token_ids or [1]
     return Sequence(
         seq_id=0,
-        inputs={
-            "prompt": "<s>",
-            "prompt_token_ids": prompt_token_ids,
-        },
+        inputs=token_inputs(prompt_token_ids, prompt="<s>"),
         block_size=16,
     )
Reference in New Issue
Block a user