[Frontend] Batch inference for llm.chat() API (#8648)
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
Co-authored-by: Roger Wang <ywang@roblox.com>
Co-authored-by: Roger Wang <136131678+ywang96@users.noreply.github.com>
@@ -485,7 +485,8 @@ class LLM:
     def chat(
         self,
-        messages: List[ChatCompletionMessageParam],
+        messages: Union[List[ChatCompletionMessageParam],
+                        List[List[ChatCompletionMessageParam]]],
         sampling_params: Optional[Union[SamplingParams,
                                         List[SamplingParams]]] = None,
         use_tqdm: bool = True,
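The signature change above is the core of the batching support: messages now accepts either one conversation or a list of conversations, and sampling_params may be a single value applied to every prompt or a per-prompt list. A minimal usage sketch (the model name is a placeholder, not part of this commit):

    from vllm import LLM, SamplingParams

    # Placeholder model, used only for illustration.
    llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")

    # A single conversation works as before...
    outputs = llm.chat([{"role": "user", "content": "Hello!"}])

    # ...and a list of conversations is now rendered and generated as one batch.
    outputs = llm.chat(
        [
            [{"role": "user", "content": "Hello!"}],
            [{"role": "user", "content": "Tell me a joke."}],
        ],
        sampling_params=SamplingParams(temperature=0.5),
    )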
@@ -505,8 +506,9 @@ class LLM:
         to the OpenAI API.

         Args:
-            messages: A single conversation represented as a list of messages.
-                Each message is a dictionary with 'role' and 'content' keys.
+            messages: A list of conversations or a single conversation.
+                - Each conversation is represented as a list of messages.
+                - Each message is a dictionary with 'role' and 'content' keys.
             sampling_params: The sampling parameters for text generation.
                 If None, we use the default sampling parameters. When it
                 is a single value, it is applied to every prompt. When it
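The pairing rule the docstring describes is worth spelling out: a single SamplingParams is broadcast across the batch, while a list must match the number of conversations and is paired with them element-wise. A sketch, reusing the llm object from the previous example:

    conversations = [
        [{"role": "user", "content": "Summarize the plot of Hamlet."}],
        [{"role": "user", "content": "Write a haiku about autumn."}],
    ]
    params = [
        SamplingParams(temperature=0.0, max_tokens=32),   # paired with conversations[0]
        SamplingParams(temperature=0.8, max_tokens=128),  # paired with conversations[1]
    ]
    outputs = llm.chat(conversations, sampling_params=params)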
@@ -523,42 +525,56 @@ class LLM:
             A list of ``RequestOutput`` objects containing the generated
             responses in the same order as the input messages.
         """
+        list_of_messages: List[List[ChatCompletionMessageParam]]

-        tokenizer = self.get_tokenizer()
-        model_config = self.llm_engine.get_model_config()
-
-        conversation, mm_data = parse_chat_messages(messages, model_config,
-                                                    tokenizer)
-
-        prompt: Union[str, List[int]]
-        if isinstance(tokenizer, MistralTokenizer):
-            prompt = apply_mistral_chat_template(
-                tokenizer,
-                messages=messages,
-                chat_template=chat_template,
-                add_generation_prompt=add_generation_prompt,
-                tools=tools,
-            )
+        # Handle multi and single conversations
+        if is_list_of(messages, list):
+            # messages is List[List[...]]
+            list_of_messages = messages
         else:
-            prompt = apply_hf_chat_template(
-                tokenizer,
-                conversation=conversation,
-                chat_template=chat_template,
-                add_generation_prompt=add_generation_prompt,
-                tools=tools,
-            )
+            # messages is List[...]
+            list_of_messages = [messages]

-        inputs: PromptInputs
-        if is_list_of(prompt, int):
-            inputs = TokensPrompt(prompt_token_ids=prompt)
-        else:
-            inputs = TextPrompt(prompt=prompt)
+        prompts: List[Union[TokensPrompt, TextPrompt]] = []

-        if mm_data is not None:
-            inputs["multi_modal_data"] = mm_data
+        for msgs in list_of_messages:
+            tokenizer = self.get_tokenizer()
+            model_config = self.llm_engine.get_model_config()
+
+            conversation, mm_data = parse_chat_messages(
+                msgs, model_config, tokenizer)
+
+            prompt_data: Union[str, List[int]]
+            if isinstance(tokenizer, MistralTokenizer):
+                prompt_data = apply_mistral_chat_template(
+                    tokenizer,
+                    messages=msgs,
+                    chat_template=chat_template,
+                    add_generation_prompt=add_generation_prompt,
+                    tools=tools,
+                )
+            else:
+                prompt_data = apply_hf_chat_template(
+                    tokenizer,
+                    conversation=conversation,
+                    chat_template=chat_template,
+                    add_generation_prompt=add_generation_prompt,
+                    tools=tools,
+                )
+
+            prompt: Union[TokensPrompt, TextPrompt]
+            if is_list_of(prompt_data, int):
+                prompt = TokensPrompt(prompt_token_ids=prompt_data)
+            else:
+                prompt = TextPrompt(prompt=prompt_data)
+
+            if mm_data is not None:
+                prompt["multi_modal_data"] = mm_data
+
+            prompts.append(prompt)

         return self.generate(
-            inputs,
+            prompts,
             sampling_params=sampling_params,
             use_tqdm=use_tqdm,
             lora_request=lora_request,
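Taken together, the new body normalizes the input to a list of conversations, renders each one to a prompt (token IDs via the Mistral chat template, text via the HF template otherwise), attaches any multimodal data, and forwards the whole batch to generate(), which returns outputs in input order. A rough standalone sketch of just the normalize-then-batch step (the helper below is illustrative, standing in for vLLM's is_list_of check, not code from this commit):

    from typing import Any, Dict, List, Union

    Message = Dict[str, Any]        # {"role": ..., "content": ...}
    Conversation = List[Message]

    def as_batch(
            messages: Union[Conversation, List[Conversation]],
    ) -> List[Conversation]:
        # A list whose elements are all lists is already a batch of
        # conversations; a flat list of message dicts becomes a batch of one.
        if messages and all(isinstance(m, list) for m in messages):
            return messages
        return [messages]

    # Both input shapes normalize to a batch.
    assert as_batch([{"role": "user", "content": "hi"}]) == [
        [{"role": "user", "content": "hi"}]
    ]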