[Frontend][VLM] Add support for multiple multi-modal items (#8049)

This commit is contained in:
Roger Wang
2024-08-31 16:35:53 -07:00
committed by GitHub
parent 8423aef4c8
commit 5231f0898e
8 changed files with 524 additions and 136 deletions

View File

@@ -65,10 +65,10 @@ class OpenAIServingTokenization(OpenAIServing):
         if isinstance(request, TokenizeChatRequest):
             model_config = self.model_config
-            conversation, mm_futures = parse_chat_messages(
+            conversation, mm_data_future = parse_chat_messages(
                 request.messages, model_config, tokenizer)
-            if mm_futures:
+            if mm_data_future:
                 logger.warning(
                     "Multi-modal inputs are ignored during tokenization")