[Misc] Remove unnecessary detokenization in multimodal processing (#12868)

This commit is contained in:
Cyrus Leung
2025-02-07 22:21:17 +08:00
committed by GitHub
parent 1918aa1b80
commit ce26b16268
4 changed files with 7 additions and 10 deletions

View File

@@ -92,7 +92,7 @@ async def test_single_chat_session_image(client: openai.AsyncOpenAI,
     choice = chat_completion.choices[0]
     assert choice.finish_reason == "length"
     assert chat_completion.usage == openai.types.CompletionUsage(
-        completion_tokens=10, prompt_tokens=775, total_tokens=785)
+        completion_tokens=10, prompt_tokens=774, total_tokens=784)
     message = choice.message
     message = chat_completion.choices[0].message
@@ -185,7 +185,7 @@ async def test_single_chat_session_image_base64encoded(
     choice = chat_completion.choices[0]
     assert choice.finish_reason == "length"
     assert chat_completion.usage == openai.types.CompletionUsage(
-        completion_tokens=10, prompt_tokens=775, total_tokens=785)
+        completion_tokens=10, prompt_tokens=774, total_tokens=784)
     message = choice.message
     message = chat_completion.choices[0].message