Extend renderer with embedding support and integrate completion endpoint (#24405)
Signed-off-by: sfeng33 <4florafeng@gmail.com>
@@ -1,13 +1,17 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import io
from dataclasses import dataclass
from typing import Optional
from unittest.mock import AsyncMock, MagicMock

import pybase64
import pytest
import torch

from vllm.entrypoints.renderer import CompletionRenderer
from vllm.inputs.data import is_embeds_prompt


@dataclass
@@ -178,3 +182,132 @@ class TestRenderPrompt:
        with pytest.raises(ValueError, match="No tokenizer available"):
            await renderer_no_tokenizer.render_prompt(
                prompt_or_prompts="Hello world", max_length=100)

    @pytest.mark.asyncio
    async def test_token_input_with_needs_detokenization(
            self, renderer, mock_async_tokenizer):
        # When needs_detokenization=True for token inputs, the renderer
        # should use the async tokenizer to decode and include the original
        # text in the returned prompt object.
        mock_async_tokenizer.decode = AsyncMock(return_value="decoded text")
        renderer.async_tokenizer_pool[
            renderer.tokenizer] = mock_async_tokenizer

        tokens = [1, 2, 3, 4]
        results = await renderer.render_prompt(
            prompt_or_prompts=tokens,
            needs_detokenization=True,
        )

        assert len(results) == 1
        assert results[0]["prompt_token_ids"] == tokens
        assert results[0]["prompt"] == "decoded text"
        mock_async_tokenizer.decode.assert_awaited_once()


class TestRenderEmbedPrompt:

    def _create_test_embed_bytes(self, tensor: torch.Tensor) -> bytes:
        """Helper to create base64-encoded tensor bytes"""
        buffer = io.BytesIO()
        torch.save(tensor, buffer)
        buffer.seek(0)
        return pybase64.b64encode(buffer.read())

    @pytest.mark.asyncio
    async def test_single_prompt_embed(self, renderer):
        # Create a test tensor
        test_tensor = torch.randn(10, 768, dtype=torch.float32)
        embed_bytes = self._create_test_embed_bytes(test_tensor)

        results = await renderer.render_prompt_and_embeds(
            prompt_embeds=embed_bytes, cache_salt="test_salt")

        assert len(results) == 1
        assert is_embeds_prompt(results[0])
        assert torch.allclose(results[0]["prompt_embeds"], test_tensor)
        assert results[0]["cache_salt"] == "test_salt"

    @pytest.mark.asyncio
    async def test_multiple_prompt_embeds(self, renderer):
        # Create multiple test tensors
        test_tensors = [
            torch.randn(8, 512, dtype=torch.float32),
            torch.randn(12, 512, dtype=torch.float32),
        ]
        embed_bytes_list = [
            self._create_test_embed_bytes(t) for t in test_tensors
        ]

        results = await renderer.render_prompt_and_embeds(
            prompt_embeds=embed_bytes_list)

        assert len(results) == 2
        for i, result in enumerate(results):
            assert is_embeds_prompt(result)
            assert torch.allclose(result["prompt_embeds"], test_tensors[i])

    @pytest.mark.asyncio
    async def test_prompt_embed_truncation(self, renderer):
        # Create a tensor with more tokens than the truncation limit
        test_tensor = torch.randn(20, 768, dtype=torch.float32)
        embed_bytes = self._create_test_embed_bytes(test_tensor)

        results = await renderer.render_prompt_and_embeds(
            prompt_embeds=embed_bytes, truncate_prompt_tokens=10)

        assert len(results) == 1
        # Should keep the last 10 tokens
        expected = test_tensor[-10:]
        assert torch.allclose(results[0]["prompt_embeds"], expected)

    @pytest.mark.asyncio
    async def test_prompt_embed_different_dtypes(self, renderer):
        # Test the supported dtypes
        dtypes = [torch.float32, torch.float16, torch.bfloat16]

        for dtype in dtypes:
            test_tensor = torch.randn(5, 256, dtype=dtype)
            embed_bytes = self._create_test_embed_bytes(test_tensor)

            results = await renderer.render_prompt_and_embeds(
                prompt_embeds=embed_bytes)

            assert len(results) == 1
            assert results[0]["prompt_embeds"].dtype == dtype

    @pytest.mark.asyncio
    async def test_prompt_embed_squeeze_batch_dim(self, renderer):
        # A tensor with a leading batch dimension should get squeezed
        test_tensor = torch.randn(1, 10, 768, dtype=torch.float32)
        embed_bytes = self._create_test_embed_bytes(test_tensor)

        results = await renderer.render_prompt_and_embeds(
            prompt_embeds=embed_bytes)

        assert len(results) == 1
        # Should be squeezed to 2D
        assert results[0]["prompt_embeds"].shape == (10, 768)

    @pytest.mark.asyncio
    async def test_both_prompts_and_embeds(self, renderer,
                                           mock_async_tokenizer):
        # Set up text tokenization
        mock_async_tokenizer.return_value = MockTokenizerResult(
            [101, 102, 103])
        renderer.async_tokenizer_pool[
            renderer.tokenizer] = mock_async_tokenizer

        # Create an embed
        test_tensor = torch.randn(5, 256, dtype=torch.float32)
        embed_bytes = self._create_test_embed_bytes(test_tensor)

        results = await renderer.render_prompt_and_embeds(
            prompt_or_prompts="Hello world", prompt_embeds=embed_bytes)

        assert len(results) == 2
        # First should be the embed prompt
        assert is_embeds_prompt(results[0])
        # Second should be the tokens prompt
        assert "prompt_token_ids" in results[1]
        assert results[1]["prompt_token_ids"] == [101, 102, 103]
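
For reference, the render_prompt_and_embeds path exercised above decodes the inverse of _create_test_embed_bytes. Below is a minimal sketch of the same encoding convention on the client side (torch.save into an in-memory buffer, then pybase64.b64encode); the function name encode_prompt_embeds and the final .decode("utf-8") step for JSON transport are illustrative assumptions, not part of this diff:

import io

import pybase64
import torch


def encode_prompt_embeds(tensor: torch.Tensor) -> str:
    # Serialize a (seq_len, hidden_size) tensor exactly as the test helper
    # does: torch.save into an in-memory buffer, then base64-encode it.
    buffer = io.BytesIO()
    torch.save(tensor, buffer)
    buffer.seek(0)
    # Decoding to str is an assumption for embedding the payload in a JSON
    # request body; the tests above pass the raw base64 bytes directly.
    return pybase64.b64encode(buffer.read()).decode("utf-8")


# Example: a 10-token prompt with hidden size 768, matching
# test_single_prompt_embed above.
payload = encode_prompt_embeds(torch.randn(10, 768, dtype=torch.float32))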