[Core][Bugfix][Perf] Introduce MQLLMEngine to avoid asyncio overhead (#8157)

Co-authored-by: Nick Hill <nickhill@us.ibm.com>
Co-authored-by: rshaw@neuralmagic.com <rshaw@neuralmagic.com>
Co-authored-by: Robert Shaw <114415538+robertgshaw2-neuralmagic@users.noreply.github.com>
Co-authored-by: Simon Mo <simon.mo@hey.com>
Alexander Matveev authored 2024-09-18 09:56:58 -04:00, committed by GitHub
parent 9d104b5beb, commit 7c7714d856
36 changed files with 1464 additions and 1169 deletions


@@ -1,7 +1,7 @@
 from typing import List, Optional, Union

 from vllm.config import ModelConfig
-from vllm.engine.protocol import AsyncEngineClient
+from vllm.engine.protocol import EngineClient
 from vllm.entrypoints.chat_utils import (apply_hf_chat_template,
                                          apply_mistral_chat_template,
                                          load_chat_template,
@@ -29,7 +29,7 @@ class OpenAIServingTokenization(OpenAIServing):

     def __init__(
         self,
-        async_engine_client: AsyncEngineClient,
+        engine_client: EngineClient,
         model_config: ModelConfig,
         served_model_names: List[str],
         *,
@@ -37,7 +37,7 @@ class OpenAIServingTokenization(OpenAIServing):
         request_logger: Optional[RequestLogger],
         chat_template: Optional[str],
     ):
-        super().__init__(async_engine_client=async_engine_client,
+        super().__init__(engine_client=engine_client,
                          model_config=model_config,
                          served_model_names=served_model_names,
                          lora_modules=lora_modules,
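
To see the call-site impact of the constructor hunks above, here is a minimal runnable sketch; every name in it is a hypothetical stub standing in for the vLLM types this diff touches, not code from the commit itself:

from typing import Any, List, Optional

# Hedged sketch: StubEngineClient and StubServingTokenization are hypothetical
# stand-ins for vllm.engine.protocol.EngineClient and OpenAIServingTokenization.
class StubEngineClient:
    """Placeholder for the renamed EngineClient protocol."""

class StubServingTokenization:
    """Mirrors the post-rename constructor shown in the diff above."""
    def __init__(self,
                 engine_client: StubEngineClient,  # was: async_engine_client
                 model_config: Any,
                 served_model_names: List[str],
                 *,
                 lora_modules: Optional[Any],
                 request_logger: Optional[Any],
                 chat_template: Optional[str]):
        self.engine_client = engine_client  # the attribute is renamed as well

serving = StubServingTokenization(
    engine_client=StubEngineClient(),  # keyword must match the new name
    model_config=None,
    served_model_names=["demo-model"],
    lora_modules=None,
    request_logger=None,
    chat_template=None,
)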
@@ -66,7 +66,7 @@ class OpenAIServingTokenization(OpenAIServing):
             prompt_adapter_request,
         ) = self._maybe_get_adapters(request)

-        tokenizer = await self.async_engine_client.get_tokenizer(lora_request)
+        tokenizer = await self.engine_client.get_tokenizer(lora_request)

         prompt: Union[str, List[int]]
         if isinstance(request, TokenizeChatRequest):
@@ -132,7 +132,7 @@ class OpenAIServingTokenization(OpenAIServing):
             prompt_adapter_request,
         ) = self._maybe_get_adapters(request)

-        tokenizer = await self.async_engine_client.get_tokenizer(lora_request)
+        tokenizer = await self.engine_client.get_tokenizer(lora_request)

         self._log_inputs(request_id,
                          request.tokens,
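
The two tokenizer hunks change the handler-side access pattern in the same way: the awaited client lives at self.engine_client instead of self.async_engine_client. A self-contained sketch of that pattern follows; only the get_tokenizer coroutine mirrors an API actually shown in the diff, and every other name is an assumed stub:

import asyncio
from typing import Any, Optional

class StubTokenizer:
    def encode(self, text: str) -> list:
        return list(text.encode("utf-8"))

class StubEngineClient:
    # Only this coroutine mirrors the EngineClient API used in the diff.
    async def get_tokenizer(self, lora_request: Optional[Any]) -> StubTokenizer:
        return StubTokenizer()

class StubHandler:
    def __init__(self, engine_client: StubEngineClient):
        self.engine_client = engine_client  # was: self.async_engine_client

    async def tokenize(self, text: str) -> list:
        # Post-rename access, matching the rewritten lines above.
        tokenizer = await self.engine_client.get_tokenizer(None)
        return tokenizer.encode(text)

print(asyncio.run(StubHandler(StubEngineClient()).tokenize("hi")))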