[Frontend] [Core] Add Tensorizer support for V1, LoRA adapter serialization and deserialization (#17926)

Signed-off-by: Sanger Steel <sangersteel@gmail.com>
This commit is contained in:
Sanger Steel
2025-05-22 21:44:18 -04:00
committed by GitHub
parent c91fe7b1b9
commit c32e249a23
16 changed files with 606 additions and 197 deletions

View File

@@ -25,7 +25,7 @@ from vllm.distributed.parallel_state import (
from vllm.forward_context import get_forward_context, set_forward_context
from vllm.logger import init_logger
from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.model_loader import TensorizerLoader, get_model
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange
from vllm.multimodal.utils import group_mm_inputs_by_modality
@@ -60,6 +60,7 @@ from .utils import (gather_mm_placeholders, sanity_check_mm_encoder_outputs,
if TYPE_CHECKING:
import xgrammar as xgr
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
from vllm.v1.core.sched.output import SchedulerOutput
else:
xgr = LazyLoader("xgr", globals(), "xgrammar")
@@ -1534,6 +1535,15 @@ class GPUModelRunner(LoRAModelRunnerMixin):
time_after_load - time_before_load)
prepare_communication_buffer_for_model(self.model)
def save_tensorized_model(
    self,
    tensorizer_config: "TensorizerConfig",
) -> None:
    """Serialize the loaded model to disk via the tensorizer loader.

    Delegates to ``TensorizerLoader.save_model`` with the model held by
    this runner and the supplied tensorizer configuration.
    """
    model = self.model
    TensorizerLoader.save_model(model, tensorizer_config=tensorizer_config)
def _get_prompt_logprobs_dict(
self,
hidden_states: torch.Tensor,

View File

@@ -31,6 +31,7 @@ from vllm.v1.worker.worker_base import WorkerBase
logger = init_logger(__name__)
if TYPE_CHECKING:
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
from vllm.v1.core.sched.output import SchedulerOutput
@@ -326,6 +327,13 @@ class Worker(WorkerBase):
max_size=max_size,
)
def save_tensorized_model(
    self,
    tensorizer_config: "TensorizerConfig",
) -> None:
    """Forward a tensorizer serialization request to the model runner.

    The actual serialization logic lives in the runner's
    ``save_tensorized_model``; this worker-level method is a thin pass-through.
    """
    runner = self.model_runner
    runner.save_tensorized_model(tensorizer_config=tensorizer_config)
def init_worker_distributed_environment(
vllm_config: VllmConfig,