[Misc] Avoid misleading warning messages (#10438)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Author: Jee Jee Li
Date: 2024-11-19 16:54:58 +08:00
Committed by: GitHub
Parent: 272e31c0bd
Commit: 382b6a4852
2 changed files with 7 additions and 8 deletions
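
Before the diff: a minimal, self-contained sketch of the problem the commit title points at, using stand-in classes rather than vLLM's real interface code. When the shared base class itself carries the multimodal marker, a capability check keyed on that marker treats the text-only subclass as multimodal too, so any warning tied to the check fires even for plain-text Qwen models. The class names mirror the diff; the checker function and the log message are hypothetical.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("sketch")


class SupportsMultiModal:
    """Stand-in capability marker: 'this model accepts image inputs'."""


class QWenBaseModel(SupportsMultiModal):
    """Pre-commit layout: the shared base class carries the marker."""


class QWenLLM(QWenBaseModel):
    """Text-only variant, yet it still inherits the marker from the base."""


def warn_if_multimodal(model_cls: type) -> None:
    # Hypothetical guard standing in for the real capability check:
    # anything that inherits the marker is treated as multimodal.
    if issubclass(model_cls, SupportsMultiModal):
        logger.warning("%s is treated as a multimodal model.",
                       model_cls.__name__)


warn_if_multimodal(QWenLLM)  # warns, although QWenLLM is text-only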


@@ -870,7 +870,7 @@ def dummy_data_for_qwen(
     return DummyData(seq_data, mm_data)
 
 
-class QWenBaseModel(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA):
+class QWenBaseModel(nn.Module, SupportsPP, SupportsLoRA):
 
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         super().__init__()
@@ -1024,7 +1024,7 @@ class QWenLLM(QWenBaseModel):
     embedding_padding_modules = []
 
 
-class QWenVL(QWenBaseModel):
+class QWenVL(QWenBaseModel, SupportsMultiModal):
     packed_modules_mapping = {
         "c_attn": ["c_attn"],
         "gate_up_proj": [
@@ -1062,7 +1062,7 @@ class QWenVL(QWenBaseModel):
 @MULTIMODAL_REGISTRY.register_max_image_tokens(MAX_QWEN_IMG_TOKENS)
 @INPUT_REGISTRY.register_dummy_data(dummy_data_for_qwen)
 @INPUT_REGISTRY.register_input_processor(input_processor_for_qwen)
-class QWenLMHeadModel(QWenBaseModel, SupportsLoRA):
+class QWenLMHeadModel(QWenBaseModel, SupportsMultiModal, SupportsLoRA):
     """
     QWenLMHeadModel is not only applicable to LLM but also to VL, which is not
     conducive to the current integration logic of LoRA in vLLM. Therefore, it
@@ -1083,7 +1083,7 @@ class QWenLMHeadModel(QWenBaseModel, SupportsLoRA):
         config = vllm_config.model_config.hf_config
         # Initialize VL
         if hasattr(config, "visual"):
-            return QWenVL(vllm_config=vllm_config)
+            return QWenVL(vllm_config=vllm_config, prefix=prefix)
         # Initialize LLM
         else:
-            return QWenLLM(vllm_config=vllm_config)
+            return QWenLLM(vllm_config=vllm_config, prefix=prefix)
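
With the hierarchy above, only the classes that can actually serve vision-language checkpoints advertise SupportsMultiModal, so a capability check keyed on that marker stays quiet for the text-only path. A continuation of the stand-in sketch (hypothetical simplification, not vLLM test code):

class SupportsMultiModal:
    """Same stand-in capability marker as in the sketch above."""


class QWenBaseModel:
    """Post-commit layout: the base class no longer carries the marker."""


class QWenLLM(QWenBaseModel):
    """Text-only variant: no longer looks multimodal to capability checks."""


class QWenVL(QWenBaseModel, SupportsMultiModal):
    """Vision-language variant keeps the marker."""


class QWenLMHeadModel(QWenBaseModel, SupportsMultiModal):
    """Dispatcher that may construct either variant, so it keeps the marker."""


assert not issubclass(QWenLLM, SupportsMultiModal)
assert issubclass(QWenVL, SupportsMultiModal)
assert issubclass(QWenLMHeadModel, SupportsMultiModal)

The prefix=prefix additions in the last hunk simply forward the caller's prefix to whichever subclass gets constructed.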