[Model] Enable LoRA support for tower and connector in GLM4-V (#31652)
Signed-off-by: Zyyeric <eric1976808123@gmail.com> Co-authored-by: Jee Jee Li <pandaleefree@gmail.com>
This commit is contained in:
@@ -1788,6 +1788,20 @@ class Glm4vForConditionalGeneration(
|
||||
tower_model="visual.",
|
||||
)
|
||||
|
||||
def get_num_mm_encoder_tokens(
    self,
    num_image_tokens: int,
) -> int:
    """Return the number of vision-encoder tokens for ``num_image_tokens``.

    Each post-merge image token corresponds to a ``spatial_merge_size`` x
    ``spatial_merge_size`` patch grid in the vision encoder, so the encoder
    sees the token count scaled up by that square factor.
    """
    spatial_merge = self.config.vision_config.spatial_merge_size
    patches_per_token = spatial_merge * spatial_merge
    return num_image_tokens * patches_per_token
|
||||
|
||||
def get_num_mm_connector_tokens(
    self,
    num_vision_tokens: int,
) -> int:
    """Return the number of tokens emitted by the multimodal connector.

    The connector merges every ``spatial_merge_size`` x ``spatial_merge_size``
    group of vision-encoder tokens into one, i.e. the inverse of the encoder
    expansion, using floor division.
    """
    spatial_merge = self.config.vision_config.spatial_merge_size
    patches_per_token = spatial_merge * spatial_merge
    return num_vision_tokens // patches_per_token
|
||||
|
||||
|
||||
@MULTIMODAL_REGISTRY.register_processor(
|
||||
Glm4vMultiModalProcessor,
|
||||
|
||||
Reference in New Issue
Block a user