[Backport] [Kimi-K2.5] Replace torch.cuda with current_platform for d… (#33320)
This commit is contained in:
@@ -58,6 +58,7 @@ from vllm.multimodal.processing import (
     PromptReplacement,
     PromptUpdate,
 )
+from vllm.platforms import current_platform
 from vllm.sequence import IntermediateTensors
 from vllm.transformers_utils.configs import KimiK25Config
 from vllm.transformers_utils.processor import cached_get_image_processor
@@ -320,7 +321,7 @@ class KimiK25ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP)
             model_config.multimodal_config.mm_encoder_tp_mode == "data"
         )
         self.hidden_size = config.text_config.hidden_size
-        self.device = torch.cuda.current_device()
+        self.device = current_platform.current_device()
         # Build vision tower directly with KimiK25VisionConfig
         self.vision_tower = MoonViT3dPretrainedModel(
             config.vision_config,
Reference in New Issue
Block a user