Revert "[Renderer] Separate out RendererConfig from ModelConfig (#30145)" (#30199)

This commit is contained in:
Cyrus Leung
2025-12-07 16:00:22 +08:00
committed by GitHub
parent 27f4c2fd46
commit e83b7e379c
105 changed files with 797 additions and 969 deletions

View File

@@ -19,7 +19,6 @@ from vllm.config import (
DeviceConfig,
ModelConfig,
ParallelConfig,
-RendererConfig,
SchedulerConfig,
SpeculativeConfig,
VllmConfig,
@@ -62,7 +61,6 @@ def _create_proposer(
vllm_config = VllmConfig(
model_config=model_config,
-renderer_config=RendererConfig(model_config=model_config),
cache_config=CacheConfig(),
speculative_config=speculative_config,
device_config=DeviceConfig(device=current_platform.device_type),

View File

@@ -18,7 +18,6 @@ from vllm.config import (
DeviceConfig,
ModelConfig,
ParallelConfig,
-RendererConfig,
SchedulerConfig,
SpeculativeConfig,
VllmConfig,
@@ -47,7 +46,6 @@ def _create_mtp_proposer(num_speculative_tokens: int) -> EagleProposer:
vllm_config = VllmConfig(
model_config=model_config,
-renderer_config=RendererConfig(model_config=model_config),
cache_config=CacheConfig(),
speculative_config=speculative_config,
device_config=DeviceConfig(device=current_platform.device_type),

View File

@@ -4,7 +4,6 @@ import numpy as np
from vllm.config import (
ModelConfig,
-RendererConfig,
SpeculativeConfig,
VllmConfig,
)
@@ -70,7 +69,6 @@ def test_ngram_proposer():
return NgramProposer(
vllm_config=VllmConfig(
model_config=model_config,
-renderer_config=RendererConfig(model_config=model_config),
speculative_config=SpeculativeConfig(
prompt_lookup_min=min_n,
prompt_lookup_max=max_n,