[V1] VLM preprocessor hashing (#11020)
Signed-off-by: Roger Wang <ywang@roblox.com> Signed-off-by: Alexander Matveev <alexm@neuralmagic.com> Co-authored-by: Michael Goin <michael@neuralmagic.com> Co-authored-by: Roger Wang <ywang@roblox.com>
This commit is contained in:
committed by
GitHub
parent
452a723bf2
commit
4e11683368
@@ -143,6 +143,7 @@ class EngineArgs:
     tokenizer_pool_extra_config: Optional[Dict[str, Any]] = None
     limit_mm_per_prompt: Optional[Mapping[str, int]] = None
     mm_processor_kwargs: Optional[Dict[str, Any]] = None
+    mm_cache_preprocessor: bool = False
     enable_lora: bool = False
     enable_lora_bias: bool = False
     max_loras: int = 1
@@ -593,6 +594,12 @@ class EngineArgs:
             type=json.loads,
             help=('Overrides for the multimodal input mapping/processing, '
                   'e.g., image processor. For example: {"num_crops": 4}.'))
+        parser.add_argument(
+            '--mm-cache-preprocessor',
+            action='store_true',
+            help='If true, then enables caching of the multi-modal '
+            'preprocessor/mapper. Otherwise, the mapper executes each time'
+            ', and for better performance consider enabling frontend process.')

         # LoRA related configs
         parser.add_argument('--enable-lora',
@@ -965,6 +972,7 @@ class EngineArgs:
             use_async_output_proc=not self.disable_async_output_proc,
             config_format=self.config_format,
             mm_processor_kwargs=self.mm_processor_kwargs,
+            mm_cache_preprocessor=self.mm_cache_preprocessor,
             override_neuron_config=self.override_neuron_config,
             override_pooler_config=self.override_pooler_config,
         )
Reference in New Issue
Block a user