nemotron-nano-vl: Allow use_audio_in_video to be passed at vllm serve time (#38538)

Signed-off-by: Andrii Skliar <askliar@nvidia.com>
Co-authored-by: Andrii Skliar <askliar@nvidia.com>
This commit is contained in:
Andrii Skliar
2026-04-09 13:44:39 +02:00
committed by GitHub
parent c8d98f81f6
commit df2503e125
2 changed files with 74 additions and 18 deletions

View File

@@ -597,19 +597,26 @@ class NanoNemotronVLMultiModalProcessor(
def _extract_audio_from_videos(
self,
mm_items: MultiModalDataItems,
- ) -> tuple[MultiModalDataItems, list[AudioItem]]:
+ ) -> tuple[MultiModalDataItems, list[AudioItem], list[bool]]:
"""Extract audio tracks from video bytes in *mm_items*.
Videos whose bytes are missing or that contain no audio stream are
silently skipped. The returned *has_audio* mask is aligned with
the video list so callers know which ``<video>`` tokens need an
accompanying audio context.
Returns:
- The augmented *mm_items* (with audio added) and the list of
- extracted audio items.
+ A 3-tuple of (augmented mm_items, extracted audio items,
+ per-video boolean mask indicating which videos have audio).
"""
videos = mm_items.get_items("video", VideoProcessorItems)
assert isinstance(videos.metadata, list)
metadata_list = videos.metadata
audio_items: list[AudioItem] = []
- for metadata in metadata_list:
+ has_audio: list[bool] = []
+ for idx, metadata in enumerate(metadata_list):
video_bytes = metadata.get("original_video_bytes")
if video_bytes is None or len(video_bytes) == 0:
raise ValueError(
@@ -618,7 +625,16 @@ class NanoNemotronVLMultiModalProcessor(
"video must be loaded with keep_video_bytes=True (e.g. via "
"the chat API with a model that sets use_audio_in_video)."
)
- audio_items.append(load_audio_pyav(BytesIO(video_bytes)))
+ try:
+     audio_items.append(load_audio_pyav(BytesIO(video_bytes)))
+     has_audio.append(True)
+ except Exception:
+     logger.debug(
+         "Video %d: no audio stream found, skipping audio extraction.",
+         idx,
+         exc_info=True,
+     )
+     has_audio.append(False)
# Create a new VideoProcessorItems with metadata that does not contain
# the large video bytes, to avoid modifying the input `mm_items`.
@@ -628,45 +644,83 @@ class NanoNemotronVLMultiModalProcessor(
]
new_videos = VideoProcessorItems(data=videos.data, metadata=new_metadata_list)
- audio_parsed = self.data_parser.parse_mm_data({"audio": audio_items})
+ audio_parsed = {}
+ if audio_items:
+     audio_parsed = self.data_parser.parse_mm_data({"audio": audio_items})
# Create a new MultiModalDataItems with the new video and audio items.
new_mm_items_dict = {**mm_items, **audio_parsed, "video": new_videos}
mm_items = MultiModalDataItems(new_mm_items_dict)
- return mm_items, audio_items
+ return mm_items, audio_items, has_audio
def apply(
self,
inputs: ProcessorInputs,
timing_ctx: TimingContext,
) -> MultiModalInput:
use_audio_in_video = bool(
inputs.hf_processor_mm_kwargs.get("use_audio_in_video", False)
mm_config = self.info.ctx.model_config.get_multimodal_config()
merged_kwargs = mm_config.merge_mm_processor_kwargs(
inputs.hf_processor_mm_kwargs
)
use_audio_in_video = bool(merged_kwargs.get("use_audio_in_video", False))
inputs.hf_processor_mm_kwargs = {
k: v
for k, v in inputs.hf_processor_mm_kwargs.items()
if k != "use_audio_in_video"
}
- if not (
-     use_audio_in_video
-     and "video" in inputs.mm_data_items
-     and "audio" not in inputs.mm_data_items
- ):
+ if not (use_audio_in_video and "video" in inputs.mm_data_items):
return super().apply(inputs, timing_ctx)
- mm_items, audio_items = self._extract_audio_from_videos(inputs.mm_data_items)
- inputs.mm_data_items = mm_items
mm_items = inputs.mm_data_items
if "audio" in mm_items:
# Audio was pre-populated by upstream (e.g., OpenAI chat endpoint).
# Reuse existing audio items; validate 1:1 correspondence.
videos = mm_items.get_items("video", VideoProcessorItems)
audios = mm_items.get_items("audio", AudioProcessorItems)
if len(audios) != len(videos):
raise ValueError(
"use_audio_in_video requires equal number of audio and "
f"video items, got num_audios={len(audios)}, "
f"num_videos={len(videos)}"
)
audio_items = audios.get_all()
has_audio = [True] * len(videos)
logger.info(
"Using %d pre-populated audio item(s) from upstream.",
len(audio_items),
)
else:
# Extract audio from video bytes (library usage path).
mm_items, audio_items, has_audio = self._extract_audio_from_videos(mm_items)
inputs.mm_data_items = mm_items
logger.info(
"Extracted audio from video bytes: %d audio(s), has_audio=%s.",
len(audio_items),
has_audio,
)
if not audio_items:
return super().apply(inputs, timing_ctx)
prompt = inputs.prompt
tokenizer = self.info.get_tokenizer()
if not isinstance(prompt, str):
prompt = tokenizer.decode(prompt, skip_special_tokens=False)
- for _ in audio_items:
-     prompt = prompt.replace("<video>", "<video>" + AUDIO_CONTEXT, 1)
# Inject AUDIO_CONTEXT only after <video> tokens whose video
# actually contained an audio stream (preserving video-audio pairing).
tag = "<video>"
head, *rest = prompt.split(tag)
rebuilt = [head]
for append_audio, part in zip(has_audio, rest, strict=True):
rebuilt.append(tag)
if append_audio:
rebuilt.append(AUDIO_CONTEXT)
rebuilt.append(part)
prompt = "".join(rebuilt)
inputs.prompt = tokenizer.encode(prompt, add_special_tokens=False)

View File

@@ -771,6 +771,7 @@ class NanoNemotronVLProcessor(BaseNanoNemotronVLProcessor):
max_num_tiles: int | None = None,
video_token: str | None = None,
video_pruning_rate: float | None = None,
+ use_audio_in_video: bool = False,
) -> None:
super().__init__(
config=config,
@@ -781,6 +782,7 @@ class NanoNemotronVLProcessor(BaseNanoNemotronVLProcessor):
# add extra video token for video processing
self.video_token = video_token
self.video_pruning_rate = video_pruning_rate
+ self.use_audio_in_video = use_audio_in_video
# Video params live exclusively in vision_config
vision_config = getattr(config, "vision_config", config)