[Bugfix] Fix broken vision language example (#14292)

Signed-off-by: Isotr0py <2037008807@qq.com>
This commit is contained in:
Isotr0py
2025-03-05 23:57:10 +08:00
committed by GitHub
parent 8f808cf86e
commit f71b00a19e

View File

@@ -152,15 +152,13 @@ def run_h2ovl(questions: list[str], modality: str):
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)

     # Stop tokens for H2OVL-Mississippi
     # https://huggingface.co/h2oai/h2ovl-mississippi-800m
@@ -209,15 +207,13 @@ def run_internvl(questions: list[str], modality: str):
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    prompts = [
-        tokenizer.apply_chat_template([{
-            'role': 'user',
-            'content': f"<image>\n{question}"
-        }],
-                                      tokenize=False,
-                                      add_generation_prompt=True)
-        for question in questions
-    ]
+    messages = [[{
+        'role': 'user',
+        'content': f"<image>\n{question}"
+    }] for question in questions]
+    prompts = tokenizer.apply_chat_template(messages,
+                                            tokenize=False,
+                                            add_generation_prompt=True)

     # Stop tokens for InternVL
     # models variants may have different stop tokens
@@ -399,7 +395,7 @@ def run_mllama(questions: list[str], modality: str):
     )

     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    messages = [{
+    messages = [[{
         "role":
         "user",
         "content": [{
@@ -408,7 +404,7 @@ def run_mllama(questions: list[str], modality: str):
             "type": "text",
             "text": f"{question}"
         }]
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             add_generation_prompt=True,
                                             tokenize=False)
@@ -454,10 +450,10 @@ def run_nvlm_d(questions: list[str], modality: str):
     tokenizer = AutoTokenizer.from_pretrained(model_name,
                                               trust_remote_code=True)
-    messages = [{
+    messages = [[{
         'role': 'user',
         'content': f"<image>\n{question}"
-    } for question in questions]
+    }] for question in questions]
     prompts = tokenizer.apply_chat_template(messages,
                                             tokenize=False,
                                             add_generation_prompt=True)