[Model] Support Pixtral models in the HF Transformers format (#9036)
This commit is contained in:
@@ -277,6 +277,22 @@ def run_qwen2_vl(question: str, modality: str):
|
||||
return llm, prompt, stop_token_ids
|
||||
|
||||
|
||||
# Pixtral HF-format
def run_pixtral_hf(question: str, modality: str):
    """Set up the HF-Transformers-format Pixtral model.

    Returns the ``(llm, prompt, stop_token_ids)`` triple expected by the
    example driver. Only the image modality is supported.
    """
    assert modality == "image"

    # Community re-upload of Pixtral-12B in the HF Transformers layout
    # (as opposed to the Mistral-native consolidated format).
    model_name = "mistral-community/pixtral-12b"

    llm = LLM(model=model_name, max_model_len=8192)

    # Mistral-style instruction template; [IMG] marks the image slot.
    prompt = f"<s>[INST]{question}\n[IMG][/INST]"
    stop_token_ids = None
    return llm, prompt, stop_token_ids
|
||||
|
||||
|
||||
# Llama 3.2
|
||||
def run_mllama(question: str, modality: str):
|
||||
assert modality == "image"
|
||||
@@ -347,6 +363,7 @@ model_example_map = {
|
||||
"NVLM_D": run_nvlm_d,
|
||||
"qwen_vl": run_qwen_vl,
|
||||
"qwen2_vl": run_qwen2_vl,
|
||||
"pixtral_hf": run_pixtral_hf,
|
||||
"mllama": run_mllama,
|
||||
"molmo": run_molmo,
|
||||
"glm4v": run_glm4v,
|
||||
|
||||
Reference in New Issue
Block a user