# Files
# vllm/tests/evals/gsm8k/configs/moe-refactor/Nemotron-Nano-30B-Fp8-ModelOpt-fi-trtllm.yaml
# 9 lines, 274 B, YAML

# GSM8K eval config: Nemotron Nano 30B (FP8, ModelOpt) served via vLLM with the
# FlashInfer TRT-LLM MoE backend.
model_name: "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8"
# Minimum acceptable GSM8K accuracy for the run to pass.
accuracy_threshold: 0.29
# Full GSM8K test split.
num_questions: 1319
# Standard 5-shot prompting.
num_fewshot: 5
server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2"
# Environment variables exported before launching the server.
# NOTE: these MUST be nested under `env:` — at top level they would be read as
# independent config keys and `env` itself would parse as null.
env:
  VLLM_USE_FLASHINFER_MOE_FP8: "1"
  VLLM_FLASHINFER_MOE_BACKEND: "latency"