---
# Accuracy-evaluation config: launch the model server with the args/env below,
# then score it against the question set.
# NOTE(review): 1319 questions with 5-shot prompting looks like the GSM8K test
# split — confirm against the harness that consumes this file.
model_name: "Qwen/Qwen3-Next-80B-A3B-Instruct-FP8"
accuracy_threshold: 0.85  # presumably the minimum passing accuracy (fraction) — verify in the consumer
num_questions: 1319
num_fewshot: 5

# Extra CLI flags for the server. Folded block scalar (>-): the lines join into
# one space-separated string with no trailing newline.
server_args: >-
  --max-model-len 4096
  --tensor-parallel-size 2
  --enable-expert-parallel
  --async-scheduling

# Environment variables for the server process. Value quoted so YAML keeps it
# a string ("1"), not an integer.
env:
  VLLM_USE_FLASHINFER_MOE_FP8: "1"