[Kernel] FP8 support for MoE kernel / Mixtral (#4244)
This PR is the first step towards fixing https://github.com/vllm-project/vllm/pull/3208.
It implements dynamic per-tensor scaling (see https://github.com/vllm-project/vllm/pull/4118), so users neither need to compute activation scales on a calibration dataset nor convert their model checkpoints; it is enough to specify the `quantization="fp8"` argument. You can try out the PR like this:
```python
from vllm import LLM, SamplingParams
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
llm = LLM(model="mistralai/Mixtral-8x7B-Instruct-v0.1", tensor_parallel_size=2, quantization="fp8")
outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
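For intuition, "dynamic per-tensor scaling" means the FP8 scale is computed at runtime from the tensor being quantized rather than from a calibration dataset. Conceptually it boils down to something like the following sketch (illustrative PyTorch only, not the actual CUDA kernel; the helper name `dynamic_fp8_quantize` is made up for this example):

```python
import torch

def dynamic_fp8_quantize(x: torch.Tensor):
    # Illustrative only: one scale for the whole tensor, derived from its abs-max.
    fp8_max = torch.finfo(torch.float8_e4m3fn).max  # 448.0 for E4M3
    scale = x.abs().max().clamp(min=1e-12).float() / fp8_max
    x_fp8 = (x / scale).clamp(-fp8_max, fp8_max).to(torch.float8_e4m3fn)
    return x_fp8, scale  # dequantize with x_fp8.float() * scale
```

In the PR, this computation happens inside the quantization kernel exposed as `scaled_fp8_quant` further down, so no calibration pass or checkpoint conversion is needed.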
**Performance**: For this PR, the focus is on keeping the code clean while still getting reasonable performance. There is a set of optimizations that significantly improves performance (similar to the numbers in https://github.com/vllm-project/vllm/pull/3954), which we will submit as a follow-up PR. With this PR, the results are as follows:
<img width="725" alt="Screenshot 2024-04-21 at 1 31 50 PM" src="https://github.com/vllm-project/vllm/assets/113316/d8fe1118-07a0-4d4e-8530-37a77d465a03">
**Accuracy**: The MMLU accuracy with this PR on `mistralai/Mixtral-8x7B-v0.1` is as follows:
```
| Groups |Version|Filter|n-shot|Metric|Value | |Stderr|
|------------------|-------|------|-----:|------|-----:|---|-----:|
|mmlu |N/A |none | 0|acc |0.7018|± |0.0036|
| - humanities |N/A |none | 5|acc |0.6472|± |0.0065|
| - other |N/A |none | 5|acc |0.7673|± |0.0072|
| - social_sciences|N/A |none | 5|acc |0.8099|± |0.0070|
| - stem |N/A |none | 5|acc |0.6131|± |0.0083|
```
This compares favorably with the fp16 results, which are:
```
| Groups |Version|Filter|n-shot|Metric|Value | |Stderr|
|------------------|-------|------|-----:|------|-----:|---|-----:|
|mmlu |N/A |none | 0|acc |0.7020|± |0.1313|
| - humanities |N/A |none | 5|acc |0.6425|± |0.1349|
| - other |N/A |none | 5|acc |0.7744|± |0.1038|
| - social_sciences|N/A |none | 5|acc |0.8131|± |0.0695|
| - stem |N/A |none | 5|acc |0.6108|± |0.1383|
```
Happy hacking!
from typing import Dict, Optional, Tuple

import torch

try:
    from vllm._C import cache_ops as vllm_cache_ops
    from vllm._C import ops as vllm_ops
except ImportError:
    pass


# activation ops
def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
    vllm_ops.silu_and_mul(out, x)


def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
    vllm_ops.gelu_and_mul(out, x)


def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
    vllm_ops.gelu_tanh_and_mul(out, x)


def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
    vllm_ops.gelu_fast(out, x)


def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
    vllm_ops.gelu_new(out, x)


# page attention ops
def paged_attention_v1(
    out: torch.Tensor,
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    num_kv_heads: int,
    scale: float,
    block_tables: torch.Tensor,
    context_lens: torch.Tensor,
    block_size: int,
    max_context_len: int,
    alibi_slopes: Optional[torch.Tensor],
    kv_cache_dtype: str,
    kv_scale: float,
) -> None:
    vllm_ops.paged_attention_v1(out, query, key_cache, value_cache,
                                num_kv_heads, scale, block_tables,
                                context_lens, block_size, max_context_len,
                                alibi_slopes, kv_cache_dtype, kv_scale)


def paged_attention_v2(
    out: torch.Tensor,
    exp_sum: torch.Tensor,
    max_logits: torch.Tensor,
    tmp_out: torch.Tensor,
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    num_kv_heads: int,
    scale: float,
    block_tables: torch.Tensor,
    context_lens: torch.Tensor,
    block_size: int,
    max_context_len: int,
    alibi_slopes: Optional[torch.Tensor],
    kv_cache_dtype: str,
    kv_scale: float,
) -> None:
    vllm_ops.paged_attention_v2(out, exp_sum, max_logits, tmp_out, query,
                                key_cache, value_cache, num_kv_heads, scale,
                                block_tables, context_lens, block_size,
                                max_context_len, alibi_slopes, kv_cache_dtype,
                                kv_scale)


# pos encoding ops
def rotary_embedding(
    positions: torch.Tensor,
    query: torch.Tensor,
    key: torch.Tensor,
    head_size: int,
    cos_sin_cache: torch.Tensor,
    is_neox: bool,
) -> None:
    vllm_ops.rotary_embedding(positions, query, key, head_size, cos_sin_cache,
                              is_neox)


def batched_rotary_embedding(positions: torch.Tensor, query: torch.Tensor,
                             key: torch.Tensor, head_size: int,
                             cos_sin_cache: torch.Tensor, is_neox: bool,
                             rot_dim: int,
                             cos_sin_cache_offsets: torch.Tensor) -> None:
    vllm_ops.batched_rotary_embedding(positions, query, key, head_size,
                                      cos_sin_cache, is_neox, rot_dim,
                                      cos_sin_cache_offsets)


# layer norm ops
def rms_norm(out: torch.Tensor, input: torch.Tensor, weight: torch.Tensor,
             epsilon: float) -> None:
    vllm_ops.rms_norm(out, input, weight, epsilon)


def fused_add_rms_norm(input: torch.Tensor, residual: torch.Tensor,
                       weight: torch.Tensor, epsilon: float) -> None:
    vllm_ops.fused_add_rms_norm(input, residual, weight, epsilon)


# quantization ops
# awq
def awq_dequantize(qweight: torch.Tensor, scales: torch.Tensor,
                   zeros: torch.Tensor, split_k_iters: int, thx: int,
                   thy: int) -> torch.Tensor:
    return vllm_ops.awq_dequantize(qweight, scales, zeros, split_k_iters, thx,
                                   thy)


def awq_gemm(input: torch.Tensor, qweight: torch.Tensor, qzeros: torch.Tensor,
             scales: torch.Tensor, split_k_iters: int) -> torch.Tensor:
    return vllm_ops.awq_gemm(input, qweight, qzeros, scales, split_k_iters)


# gptq
def gptq_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
              b_gptq_qzeros: torch.Tensor, b_gptq_scales: torch.Tensor,
              b_g_idx: torch.Tensor, use_exllama: bool,
              bit: int) -> torch.Tensor:
    return vllm_ops.gptq_gemm(a, b_q_weight, b_gptq_qzeros, b_gptq_scales,
                              b_g_idx, use_exllama, bit)


def gptq_shuffle(q_weight: torch.Tensor, q_perm: torch.Tensor,
                 bit: int) -> None:
    vllm_ops.gptq_shuffle(q_weight, q_perm, bit)


# squeezellm
def squeezellm_gemm(vec: torch.Tensor, mat: torch.Tensor, mul: torch.Tensor,
                    lookup_table: torch.Tensor) -> None:
    vllm_ops.squeezellm_gemm(vec, mat, mul, lookup_table)


# marlin
def marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
                b_scales: torch.Tensor, workspace: torch.Tensor, size_m: int,
                size_n: int, size_k: int) -> torch.Tensor:
    return vllm_ops.marlin_gemm(a, b_q_weight, b_scales, workspace, size_m,
                                size_n, size_k)


# fp8
def scaled_fp8_quant(input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    scale = torch.zeros(1, device=input.device, dtype=torch.float32)
    output = torch.empty_like(input, dtype=torch.float8_e4m3fn)
    vllm_ops.scaled_fp8_quant(output, input, scale)
    return output, scale
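# NOTE: hypothetical usage sketch (added commentary, not part of the original
# file) for the dynamic per-tensor scale returned above:
#     x = torch.randn(16, 4096, device="cuda", dtype=torch.float16)
#     x_fp8, x_scale = scaled_fp8_quant(x)
#     # x_fp8 has dtype float8_e4m3fn, x_scale is a 1-element float32 tensor,
#     # and x is approximately x_fp8.to(torch.float16) * x_scale.

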
# moe
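# NOTE (added commentary, not in the original file): moe_align_block_size sorts
# the token -> expert assignments and pads each expert's segment to a multiple
# of `block_size`, so the fused MoE kernel can launch over fixed-size blocks.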
def moe_align_block_size(topk_ids: torch.Tensor, num_experts: int,
                         block_size: int, sorted_token_ids: torch.Tensor,
                         experts_ids: torch.Tensor,
                         num_tokens_post_pad: torch.Tensor) -> None:
    vllm_ops.moe_align_block_size(topk_ids, num_experts, block_size,
                                  sorted_token_ids, experts_ids,
                                  num_tokens_post_pad)


def reshape_and_cache(
    key: torch.Tensor,
    value: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    slot_mapping: torch.Tensor,
    kv_cache_dtype: str,
    kv_scale: float,
) -> None:
    vllm_cache_ops.reshape_and_cache(key, value, key_cache, value_cache,
                                     slot_mapping, kv_cache_dtype, kv_scale)


def copy_blocks(key_caches: torch.Tensor, value_caches: torch.Tensor,
                block_mapping: torch.Tensor) -> None:
    vllm_cache_ops.copy_blocks(key_caches, value_caches, block_mapping)


def swap_blocks(src: torch.Tensor, dst: torch.Tensor,
                block_mapping: Dict[int, int]) -> None:
    vllm_cache_ops.swap_blocks(src, dst, block_mapping)


def convert_fp8(output: torch.Tensor, input: torch.Tensor) -> None:
    vllm_cache_ops.convert_fp8(output, input)


# TODO: cuda_utils, custom_ar