[ROCm] [Feature] [Doc] [Dockerfile] [BugFix] Support Per-Token-Activation Per-Channel-Weight FP8 Quantization Inferencing (#12501)
This commit is contained in:
@@ -72,7 +72,7 @@ class RocmPlatform(Platform):

     supported_quantization: list[str] = [
         "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors",
-        "fbgemm_fp8", "gguf", "quark"
+        "fbgemm_fp8", "gguf", "quark", "ptpc_fp8"
     ]

     @classmethod
Reference in New Issue
Block a user