fix QERL attention import path (#33432)
Signed-off-by: vasiliy <vasiliy@fb.com>
Signed-off-by: Michael Goin <mgoin64@gmail.com>
Co-authored-by: Michael Goin <mgoin64@gmail.com>
commit 3f96fcf646 (committed via GitHub)
parent 6c1f9e4c18
@@ -7,9 +7,9 @@ from weakref import WeakKeyDictionary
 
 import torch
 
+from vllm.attention.layer import Attention, MLAAttention
 from vllm.config import ModelConfig
 from vllm.logger import init_logger
-from vllm.model_executor.layers.attention import Attention, MLAAttention
 from vllm.model_executor.layers.quantization.base_config import QuantizeMethodBase
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 
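For context, a minimal sketch of how the corrected import path is typically consumed on the quantization side: an isinstance check against Attention / MLAAttention when deciding which quantize method to attach to a layer. This is not the QERL code touched by this commit; the class QerlAttentionQuantMethod and the helper get_quant_method_for are hypothetical names used only for illustration, while the import paths themselves are the ones shown in the diff above.

# Hedged sketch only -- not the file edited by this commit. It shows the
# fixed import path being used the way vLLM quantization configs usually
# use it: checking whether a layer is an attention layer before attaching
# a quantize method to it.
import torch

from vllm.attention.layer import Attention, MLAAttention
from vllm.model_executor.layers.quantization.base_config import QuantizeMethodBase


class QerlAttentionQuantMethod(QuantizeMethodBase):
    # Hypothetical placeholder; the real QERL method lives in the file
    # this commit edits.
    def create_weights(self, layer: torch.nn.Module, *weight_args, **extra) -> None:
        pass  # real code would register quantized weight tensors on `layer`

    def apply(self, layer: torch.nn.Module, *args, **kwargs):
        raise NotImplementedError  # real code would invoke the quantized kernel


def get_quant_method_for(layer: torch.nn.Module):
    # With the fixed import, this isinstance check resolves against the
    # canonical Attention / MLAAttention classes rather than a stale module path.
    if isinstance(layer, (Attention, MLAAttention)):
        return QerlAttentionQuantMethod()
    return None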