[5/N][Attention] Finish eliminating vllm/attention folder (#32064)
Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
@@ -29,7 +29,7 @@ The initialization code should look like this:
 ```python
 from torch import nn
 from vllm.config import VllmConfig
-from vllm.attention.layer import Attention
+from vllm.model_executor.layers.attention import Attention

 class MyAttention(nn.Module):
     def __init__(self, vllm_config: VllmConfig, prefix: str):
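As an aside (not part of this commit's diff), here is a minimal sketch of what the updated initialization code could look like with the new import path; the head counts, scale, and the `Attention(...)` keyword arguments are illustrative assumptions drawn from typical vLLM model code, not from this change:

```python
from torch import nn

from vllm.config import VllmConfig
from vllm.model_executor.layers.attention import Attention


class MyAttention(nn.Module):
    def __init__(self, vllm_config: VllmConfig, prefix: str):
        super().__init__()
        # Illustrative values; a real model derives these from
        # vllm_config.model_config / its Hugging Face config.
        num_heads = 32
        head_size = 128
        self.attn = Attention(
            num_heads=num_heads,
            head_size=head_size,
            scale=head_size**-0.5,
            num_kv_heads=num_heads,
            cache_config=vllm_config.cache_config,
            quant_config=vllm_config.quant_config,
            prefix=f"{prefix}.attn",  # assumed sub-module naming convention
        )
```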
@@ -271,7 +271,7 @@ Taking `MMEncoderAttention` as an example:
 ??? code

     ```python
-    from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention
+    from vllm.model_executor.layers.attention import MMEncoderAttention
     from vllm.model_executor.custom_op import CustomOp
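For downstream code that has to run both before and after this refactor, one option (a sketch, not something introduced by this commit) is a small import shim that prefers the new location and falls back to the old one:

```python
try:
    # New location after the vllm/attention folder removal.
    from vllm.model_executor.layers.attention import MMEncoderAttention
except ImportError:
    # Older vLLM versions that still ship the previous module path.
    from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention
```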