"""Re-exports of the model implementations supported by vLLM.

Importing a class here makes it available as
``vllm.model_executor.models.<ClassName>``; ``__all__`` below is the
authoritative list of supported architectures for ``from ... import *``.
"""

from vllm.model_executor.models.aquila import AquilaForCausalLM
from vllm.model_executor.models.baichuan import (BaiChuanForCausalLM,
                                                 BaichuanForCausalLM)
from vllm.model_executor.models.bloom import BloomForCausalLM
from vllm.model_executor.models.chatglm import ChatGLMForCausalLM
from vllm.model_executor.models.falcon import FalconForCausalLM
from vllm.model_executor.models.gpt2 import GPT2LMHeadModel
from vllm.model_executor.models.gpt_bigcode import GPTBigCodeForCausalLM
from vllm.model_executor.models.gpt_j import GPTJForCausalLM
from vllm.model_executor.models.gpt_neox import GPTNeoXForCausalLM
from vllm.model_executor.models.internlm import InternLMForCausalLM
from vllm.model_executor.models.llama import LlamaForCausalLM
from vllm.model_executor.models.mistral import MistralForCausalLM
from vllm.model_executor.models.mpt import MptForCausalLM
from vllm.model_executor.models.opt import OPTForCausalLM
from vllm.model_executor.models.qwen import QWenLMHeadModel
from vllm.model_executor.models.yi import YiForCausalLM

# Keep alphabetized: this is the package's public API surface.
# NOTE(review): "BaiChuanForCausalLM" and "BaichuanForCausalLM" differ only
# in casing but are distinct classes in the baichuan module — both are
# intentional, not a typo.
__all__ = [
    "AquilaForCausalLM",
    "BaiChuanForCausalLM",
    "BaichuanForCausalLM",
    "BloomForCausalLM",
    "ChatGLMForCausalLM",
    "FalconForCausalLM",
    "GPT2LMHeadModel",
    "GPTBigCodeForCausalLM",
    "GPTJForCausalLM",
    "GPTNeoXForCausalLM",
    "InternLMForCausalLM",
    "LlamaForCausalLM",
    "MistralForCausalLM",
    "MptForCausalLM",
    "OPTForCausalLM",
    "QWenLMHeadModel",
    "YiForCausalLM",
]