Convert formatting to use ruff instead of yapf + isort (#26247)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
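The diff below is a mechanical reformat: a conversion like this typically swaps the yapf and isort hooks for `ruff format` and `ruff check --fix` (the configuration change itself is not part of this excerpt). The most visible rule in the hunks that follow is ruff's "magic" trailing comma. A runnable sketch, with hypothetical names, not code from this commit:

model_name = "example-model"  # hypothetical value, for illustration only

# yapf-era layout: no comma after the final entry of the split literal
payload_old = {
    "model": model_name,
    "softmax": False
}

# ruff format adds a trailing comma once the literal spans lines, and that
# comma keeps it exploded one entry per line on future runs
payload_new = {
    "model": model_name,
    "softmax": False,
}

assert payload_old == payload_new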
@@ -50,7 +50,6 @@ async def test_prithvi_mae_plugin_online(
     server: RemoteOpenAIServer,
     model_name: str,
 ):
-
     request_payload_url = {
         "data": {
             "data": image_url,
@@ -60,7 +59,7 @@ async def test_prithvi_mae_plugin_online(
         },
         "priority": 0,
         "model": model_name,
-        "softmax": False
+        "softmax": False,
     }
 
     ret = requests.post(
@@ -77,8 +76,8 @@ async def test_prithvi_mae_plugin_online(
     plugin_data = parsed_response.data
 
     assert all(
-        plugin_data.get(attr)
-        for attr in ["type", "format", "data", "request_id"])
+        plugin_data.get(attr) for attr in ["type", "format", "data", "request_id"]
+    )
 
     # We just check that the output is a valid base64 string.
     # Raises an exception and fails the test if the string is corrupted.
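Both assert rewrites in this file follow the same ruff rule: a wrapped generator expression is joined onto one line when it fits within the line limit, and the closing parenthesis moves to its own line. A runnable sketch, assuming a stand-in dict in place of the plugin response:

data = {"type": "t", "format": "f", "data": "d", "request_id": "r"}  # stand-in

# before (yapf): generator wrapped, closing parenthesis hugging the last line
assert all(
    data.get(attr)
    for attr in ["type", "format", "data", "request_id"])

# after (ruff): generator joined, parenthesis dedented onto its own line
assert all(
    data.get(attr) for attr in ["type", "format", "data", "request_id"]
)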
@@ -87,7 +86,6 @@ async def test_prithvi_mae_plugin_online(
 
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 def test_prithvi_mae_plugin_offline(vllm_runner, model_name: str):
-
     img_prompt = dict(
         data=image_url,
         data_format="url",
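This hunk, like the one at -50,7 above, shows ruff format (following Black) deleting blank lines at the start of a block, i.e. immediately after a def, class, or with header. A runnable sketch with hypothetical function names:

def handler_old():

    return {"priority": 0}  # yapf tolerated the blank line above


def handler_new():
    return {"priority": 0}  # ruff format removes it


assert handler_old() == handler_new()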
@@ -98,16 +96,16 @@ def test_prithvi_mae_plugin_offline(vllm_runner, model_name: str):
     pooling_params = PoolingParams(task="encode", softmax=False)
 
     with vllm_runner(
-            model_name,
-            runner="pooling",
-            skip_tokenizer_init=True,
-            trust_remote_code=True,
-            enforce_eager=True,
-            # Limit the maximum number of parallel requests
-            # to avoid the model going OOM in CI.
-            max_num_seqs=1,
-            model_impl="terratorch",
-            io_processor_plugin="prithvi_to_tiff",
+        model_name,
+        runner="pooling",
+        skip_tokenizer_init=True,
+        trust_remote_code=True,
+        enforce_eager=True,
+        # Limit the maximum number of parallel requests
+        # to avoid the model going OOM in CI.
+        max_num_seqs=1,
+        model_impl="terratorch",
+        io_processor_plugin="prithvi_to_tiff",
     ) as llm_runner:
         pooler_output = llm_runner.get_llm().encode(
             img_prompt,
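The block above changes only indentation; the argument list is otherwise identical. Presumably this is yapf's double indent for continuations of compound-statement headers, which ruff format replaces with a flat 4-space continuation indent matching the body. A runnable sketch using contextlib.nullcontext as a stand-in context manager:

from contextlib import nullcontext

# yapf-style: header continuation indented twice to set it apart from the body
with nullcontext(
        enter_result=42,
) as old_style:
    print(old_style)

# ruff format: fixed 4-space continuation indent, same depth as the body
with nullcontext(
    enter_result=42,
) as new_style:
    print(new_style)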
@@ -117,8 +115,8 @@ def test_prithvi_mae_plugin_offline(vllm_runner, model_name: str):
 
         # verify the output is formatted as expected for this plugin
         assert all(
-            hasattr(output, attr)
-            for attr in ["type", "format", "data", "request_id"])
+            hasattr(output, attr) for attr in ["type", "format", "data", "request_id"]
+        )
 
         # We just check that the output is a valid base64 string.
         # Raises an exception and fails the test if the string is corrupted.
@@ -10,29 +10,38 @@ from vllm.plugins import load_general_plugins
 def test_platform_plugins():
     # simulate workload by running an example
     import runpy
+
     current_file = __file__
     import os
+
     example_file = os.path.join(
         os.path.dirname(os.path.dirname(os.path.dirname(current_file))),
-        "examples", "offline_inference/basic/basic.py")
+        "examples",
+        "offline_inference/basic/basic.py",
+    )
     runpy.run_path(example_file)
 
     # check if the plugin is loaded correctly
     from vllm.platforms import _init_trace, current_platform
+
     assert current_platform.device_name == "DummyDevice", (
         f"Expected DummyDevice, got {current_platform.device_name}, "
         "possibly because current_platform is imported before the plugin"
-        f" is loaded. The first import:\n{_init_trace}")
+        f" is loaded. The first import:\n{_init_trace}"
+    )
 
 
 def test_oot_custom_op(monkeypatch: pytest.MonkeyPatch):
     # simulate workload by running an example
     load_general_plugins()
     from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
+
     layer = RotaryEmbedding(16, 16, 16, 16, True, torch.float16)
     assert layer.__class__.__name__ == "DummyRotaryEmbedding", (
         f"Expected DummyRotaryEmbedding, got {layer.__class__.__name__}, "
-        "possibly because the custom op is not registered correctly.")
+        "possibly because the custom op is not registered correctly."
+    )
     assert hasattr(layer, "addition_config"), (
         "Expected DummyRotaryEmbedding to have an 'addition_config' attribute, "
-        "which is set by the custom op.")
+        "which is set by the custom op."
+    )
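The assert-message rewrites in this hunk are all the same rule: when a parenthesized argument spans lines, ruff format puts the closing parenthesis on its own line instead of letting it hug the last string fragment. (The blank lines added after the function-local imports appear to come from the import-sorting side of the conversion.) A runnable sketch:

flag = True  # hypothetical value so the assertions hold

# before (yapf): closing parenthesis hugs the final string fragment
assert flag, (
    "a long message split across "
    "implicitly concatenated string literals")

# after (ruff): closing parenthesis dedented onto its own line
assert flag, (
    "a long message split across "
    "implicitly concatenated string literals"
)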
@@ -10,7 +10,6 @@ from vllm.v1.engine.llm_engine import LLMEngine
 
 
 class DummyV1Scheduler(Scheduler):
-
     def schedule(self):
         raise Exception("Exception raised by DummyV1Scheduler")
 
@@ -23,7 +22,6 @@ def test_scheduler_plugins_v1(monkeypatch: pytest.MonkeyPatch):
         m.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
 
         with pytest.raises(Exception) as exception_info:
-
             engine_args = EngineArgs(
                 model="facebook/opt-125m",
                 enforce_eager=True,  # reduce test time
@@ -36,5 +34,4 @@ def test_scheduler_plugins_v1(monkeypatch: pytest.MonkeyPatch):
             engine.add_request("0", "foo", sampling_params)
             engine.step()
 
-        assert str(
-            exception_info.value) == "Exception raised by DummyV1Scheduler"
+        assert str(exception_info.value) == "Exception raised by DummyV1Scheduler"
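Conversely to the splits above, ruff joins a wrapped expression back onto one line when it fits within the line limit, as in this final hunk. A runnable sketch with a hypothetical exception class:

class DummyError(Exception):
    pass  # hypothetical stand-in for the scheduler's exception type

err = DummyError("Exception raised by DummyV1Scheduler")

# before (yapf): split inside the call parentheses to respect the limit
assert str(
    err) == "Exception raised by DummyV1Scheduler"

# after (ruff): the comparison fits, so it stays on one line
assert str(err) == "Exception raised by DummyV1Scheduler"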