[torchao] fix safetensors for sharding (#28169)
Signed-off-by: Angel Li <liangel@meta.com>
This commit is contained in:
@@ -225,13 +225,12 @@ def test_reload_weights():
 @pytest.mark.skip(
     reason="since torchao nightly is only compatible with torch nightly"
     "currently https://github.com/pytorch/ao/issues/2919, we'll have to skip "
-    "torchao tests that requires newer versions (0.14.0.dev+) for now"
+    "torchao tests that requires newer versions (0.15.0.dev+) for now"
 )
-def test_opt_125m_float8_weight_only_safetensors_model_loading_with_params(vllm_runner):
+def test_safetensors_model_loading_with_params(vllm_runner):
     torch._dynamo.reset()
-    model_name = (
-        "torchao-testing/opt-125m-Float8WeightOnlyConfig-v2-0.14.0.dev-safetensors"
-    )
+    # using this model to test safetensors loading with file sharding
+    model_name = "torchao-testing/Qwen3-8B-INT4-0.15.0dev-safetensors"
     with vllm_runner(model_name=model_name, dtype="bfloat16") as llm:
         output = llm.generate_greedy(["The capital of France is"], max_tokens=4)
||||
Reference in New Issue
Block a user