Support RL online quantization with torchao (#23014)

Signed-off-by: Jerry Zhang <jerryzh168@gmail.com>
commit c31246800c
parent 4134312b35
Author: Jerry Zhang
Date:   2025-10-01 16:39:29 -07:00
Committer: GitHub
6 changed files with 465 additions and 16 deletions
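The tests added below exercise two ways of handing a torchao quantization config to vLLM for on-the-fly (load-time) quantization: as a JSON-serialized config dict and as a JSON config file, both passed through hf_overrides. A minimal usage sketch based on those tests (the override keys and torchao's config_to_dict serializer are taken directly from the diff; this is illustrative, not the only supported invocation):

import json

from torchao.core.config import config_to_dict
from torchao.quantization import (
    Float8DynamicActivationFloat8WeightConfig, PerRow)

from vllm import LLM

# Serialize a torchao quantization config to a JSON string and pass it to
# vLLM via hf_overrides; the weights are then quantized as they are loaded.
quant_config = Float8DynamicActivationFloat8WeightConfig(granularity=PerRow())
llm = LLM(
    model="facebook/opt-125m",
    dtype="bfloat16",
    quantization="torchao",
    hf_overrides={
        "quantization_config_dict_json":
        json.dumps(config_to_dict(quant_config))
    },
)
# Alternatively, write the same JSON to a file and point vLLM at it with
# hf_overrides={"quantization_config_file": "/path/to/config.json"}.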


@@ -20,7 +20,6 @@ def test_pre_quantized_model(vllm_runner):
         output = llm.generate_greedy(["The capital of France is"],
                                      max_tokens=32)
         assert output
-        print(output)
 
 
 @pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
@@ -42,7 +41,6 @@ def test_opt_125m_int8wo_model_loading_with_params(vllm_runner,
         output = llm.generate_greedy(["The capital of France is"],
                                      max_tokens=32)
         assert output
-        print(output)
 
 
 @pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
@@ -57,7 +55,6 @@ def test_opt_125m_int4wo_model_per_module_quant(vllm_runner):
         output = llm.generate_greedy(["The capital of France is"],
                                      max_tokens=32)
         assert output
-        print(output)
 
 
 @pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
@@ -72,7 +69,6 @@ def test_qwenvl_int8wo_model_loading_with_params(vllm_runner):
         output = llm.generate_greedy(["The capital of France is"],
                                      max_tokens=32)
         assert output
-        print(output)
 
 
 @pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
@@ -92,7 +88,127 @@ def test_opt_125m_awq_int4wo_model_loading_with_params(vllm_runner):
         output = llm.generate_greedy(["The capital of France is"],
                                      max_tokens=32)
         assert output
-        print(output)
+
+
+@pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
+def test_on_the_fly_quant_config_dict_json(vllm_runner):
+    """Test on-the-fly quantization through the load_weights integration
+    point, with the torchao config dict serialized to a JSON string.
+    """
+    torch._dynamo.reset()
+    model_name = "facebook/opt-125m"
+    import json
+
+    from torchao.core.config import config_to_dict
+    from torchao.quantization import (
+        Float8DynamicActivationFloat8WeightConfig, PerRow)
+    torchao_quant_config = Float8DynamicActivationFloat8WeightConfig(
+        granularity=PerRow())
+    hf_overrides = {
+        "quantization_config_dict_json":
+        json.dumps(config_to_dict(torchao_quant_config))
+    }
+    with vllm_runner(model_name=model_name,
+                     dtype="bfloat16",
+                     pt_load_map_location="cuda:0",
+                     quantization="torchao",
+                     hf_overrides=hf_overrides) as llm:
+        output = llm.generate_greedy(["The capital of France is"],
+                                     max_tokens=32)
+        assert output
+
+
+@pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
+def test_on_the_fly_quant_config_file(vllm_runner):
+    """Test on-the-fly quantization through the load_weights integration
+    point, with the torchao config read from a JSON file.
+    """
+    torch._dynamo.reset()
+    model_name = "facebook/opt-125m"
+    import json
+    from tempfile import NamedTemporaryFile
+
+    from torchao.core.config import config_to_dict
+    from torchao.quantization import (
+        Float8DynamicActivationFloat8WeightConfig, PerRow)
+    config = Float8DynamicActivationFloat8WeightConfig(granularity=PerRow())
+    with NamedTemporaryFile(mode="w", delete=False) as f:
+        f.write(json.dumps(config_to_dict(config)))
+        # close the file so the config is flushed to disk
+        f.close()
+        config_file_name = str(f.name)
+
+    hf_overrides = {"quantization_config_file": config_file_name}
+    with vllm_runner(model_name=model_name,
+                     dtype="bfloat16",
+                     pt_load_map_location="cuda:0",
+                     quantization="torchao",
+                     hf_overrides=hf_overrides) as llm:
+        output = llm.generate_greedy(["The capital of France is"],
+                                     max_tokens=32)
+        assert output
+
+
+@pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
+def test_reload_weights():
+    """Test reloading real weights in place after a dummy load, the
+    integration point used for RL online quantization.
+    """
+    import json
+
+    from torchao.core.config import config_to_dict
+    from torchao.quantization import (
+        Float8DynamicActivationFloat8WeightConfig, PerRow)
+
+    from vllm import LLM, SamplingParams
+
+    torchao_quant_config = Float8DynamicActivationFloat8WeightConfig(
+        granularity=PerRow())
+    hf_overrides = {
+        "quantization_config_dict_json":
+        json.dumps(config_to_dict(torchao_quant_config))
+    }
+    llm = LLM(
+        model="Qwen/Qwen3-0.6B",
+        dtype="bfloat16",
+        load_format="dummy",
+        enforce_eager=True,
+        quantization="torchao",
+        hf_overrides=hf_overrides,
+    )
+    # Update the load format from `dummy` to `auto`
+    llm.collective_rpc("update_config",
+                       args=({
+                           "load_config": {
+                               "load_format": "auto"
+                           }
+                       }, ))
+    # Now reload the real weights in place
+    llm.collective_rpc("reload_weights")
+
+    prompts = [
+        "Hello, my name is",
+        "The president of the United States is",
+        "The capital of France is",
+        "The future of AI is",
+    ]
+    # Create a sampling params object.
+    sampling_params = SamplingParams(temperature=0, top_p=0.95)
+    outputs = llm.generate(prompts, sampling_params)
+    # Make sure generation runs end to end
+    for output in outputs:
+        generated_text = output.outputs[0].text
+        assert generated_text
+        # Uncomment locally to check that the generated output makes sense:
+        # prompt = output.prompt
+        # print(f"Prompt: {prompt!r}")
+        # print(f"Output: {generated_text!r}")
+        # print("-" * 60)
+
+
 if __name__ == "__main__":
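Taken together, test_reload_weights demonstrates the RL online-quantization flow this commit enables: bring the inference engine up with dummy weights, then push updated weights into it in place, quantizing them on the fly as they are loaded. A condensed sketch of that loop, using only the update_config and reload_weights worker RPCs shown in the test (how the trainer produces the new checkpoint is out of scope here):

import json

from torchao.core.config import config_to_dict
from torchao.quantization import (
    Float8DynamicActivationFloat8WeightConfig, PerRow)

from vllm import LLM

quant_config = Float8DynamicActivationFloat8WeightConfig(granularity=PerRow())
# Start with random (dummy) weights so the engine comes up without
# reading a checkpoint.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    dtype="bfloat16",
    load_format="dummy",
    enforce_eager=True,
    quantization="torchao",
    hf_overrides={
        "quantization_config_dict_json":
        json.dumps(config_to_dict(quant_config))
    },
)

# Whenever the trainer produces new weights: switch the load format back
# to `auto` and reload in place. load_weights re-applies the torchao
# config, so the reloaded weights are quantized on the fly.
llm.collective_rpc("update_config",
                   args=({"load_config": {"load_format": "auto"}}, ))
llm.collective_rpc("reload_weights")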