Add support for loading torchao models with AOPerModuleConfig (#17826)

Signed-off-by: Jerry Zhang <jerryzh168@gmail.com>
Author: Jerry Zhang
Date: 2025-05-14 16:24:59 -07:00
Committed by: GitHub
Commit: 7974736740 (parent: 2fc9075b82)

2 changed files with 37 additions and 10 deletions
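
For context, a checkpoint like the one exercised by the new test could be produced with torchao's AOPerModuleConfig roughly as sketched below. This is a minimal sketch, not code from this commit: the base model name, the "_default" key convention, and the exact import paths and arguments (quantize_, Int4WeightOnlyConfig, group_size) are assumptions that may vary across torchao versions.

import torch
from torchao.quantization import (AOPerModuleConfig, Int4WeightOnlyConfig,
                                  quantize_)
from transformers import AutoModelForCausalLM

# Assumed base model for "jerryzh168/opt-125m-int4wo-per-module".
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m",
                                             torch_dtype=torch.bfloat16)

# AOPerModuleConfig maps module fully-qualified names to quantization
# configs. "_default" (assumed convention) covers every module without
# its own entry; a None entry leaves that module unquantized.
config = AOPerModuleConfig({
    "_default": Int4WeightOnlyConfig(group_size=128),
    "lm_head": None,
})
quantize_(model, config)  # quantizes matching modules in place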


@@ -31,9 +31,6 @@ def test_pre_quantized_model(vllm_runner):
 ])
 def test_opt_125m_int4wo_model_loading_with_params(vllm_runner,
                                                    pt_load_map_location):
-    """
-    Test loading roberta-base model with no lm_head.
-    """
     torch._dynamo.reset()
     model_name = "jerryzh168/opt-125m-int4wo"
     with vllm_runner(model_name=model_name,
@@ -47,5 +44,20 @@ def test_opt_125m_int4wo_model_loading_with_params(vllm_runner,
         print(output)
 
 
+@pytest.mark.skipif(not TORCHAO_AVAILABLE, reason="torchao is not available")
+def test_opt_125m_int4wo_model_per_module_quant(vllm_runner):
+    torch._dynamo.reset()
+    model_name = "jerryzh168/opt-125m-int4wo-per-module"
+    with vllm_runner(model_name=model_name,
+                     quantization="torchao",
+                     dtype="bfloat16",
+                     pt_load_map_location="cuda:0") as llm:
+        output = llm.generate_greedy(["The capital of France is"],
+                                     max_tokens=32)
+
+        assert output
+        print(output)
+
+
 if __name__ == "__main__":
     pytest.main([__file__])
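
For reference, loading the per-module-quantized checkpoint outside the test harness would look roughly like the sketch below, using vLLM's offline LLM entry point. The model name, quantization backend, and dtype come from the new test above; everything else is left at its default.

from vllm import LLM

llm = LLM(model="jerryzh168/opt-125m-int4wo-per-module",
          quantization="torchao",
          dtype="bfloat16")
outputs = llm.generate("The capital of France is")
print(outputs[0].outputs[0].text)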