[Kernel] w4a16 support for compressed-tensors (#5385)
Co-authored-by: Robert Shaw <114415538+robertgshaw2-neuralmagic@users.noreply.github.com>
tests/quantization/test_compressed_tensors.py
@@ -3,12 +3,13 @@
 Run `pytest tests/quantization/test_compressed_tensors.py`.
 """
 
+import pytest
 import torch
 
 from vllm import SamplingParams
 from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import (  # noqa: E501
-    CompressedTensorsLinearMethod, CompressedTensorsW8A8DynamicToken,
-    CompressedTensorsW8A8StaticTensor)
+    CompressedTensorsLinearMethod, CompressedTensorsW4A16,
+    CompressedTensorsW8A8DynamicToken, CompressedTensorsW8A8StaticTensor)
 
 
 def test_compressed_tensors_w8a8_static_setup(vllm_runner):
@@ -60,3 +61,25 @@ def test_compressed_tensors_w8a8_dynanmic_per_token(vllm_runner):
     assert isinstance(qkv_proj.quant_method, CompressedTensorsLinearMethod)
     assert isinstance(qkv_proj.scheme, CompressedTensorsW8A8DynamicToken)
     assert qkv_proj.weight.dtype is torch.int8
+
+
+@pytest.mark.parametrize("w4a16_args", [
+    ("nm-testing/tinyllama-oneshot-w4a16-channel-v2", "channel", None),
+    ("nm-testing/tinyllama-oneshot-w4a16-group128-v2", "group", 128),
+])
+def test_compressed_tensors_w4a16(vllm_runner, w4a16_args):
+    model, strategy, group = w4a16_args
+    with vllm_runner(model) as llm:
+        model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
+        layer = model.model.layers[0]
+
+        qkv_proj = layer.self_attn.qkv_proj
+        assert isinstance(qkv_proj.quant_method, CompressedTensorsLinearMethod)
+        assert isinstance(qkv_proj.scheme, CompressedTensorsW4A16)
+
+        assert qkv_proj.scheme.strategy == strategy
+        assert qkv_proj.scheme.group_size == group
+
+        assert qkv_proj.weight_packed.dtype is torch.int32
+        assert qkv_proj.weight_scale.dtype is torch.float16
+        assert qkv_proj.weight_packed.pack_factor == 8
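A note on the `pack_factor == 8` assertion in the new test: w4a16 stores weights as 4-bit integers packed into int32 words, and 32 / 4 = 8 values fit in each word. Below is a minimal, illustrative packing sketch in plain PyTorch; the helper name and the exact bit order are assumptions, not the layout vLLM's kernel uses.

# Illustrative only: pack eight 4-bit values into one int32, which is why
# the test expects weight_packed.pack_factor == 8. The real kernel layout
# (bit order, interleaving) may differ.
import torch


def pack_int4(vals: torch.Tensor) -> torch.Tensor:
    """Pack values in [0, 15] along the last dim, 8 per int32 word."""
    assert vals.shape[-1] % 8 == 0
    vals = vals.to(torch.int32).reshape(*vals.shape[:-1], -1, 8)
    packed = torch.zeros(vals.shape[:-1], dtype=torch.int32)
    for i in range(8):
        packed |= (vals[..., i] & 0xF) << (4 * i)
    return packed


nibbles = torch.randint(0, 16, (4, 16))  # 16 int4 values per row
packed = pack_int4(nibbles)
assert packed.dtype is torch.int32
assert packed.shape == (4, 2)  # 16 values / pack_factor 8 = 2 words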
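The two parametrized checkpoints cover the two scale granularities the scheme reports via `strategy` and `group_size`: "channel" keeps one fp16 scale per output channel (so `group_size` is `None`), while "group" keeps one scale per contiguous block of 128 weights. A hedged sketch of how such scales could be derived for symmetric int4 follows; the `int4_scales` helper and the divide-by-7 convention are assumptions for illustration, not vLLM's API.

# Hypothetical sketch, not vLLM code: per-channel vs. group-128 scale shapes
# for symmetric int4 quantization.
from typing import Optional

import torch


def int4_scales(w: torch.Tensor, group_size: Optional[int]) -> torch.Tensor:
    if group_size is None:  # "channel": one group spanning the whole row
        groups = w.reshape(w.shape[0], 1, -1)
    else:  # "group": blocks of `group_size` input elements per row
        groups = w.reshape(w.shape[0], -1, group_size)
    # Symmetric int4 covers [-8, 7]; dividing by 7 maps the max magnitude
    # to a representable level (one common convention, assumed here).
    return (groups.abs().amax(dim=-1) / 7.0).to(torch.float16)


w = torch.randn(32, 256)  # [out_features, in_features]
assert int4_scales(w, None).shape == (32, 1)  # one scale per output channel
assert int4_scales(w, 128).shape == (32, 2)  # 256 / 128 = 2 groups per row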