Update Optional[x] -> x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-12 17:51:31 +01:00
committed by GitHub
parent 9bb38130cb
commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions

View File

@@ -3,7 +3,6 @@
import json
import os
from typing import Optional
import pytest
@@ -30,7 +29,7 @@ def set_test_environment():
dummy_hf_overrides = {"num_layers": 4, "num_hidden_layers": 4}
def can_initialize(model: str, extra_args: Optional[list[str]] = None):
def can_initialize(model: str, extra_args: list[str] | None = None):
# Server arguments
extra_args = extra_args if extra_args is not None else []
server_args = [

View File

@@ -5,8 +5,6 @@
Run `pytest tests/quantization/test_compressed_tensors.py`.
"""
from typing import Optional
import pytest
import torch
from compressed_tensors.quantization import QuantizationType
@@ -104,7 +102,7 @@ def test_compressed_tensors_w8a8_static_setup(vllm_runner, model_args):
down_proj = layer.mlp.down_proj
# assert zp for symmetric and asymmetric cases
def zp_valid(zp: Optional[torch.Tensor]):
def zp_valid(zp: torch.Tensor | None):
if is_symmetric:
return zp is None

View File

@@ -11,7 +11,6 @@ import importlib.metadata
import os
from dataclasses import dataclass
from importlib.util import find_spec
from typing import Optional
import huggingface_hub
import lm_eval
@@ -156,8 +155,8 @@ class AccuracyTestConfig:
def get_model_args(
self,
tp_size: int,
model_max_len: Optional[int] = None,
kwargs: Optional[dict] = None,
model_max_len: int | None = None,
kwargs: dict | None = None,
) -> dict:
if kwargs is None:
kwargs = {}

View File

@@ -7,7 +7,7 @@ See https://github.com/vllm-project/vllm/issues/11926 for more details.
Run `pytest tests/quantization/test_register_quantization_config.py`.
"""
from typing import Any, Optional
from typing import Any
import pytest
import torch
@@ -37,10 +37,10 @@ class FakeQuantLinearMethod(UnquantizedLinearMethod):
def apply(
self,
layer: "torch.nn.Module",
x: "torch.Tensor",
bias: Optional["torch.Tensor"] = None,
) -> "torch.Tensor":
layer: torch.nn.Module,
x: torch.Tensor,
bias: torch.Tensor | None = None,
) -> torch.Tensor:
"""Perform fake quantization before the linear layer."""
# Calculate the scales dynamically
@@ -72,7 +72,7 @@ class CustomQuantConfig(QuantizationConfig):
"""Name of the quantization method."""
return "custom_quant"
def get_supported_act_dtypes(self) -> list["torch.dtype"]:
def get_supported_act_dtypes(self) -> list[torch.dtype]:
"""List of supported activation dtypes."""
return [torch.float16, torch.bfloat16]
@@ -92,8 +92,8 @@ class CustomQuantConfig(QuantizationConfig):
return CustomQuantConfig(num_bits=config.get("num_bits", 8))
def get_quant_method(
self, layer: "torch.nn.Module", prefix: str
) -> Optional["FakeQuantLinearMethod"]:
self, layer: torch.nn.Module, prefix: str
) -> FakeQuantLinearMethod | None:
"""Get the quantize method to use for the quantized layer."""
if isinstance(layer, LinearBase):
return FakeQuantLinearMethod(num_bits=self.num_bits)