Update Optional[x] -> x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-12 17:51:31 +01:00
committed by GitHub
parent 9bb38130cb
commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions

View File

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any, Optional
from typing import Any
import pytest
import torch
@@ -24,7 +24,7 @@ def rejection_sampler():
def create_logits_tensor(
output_token_ids: list[list[int]],
vocab_size: int = 100,
token_idx_to_override: Optional[int] = None,
token_idx_to_override: int | None = None,
) -> torch.Tensor:
"""Helper function to create logits tensor that
will produce desired token ids on argmax"""
@@ -43,18 +43,18 @@ def create_logits_tensor
def create_sampling_metadata(
all_greedy: bool,
output_token_ids: Optional[list[list[int]]] = None,
prompt_token_ids: Optional[torch.Tensor] = None,
spec_token_ids: Optional[torch.Tensor] = None,
temperature: Optional[torch.Tensor] = None,
top_k: Optional[torch.Tensor] = None,
top_p: Optional[torch.Tensor] = None,
generators: Optional[dict[int, Any]] = None,
frequency_penalties: Optional[list[float]] = None,
presence_penalties: Optional[list[float]] = None,
repetition_penalties: Optional[list[float]] = None,
bad_words_token_ids: Optional[dict[int, list[list[int]]]] = None,
allowed_token_ids_mask: Optional[torch.Tensor] = None,
output_token_ids: list[list[int]] | None = None,
prompt_token_ids: torch.Tensor | None = None,
spec_token_ids: torch.Tensor | None = None,
temperature: torch.Tensor | None = None,
top_k: torch.Tensor | None = None,
top_p: torch.Tensor | None = None,
generators: dict[int, Any] | None = None,
frequency_penalties: list[float] | None = None,
presence_penalties: list[float] | None = None,
repetition_penalties: list[float] | None = None,
bad_words_token_ids: dict[int, list[list[int]]] | None = None,
allowed_token_ids_mask: torch.Tensor | None = None,
) -> SamplingMetadata:
"""Create a v1 sampling metadata object with all_greedy set
to the given value. Either all greedy or all random sampling

View File

@@ -3,7 +3,7 @@
from collections.abc import Iterator
from enum import Enum
from typing import NamedTuple, Optional
from typing import NamedTuple
import regex as re
import torch
@@ -23,7 +23,7 @@ class BatchLogprobsComposition(Enum):
SAMPLE_PROMPT = 3
BatchLogprobsSpecType = list[tuple[Optional[int], Optional[int]]]
BatchLogprobsSpecType = list[tuple[int | None, int | None]]
def get_test_batch(
@@ -222,8 +222,8 @@ def create_allowed_token_ids(
vocab_size: int,
num_allowed_token_ids: int,
device: torch.device,
) -> Optional[torch.Tensor]:
mask: Optional[torch.Tensor] = None
) -> torch.Tensor | None:
mask: torch.Tensor | None = None
for i in range(batch_size):
if i % 2 == 1:
continue