Update Optional[x] -> x | None and Union[x, y] to x | y (#26633)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2025-10-12 17:51:31 +01:00
committed by GitHub
parent 9bb38130cb
commit 8fcaaf6a16
944 changed files with 9490 additions and 10121 deletions

View File

@ -1,15 +1,13 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
import base64
import datetime
import os
import tempfile
import urllib.request
from collections.abc import Sequence
from typing import Any, Union
from typing import Any
import albumentations
import numpy as np
@ -160,11 +158,11 @@ def read_geotiff(
def load_image(
data: Union[list[str]],
data: list[str],
path_type: str,
mean: list[float] | None = None,
std: list[float] | None = None,
indices: Union[list[int], None] | None = None,
indices: list[int] | None = None,
):
"""Build an input example by loading images in *file_paths*.
@ -280,7 +278,7 @@ class PrithviMultimodalDataProcessor(IOProcessor):
prompt: IOProcessorInput,
request_id: str | None = None,
**kwargs,
) -> Union[PromptType, Sequence[PromptType]]:
) -> PromptType | Sequence[PromptType]:
image_data = dict(prompt)
if request_id:

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any, Literal, Optional, TypedDict, Union
from typing import Any, Literal, TypedDict
import albumentations
from pydantic import BaseModel
@ -38,7 +38,7 @@ class ImagePrompt(BaseModel):
"""
MultiModalPromptType = Union[ImagePrompt]
MultiModalPromptType = ImagePrompt
class ImageRequestOutput(BaseModel):
@ -54,4 +54,4 @@ class ImageRequestOutput(BaseModel):
type: Literal["path", "b64_json"]
format: str
data: str
request_id: Optional[str] = None
request_id: str | None = None

View File

@ -2,7 +2,6 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
from typing import Optional, Union
import torch
import torch.nn as nn
@ -44,9 +43,9 @@ class MyGemma2Embedding(nn.Module):
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
intermediate_tensors: Optional[IntermediateTensors] = None,
inputs_embeds: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, IntermediateTensors]:
intermediate_tensors: IntermediateTensors | None = None,
inputs_embeds: torch.Tensor | None = None,
) -> torch.Tensor | IntermediateTensors:
hidden_states = self.model(
input_ids,
positions,

View File

@ -1,7 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Optional
import torch
@ -20,7 +19,7 @@ from vllm.multimodal import MULTIMODAL_REGISTRY
dummy_inputs=LlavaDummyInputsBuilder,
)
class MyLlava(LlavaForConditionalGeneration):
def compute_logits(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
# this dummy model always predicts the first token
logits = super().compute_logits(hidden_states)
if logits is not None:

View File

@ -1,7 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Optional
import torch
@ -9,7 +8,7 @@ from vllm.model_executor.models.opt import OPTForCausalLM
class MyOPTForCausalLM(OPTForCausalLM):
def compute_logits(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
# this dummy model always predicts the first token
logits = super().compute_logits(hidden_states)
if logits is not None:

View File

@ -1,10 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Optional
def dummy_platform_plugin() -> Optional[str]:
def dummy_platform_plugin() -> str | None:
return "vllm_add_dummy_platform.dummy_platform.DummyPlatform"