chore: add ast-grep rule to convert Optional[T] to T | None (#25560)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
-LAN-
2025-09-15 13:06:33 +08:00
committed by GitHub
parent 2e44ebe98d
commit bab4975809
394 changed files with 2555 additions and 2792 deletions

View File

@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Any, Optional
+from typing import Any
from core.model_manager import ModelInstance
from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenizer import GPT2Tokenizer
@@ -24,7 +24,7 @@ class EnhanceRecursiveCharacterTextSplitter(RecursiveCharacterTextSplitter):
@classmethod
def from_encoder(
cls: type[TS],
-        embedding_model_instance: Optional[ModelInstance],
+        embedding_model_instance: ModelInstance | None,
allowed_special: Union[Literal["all"], Set[str]] = set(), # noqa: UP037
disallowed_special: Union[Literal["all"], Collection[str]] = "all", # noqa: UP037
**kwargs: Any,
@@ -48,7 +48,7 @@ class EnhanceRecursiveCharacterTextSplitter(RecursiveCharacterTextSplitter):
class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter):
-    def __init__(self, fixed_separator: str = "\n\n", separators: Optional[list[str]] = None, **kwargs: Any):
+    def __init__(self, fixed_separator: str = "\n\n", separators: list[str] | None = None, **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._fixed_separator = fixed_separator

View File

@@ -9,7 +9,6 @@ from dataclasses import dataclass
from typing import (
Any,
Literal,
-    Optional,
TypeVar,
Union,
)
@@ -71,7 +70,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
def split_text(self, text: str) -> list[str]:
"""Split text into multiple components."""
-    def create_documents(self, texts: list[str], metadatas: Optional[list[dict]] = None) -> list[Document]:
+    def create_documents(self, texts: list[str], metadatas: list[dict] | None = None) -> list[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
@@ -94,7 +93,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
metadatas.append(doc.metadata or {})
return self.create_documents(texts, metadatas=metadatas)
-    def _join_docs(self, docs: list[str], separator: str) -> Optional[str]:
+    def _join_docs(self, docs: list[str], separator: str) -> str | None:
text = separator.join(docs)
text = text.strip()
if text == "":
@@ -194,7 +193,7 @@ class TokenTextSplitter(TextSplitter):
def __init__(
self,
encoding_name: str = "gpt2",
-        model_name: Optional[str] = None,
+        model_name: str | None = None,
allowed_special: Union[Literal["all"], Set[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
@@ -245,7 +244,7 @@ class RecursiveCharacterTextSplitter(TextSplitter):
def __init__(
self,
-        separators: Optional[list[str]] = None,
+        separators: list[str] | None = None,
keep_separator: bool = True,
**kwargs: Any,
):