Merging main into fix/chore-fix

This commit is contained in:
Yeuoly
2024-10-14 16:22:12 +08:00
433 changed files with 11823 additions and 2782 deletions

View File

@ -1,46 +0,0 @@
import os
from collections.abc import Callable
from typing import Literal
import pytest
# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch
from openai.resources.moderations import Moderations
from tests.integration_tests.model_runtime.__mock.openai_moderation import MockModerationClass
def mock_openai(
    monkeypatch: MonkeyPatch,
    methods: list[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]],
) -> Callable[[], None]:
    """Patch selected OpenAI client methods with local mock implementations.

    :param monkeypatch: pytest monkeypatch fixture used to apply the patches
    :param methods: names of the OpenAI capabilities to mock (only
        "moderation" is handled in this block)
    :return: zero-argument callable that reverts every applied patch
    """
    if "moderation" in methods:
        monkeypatch.setattr(Moderations, "create", MockModerationClass.moderation_create)

    def revert() -> None:
        monkeypatch.undo()

    return revert
# Mocking is opt-in, controlled by the MOCK_SWITCH environment variable.
MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"


@pytest.fixture
def setup_openai_mock(request, monkeypatch):
    """Fixture that mocks the requested OpenAI methods for the test's duration.

    The list of methods comes from indirect parametrization (``request.param``);
    when mocking is disabled nothing is patched.
    """
    requested = getattr(request, "param", [])
    undo = mock_openai(monkeypatch, methods=requested) if MOCK else None
    yield
    if undo is not None:
        undo()

View File

@ -1,101 +0,0 @@
import re
from typing import Any, Literal, Union
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.moderations import Moderations
from openai.types import ModerationCreateResponse
from openai.types.moderation import Categories, CategoryScores, Moderation
from core.model_runtime.errors.invoke import InvokeAuthorizationError
class MockModerationClass:
    """Mock replacement for the OpenAI moderations endpoint used in tests.

    A text is flagged iff it contains the substring "kill"; category scores
    are uniformly 1.0 for flagged texts and 0.0 otherwise.
    """

    # Category names expected by the OpenAI moderation response schema.
    _CATEGORY_NAMES = (
        "harassment",
        "harassment/threatening",
        "hate",
        "hate/threatening",
        "self-harm",
        "self-harm/instructions",
        "self-harm/intent",
        "sexual",
        "sexual/minors",
        "violence",
        "violence/graphic",
    )

    @staticmethod
    def _build_moderation(flagged: bool) -> Moderation:
        """Build one Moderation entry.

        Every category flag is False (as in the original mock, even when
        flagged); scores are all 1.0 when *flagged* and all 0.0 otherwise.
        """
        categories = {name: False for name in MockModerationClass._CATEGORY_NAMES}
        score = 1.0 if flagged else 0.0
        category_scores = {name: score for name in MockModerationClass._CATEGORY_NAMES}
        return Moderation(
            flagged=flagged,
            categories=Categories(**categories),
            category_scores=CategoryScores(**category_scores),
        )

    def moderation_create(
        self: Moderations,
        *,
        input: Union[str, list[str]],
        model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
        **kwargs: Any,
    ) -> ModerationCreateResponse:
        """Mocked ``Moderations.create``; patched onto the OpenAI client.

        :param input: a single text or a list of texts to moderate
        :param model: moderation model name, echoed back in the response
        :raises InvokeAuthorizationError: when the client base URL is not a
            valid http(s) URL or the API key is shorter than 18 characters
        :return: one moderation result per input text
        """
        if isinstance(input, str):
            input = [input]

        # Still validate client configuration so credential-failure tests work.
        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
            raise InvokeAuthorizationError("Invalid base url")
        if len(self._client.api_key) < 18:
            raise InvokeAuthorizationError("Invalid API key")

        # BUG FIX: `result = []` was previously re-initialized inside this
        # loop, so only the LAST text's moderation survived for multi-text
        # input. Accumulate across the whole loop instead.
        result = []
        for text in input:
            result.append(MockModerationClass._build_moderation("kill" in text))

        return ModerationCreateResponse(id="shiroii kuloko", model=model, results=result)

View File

@ -1,44 +0,0 @@
import os
import pytest
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
@pytest.mark.parametrize("setup_openai_mock", [["moderation"]], indirect=True)
def test_validate_credentials(setup_openai_mock):
    """Credential validation rejects a bad key and accepts the real one."""
    moderation_model = OpenAIModerationModel()

    with pytest.raises(CredentialsValidateFailedError):
        moderation_model.validate_credentials(
            model="text-moderation-stable", credentials={"openai_api_key": "invalid_key"}
        )

    moderation_model.validate_credentials(
        model="text-moderation-stable", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}
    )
@pytest.mark.parametrize("setup_openai_mock", [["moderation"]], indirect=True)
def test_invoke_model(setup_openai_mock):
    """Benign text is not flagged by moderation; violent text is."""
    moderation_model = OpenAIModerationModel()
    credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}

    # (input text, expected flagged verdict)
    for text, expected in (("hello", False), ("i will kill you", True)):
        flagged = moderation_model.invoke(
            model="text-moderation-stable",
            credentials=credentials,
            text=text,
            user="abc-123",
        )
        assert isinstance(flagged, bool)
        assert flagged is expected

View File

@ -1,57 +0,0 @@
import logging
import os
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity
from core.model_runtime.model_providers.model_provider_factory import ModelProviderExtension, ModelProviderFactory
logger = logging.getLogger(__name__)


def test_get_providers():
    """The factory lists at least one ProviderEntity."""
    factory = ModelProviderFactory("test_tenant")
    all_providers = factory.get_providers()

    for entity in all_providers:
        logger.debug(entity)

    assert len(all_providers) >= 1
    assert isinstance(all_providers[0], ProviderEntity)
def test_get_models():
    """get_models filters by model type and by provider name."""
    factory = ModelProviderFactory("test_tenant")

    # Filter by model type: every returned model must be an LLM.
    providers = factory.get_models(
        model_type=ModelType.LLM,
        provider_configs=[
            ProviderConfig(provider="openai", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")})
        ],
    )
    logger.debug(providers)
    assert len(providers) >= 1
    assert isinstance(providers[0], SimpleProviderEntity)
    for entity in providers:
        for provider_model in entity.models:
            assert provider_model.model_type == ModelType.LLM

    # Filter by provider name: exactly the openai provider comes back.
    providers = factory.get_models(
        provider="openai",
        provider_configs=[
            ProviderConfig(provider="openai", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")})
        ],
    )
    assert len(providers) == 1
    assert isinstance(providers[0], SimpleProviderEntity)
    assert providers[0].provider == "openai"
def test_provider_credentials_validate():
    """Validating real OpenAI provider credentials does not raise."""
    ModelProviderFactory("test_tenant").provider_credentials_validate(
        provider="openai",
        credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")},
    )

View File

@ -1,11 +0,0 @@
import os
import tiktoken
from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer
def test_tiktoken():
    """Smoke-test GPT2 tokenization with a local tiktoken cache directory."""
    cache_dir = "/tmp/.tiktoken_cache"
    os.environ["TIKTOKEN_CACHE_DIR"] = cache_dir
    sample = "Hello, world!"
    GPT2Tokenizer.get_num_tokens(sample)
    # The encoding registry must be populated after tokenization.
    assert tiktoken.registry.ENCODING_CONSTRUCTORS is not None

View File

@ -1,25 +0,0 @@
import os
from unittest.mock import Mock, patch
import pytest
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.voyage.voyage import VoyageProvider
def test_validate_provider_credentials():
    """Bad keys are rejected; with a mocked API response, validation passes."""
    provider = VoyageProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(credentials={"api_key": "hahahaha"})

    with patch("requests.post") as mocked_post:
        fake_response = Mock()
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "object": "list",
            "data": [{"object": "embedding", "embedding": [0.23333] * 1024, "index": 0}],
            "model": "voyage-3",
            "usage": {"total_tokens": 1},
        }
        mocked_post.return_value = fake_response
        provider.validate_provider_credentials(credentials={"api_key": os.environ.get("VOYAGE_API_KEY")})

View File

@ -1,92 +0,0 @@
import os
from unittest.mock import Mock, patch
import pytest
from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.voyage.rerank.rerank import VoyageRerankModel
def test_validate_credentials():
    """Invalid key raises; a mocked rerank response validates successfully."""
    rerank_model = VoyageRerankModel()

    with pytest.raises(CredentialsValidateFailedError):
        rerank_model.validate_credentials(
            model="rerank-lite-1",
            credentials={"api_key": "invalid_key"},
        )

    with patch("httpx.post") as mocked_post:
        fake_response = Mock()
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "object": "list",
            "data": [
                {
                    "relevance_score": 0.546875,
                    "index": 0,
                    "document": (
                        "Carson City is the capital city of the American state of Nevada. At the 2010 United "
                        "States Census, Carson City had a population of 55,274."
                    ),
                },
                {
                    "relevance_score": 0.4765625,
                    "index": 1,
                    "document": (
                        "The Commonwealth of the Northern Mariana Islands is a group of islands in the "
                        "Pacific Ocean that are a political division controlled by the United States. Its "
                        "capital is Saipan."
                    ),
                },
            ],
            "model": "rerank-lite-1",
            "usage": {"total_tokens": 96},
        }
        mocked_post.return_value = fake_response
        rerank_model.validate_credentials(
            model="rerank-lite-1",
            credentials={"api_key": os.environ.get("VOYAGE_API_KEY")},
        )
def test_invoke_model():
    """A mocked rerank call keeps only documents above the score threshold."""
    rerank_model = VoyageRerankModel()

    with patch("httpx.post") as mocked_post:
        fake_response = Mock()
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "object": "list",
            "data": [
                {
                    "relevance_score": 0.84375,
                    "index": 0,
                    "document": "Kasumi is a girl name of Japanese origin meaning mist.",
                },
                {
                    "relevance_score": 0.4765625,
                    "index": 1,
                    "document": (
                        "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she "
                        "leads a team named PopiParty."
                    ),
                },
            ],
            "model": "rerank-lite-1",
            "usage": {"total_tokens": 59},
        }
        mocked_post.return_value = fake_response

        outcome = rerank_model.invoke(
            model="rerank-lite-1",
            credentials={
                "api_key": os.environ.get("VOYAGE_API_KEY"),
            },
            query="Who is Kasumi?",
            docs=[
                "Kasumi is a girl name of Japanese origin meaning mist.",
                "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a team named "
                "PopiParty.",
            ],
            score_threshold=0.5,
        )

    # Only the first document (score 0.84375) clears the 0.5 threshold.
    assert isinstance(outcome, RerankResult)
    assert len(outcome.docs) == 1
    assert outcome.docs[0].index == 0
    assert outcome.docs[0].score >= 0.5

View File

@ -1,70 +0,0 @@
import os
from unittest.mock import Mock, patch
import pytest
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.voyage.text_embedding.text_embedding import VoyageTextEmbeddingModel
def test_validate_credentials():
    """Invalid key raises; a mocked embedding response validates successfully."""
    embedding_model = VoyageTextEmbeddingModel()

    with pytest.raises(CredentialsValidateFailedError):
        embedding_model.validate_credentials(model="voyage-3", credentials={"api_key": "invalid_key"})

    with patch("requests.post") as mocked_post:
        fake_response = Mock()
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "object": "list",
            "data": [{"object": "embedding", "embedding": [0.23333] * 1024, "index": 0}],
            "model": "voyage-3",
            "usage": {"total_tokens": 1},
        }
        mocked_post.return_value = fake_response
        embedding_model.validate_credentials(model="voyage-3", credentials={"api_key": os.environ.get("VOYAGE_API_KEY")})
def test_invoke_model():
    """A mocked embedding call returns one embedding per input text."""
    embedding_model = VoyageTextEmbeddingModel()

    with patch("requests.post") as mocked_post:
        fake_response = Mock()
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "object": "list",
            "data": [
                {"object": "embedding", "embedding": [0.23333] * 1024, "index": 0},
                {"object": "embedding", "embedding": [0.23333] * 1024, "index": 1},
            ],
            "model": "voyage-3",
            "usage": {"total_tokens": 2},
        }
        mocked_post.return_value = fake_response

        outcome = embedding_model.invoke(
            model="voyage-3",
            credentials={
                "api_key": os.environ.get("VOYAGE_API_KEY"),
            },
            texts=["hello", "world"],
            user="abc-123",
        )

    assert isinstance(outcome, TextEmbeddingResult)
    assert len(outcome.embeddings) == 2
    assert outcome.usage.total_tokens == 2
def test_get_num_tokens():
    """Counting tokens for one short text yields exactly one token."""
    embedding_model = VoyageTextEmbeddingModel()
    token_count = embedding_model.get_num_tokens(
        model="voyage-3",
        credentials={
            "api_key": os.environ.get("VOYAGE_API_KEY"),
        },
        texts=["ping"],
    )
    assert token_count == 1