mirror of
https://github.com/langgenius/dify.git
synced 2026-05-04 01:18:05 +08:00
Merge main into fix/chore-fix
This commit is contained in:
@ -0,0 +1,59 @@
|
||||
import os
|
||||
from collections.abc import Callable
|
||||
from typing import Any, Literal, Union
|
||||
|
||||
import pytest
|
||||
|
||||
# import monkeypatch
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
from nomic import embed
|
||||
|
||||
|
||||
def create_embedding(texts: list[str], model: str, **kwargs: Any) -> dict:
    """Build a fake nomic ``embed.text`` response for the given inputs.

    Returns one 768-dimensional constant vector per input text, plus a
    usage section that simply counts the inputs as tokens.

    :param texts: texts to "embed"
    :param model: model name, echoed back in the response
    :return: dict shaped like a real nomic embedding response
    """
    dummy_component = 0.123456
    vector = [dummy_component] * 768

    return {
        "embeddings": [list(vector) for _ in texts],
        "usage": {"prompt_tokens": len(texts), "total_tokens": len(texts)},
        "model": model,
        "inference_mode": "remote",
    }
|
||||
|
||||
|
||||
def mock_nomic(
    monkeypatch: MonkeyPatch,
    methods: list[Literal["text_embedding"]],
) -> Callable[[], None]:
    """Patch the nomic SDK so tests run without network access.

    :param monkeypatch: pytest monkeypatch fixture
    :param methods: which nomic methods to stub out
    :return: function that reverts all applied patches
    """
    if "text_embedding" in methods:
        monkeypatch.setattr(embed, "text", create_embedding)

    def unpatch() -> None:
        monkeypatch.undo()

    return unpatch
|
||||
|
||||
|
||||
# Use mocked nomic responses unless MOCK_SWITCH is explicitly set to "true".
MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"
|
||||
|
||||
|
||||
@pytest.fixture
def setup_nomic_mock(request, monkeypatch):
    """Fixture that mocks nomic embeddings while MOCK_SWITCH is enabled.

    Parametrize indirectly with the list of method names to stub.
    """
    methods = getattr(request, "param", [])
    unpatch = mock_nomic(monkeypatch, methods=methods) if MOCK else None

    yield

    if unpatch is not None:
        unpatch()
|
||||
@ -9,7 +9,6 @@ from requests.exceptions import ConnectionError
|
||||
from requests.sessions import Session
|
||||
from xinference_client.client.restful.restful_client import (
|
||||
Client,
|
||||
RESTfulChatglmCppChatModelHandle,
|
||||
RESTfulChatModelHandle,
|
||||
RESTfulEmbeddingModelHandle,
|
||||
RESTfulGenerateModelHandle,
|
||||
@ -19,9 +18,7 @@ from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage
|
||||
|
||||
|
||||
class MockXinferenceClass:
|
||||
def get_chat_model(
|
||||
self: Client, model_uid: str
|
||||
) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
|
||||
def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
|
||||
if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url):
|
||||
raise RuntimeError("404 Not Found")
|
||||
|
||||
|
||||
186
api/tests/integration_tests/model_runtime/fireworks/test_llm.py
Normal file
186
api/tests/integration_tests/model_runtime/fireworks/test_llm.py
Normal file
@ -0,0 +1,186 @@
|
||||
import os
|
||||
from collections.abc import Generator
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
|
||||
from core.model_runtime.entities.message_entities import (
|
||||
AssistantPromptMessage,
|
||||
PromptMessageTool,
|
||||
SystemPromptMessage,
|
||||
UserPromptMessage,
|
||||
)
|
||||
from core.model_runtime.entities.model_entities import AIModelEntity
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.fireworks.llm.llm import FireworksLargeLanguageModel
|
||||
|
||||
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
|
||||
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||||
|
||||
|
||||
def test_predefined_models():
    """Fireworks must expose at least one predefined model schema."""
    schemas = FireworksLargeLanguageModel().predefined_models()

    assert len(schemas) >= 1
    assert isinstance(schemas[0], AIModelEntity)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_validate_credentials_for_chat_model(setup_openai_mock):
    """An invalid key must fail validation; a real key must pass."""
    llm = FireworksLargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        # model name to gpt-3.5-turbo because of mocking
        llm.validate_credentials(model="gpt-3.5-turbo", credentials={"fireworks_api_key": "invalid_key"})

    llm.validate_credentials(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
    )
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_invoke_chat_model(setup_openai_mock):
    """A blocking invoke must yield a non-empty LLMResult."""
    llm = FireworksLargeLanguageModel()

    response = llm.invoke(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(content="You are a helpful AI assistant."),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={
            "temperature": 0.0,
            "top_p": 1.0,
            "presence_penalty": 0.0,
            "frequency_penalty": 0.0,
            "max_tokens": 10,
        },
        stop=["How"],
        stream=False,
        user="foo",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_invoke_chat_model_with_tools(setup_openai_mock):
    """Invoking with tool definitions must produce at least one tool call."""
    llm = FireworksLargeLanguageModel()

    # Tool schemas built up-front; only get_weather is relevant to the prompt.
    weather_tool = PromptMessageTool(
        name="get_weather",
        description="Determine weather in my location",
        parameters={
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                "unit": {"type": "string", "enum": ["c", "f"]},
            },
            "required": ["location"],
        },
    )
    stock_tool = PromptMessageTool(
        name="get_stock_price",
        description="Get the current stock price",
        parameters={
            "type": "object",
            "properties": {"symbol": {"type": "string", "description": "The stock symbol"}},
            "required": ["symbol"],
        },
    )

    response = llm.invoke(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(content="You are a helpful AI assistant."),
            UserPromptMessage(content="what's the weather today in London?"),
        ],
        model_parameters={"temperature": 0.0, "max_tokens": 100},
        tools=[weather_tool, stock_tool],
        stream=False,
        user="foo",
    )

    assert isinstance(response, LLMResult)
    assert isinstance(response.message, AssistantPromptMessage)
    assert len(response.message.tool_calls) > 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_invoke_stream_chat_model(setup_openai_mock):
    """Streaming invoke must yield well-formed chunks with final usage."""
    llm = FireworksLargeLanguageModel()

    response = llm.invoke(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(content="You are a helpful AI assistant."),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={"temperature": 0.0, "max_tokens": 100},
        stream=True,
        user="foo",
    )

    assert isinstance(response, Generator)

    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        if chunk.delta.finish_reason is None:
            # Intermediate chunks must carry content.
            assert len(chunk.delta.message.content) > 0
        else:
            # The final chunk must report usage.
            assert chunk.delta.usage is not None
            assert chunk.delta.usage.completion_tokens > 0
|
||||
|
||||
|
||||
def test_get_num_tokens():
    """Token counting must match known counts, with and without tools."""
    llm = FireworksLargeLanguageModel()

    token_count = llm.get_num_tokens(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
        prompt_messages=[UserPromptMessage(content="Hello World!")],
    )

    assert token_count == 10

    token_count = llm.get_num_tokens(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(content="You are a helpful AI assistant."),
            UserPromptMessage(content="Hello World!"),
        ],
        tools=[
            PromptMessageTool(
                name="get_weather",
                description="Determine weather in my location",
                parameters={
                    "type": "object",
                    "properties": {
                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                        "unit": {"type": "string", "enum": ["c", "f"]},
                    },
                    "required": ["location"],
                },
            ),
        ],
    )

    assert token_count == 77
|
||||
@ -0,0 +1,17 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.fireworks.fireworks import FireworksProvider
|
||||
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
def test_validate_provider_credentials(setup_openai_mock):
    """Empty credentials must fail; a real API key must pass."""
    fireworks = FireworksProvider()

    with pytest.raises(CredentialsValidateFailedError):
        fireworks.validate_provider_credentials(credentials={})

    fireworks.validate_provider_credentials(credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")})
|
||||
@ -0,0 +1,28 @@
|
||||
import os
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.mixedbread.mixedbread import MixedBreadProvider
|
||||
|
||||
|
||||
def test_validate_provider_credentials():
    """Provider credential validation against a mocked embeddings endpoint."""
    provider = MixedBreadProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(credentials={"api_key": "hahahaha"})
    with patch("requests.post") as post_mock:
        fake_response = Mock()
        fake_response.json.return_value = {
            "usage": {"prompt_tokens": 3, "total_tokens": 3},
            "model": "mixedbread-ai/mxbai-embed-large-v1",
            "data": [{"embedding": [0.23333] * 1024, "index": 0, "object": "embedding"}],
            "object": "list",
            "normalized": "true",
            "encoding_format": "float",
            "dimensions": 1024,
        }
        fake_response.status_code = 200
        post_mock.return_value = fake_response
        provider.validate_provider_credentials(credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")})
|
||||
@ -0,0 +1,100 @@
|
||||
import os
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.rerank_entities import RerankResult
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.mixedbread.rerank.rerank import MixedBreadRerankModel
|
||||
|
||||
|
||||
def test_validate_credentials():
    """Rerank credential validation against a mocked httpx endpoint."""
    rerank_model = MixedBreadRerankModel()

    with pytest.raises(CredentialsValidateFailedError):
        rerank_model.validate_credentials(
            model="mxbai-rerank-large-v1",
            credentials={"api_key": "invalid_key"},
        )
    with patch("httpx.post") as post_mock:
        fake_response = Mock()
        fake_response.json.return_value = {
            "usage": {"prompt_tokens": 86, "total_tokens": 86},
            "model": "mixedbread-ai/mxbai-rerank-large-v1",
            "data": [
                {
                    "index": 0,
                    "score": 0.06762695,
                    "input": "Carson City is the capital city of the American state of Nevada. At the 2010 United "
                    "States Census, Carson City had a population of 55,274.",
                    "object": "text_document",
                },
                {
                    "index": 1,
                    "score": 0.057403564,
                    "input": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific "
                    "Ocean that are a political division controlled by the United States. Its capital is "
                    "Saipan.",
                    "object": "text_document",
                },
            ],
            "object": "list",
            "top_k": 2,
            "return_input": True,
        }
        fake_response.status_code = 200
        post_mock.return_value = fake_response
        rerank_model.validate_credentials(
            model="mxbai-rerank-large-v1",
            credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")},
        )
|
||||
|
||||
|
||||
def test_invoke_model():
    """Reranking must drop results scoring below the threshold."""
    rerank_model = MixedBreadRerankModel()
    with patch("httpx.post") as post_mock:
        fake_response = Mock()
        fake_response.json.return_value = {
            "usage": {"prompt_tokens": 56, "total_tokens": 56},
            "model": "mixedbread-ai/mxbai-rerank-large-v1",
            "data": [
                {
                    "index": 0,
                    "score": 0.6044922,
                    "input": "Kasumi is a girl name of Japanese origin meaning mist.",
                    "object": "text_document",
                },
                {
                    "index": 1,
                    "score": 0.0703125,
                    "input": "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a "
                    "team named PopiParty.",
                    "object": "text_document",
                },
            ],
            "object": "list",
            "top_k": 2,
            "return_input": "true",
        }
        fake_response.status_code = 200
        post_mock.return_value = fake_response
        result = rerank_model.invoke(
            model="mxbai-rerank-large-v1",
            credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")},
            query="Who is Kasumi?",
            docs=[
                "Kasumi is a girl name of Japanese origin meaning mist.",
                "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a team named "
                "PopiParty.",
            ],
            score_threshold=0.5,
        )

        # Only the first document clears the 0.5 threshold.
        assert isinstance(result, RerankResult)
        assert len(result.docs) == 1
        assert result.docs[0].index == 0
        assert result.docs[0].score >= 0.5
|
||||
@ -0,0 +1,78 @@
|
||||
import os
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.mixedbread.text_embedding.text_embedding import MixedBreadTextEmbeddingModel
|
||||
|
||||
|
||||
def test_validate_credentials():
    """Embedding credential validation against a mocked requests endpoint."""
    embedding_model = MixedBreadTextEmbeddingModel()

    with pytest.raises(CredentialsValidateFailedError):
        embedding_model.validate_credentials(model="mxbai-embed-large-v1", credentials={"api_key": "invalid_key"})
    with patch("requests.post") as post_mock:
        fake_response = Mock()
        fake_response.json.return_value = {
            "usage": {"prompt_tokens": 3, "total_tokens": 3},
            "model": "mixedbread-ai/mxbai-embed-large-v1",
            "data": [{"embedding": [0.23333] * 1024, "index": 0, "object": "embedding"}],
            "object": "list",
            "normalized": "true",
            "encoding_format": "float",
            "dimensions": 1024,
        }
        fake_response.status_code = 200
        post_mock.return_value = fake_response
        embedding_model.validate_credentials(
            model="mxbai-embed-large-v1", credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")}
        )
|
||||
|
||||
|
||||
def test_invoke_model():
    """Invoking the embedding model must return one vector per input text."""
    embedding_model = MixedBreadTextEmbeddingModel()

    with patch("requests.post") as post_mock:
        fake_response = Mock()
        fake_response.json.return_value = {
            "usage": {"prompt_tokens": 6, "total_tokens": 6},
            "model": "mixedbread-ai/mxbai-embed-large-v1",
            "data": [
                {"embedding": [0.23333] * 1024, "index": 0, "object": "embedding"},
                {"embedding": [0.23333] * 1024, "index": 1, "object": "embedding"},
            ],
            "object": "list",
            "normalized": "true",
            "encoding_format": "float",
            "dimensions": 1024,
        }
        fake_response.status_code = 200
        post_mock.return_value = fake_response
        result = embedding_model.invoke(
            model="mxbai-embed-large-v1",
            credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")},
            texts=["hello", "world"],
            user="abc-123",
        )

        assert isinstance(result, TextEmbeddingResult)
        assert len(result.embeddings) == 2
        assert result.usage.total_tokens == 6
|
||||
|
||||
|
||||
def test_get_num_tokens():
    """A single short word must count as one token."""
    embedding_model = MixedBreadTextEmbeddingModel()

    token_count = embedding_model.get_num_tokens(
        model="mxbai-embed-large-v1",
        credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")},
        texts=["ping"],
    )

    assert token_count == 1
|
||||
@ -0,0 +1,62 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel
|
||||
from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True)
def test_validate_credentials(setup_nomic_mock):
    """An invalid nomic key must fail validation; a real one must pass."""
    embedding_model = NomicTextEmbeddingModel()

    with pytest.raises(CredentialsValidateFailedError):
        embedding_model.validate_credentials(
            model="nomic-embed-text-v1.5",
            credentials={"nomic_api_key": "invalid_key"},
        )

    embedding_model.validate_credentials(
        model="nomic-embed-text-v1.5",
        credentials={"nomic_api_key": os.environ.get("NOMIC_API_KEY")},
    )
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True)
def test_invoke_model(setup_nomic_mock):
    """Invoking must return one embedding per text and count tokens per text."""
    embedding_model = NomicTextEmbeddingModel()

    result = embedding_model.invoke(
        model="nomic-embed-text-v1.5",
        credentials={"nomic_api_key": os.environ.get("NOMIC_API_KEY")},
        texts=["hello", "world"],
        user="foo",
    )

    assert isinstance(result, TextEmbeddingResult)
    assert result.model == "nomic-embed-text-v1.5"
    assert len(result.embeddings) == 2
    assert result.usage.total_tokens == 2
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True)
def test_get_num_tokens(setup_nomic_mock):
    """Two input texts must count as two tokens."""
    embedding_model = NomicTextEmbeddingModel()

    token_count = embedding_model.get_num_tokens(
        model="nomic-embed-text-v1.5",
        credentials={"nomic_api_key": os.environ.get("NOMIC_API_KEY")},
        texts=["hello", "world"],
    )

    assert token_count == 2
|
||||
@ -0,0 +1,22 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.nomic.nomic import NomicAtlasProvider
|
||||
from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel
|
||||
from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True)
def test_validate_provider_credentials(setup_nomic_mock):
    """Empty credentials must fail; a real API key must pass."""
    provider = NomicAtlasProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(credentials={})

    provider.validate_provider_credentials(
        credentials={"nomic_api_key": os.environ.get("NOMIC_API_KEY")},
    )
|
||||
@ -0,0 +1,91 @@
|
||||
from uuid import uuid4
|
||||
|
||||
from constants import UUID_NIL
|
||||
from core.prompt.utils.extract_thread_messages import extract_thread_messages
|
||||
|
||||
|
||||
class TestMessage:
    """Lightweight stand-in for a Message record.

    Exposes ``id`` and ``parent_message_id`` both as attributes and via
    dict-style ``msg["field"]`` access, which is all the extractor needs.
    """

    def __init__(self, id, parent_message_id):
        self.id, self.parent_message_id = id, parent_message_id

    def __getitem__(self, item):
        return getattr(self, item)
|
||||
|
||||
|
||||
def test_extract_thread_messages_single_message():
    """A lone root message is returned as-is."""
    only_message = TestMessage(str(uuid4()), UUID_NIL)
    extracted = extract_thread_messages([only_message])
    assert len(extracted) == 1
    assert extracted[0] == only_message
|
||||
|
||||
|
||||
def test_extract_thread_messages_linear_thread():
    """A straight parent chain is returned newest-first in full."""
    id1, id2, id3, id4, id5 = (str(uuid4()) for _ in range(5))
    thread = [
        TestMessage(id5, id4),
        TestMessage(id4, id3),
        TestMessage(id3, id2),
        TestMessage(id2, id1),
        TestMessage(id1, UUID_NIL),
    ]
    extracted = extract_thread_messages(thread)
    assert len(extracted) == 5
    assert [m["id"] for m in extracted] == [id5, id4, id3, id2, id1]
|
||||
|
||||
|
||||
def test_extract_thread_messages_branched_thread():
    """Only the branch reachable from the newest message is kept."""
    id1, id2, id3, id4 = (str(uuid4()) for _ in range(4))
    thread = [
        TestMessage(id4, id2),
        TestMessage(id3, id2),
        TestMessage(id2, id1),
        TestMessage(id1, UUID_NIL),
    ]
    extracted = extract_thread_messages(thread)
    assert len(extracted) == 3
    assert [m["id"] for m in extracted] == [id4, id2, id1]
|
||||
|
||||
|
||||
def test_extract_thread_messages_empty_list():
    """No messages in, no messages out."""
    extracted = extract_thread_messages([])
    assert len(extracted) == 0
|
||||
|
||||
|
||||
def test_extract_thread_messages_partially_loaded():
    """A chain whose root is not loaded still yields the visible part."""
    id0, id1, id2, id3 = (str(uuid4()) for _ in range(4))
    thread = [
        TestMessage(id3, id2),
        TestMessage(id2, id1),
        TestMessage(id1, id0),
    ]
    extracted = extract_thread_messages(thread)
    assert len(extracted) == 3
    assert [m["id"] for m in extracted] == [id3, id2, id1]
|
||||
|
||||
|
||||
def test_extract_thread_messages_legacy_messages():
    """Messages that all predate threading are returned unchanged."""
    id1, id2, id3 = (str(uuid4()) for _ in range(3))
    thread = [
        TestMessage(id3, UUID_NIL),
        TestMessage(id2, UUID_NIL),
        TestMessage(id1, UUID_NIL),
    ]
    extracted = extract_thread_messages(thread)
    assert len(extracted) == 3
    assert [m["id"] for m in extracted] == [id3, id2, id1]
|
||||
|
||||
|
||||
def test_extract_thread_messages_mixed_with_legacy_messages():
    """Extraction follows parent links and keeps legacy roots below them."""
    id1, id2, id3, id4, id5 = (str(uuid4()) for _ in range(5))
    thread = [
        TestMessage(id5, id4),
        TestMessage(id4, id2),
        TestMessage(id3, id2),
        TestMessage(id2, UUID_NIL),
        TestMessage(id1, UUID_NIL),
    ]
    extracted = extract_thread_messages(thread)
    assert len(extracted) == 4
    assert [m["id"] for m in extracted] == [id5, id4, id2, id1]
|
||||
Reference in New Issue
Block a user