Merge remote-tracking branch 'origin/main' into feat/trigger

lyzno1
2025-10-17 19:21:15 +08:00
56 changed files with 673 additions and 164 deletions

View File

@@ -34,12 +34,17 @@ def test_workflow_tool_should_raise_tool_invoke_error_when_result_has_error_fiel
    monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None)
    monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None)

+    # Mock user resolution to avoid database access
+    from unittest.mock import Mock
+    mock_user = Mock()
+    monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user)
    # replace `WorkflowAppGenerator.generate`'s return value.
    monkeypatch.setattr(
        "core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate",
        lambda *args, **kwargs: {"data": {"error": "oops"}},
    )
-    monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None)

    with pytest.raises(ToolInvokeError) as exc_info:
        # WorkflowTool always returns a generator, so we need to iterate to
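The hunk above is a compact example of isolating a tool from its infrastructure: patch every collaborator that would touch the database, force the generated result, then iterate the tool's generator inside pytest.raises. A minimal self-contained sketch of the same pattern; the Tool class and its methods here are stand-ins, not the real WorkflowTool API:

from unittest.mock import Mock

import pytest


class ToolInvokeError(Exception):
    pass


class Tool:
    def _resolve_user(self):
        raise RuntimeError("would hit the database")  # patched out in the test

    def invoke(self):
        self._resolve_user()
        result = {"data": {"error": "oops"}}  # stands in for the patched generate()
        if result["data"].get("error"):
            raise ToolInvokeError(result["data"]["error"])
        yield result


def test_invoke_raises_when_result_has_error_field(monkeypatch):
    tool = Tool()
    # Patch user resolution on the instance so no real lookup happens.
    monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: Mock())
    with pytest.raises(ToolInvokeError):
        list(tool.invoke())  # invoke() is a generator; iterating forces execution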

View File

@@ -7,14 +7,11 @@ This test suite validates the behavior of a workflow that:
3. Handles multiple answer nodes with different outputs
"""

import pytest

from core.workflow.graph_events import (
    GraphRunStartedEvent,
    GraphRunSucceededEvent,
    NodeRunStartedEvent,
    NodeRunStreamChunkEvent,
    NodeRunSucceededEvent,
)

from .test_mock_config import MockConfigBuilder
@@ -29,7 +26,6 @@ class TestComplexBranchWorkflow:
        self.runner = TableTestRunner()
        self.fixture_path = "test_complex_branch"

-    @pytest.mark.skip(reason="output in this workflow can be random")
    def test_hello_branch_with_llm(self):
        """
        Test when query contains 'hello' - should trigger true branch.
@@ -41,42 +37,17 @@ class TestComplexBranchWorkflow:
                fixture_path=self.fixture_path,
                query="hello world",
                expected_outputs={
-                    "answer": f"{mock_text_1}contains 'hello'",
+                    "answer": f"contains 'hello'{mock_text_1}",
                },
                description="Basic hello case with parallel LLM execution",
                use_auto_mock=True,
                mock_config=(MockConfigBuilder().with_node_output("1755502777322", {"text": mock_text_1}).build()),
-                expected_event_sequence=[
-                    GraphRunStartedEvent,
-                    # Start
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # If/Else (no streaming)
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # LLM (with streaming)
-                    NodeRunStartedEvent,
-                ]
-                # LLM
-                + [NodeRunStreamChunkEvent] * (mock_text_1.count(" ") + 2)
-                + [
-                    # Answer's text
-                    NodeRunStreamChunkEvent,
-                    NodeRunSucceededEvent,
-                    # Answer
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    # Answer 2
-                    NodeRunStartedEvent,
-                    NodeRunSucceededEvent,
-                    GraphRunSucceededEvent,
-                ],
            ),
            WorkflowTestCase(
                fixture_path=self.fixture_path,
                query="say hello to everyone",
                expected_outputs={
-                    "answer": "Mocked response for greetingcontains 'hello'",
+                    "answer": "contains 'hello'Mocked response for greeting",
                },
                description="Hello in middle of sentence",
                use_auto_mock=True,
@@ -93,6 +64,35 @@ class TestComplexBranchWorkflow:
        for result in suite_result.results:
            assert result.success, f"Test '{result.test_case.description}' failed: {result.error}"
            assert result.actual_outputs
+            assert any(isinstance(event, GraphRunStartedEvent) for event in result.events)
+            assert any(isinstance(event, GraphRunSucceededEvent) for event in result.events)
+            start_index = next(
+                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunStartedEvent)
+            )
+            success_index = max(
+                idx for idx, event in enumerate(result.events) if isinstance(event, GraphRunSucceededEvent)
+            )
+            assert start_index < success_index
+            started_node_ids = {event.node_id for event in result.events if isinstance(event, NodeRunStartedEvent)}
+            assert {"1755502773326", "1755502777322"}.issubset(started_node_ids), (
+                f"Branch or LLM nodes missing in events: {started_node_ids}"
+            )
+            assert any(isinstance(event, NodeRunStreamChunkEvent) for event in result.events), (
+                "Expected streaming chunks from LLM execution"
+            )
+            llm_start_index = next(
+                idx
+                for idx, event in enumerate(result.events)
+                if isinstance(event, NodeRunStartedEvent) and event.node_id == "1755502777322"
+            )
+            assert any(
+                idx > llm_start_index and isinstance(event, NodeRunStreamChunkEvent)
+                for idx, event in enumerate(result.events)
+            ), "Streaming chunks should follow LLM node start"

    def test_non_hello_branch_with_llm(self):
        """

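The rewritten assertions trade the brittle, exact event sequence (deleted above) for order checks: locate key events by index, then assert their relative positions. The idea in isolation, with simplified stand-ins for the core.workflow.graph_events types:

class GraphRunStartedEvent: ...
class GraphRunSucceededEvent: ...
class NodeRunStreamChunkEvent: ...


class NodeRunStartedEvent:
    def __init__(self, node_id: str) -> None:
        self.node_id = node_id


events = [
    GraphRunStartedEvent(),
    NodeRunStartedEvent("start"),
    NodeRunStartedEvent("llm"),
    NodeRunStreamChunkEvent(),
    GraphRunSucceededEvent(),
]

# Locate key events by position instead of pinning the whole sequence.
start_index = next(i for i, e in enumerate(events) if isinstance(e, GraphRunStartedEvent))
success_index = max(i for i, e in enumerate(events) if isinstance(e, GraphRunSucceededEvent))
assert start_index < success_index

llm_start = next(
    i for i, e in enumerate(events) if isinstance(e, NodeRunStartedEvent) and e.node_id == "llm"
)
# Streaming chunks must appear only after the LLM node has started.
assert any(i > llm_start and isinstance(e, NodeRunStreamChunkEvent) for i, e in enumerate(events))

This style stays green when parallel scheduling reorders events, which is presumably why the skip marker for the random-output workflow could be dropped.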
View File

@@ -95,10 +95,10 @@ def _make_succeeded_event() -> NodeRunSucceededEvent:
    )


-def test_dispatcher_checks_commands_after_node_completion() -> None:
-    """Dispatcher should only check commands after node completion events."""
+def test_dispatcher_checks_commands_during_idle_and_on_completion() -> None:
+    """Dispatcher polls commands when idle and re-checks after completion events."""
    started_checks = _run_dispatcher_for_event(_make_started_event())
    succeeded_checks = _run_dispatcher_for_event(_make_succeeded_event())

-    assert started_checks == 0
-    assert succeeded_checks == 1
+    assert started_checks == 1
+    assert succeeded_checks == 2
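The new counts encode the renamed behavior: one command check while idle for a started event, and an idle check plus a post-completion re-check for a succeeded event. A toy dispatcher that reproduces exactly those counts; the queue layout and the harness are assumptions, not the real engine API:

from queue import Empty, Queue


class NodeStarted:
    pass


class NodeSucceeded:
    pass


class Dispatcher:
    def __init__(self, events: Queue, commands: Queue) -> None:
        self._events = events
        self._commands = commands
        self.command_checks = 0

    def _check_commands(self) -> None:
        self.command_checks += 1
        try:
            self._commands.get_nowait()
        except Empty:
            pass

    def drain(self) -> None:
        while True:
            try:
                event = self._events.get_nowait()
            except Empty:
                self._check_commands()  # idle: poll for external commands
                break
            if isinstance(event, NodeSucceeded):
                self._check_commands()  # completion: re-check immediately


def run_dispatcher_for_event(event) -> int:
    events: Queue = Queue()
    events.put(event)
    dispatcher = Dispatcher(events, Queue())
    dispatcher.drain()
    return dispatcher.command_checks


assert run_dispatcher_for_event(NodeStarted()) == 1
assert run_dispatcher_for_event(NodeSucceeded()) == 2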

View File

@@ -21,7 +21,6 @@ from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom,
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
from core.workflow.entities import GraphInitParams, GraphRuntimeState, VariablePool
-from core.workflow.graph import Graph
from core.workflow.nodes.llm import llm_utils
from core.workflow.nodes.llm.entities import (
    ContextConfig,
@@ -83,14 +82,6 @@ def graph_init_params() -> GraphInitParams:
    )


-@pytest.fixture
-def graph() -> Graph:
-    # TODO: This fixture uses old Graph constructor parameters that are incompatible
-    # with the new queue-based engine. Need to rewrite for new engine architecture.
-    pytest.skip("Graph fixture incompatible with new queue-based engine - needs rewrite for ResponseStreamCoordinator")
-    return Graph()


@pytest.fixture
def graph_runtime_state() -> GraphRuntimeState:
    variable_pool = VariablePool(
@@ -105,7 +96,7 @@ def graph_runtime_state() -> GraphRuntimeState:
@pytest.fixture
def llm_node(
-    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph: Graph, graph_runtime_state: GraphRuntimeState
+    llm_node_data: LLMNodeData, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState
) -> LLMNode:
    mock_file_saver = mock.MagicMock(spec=LLMFileSaver)
    node_config = {
@@ -493,9 +484,7 @@ def test_handle_list_messages_basic(llm_node):
@pytest.fixture
-def llm_node_for_multimodal(
-    llm_node_data, graph_init_params, graph, graph_runtime_state
-) -> tuple[LLMNode, LLMFileSaver]:
+def llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_state) -> tuple[LLMNode, LLMFileSaver]:
    mock_file_saver: LLMFileSaver = mock.MagicMock(spec=LLMFileSaver)
    node_config = {
        "id": "1",
@@ -655,7 +644,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown:
        gen = llm_node._save_multimodal_output_and_convert_result_to_markdown(
            contents=frozenset(["hello world"]), file_saver=mock_file_saver, file_outputs=[]
        )
-        assert list(gen) == ["frozenset({'hello world'})"]
+        assert list(gen) == ["hello world"]
        mock_file_saver.save_binary_string.assert_not_called()
        mock_file_saver.save_remote_url.assert_not_called()
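Both fixture signatures above simply drop the now-deleted graph fixture; the mock.MagicMock(spec=LLMFileSaver) wiring stays. A condensed sketch of that fixture chain and the pass-through assertion, with an invented LLMNode body for illustration:

from unittest import mock

import pytest


class LLMFileSaver:
    def save_binary_string(self, data: bytes) -> None: ...
    def save_remote_url(self, url: str) -> None: ...


class LLMNode:
    def __init__(self, file_saver: LLMFileSaver) -> None:
        self._file_saver = file_saver

    def to_markdown(self, contents):
        # Plain strings pass through untouched; nothing is saved.
        for item in contents:
            yield item


@pytest.fixture
def mock_file_saver():
    # spec= restricts the mock to the real interface, so a typo'd
    # method name fails instead of silently recording a call.
    return mock.MagicMock(spec=LLMFileSaver)


@pytest.fixture
def llm_node(mock_file_saver):
    # Fixtures compose by naming other fixtures as parameters.
    return LLMNode(file_saver=mock_file_saver)


def test_plain_text_is_passed_through(llm_node, mock_file_saver):
    assert list(llm_node.to_markdown(["hello world"])) == ["hello world"]
    mock_file_saver.save_binary_string.assert_not_called()
    mock_file_saver.save_remote_url.assert_not_called()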

View File

@@ -181,14 +181,11 @@ class TestAuthIntegration:
    )
    def test_all_providers_factory_creation(self, provider, credentials):
        """Test factory creation for all supported providers"""
-        try:
-            auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
-            assert auth_class is not None
+        auth_class = ApiKeyAuthFactory.get_apikey_auth_factory(provider)
+        assert auth_class is not None
-            factory = ApiKeyAuthFactory(provider, credentials)
-            assert factory.auth is not None
-        except ImportError:
-            pytest.skip(f"Provider {provider} not implemented yet")
+        factory = ApiKeyAuthFactory(provider, credentials)
+        assert factory.auth is not None

    def _create_success_response(self, status_code=200):
        """Create successful HTTP response mock"""

View File

@@ -41,7 +41,10 @@ class TestMetadataBugCompleteValidation:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

-        with patch("services.metadata_service.current_user", mock_user):
+        with patch(
+            "services.metadata_service.current_account_with_tenant",
+            return_value=(mock_user, mock_user.current_tenant_id),
+        ):
            # Should crash with TypeError
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)
@@ -51,7 +54,10 @@ class TestMetadataBugCompleteValidation:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

-        with patch("services.metadata_service.current_user", mock_user):
+        with patch(
+            "services.metadata_service.current_account_with_tenant",
+            return_value=(mock_user, mock_user.current_tenant_id),
+        ):
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.update_metadata_name("dataset-123", "metadata-456", None)
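The same substitution repeats throughout these tests: the patched target changes from a current_user proxy to current_account_with_tenant, which returns a (user, tenant_id) tuple. A self-contained sketch of the pattern; the service body is hypothetical and only the patching shape mirrors the diff:

from unittest.mock import Mock, patch

import pytest


def current_account_with_tenant():
    raise RuntimeError("needs a request context")  # patched out in the test


class MetadataService:
    @staticmethod
    def create_metadata(dataset_id, args):
        user, tenant_id = current_account_with_tenant()
        if len(args.name) > 255:  # len(None) is the crash these tests pin down
            raise ValueError("name too long")
        return {"dataset": dataset_id, "tenant": tenant_id, "name": args.name}


def test_create_metadata_crashes_on_none_name():
    mock_user = Mock()
    mock_user.current_tenant_id = "tenant-123"
    args = Mock()
    args.name = None
    # Patch the accessor with the (user, tenant_id) tuple the new API returns.
    with patch(
        f"{__name__}.current_account_with_tenant",
        return_value=(mock_user, mock_user.current_tenant_id),
    ):
        with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
            MetadataService.create_metadata("dataset-123", args)

Note that patch targets the module where the name is looked up (services.metadata_service in the real tests, this test module in the sketch), not the module where it is defined.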

View File

@@ -29,7 +29,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

-        with patch("services.metadata_service.current_user", mock_user):
+        with patch(
+            "services.metadata_service.current_account_with_tenant",
+            return_value=(mock_user, mock_user.current_tenant_id),
+        ):
            # This should crash with TypeError when calling len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)
@@ -40,7 +43,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

-        with patch("services.metadata_service.current_user", mock_user):
+        with patch(
+            "services.metadata_service.current_account_with_tenant",
+            return_value=(mock_user, mock_user.current_tenant_id),
+        ):
            # This should crash with TypeError when calling len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.update_metadata_name("dataset-123", "metadata-456", None)
@@ -88,7 +94,10 @@ class TestMetadataNullableBug:
        mock_user.current_tenant_id = "tenant-123"
        mock_user.id = "user-456"

-        with patch("services.metadata_service.current_user", mock_user):
+        with patch(
+            "services.metadata_service.current_account_with_tenant",
+            return_value=(mock_user, mock_user.current_tenant_id),
+        ):
            # Step 4: Service layer crashes on len(None)
            with pytest.raises(TypeError, match="object of type 'NoneType' has no len"):
                MetadataService.create_metadata("dataset-123", mock_metadata_args)