feat: add vibe workflow (#30258)
Co-authored-by: yyh <yuanyouhuilyz@gmail.com>
@@ -0,0 +1,288 @@
"""
Unit tests for the Mermaid Generator.

Tests cover:
- Basic workflow rendering
- Reserved word handling ('end' → 'end_node')
- Question classifier multi-branch edges
- If-else branch labels
- Edge validation and skipping
- Tool node formatting
"""

from core.workflow.generator.utils.mermaid_generator import generate_mermaid
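The output shape these tests expect can be sketched as follows; the node and edge syntax is taken from the assertions below, while exact whitespace and line ordering are assumptions.

# Illustrative sketch only: shape inferred from the assertions in this file.
workflow = {
    "nodes": [
        {"id": "start", "type": "start", "title": "Start"},
        {"id": "llm", "type": "llm", "title": "Generate"},
        {"id": "end", "type": "end", "title": "End"},
    ],
    "edges": [
        {"source": "start", "target": "llm"},
        {"source": "llm", "target": "end"},
    ],
}
# generate_mermaid(workflow) is expected to contain lines such as:
#   flowchart TD
#   start["type=start|title=Start"]
#   llm["type=llm|title=Generate"]
#   end_node["type=end|title=End"]
#   start --> llm
#   llm --> end_node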
class TestBasicWorkflow:
    """Tests for basic workflow Mermaid generation."""

    def test_simple_start_end_workflow(self):
        """Test simple Start → End workflow."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "title": "Start"},
                {"id": "end", "type": "end", "title": "End"},
            ],
            "edges": [{"source": "start", "target": "end"}],
        }
        result = generate_mermaid(workflow_data)

        assert "flowchart TD" in result
        assert 'start["type=start|title=Start"]' in result
        assert 'end_node["type=end|title=End"]' in result
        assert "start --> end_node" in result

    def test_start_llm_end_workflow(self):
        """Test Start → LLM → End workflow."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "title": "Start"},
                {"id": "llm", "type": "llm", "title": "Generate"},
                {"id": "end", "type": "end", "title": "End"},
            ],
            "edges": [
                {"source": "start", "target": "llm"},
                {"source": "llm", "target": "end"},
            ],
        }
        result = generate_mermaid(workflow_data)

        assert 'llm["type=llm|title=Generate"]' in result
        assert "start --> llm" in result
        assert "llm --> end_node" in result

    def test_empty_workflow(self):
        """Test empty workflow returns minimal output."""
        workflow_data = {"nodes": [], "edges": []}
        result = generate_mermaid(workflow_data)

        assert result == "flowchart TD"

    def test_missing_keys_handled(self):
        """Test workflow with missing keys doesn't crash."""
        workflow_data = {}
        result = generate_mermaid(workflow_data)

        assert "flowchart TD" in result


class TestReservedWords:
    """Tests for reserved word handling in node IDs."""

    def test_end_node_id_is_replaced(self):
        """Test 'end' node ID is replaced with 'end_node'."""
        workflow_data = {
            "nodes": [{"id": "end", "type": "end", "title": "End"}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        # Should use end_node instead of end
        assert "end_node[" in result
        assert '"type=end|title=End"' in result

    def test_subgraph_node_id_is_replaced(self):
        """Test 'subgraph' node ID is replaced with 'subgraph_node'."""
        workflow_data = {
            "nodes": [{"id": "subgraph", "type": "code", "title": "Process"}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        assert "subgraph_node[" in result

    def test_edge_uses_safe_ids(self):
        """Test edges correctly reference safe IDs after replacement."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "title": "Start"},
                {"id": "end", "type": "end", "title": "End"},
            ],
            "edges": [{"source": "start", "target": "end"}],
        }
        result = generate_mermaid(workflow_data)

        # Edge should use end_node, not end
        assert "start --> end_node" in result
        assert "start --> end\n" not in result


class TestBranchEdges:
    """Tests for branching node edge labels."""

    def test_question_classifier_source_handles(self):
        """Test question-classifier edges with sourceHandle labels."""
        workflow_data = {
            "nodes": [
                {"id": "classifier", "type": "question-classifier", "title": "Classify"},
                {"id": "refund", "type": "llm", "title": "Handle Refund"},
                {"id": "inquiry", "type": "llm", "title": "Handle Inquiry"},
            ],
            "edges": [
                {"source": "classifier", "target": "refund", "sourceHandle": "refund"},
                {"source": "classifier", "target": "inquiry", "sourceHandle": "inquiry"},
            ],
        }
        result = generate_mermaid(workflow_data)

        assert "classifier -->|refund| refund" in result
        assert "classifier -->|inquiry| inquiry" in result

    def test_if_else_true_false_handles(self):
        """Test if-else edges with true/false labels."""
        workflow_data = {
            "nodes": [
                {"id": "ifelse", "type": "if-else", "title": "Check"},
                {"id": "yes_branch", "type": "llm", "title": "Yes"},
                {"id": "no_branch", "type": "llm", "title": "No"},
            ],
            "edges": [
                {"source": "ifelse", "target": "yes_branch", "sourceHandle": "true"},
                {"source": "ifelse", "target": "no_branch", "sourceHandle": "false"},
            ],
        }
        result = generate_mermaid(workflow_data)

        assert "ifelse -->|true| yes_branch" in result
        assert "ifelse -->|false| no_branch" in result

    def test_source_handle_source_is_ignored(self):
        """Test sourceHandle='source' doesn't add label."""
        workflow_data = {
            "nodes": [
                {"id": "llm1", "type": "llm", "title": "LLM 1"},
                {"id": "llm2", "type": "llm", "title": "LLM 2"},
            ],
            "edges": [{"source": "llm1", "target": "llm2", "sourceHandle": "source"}],
        }
        result = generate_mermaid(workflow_data)

        # Should be plain arrow without label
        assert "llm1 --> llm2" in result
        assert "llm1 -->|source|" not in result


class TestEdgeValidation:
    """Tests for edge validation and error handling."""

    def test_edge_with_missing_source_is_skipped(self):
        """Test edge with non-existent source node is skipped."""
        workflow_data = {
            "nodes": [{"id": "end", "type": "end", "title": "End"}],
            "edges": [{"source": "nonexistent", "target": "end"}],
        }
        result = generate_mermaid(workflow_data)

        # Should not contain the invalid edge
        assert "nonexistent" not in result
        assert "-->" not in result or "nonexistent" not in result

    def test_edge_with_missing_target_is_skipped(self):
        """Test edge with non-existent target node is skipped."""
        workflow_data = {
            "nodes": [{"id": "start", "type": "start", "title": "Start"}],
            "edges": [{"source": "start", "target": "nonexistent"}],
        }
        result = generate_mermaid(workflow_data)

        # Edge should be skipped
        assert "start --> nonexistent" not in result

    def test_edge_without_source_or_target_is_skipped(self):
        """Test edge missing source or target is skipped."""
        workflow_data = {
            "nodes": [{"id": "start", "type": "start", "title": "Start"}],
            "edges": [{"source": "start"}, {"target": "start"}, {}],
        }
        result = generate_mermaid(workflow_data)

        # No edges should be rendered
        assert result.count("-->") == 0


class TestToolNodes:
    """Tests for tool node formatting."""

    def test_tool_node_includes_tool_key(self):
        """Test tool node includes tool_key in label."""
        workflow_data = {
            "nodes": [
                {
                    "id": "search",
                    "type": "tool",
                    "title": "Search",
                    "config": {"tool_key": "google/search"},
                }
            ],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        assert 'search["type=tool|title=Search|tool=google/search"]' in result

    def test_tool_node_with_tool_name_fallback(self):
        """Test tool node uses tool_name as fallback."""
        workflow_data = {
            "nodes": [
                {
                    "id": "tool1",
                    "type": "tool",
                    "title": "My Tool",
                    "config": {"tool_name": "my_tool"},
                }
            ],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        assert "tool=my_tool" in result

    def test_tool_node_missing_tool_key_shows_unknown(self):
        """Test tool node without tool_key shows 'unknown'."""
        workflow_data = {
            "nodes": [{"id": "tool1", "type": "tool", "title": "Tool", "config": {}}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        assert "tool=unknown" in result


class TestNodeFormatting:
    """Tests for node label formatting."""

    def test_quotes_in_title_are_escaped(self):
        """Test double quotes in title are replaced with single quotes."""
        workflow_data = {
            "nodes": [{"id": "llm", "type": "llm", "title": 'Say "Hello"'}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        # Double quotes should be replaced
        assert "Say 'Hello'" in result
        assert 'Say "Hello"' not in result

    def test_node_without_id_is_skipped(self):
        """Test node without id is skipped."""
        workflow_data = {
            "nodes": [{"type": "llm", "title": "No ID"}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        # Should only have flowchart header
        lines = [line for line in result.split("\n") if line.strip()]
        assert len(lines) == 1

    def test_node_default_values(self):
        """Test node with missing type/title uses defaults."""
        workflow_data = {
            "nodes": [{"id": "node1"}],
            "edges": [],
        }
        result = generate_mermaid(workflow_data)

        assert "type=unknown" in result
        assert "title=Untitled" in result
api/tests/unit_tests/core/llm_generator/test_node_repair.py (new file, 81 lines)
@@ -0,0 +1,81 @@
from core.workflow.generator.utils.node_repair import NodeRepair


class TestNodeRepair:
    """Tests for NodeRepair utility."""

    def test_repair_if_else_valid_operators(self):
        """Test that valid operators remain unchanged."""
        nodes = [
            {
                "id": "node1",
                "type": "if-else",
                "config": {
                    "cases": [
                        {
                            "conditions": [
                                {"comparison_operator": "≥", "value": "1"},
                                {"comparison_operator": "=", "value": "2"},
                            ]
                        }
                    ]
                },
            }
        ]
        result = NodeRepair.repair(nodes)
        assert result.was_repaired is False
        assert result.nodes == nodes

    def test_repair_if_else_invalid_operators(self):
        """Test that invalid operators are normalized."""
        nodes = [
            {
                "id": "node1",
                "type": "if-else",
                "config": {
                    "cases": [
                        {
                            "conditions": [
                                {"comparison_operator": ">=", "value": "1"},
                                {"comparison_operator": "<=", "value": "2"},
                                {"comparison_operator": "!=", "value": "3"},
                                {"comparison_operator": "==", "value": "4"},
                            ]
                        }
                    ]
                },
            }
        ]
        result = NodeRepair.repair(nodes)
        assert result.was_repaired is True
        assert len(result.repairs_made) == 4

        conditions = result.nodes[0]["config"]["cases"][0]["conditions"]
        assert conditions[0]["comparison_operator"] == "≥"
        assert conditions[1]["comparison_operator"] == "≤"
        assert conditions[2]["comparison_operator"] == "≠"
        assert conditions[3]["comparison_operator"] == "="

    def test_repair_ignores_other_nodes(self):
        """Test that other node types are ignored."""
        nodes = [{"id": "node1", "type": "llm", "config": {"some_field": ">="}}]
        result = NodeRepair.repair(nodes)
        assert result.was_repaired is False
        assert result.nodes[0]["config"]["some_field"] == ">="

    def test_repair_handles_missing_config(self):
        """Test robustness against missing fields."""
        nodes = [
            {
                "id": "node1",
                "type": "if-else",
                # Missing config
            },
            {
                "id": "node2",
                "type": "if-else",
                "config": {},  # Missing cases
            },
        ]
        result = NodeRepair.repair(nodes)
        assert result.was_repaired is False
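The tests above pin down the repair result's fields (nodes, was_repaired, repairs_made) and the operator normalization. A rough sketch of how the repair step might be applied before validation, assuming only what those tests show:

# Illustrative sketch only, based on the behaviour exercised above.
nodes = [
    {
        "id": "check",
        "type": "if-else",
        "config": {"cases": [{"conditions": [{"comparison_operator": ">=", "value": "1"}]}]},
    }
]
result = NodeRepair.repair(nodes)
if result.was_repaired:
    # ">=" has been normalized to "≥"; repairs_made records one entry per change.
    nodes = result.nodes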
@@ -0,0 +1,99 @@
"""
Tests for node schemas validation.

Ensures that the node configuration stays in sync with registered node types.
"""

from core.workflow.generator.config.node_schemas import (
    get_builtin_node_schemas,
    validate_node_schemas,
)
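For context, these tests only require each schema entry to carry a description and, optionally, a parameters dict; the entry below is a hypothetical illustration, not the real definition in node_schemas.py.

# Hypothetical entry: only "description" is required, and "parameters" (if present) must be a dict.
example_schemas = {
    "llm": {
        "description": "Call a language model with a prompt template.",  # assumed wording
        "parameters": {"prompt_template": "list of role/text messages"},  # assumed keys
    },
}
# validate_node_schemas() reports node types that lack a schema; an empty result means in sync.
warnings = validate_node_schemas()
assert len(warnings) == 0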
class TestNodeSchemasValidation:
    """Tests for node schema validation utilities."""

    def test_validate_node_schemas_returns_no_warnings(self):
        """Ensure all registered node types have corresponding schemas."""
        warnings = validate_node_schemas()
        # If this test fails, it means a new node type was added but
        # no schema was defined for it in node_schemas.py
        assert len(warnings) == 0, (
            f"Missing schemas for node types: {warnings}. "
            "Please add schemas for these node types in node_schemas.py "
            "or add them to _INTERNAL_NODE_TYPES if they don't need schemas."
        )

    def test_builtin_node_schemas_not_empty(self):
        """Ensure BUILTIN_NODE_SCHEMAS contains expected node types."""
        # get_builtin_node_schemas() includes dynamic schemas
        all_schemas = get_builtin_node_schemas()
        assert len(all_schemas) > 0
        # Core node types should always be present
        expected_types = ["llm", "code", "http-request", "if-else"]
        for node_type in expected_types:
            assert node_type in all_schemas, f"Missing schema for core node type: {node_type}"

    def test_schema_structure(self):
        """Ensure each schema has required fields."""
        all_schemas = get_builtin_node_schemas()
        for node_type, schema in all_schemas.items():
            assert "description" in schema, f"Missing 'description' in schema for {node_type}"
            # 'parameters' is optional but if present should be a dict
            if "parameters" in schema:
                assert isinstance(schema["parameters"], dict), (
                    f"'parameters' in schema for {node_type} should be a dict"
                )


class TestNodeSchemasMerged:
    """Tests to verify the merged configuration works correctly."""

    def test_fallback_rules_available(self):
        """Ensure FALLBACK_RULES is available from node_schemas."""
        from core.workflow.generator.config.node_schemas import FALLBACK_RULES

        assert len(FALLBACK_RULES) > 0
        assert "http-request" in FALLBACK_RULES
        assert "code" in FALLBACK_RULES
        assert "llm" in FALLBACK_RULES

    def test_node_type_aliases_available(self):
        """Ensure NODE_TYPE_ALIASES is available from node_schemas."""
        from core.workflow.generator.config.node_schemas import NODE_TYPE_ALIASES

        assert len(NODE_TYPE_ALIASES) > 0
        assert NODE_TYPE_ALIASES.get("gpt") == "llm"
        assert NODE_TYPE_ALIASES.get("api") == "http-request"

    def test_field_name_corrections_available(self):
        """Ensure FIELD_NAME_CORRECTIONS is available from node_schemas."""
        from core.workflow.generator.config.node_schemas import (
            FIELD_NAME_CORRECTIONS,
            get_corrected_field_name,
        )

        assert len(FIELD_NAME_CORRECTIONS) > 0
        # Test the helper function
        assert get_corrected_field_name("http-request", "text") == "body"
        assert get_corrected_field_name("llm", "response") == "text"
        assert get_corrected_field_name("code", "unknown") == "unknown"

    def test_config_init_exports(self):
        """Ensure config __init__.py exports all needed symbols."""
        from core.workflow.generator.config import (
            BUILTIN_NODE_SCHEMAS,
            FALLBACK_RULES,
            FIELD_NAME_CORRECTIONS,
            NODE_TYPE_ALIASES,
            get_corrected_field_name,
            validate_node_schemas,
        )

        # Just verify imports work
        assert BUILTIN_NODE_SCHEMAS is not None
        assert FALLBACK_RULES is not None
        assert FIELD_NAME_CORRECTIONS is not None
        assert NODE_TYPE_ALIASES is not None
        assert callable(get_corrected_field_name)
        assert callable(validate_node_schemas)
api/tests/unit_tests/core/llm_generator/test_planner_prompts.py (new file, 173 lines)
@@ -0,0 +1,173 @@
"""
Unit tests for the Planner Prompts.

Tests cover:
- Tool formatting for planner context
- Edge cases with missing fields
- Empty tool lists
"""

from core.workflow.generator.prompts.planner_prompts import format_tools_for_planner
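Based on the assertions below, each tool is rendered on its own line with a bracketed provider/key prefix followed by its label and description; the exact separator between label and description is an assumption.

# Illustrative sketch only.
tools = [
    {
        "provider_id": "google",
        "tool_key": "search",
        "tool_label": "Google Search",
        "tool_description": "Search the web using Google",
    }
]
summary = format_tools_for_planner(tools)
# Expected to contain one line per tool, roughly:
#   [google/search] Google Search: Search the web using Google
# An empty or None tool list yields "No external tools available."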
class TestFormatToolsForPlanner:
    """Tests for format_tools_for_planner function."""

    def test_empty_tools_returns_default_message(self):
        """Test empty tools list returns default message."""
        result = format_tools_for_planner([])

        assert result == "No external tools available."

    def test_none_tools_returns_default_message(self):
        """Test None tools list returns default message."""
        result = format_tools_for_planner(None)

        assert result == "No external tools available."

    def test_single_tool_formatting(self):
        """Test single tool is formatted correctly."""
        tools = [
            {
                "provider_id": "google",
                "tool_key": "search",
                "tool_label": "Google Search",
                "tool_description": "Search the web using Google",
            }
        ]
        result = format_tools_for_planner(tools)

        assert "[google/search]" in result
        assert "Google Search" in result
        assert "Search the web using Google" in result

    def test_multiple_tools_formatting(self):
        """Test multiple tools are formatted correctly."""
        tools = [
            {
                "provider_id": "google",
                "tool_key": "search",
                "tool_label": "Search",
                "tool_description": "Web search",
            },
            {
                "provider_id": "slack",
                "tool_key": "send_message",
                "tool_label": "Send Message",
                "tool_description": "Send a Slack message",
            },
        ]
        result = format_tools_for_planner(tools)

        lines = result.strip().split("\n")
        assert len(lines) == 2
        assert "[google/search]" in result
        assert "[slack/send_message]" in result

    def test_tool_without_provider_uses_key_only(self):
        """Test tool without provider_id uses tool_key only."""
        tools = [
            {
                "tool_key": "my_tool",
                "tool_label": "My Tool",
                "tool_description": "A custom tool",
            }
        ]
        result = format_tools_for_planner(tools)

        # Should format as [my_tool] without provider prefix
        assert "[my_tool]" in result
        assert "My Tool" in result

    def test_tool_with_tool_name_fallback(self):
        """Test tool uses tool_name when tool_key is missing."""
        tools = [
            {
                "tool_name": "fallback_tool",
                "description": "Fallback description",
            }
        ]
        result = format_tools_for_planner(tools)

        assert "fallback_tool" in result
        assert "Fallback description" in result

    def test_tool_with_missing_description(self):
        """Test tool with missing description doesn't crash."""
        tools = [
            {
                "provider_id": "test",
                "tool_key": "tool1",
                "tool_label": "Tool 1",
            }
        ]
        result = format_tools_for_planner(tools)

        assert "[test/tool1]" in result
        assert "Tool 1" in result

    def test_tool_with_all_missing_fields(self):
        """Test tool with all fields missing uses defaults."""
        tools = [{}]
        result = format_tools_for_planner(tools)

        # Should not crash, may produce minimal output
        assert isinstance(result, str)

    def test_tool_uses_provider_fallback(self):
        """Test tool uses 'provider' when 'provider_id' is missing."""
        tools = [
            {
                "provider": "openai",
                "tool_key": "dalle",
                "tool_label": "DALL-E",
                "tool_description": "Generate images",
            }
        ]
        result = format_tools_for_planner(tools)

        assert "[openai/dalle]" in result

    def test_tool_label_fallback_to_key(self):
        """Test tool_label falls back to tool_key when missing."""
        tools = [
            {
                "provider_id": "test",
                "tool_key": "my_key",
                "tool_description": "Description here",
            }
        ]
        result = format_tools_for_planner(tools)

        # Label should fall back to key
        assert "my_key" in result
        assert "Description here" in result


class TestPlannerPromptConstants:
    """Tests for planner prompt constant availability."""

    def test_planner_system_prompt_exists(self):
        """Test PLANNER_SYSTEM_PROMPT is defined."""
        from core.workflow.generator.prompts.planner_prompts import PLANNER_SYSTEM_PROMPT

        assert PLANNER_SYSTEM_PROMPT is not None
        assert len(PLANNER_SYSTEM_PROMPT) > 0
        assert "{tools_summary}" in PLANNER_SYSTEM_PROMPT

    def test_planner_user_prompt_exists(self):
        """Test PLANNER_USER_PROMPT is defined."""
        from core.workflow.generator.prompts.planner_prompts import PLANNER_USER_PROMPT

        assert PLANNER_USER_PROMPT is not None
        assert "{instruction}" in PLANNER_USER_PROMPT

    def test_planner_system_prompt_has_required_sections(self):
        """Test PLANNER_SYSTEM_PROMPT has required XML sections."""
        from core.workflow.generator.prompts.planner_prompts import PLANNER_SYSTEM_PROMPT

        assert "<role>" in PLANNER_SYSTEM_PROMPT
        assert "<task>" in PLANNER_SYSTEM_PROMPT
        assert "<available_tools>" in PLANNER_SYSTEM_PROMPT
        assert "<response_format>" in PLANNER_SYSTEM_PROMPT
@@ -0,0 +1,536 @@
"""
Unit tests for the Validation Rule Engine.

Tests cover:
- Structure rules (required fields, types, formats)
- Semantic rules (variable references, edge connections)
- Reference rules (model exists, tool configured, dataset valid)
- ValidationEngine integration
"""

from core.workflow.generator.validation import (
    ValidationContext,
    ValidationEngine,
)
from core.workflow.generator.validation.rules import (
    extract_variable_refs,
    is_placeholder,
)
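A rough sketch of how the engine is driven, using only the names exercised by the tests below; everything else about the surrounding code is an assumption.

# Illustrative sketch only.
ctx = ValidationContext(
    nodes=[{"id": "llm_1", "type": "llm", "config": {}}],
    edges=[],
    available_models=[{"provider": "openai", "model": "gpt-4"}],
)
result = ValidationEngine().validate(ctx)
if not result.is_valid:
    auto_fixable = result.fixable_errors      # e.g. missing prompt_template or model
    needs_user = result.user_required_errors  # e.g. unconfigured datasets or tools
    report = result.to_dict()                 # keys include "fixable", "user_required", "warnings", "stats"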
class TestPlaceholderDetection:
    """Tests for placeholder detection utility."""

    def test_detects_please_select(self):
        assert is_placeholder("PLEASE_SELECT_YOUR_MODEL") is True

    def test_detects_your_prefix(self):
        assert is_placeholder("YOUR_API_KEY") is True

    def test_detects_todo(self):
        assert is_placeholder("TODO: fill this in") is True

    def test_detects_placeholder(self):
        assert is_placeholder("PLACEHOLDER_VALUE") is True

    def test_detects_example_prefix(self):
        assert is_placeholder("EXAMPLE_URL") is True

    def test_detects_replace_prefix(self):
        assert is_placeholder("REPLACE_WITH_ACTUAL") is True

    def test_case_insensitive(self):
        assert is_placeholder("please_select") is True
        assert is_placeholder("Please_Select") is True

    def test_valid_values_not_detected(self):
        assert is_placeholder("https://api.example.com") is False
        assert is_placeholder("gpt-4") is False
        assert is_placeholder("my_variable") is False

    def test_non_string_returns_false(self):
        assert is_placeholder(123) is False
        assert is_placeholder(None) is False
        assert is_placeholder(["list"]) is False


class TestVariableRefExtraction:
    """Tests for variable reference extraction."""

    def test_extracts_simple_ref(self):
        refs = extract_variable_refs("Hello {{#start.query#}}")
        assert refs == [("start", "query")]

    def test_extracts_multiple_refs(self):
        refs = extract_variable_refs("{{#node1.output#}} and {{#node2.text#}}")
        assert refs == [("node1", "output"), ("node2", "text")]

    def test_extracts_nested_field(self):
        refs = extract_variable_refs("{{#http_request.body#}}")
        assert refs == [("http_request", "body")]

    def test_no_refs_returns_empty(self):
        refs = extract_variable_refs("No references here")
        assert refs == []

    def test_handles_malformed_refs(self):
        refs = extract_variable_refs("{{#invalid}} and {{incomplete#}}")
        assert refs == []
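These tests imply a {{#node_id.field#}} reference syntax. A plausible extraction consistent with them, though not necessarily the actual implementation in validation.rules, is a single regular expression:

import re

# Illustrative sketch only: one way to satisfy the extraction tests above.
_REF = re.compile(r"\{\{#(\w+)\.([\w.]+)#\}\}")

def extract_refs_sketch(text: str) -> list[tuple[str, str]]:
    # Returns (node_id, field) pairs; malformed markers simply fail to match.
    return [(m.group(1), m.group(2)) for m in _REF.finditer(text)]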
class TestValidationContext:
    """Tests for ValidationContext."""

    def test_node_map_lookup(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start"},
                {"id": "llm_1", "type": "llm"},
            ]
        )
        assert ctx.get_node("start") == {"id": "start", "type": "start"}
        assert ctx.get_node("nonexistent") is None

    def test_model_set(self):
        ctx = ValidationContext(
            available_models=[
                {"provider": "openai", "model": "gpt-4"},
                {"provider": "anthropic", "model": "claude-3"},
            ]
        )
        assert ctx.has_model("openai", "gpt-4") is True
        assert ctx.has_model("anthropic", "claude-3") is True
        assert ctx.has_model("openai", "gpt-3.5") is False

    def test_tool_set(self):
        ctx = ValidationContext(
            available_tools=[
                {"provider_id": "google", "tool_key": "search", "is_team_authorization": True},
                {"provider_id": "slack", "tool_key": "send_message", "is_team_authorization": False},
            ]
        )
        assert ctx.has_tool("google/search") is True
        assert ctx.has_tool("search") is True
        assert ctx.is_tool_configured("google/search") is True
        assert ctx.is_tool_configured("slack/send_message") is False

    def test_upstream_downstream_nodes(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start"},
                {"id": "llm", "type": "llm"},
                {"id": "end", "type": "end"},
            ],
            edges=[
                {"source": "start", "target": "llm"},
                {"source": "llm", "target": "end"},
            ],
        )
        assert ctx.get_upstream_nodes("llm") == ["start"]
        assert ctx.get_downstream_nodes("llm") == ["end"]


class TestStructureRules:
    """Tests for structure validation rules."""

    def test_llm_missing_prompt_template(self):
        ctx = ValidationContext(
            nodes=[{"id": "llm_1", "type": "llm", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        assert result.has_errors
        errors = [e for e in result.all_errors if e.rule_id == "llm.prompt_template.required"]
        assert len(errors) == 1
        assert errors[0].is_fixable is True

    def test_llm_with_prompt_template_passes(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "prompt_template": [
                            {"role": "system", "text": "You are helpful"},
                            {"role": "user", "text": "Hello"},
                        ]
                    },
                }
            ]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        # No prompt_template errors
        errors = [e for e in result.all_errors if "prompt_template" in e.rule_id]
        assert len(errors) == 0

    def test_http_request_missing_url(self):
        ctx = ValidationContext(
            nodes=[{"id": "http_1", "type": "http-request", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "http.url" in e.rule_id]
        assert len(errors) == 1
        assert errors[0].is_fixable is True

    def test_http_request_placeholder_url(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "http_1",
                    "type": "http-request",
                    "config": {"url": "PLEASE_SELECT_YOUR_URL", "method": "GET"},
                }
            ]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "placeholder" in e.rule_id]
        assert len(errors) == 1

    def test_code_node_missing_fields(self):
        ctx = ValidationContext(
            nodes=[{"id": "code_1", "type": "code", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        error_rules = {e.rule_id for e in result.all_errors}
        assert "code.code.required" in error_rules
        assert "code.language.required" in error_rules

    def test_knowledge_retrieval_missing_dataset(self):
        ctx = ValidationContext(
            nodes=[{"id": "kb_1", "type": "knowledge-retrieval", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "knowledge.dataset" in e.rule_id]
        assert len(errors) == 1
        assert errors[0].is_fixable is False  # User must configure


class TestSemanticRules:
    """Tests for semantic validation rules."""

    def test_valid_variable_reference(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "prompt_template": [
                            {"role": "user", "text": "Process: {{#start.query#}}"}
                        ]
                    },
                },
            ]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        # No variable reference errors
        errors = [e for e in result.all_errors if "variable.ref" in e.rule_id]
        assert len(errors) == 0

    def test_invalid_variable_reference(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "prompt_template": [
                            {"role": "user", "text": "Process: {{#nonexistent.field#}}"}
                        ]
                    },
                },
            ]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "variable.ref" in e.rule_id]
        assert len(errors) == 1
        assert "nonexistent" in errors[0].message

    def test_edge_validation(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start", "config": {}},
                {"id": "end", "type": "end", "config": {}},
            ],
            edges=[
                {"source": "start", "target": "end"},
                {"source": "nonexistent", "target": "end"},
            ],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "edge" in e.rule_id]
        assert len(errors) == 1
        assert "nonexistent" in errors[0].message


class TestReferenceRules:
    """Tests for reference validation rules (models, tools)."""

    def test_llm_missing_model_with_available(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {"prompt_template": [{"role": "user", "text": "Hi"}]},
                }
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if e.rule_id == "model.required"]
        assert len(errors) == 1
        assert errors[0].is_fixable is True

    def test_llm_missing_model_no_available(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {"prompt_template": [{"role": "user", "text": "Hi"}]},
                }
            ],
            available_models=[],  # No models available
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if e.rule_id == "model.no_available"]
        assert len(errors) == 1
        assert errors[0].is_fixable is False

    def test_llm_with_valid_model(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "prompt_template": [{"role": "user", "text": "Hi"}],
                        "model": {"provider": "openai", "name": "gpt-4"},
                    },
                }
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if "model" in e.rule_id]
        assert len(errors) == 0

    def test_llm_with_invalid_model(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "prompt_template": [{"role": "user", "text": "Hi"}],
                        "model": {"provider": "openai", "name": "gpt-99"},
                    },
                }
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if e.rule_id == "model.not_found"]
        assert len(errors) == 1
        assert errors[0].is_fixable is True

    def test_tool_node_not_found(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "tool_1",
                    "type": "tool",
                    "config": {"tool_key": "nonexistent/tool"},
                }
            ],
            available_tools=[],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if e.rule_id == "tool.not_found"]
        assert len(errors) == 1

    def test_tool_node_not_configured(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "tool_1",
                    "type": "tool",
                    "config": {"tool_key": "google/search"},
                }
            ],
            available_tools=[
                {"provider_id": "google", "tool_key": "search", "is_team_authorization": False}
            ],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        errors = [e for e in result.all_errors if e.rule_id == "tool.not_configured"]
        assert len(errors) == 1
        assert errors[0].is_fixable is False


class TestValidationResult:
    """Tests for ValidationResult classification."""

    def test_has_errors(self):
        ctx = ValidationContext(
            nodes=[{"id": "llm_1", "type": "llm", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        assert result.has_errors is True
        assert result.is_valid is False

    def test_has_fixable_errors(self):
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {"prompt_template": [{"role": "user", "text": "Hi"}]},
                }
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        assert result.has_fixable_errors is True
        assert len(result.fixable_errors) > 0

    def test_get_fixable_by_node(self):
        ctx = ValidationContext(
            nodes=[
                {"id": "llm_1", "type": "llm", "config": {}},
                {"id": "http_1", "type": "http-request", "config": {}},
            ]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        by_node = result.get_fixable_by_node()
        assert "llm_1" in by_node
        assert "http_1" in by_node

    def test_to_dict(self):
        ctx = ValidationContext(
            nodes=[{"id": "llm_1", "type": "llm", "config": {}}]
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        d = result.to_dict()
        assert "fixable" in d
        assert "user_required" in d
        assert "warnings" in d
        assert "all_warnings" in d
        assert "stats" in d


class TestIntegration:
    """Integration tests for the full validation pipeline."""

    def test_complete_workflow_validation(self):
        """Test validation of a complete workflow."""
        ctx = ValidationContext(
            nodes=[
                {
                    "id": "start",
                    "type": "start",
                    "config": {"variables": [{"variable": "query", "type": "text-input"}]},
                },
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {
                        "model": {"provider": "openai", "name": "gpt-4"},
                        "prompt_template": [{"role": "user", "text": "{{#start.query#}}"}],
                    },
                },
                {
                    "id": "end",
                    "type": "end",
                    "config": {"outputs": [{"variable": "result", "value_selector": ["llm_1", "text"]}]},
                },
            ],
            edges=[
                {"source": "start", "target": "llm_1"},
                {"source": "llm_1", "target": "end"},
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        # Should have no errors
        assert result.is_valid is True
        assert len(result.fixable_errors) == 0
        assert len(result.user_required_errors) == 0

    def test_workflow_with_multiple_errors(self):
        """Test workflow with multiple types of errors."""
        ctx = ValidationContext(
            nodes=[
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "llm_1",
                    "type": "llm",
                    "config": {},  # Missing prompt_template and model
                },
                {
                    "id": "kb_1",
                    "type": "knowledge-retrieval",
                    "config": {"dataset_ids": ["PLEASE_SELECT_YOUR_DATASET"]},
                },
                {"id": "end", "type": "end", "config": {}},
            ],
            available_models=[{"provider": "openai", "model": "gpt-4"}],
        )
        engine = ValidationEngine()
        result = engine.validate(ctx)

        # Should have multiple errors
        assert result.has_errors is True
        assert len(result.fixable_errors) >= 2  # model, prompt_template
        assert len(result.user_required_errors) >= 1  # dataset placeholder

        # Check stats
        assert result.stats["total_nodes"] == 4
        assert result.stats["total_errors"] >= 3
@@ -0,0 +1,435 @@
"""
Unit tests for the Vibe Workflow Validator.

Tests cover:
- Basic validation function
- User-friendly validation hints
- Edge cases and error handling
"""

from core.workflow.generator.utils.workflow_validator import ValidationHint, WorkflowValidator
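The call shape and hint fields used below: validate takes the workflow dict and the available tools positionally, plus an optional available_models keyword, and returns an (is_valid, hints) pair. A minimal sketch, with everything not shown in the tests treated as an assumption:

# Illustrative sketch only.
is_valid, hints = WorkflowValidator.validate(
    {"nodes": [], "edges": []},  # workflow data
    [],                          # available tools
    available_models=[{"provider": "openai", "model": "gpt-4", "mode": "chat"}],
)
for hint in hints:
    # Each hint carries node_id, field, message, severity and an optional suggestion.
    print(hint.severity, hint.node_id, hint.field, hint.message, hint.suggestion)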
class TestValidationHint:
    """Tests for ValidationHint dataclass."""

    def test_hint_creation(self):
        """Test creating a validation hint."""
        hint = ValidationHint(
            node_id="llm_1",
            field="model",
            message="Model is not configured",
            severity="error",
        )
        assert hint.node_id == "llm_1"
        assert hint.field == "model"
        assert hint.message == "Model is not configured"
        assert hint.severity == "error"

    def test_hint_with_suggestion(self):
        """Test hint with suggestion."""
        hint = ValidationHint(
            node_id="http_1",
            field="url",
            message="URL is required",
            severity="error",
            suggestion="Add a valid URL like https://api.example.com",
        )
        assert hint.suggestion is not None


class TestWorkflowValidatorBasic:
    """Tests for basic validation scenarios."""

    def test_empty_workflow_is_valid(self):
        """Test empty workflow passes validation."""
        workflow_data = {"nodes": [], "edges": []}
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        # Empty but valid structure
        assert is_valid is True
        assert len(hints) == 0

    def test_minimal_valid_workflow(self):
        """Test minimal Start → End workflow."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [{"source": "start", "target": "end"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        assert is_valid is True

    def test_complete_workflow_with_llm(self):
        """Test complete workflow with LLM node."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {"variables": []}},
                {
                    "id": "llm",
                    "type": "llm",
                    "config": {
                        "model": {"provider": "openai", "name": "gpt-4"},
                        "prompt_template": [{"role": "user", "text": "Hello"}],
                    },
                },
                {"id": "end", "type": "end", "config": {"outputs": []}},
            ],
            "edges": [
                {"source": "start", "target": "llm"},
                {"source": "llm", "target": "end"},
            ],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        # Should pass with no critical errors
        errors = [h for h in hints if h.severity == "error"]
        assert len(errors) == 0


class TestVariableReferenceValidation:
    """Tests for variable reference validation."""

    def test_valid_variable_reference(self):
        """Test valid variable reference passes."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "llm",
                    "type": "llm",
                    "config": {"prompt_template": [{"role": "user", "text": "Query: {{#start.query#}}"}]},
                },
            ],
            "edges": [{"source": "start", "target": "llm"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        ref_errors = [h for h in hints if "reference" in h.message.lower()]
        assert len(ref_errors) == 0

    def test_invalid_variable_reference(self):
        """Test invalid variable reference generates hint."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "llm",
                    "type": "llm",
                    "config": {"prompt_template": [{"role": "user", "text": "{{#nonexistent.field#}}"}]},
                },
            ],
            "edges": [{"source": "start", "target": "llm"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        # Should have a hint about invalid reference
        ref_hints = [h for h in hints if "nonexistent" in h.message or "reference" in h.message.lower()]
        assert len(ref_hints) >= 1


class TestEdgeValidation:
    """Tests for edge validation."""

    def test_edge_with_invalid_source(self):
        """Test edge with non-existent source generates hint."""
        workflow_data = {
            "nodes": [{"id": "end", "type": "end", "config": {}}],
            "edges": [{"source": "nonexistent", "target": "end"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        # Should have hint about invalid edge
        edge_hints = [h for h in hints if "edge" in h.message.lower() or "source" in h.message.lower()]
        assert len(edge_hints) >= 1

    def test_edge_with_invalid_target(self):
        """Test edge with non-existent target generates hint."""
        workflow_data = {
            "nodes": [{"id": "start", "type": "start", "config": {}}],
            "edges": [{"source": "start", "target": "nonexistent"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        edge_hints = [h for h in hints if "edge" in h.message.lower() or "target" in h.message.lower()]
        assert len(edge_hints) >= 1


class TestToolValidation:
    """Tests for tool node validation."""

    def test_tool_node_found_in_available(self):
        """Test tool node that exists in available tools."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "tool1",
                    "type": "tool",
                    "config": {"tool_key": "google/search"},
                },
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [{"source": "start", "target": "tool1"}, {"source": "tool1", "target": "end"}],
        }
        available_tools = [{"provider_id": "google", "tool_key": "search", "is_team_authorization": True}]
        is_valid, hints = WorkflowValidator.validate(workflow_data, available_tools)

        tool_errors = [h for h in hints if h.severity == "error" and "tool" in h.message.lower()]
        assert len(tool_errors) == 0

    def test_tool_node_not_found(self):
        """Test tool node not in available tools generates hint."""
        workflow_data = {
            "nodes": [
                {
                    "id": "tool1",
                    "type": "tool",
                    "config": {"tool_key": "unknown/tool"},
                }
            ],
            "edges": [],
        }
        available_tools = []
        is_valid, hints = WorkflowValidator.validate(workflow_data, available_tools)

        tool_hints = [h for h in hints if "tool" in h.message.lower()]
        assert len(tool_hints) >= 1


class TestQuestionClassifierValidation:
    """Tests for question-classifier node validation."""

    def test_question_classifier_with_classes(self):
        """Test question-classifier with valid classes."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "classifier",
                    "type": "question-classifier",
                    "config": {
                        "classes": [
                            {"id": "class1", "name": "Class 1"},
                            {"id": "class2", "name": "Class 2"},
                        ],
                        "model": {"provider": "openai", "name": "gpt-4", "mode": "chat"},
                    },
                },
                {"id": "h1", "type": "llm", "config": {}},
                {"id": "h2", "type": "llm", "config": {}},
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [
                {"source": "start", "target": "classifier"},
                {"source": "classifier", "sourceHandle": "class1", "target": "h1"},
                {"source": "classifier", "sourceHandle": "class2", "target": "h2"},
                {"source": "h1", "target": "end"},
                {"source": "h2", "target": "end"},
            ],
        }
        available_models = [{"provider": "openai", "model": "gpt-4", "mode": "chat"}]
        is_valid, hints = WorkflowValidator.validate(workflow_data, [], available_models=available_models)

        class_errors = [h for h in hints if "class" in h.message.lower() and h.severity == "error"]
        assert len(class_errors) == 0

    def test_question_classifier_missing_classes(self):
        """Test question-classifier without classes generates hint."""
        workflow_data = {
            "nodes": [
                {
                    "id": "classifier",
                    "type": "question-classifier",
                    "config": {"model": {"provider": "openai", "name": "gpt-4", "mode": "chat"}},
                }
            ],
            "edges": [],
        }
        available_models = [{"provider": "openai", "model": "gpt-4", "mode": "chat"}]
        is_valid, hints = WorkflowValidator.validate(workflow_data, [], available_models=available_models)

        # Should have hint about missing classes
        class_hints = [h for h in hints if "class" in h.message.lower()]
        assert len(class_hints) >= 1


class TestHttpRequestValidation:
    """Tests for HTTP request node validation."""

    def test_http_request_with_url(self):
        """Test HTTP request with valid URL."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "http",
                    "type": "http-request",
                    "config": {"url": "https://api.example.com", "method": "GET"},
                },
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [{"source": "start", "target": "http"}, {"source": "http", "target": "end"}],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        url_errors = [h for h in hints if "url" in h.message.lower() and h.severity == "error"]
        assert len(url_errors) == 0

    def test_http_request_missing_url(self):
        """Test HTTP request without URL generates hint."""
        workflow_data = {
            "nodes": [
                {
                    "id": "http",
                    "type": "http-request",
                    "config": {"method": "GET"},
                }
            ],
            "edges": [],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])

        url_hints = [h for h in hints if "url" in h.message.lower()]
        assert len(url_hints) >= 1


class TestParameterExtractorValidation:
    """Tests for parameter-extractor node validation."""

    def test_parameter_extractor_valid_params(self):
        """Test parameter-extractor with valid parameters."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "extractor",
                    "type": "parameter-extractor",
                    "config": {
                        "instruction": "Extract info",
                        "parameters": [
                            {
                                "name": "name",
                                "type": "string",
                                "description": "Name",
                                "required": True,
                            }
                        ],
                        "model": {"provider": "openai", "name": "gpt-4", "mode": "chat"},
                    },
                },
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [{"source": "start", "target": "extractor"}, {"source": "extractor", "target": "end"}],
        }
        available_models = [{"provider": "openai", "model": "gpt-4", "mode": "chat"}]
        is_valid, hints = WorkflowValidator.validate(workflow_data, [], available_models=available_models)

        errors = [h for h in hints if h.severity == "error"]
        assert len(errors) == 0

    def test_parameter_extractor_missing_required_field(self):
        """Test parameter-extractor missing 'required' field in parameter item."""
        workflow_data = {
            "nodes": [
                {
                    "id": "extractor",
                    "type": "parameter-extractor",
                    "config": {
                        "instruction": "Extract info",
                        "parameters": [
                            {
                                "name": "name",
                                "type": "string",
                                "description": "Name",
                                # Missing 'required'
                            }
                        ],
                        "model": {"provider": "openai", "name": "gpt-4", "mode": "chat"},
                    },
                }
            ],
            "edges": [],
        }
        available_models = [{"provider": "openai", "model": "gpt-4", "mode": "chat"}]
        is_valid, hints = WorkflowValidator.validate(workflow_data, [], available_models=available_models)

        errors = [h for h in hints if "required" in h.message and h.severity == "error"]
        assert len(errors) >= 1
        assert "parameter-extractor" in errors[0].node_type


class TestIfElseValidation:
    """Tests for if-else node validation."""

    def test_if_else_valid_operators(self):
        """Test if-else with valid operators."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "ifelse",
                    "type": "if-else",
                    "config": {
                        "cases": [{"case_id": "c1", "conditions": [{"comparison_operator": "≥", "value": "1"}]}]
                    },
                },
                {"id": "t", "type": "llm", "config": {}},
                {"id": "f", "type": "llm", "config": {}},
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [
                {"source": "start", "target": "ifelse"},
                {"source": "ifelse", "sourceHandle": "true", "target": "t"},
                {"source": "ifelse", "sourceHandle": "false", "target": "f"},
                {"source": "t", "target": "end"},
                {"source": "f", "target": "end"},
            ],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])
        errors = [h for h in hints if h.severity == "error"]
        # The bare LLM nodes on each branch may surface unrelated model hints,
        # so only check for operator-related errors here.
        operator_errors = [h for h in errors if "operator" in h.message]
        assert len(operator_errors) == 0
    def test_if_else_invalid_operators(self):
        """Test if-else with invalid operators."""
        workflow_data = {
            "nodes": [
                {"id": "start", "type": "start", "config": {}},
                {
                    "id": "ifelse",
                    "type": "if-else",
                    "config": {
                        "cases": [{"case_id": "c1", "conditions": [{"comparison_operator": ">=", "value": "1"}]}]
                    },
                },
                {"id": "t", "type": "llm", "config": {}},
                {"id": "f", "type": "llm", "config": {}},
                {"id": "end", "type": "end", "config": {}},
            ],
            "edges": [
                {"source": "start", "target": "ifelse"},
                {"source": "ifelse", "sourceHandle": "true", "target": "t"},
                {"source": "ifelse", "sourceHandle": "false", "target": "f"},
                {"source": "t", "target": "end"},
                {"source": "f", "target": "end"},
            ],
        }
        is_valid, hints = WorkflowValidator.validate(workflow_data, [])
        operator_errors = [h for h in hints if "operator" in h.message and h.severity == "error"]
        assert len(operator_errors) > 0
        assert "≥" in operator_errors[0].suggestion