refactor(api): move workflow knowledge nodes and trigger nodes (#33445)

-LAN-
2026-03-15 15:24:59 +08:00
committed by GitHub
parent 1b6e695520
commit fb41b215c8
232 changed files with 1575 additions and 1421 deletions
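The change is a mechanical rename across the tracing test suites: the old NodeType enum gives way to BuiltinNodeTypes (plus a BUILT_IN_NODE_TYPES collection), both exported from the relocated dify_graph.enums module. As a reading aid, here is a minimal sketch of what that module plausibly exposes, inferred solely from the imports and usages in this diff; the member names appear in the tests below, but the StrEnum base and the string values are assumptions, not the actual dify_graph source.

# Hypothetical sketch of dify_graph/enums.py, reconstructed from this diff only.
from enum import StrEnum  # requires Python 3.11+

class BuiltinNodeTypes(StrEnum):
    LLM = "llm"                                  # value assumed
    CODE = "code"                                # value assumed
    TOOL = "tool"                                # value assumed
    AGENT = "agent"                              # value assumed
    HTTP_REQUEST = "http-request"                # value assumed
    QUESTION_CLASSIFIER = "question-classifier"  # value assumed
    KNOWLEDGE_RETRIEVAL = "knowledge-retrieval"  # value assumed

# The arize_phoenix test below iterates BUILT_IN_NODE_TYPES rather than the
# enum itself, so it is presumably an explicit collection of the built-in members.
BUILT_IN_NODE_TYPES = tuple(BuiltinNodeTypes)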

View File

@@ -35,7 +35,7 @@ from core.ops.entities.trace_entity import (
WorkflowTraceInfo,
)
from dify_graph.entities import WorkflowNodeExecution
-from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
+from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
class RecordingTraceClient:
@@ -413,7 +413,7 @@ def test_build_workflow_node_span_routes_llm_type(trace_instance: AliyunDataTrac
monkeypatch.setattr(trace_instance, "build_workflow_llm_span", MagicMock(return_value="llm"))
-node_execution.node_type = NodeType.LLM
+node_execution.node_type = BuiltinNodeTypes.LLM
assert trace_instance.build_workflow_node_span(node_execution, trace_info, trace_metadata) == "llm"
@@ -426,7 +426,7 @@ def test_build_workflow_node_span_routes_knowledge_retrieval_type(
monkeypatch.setattr(trace_instance, "build_workflow_retrieval_span", MagicMock(return_value="retrieval"))
-node_execution.node_type = NodeType.KNOWLEDGE_RETRIEVAL
+node_execution.node_type = BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL
assert trace_instance.build_workflow_node_span(node_execution, trace_info, trace_metadata) == "retrieval"
@@ -437,7 +437,7 @@ def test_build_workflow_node_span_routes_tool_type(trace_instance: AliyunDataTra
monkeypatch.setattr(trace_instance, "build_workflow_tool_span", MagicMock(return_value="tool"))
-node_execution.node_type = NodeType.TOOL
+node_execution.node_type = BuiltinNodeTypes.TOOL
assert trace_instance.build_workflow_node_span(node_execution, trace_info, trace_metadata) == "tool"
@@ -448,7 +448,7 @@ def test_build_workflow_node_span_routes_code_type(trace_instance: AliyunDataTra
monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(return_value="task"))
-node_execution.node_type = NodeType.CODE
+node_execution.node_type = BuiltinNodeTypes.CODE
assert trace_instance.build_workflow_node_span(node_execution, trace_info, trace_metadata) == "task"
@@ -460,7 +460,7 @@ def test_build_workflow_node_span_handles_errors(
trace_metadata = MagicMock()
monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(side_effect=RuntimeError("boom")))
-node_execution.node_type = NodeType.CODE
+node_execution.node_type = BuiltinNodeTypes.CODE
assert trace_instance.build_workflow_node_span(node_execution, trace_info, trace_metadata) is None
assert "Error occurred in build_workflow_node_span" in caplog.text

View File

@@ -25,7 +25,7 @@ from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
UnitEnum,
)
from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace
-from dify_graph.enums import NodeType
+from dify_graph.enums import BuiltinNodeTypes
from models import EndUser
from models.enums import MessageStatus
@@ -147,7 +147,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch):
node_llm = MagicMock()
node_llm.id = "node-llm"
node_llm.title = "LLM Node"
-node_llm.node_type = NodeType.LLM
+node_llm.node_type = BuiltinNodeTypes.LLM
node_llm.status = "succeeded"
node_llm.process_data = {
"model_mode": "chat",
@@ -164,7 +164,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch):
node_other = MagicMock()
node_other.id = "node-other"
node_other.title = "Other Node"
-node_other.node_type = NodeType.CODE
+node_other.node_type = BuiltinNodeTypes.CODE
node_other.status = "failed"
node_other.process_data = None
node_other.inputs = {"code": "print"}
@@ -664,7 +664,7 @@ def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypat
node = MagicMock()
node.id = "n1"
node.title = "LLM Node"
-node.node_type = NodeType.LLM
+node.node_type = BuiltinNodeTypes.LLM
node.status = "succeeded"
class BadDict(collections.UserDict):

View File

@@ -21,7 +21,7 @@ from core.ops.langsmith_trace.entities.langsmith_trace_entity import (
LangSmithRunUpdateModel,
)
from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace
-from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
+from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from models import EndUser
@@ -145,7 +145,7 @@ def test_workflow_trace(trace_instance, monkeypatch):
node_llm = MagicMock()
node_llm.id = "node-llm"
node_llm.title = "LLM Node"
-node_llm.node_type = NodeType.LLM
+node_llm.node_type = BuiltinNodeTypes.LLM
node_llm.status = "succeeded"
node_llm.process_data = {
"model_mode": "chat",
@@ -162,7 +162,7 @@ def test_workflow_trace(trace_instance, monkeypatch):
node_other = MagicMock()
node_other.id = "node-other"
node_other.title = "Tool Node"
-node_other.node_type = NodeType.TOOL
+node_other.node_type = BuiltinNodeTypes.TOOL
node_other.status = "succeeded"
node_other.process_data = None
node_other.inputs = {"tool_input": "val"}
@@ -174,7 +174,7 @@ def test_workflow_trace(trace_instance, monkeypatch):
node_retrieval = MagicMock()
node_retrieval.id = "node-retrieval"
node_retrieval.title = "Retrieval Node"
-node_retrieval.node_type = NodeType.KNOWLEDGE_RETRIEVAL
+node_retrieval.node_type = BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL
node_retrieval.status = "succeeded"
node_retrieval.process_data = None
node_retrieval.inputs = {"query": "val"}
@@ -555,7 +555,7 @@ def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, capl
node_llm = MagicMock()
node_llm.id = "node-llm"
node_llm.title = "LLM Node"
-node_llm.node_type = NodeType.LLM
+node_llm.node_type = BuiltinNodeTypes.LLM
node_llm.status = "succeeded"
node_llm.process_data = BadDict({"model_mode": "chat", "model_name": "gpt-4", "usage": True, "prompts": ["p"]})
node_llm.inputs = {}

View File

@@ -21,7 +21,7 @@ from core.ops.entities.trace_entity import (
WorkflowTraceInfo,
)
from core.ops.mlflow_trace.mlflow_trace import MLflowDataTrace, datetime_to_nanoseconds
-from dify_graph.enums import NodeType
+from dify_graph.enums import BuiltinNodeTypes
# ── Helpers ──────────────────────────────────────────────────────────────────
@@ -161,7 +161,7 @@ def _make_node(**overrides):
"tenant_id": "t1",
"app_id": "app-1",
"title": "Node Title",
-"node_type": NodeType.CODE,
+"node_type": BuiltinNodeTypes.CODE,
"status": "succeeded",
"inputs": '{"key": "value"}',
"outputs": '{"result": "ok"}',
@@ -362,7 +362,7 @@ class TestWorkflowTrace:
def test_workflow_with_llm_node(self, trace_instance, mock_tracing, mock_db):
llm_node = _make_node(
-node_type=NodeType.LLM,
+node_type=BuiltinNodeTypes.LLM,
process_data=json.dumps(
{
"prompts": [{"role": "user", "text": "hi"}],
@@ -388,7 +388,7 @@ class TestWorkflowTrace:
def test_workflow_with_question_classifier_node(self, trace_instance, mock_tracing, mock_db):
qc_node = _make_node(
-node_type=NodeType.QUESTION_CLASSIFIER,
+node_type=BuiltinNodeTypes.QUESTION_CLASSIFIER,
process_data=json.dumps(
{
"prompts": "classify this",
@@ -408,7 +408,7 @@ class TestWorkflowTrace:
def test_workflow_with_http_request_node(self, trace_instance, mock_tracing, mock_db):
http_node = _make_node(
-node_type=NodeType.HTTP_REQUEST,
+node_type=BuiltinNodeTypes.HTTP_REQUEST,
process_data='{"url": "https://api.com"}',
)
mock_db.session.query.return_value.filter.return_value.order_by.return_value.all.return_value = [http_node]
@@ -424,7 +424,7 @@ class TestWorkflowTrace:
def test_workflow_with_knowledge_retrieval_node(self, trace_instance, mock_tracing, mock_db):
kr_node = _make_node(
-node_type=NodeType.KNOWLEDGE_RETRIEVAL,
+node_type=BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL,
outputs=json.dumps(
{
"result": [
@@ -846,13 +846,13 @@ class TestGetNodeSpanType:
@pytest.mark.parametrize(
("node_type", "expected_contains"),
[
-(NodeType.LLM, "LLM"),
-(NodeType.QUESTION_CLASSIFIER, "LLM"),
-(NodeType.KNOWLEDGE_RETRIEVAL, "RETRIEVER"),
-(NodeType.TOOL, "TOOL"),
-(NodeType.CODE, "TOOL"),
-(NodeType.HTTP_REQUEST, "TOOL"),
-(NodeType.AGENT, "AGENT"),
+(BuiltinNodeTypes.LLM, "LLM"),
+(BuiltinNodeTypes.QUESTION_CLASSIFIER, "LLM"),
+(BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL, "RETRIEVER"),
+(BuiltinNodeTypes.TOOL, "TOOL"),
+(BuiltinNodeTypes.CODE, "TOOL"),
+(BuiltinNodeTypes.HTTP_REQUEST, "TOOL"),
+(BuiltinNodeTypes.AGENT, "AGENT"),
],
)
def test_mapped_types(self, trace_instance, node_type, expected_contains):

View File

@@ -18,7 +18,7 @@ from core.ops.entities.trace_entity import (
WorkflowTraceInfo,
)
from core.ops.opik_trace.opik_trace import OpikDataTrace, prepare_opik_uuid, wrap_dict, wrap_metadata
-from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
+from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
from models import EndUser
from models.enums import MessageStatus
@@ -172,7 +172,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch):
node_llm = MagicMock()
node_llm.id = LLM_NODE_ID
node_llm.title = "LLM Node"
-node_llm.node_type = NodeType.LLM
+node_llm.node_type = BuiltinNodeTypes.LLM
node_llm.status = "succeeded"
node_llm.process_data = {
"model_mode": "chat",
@@ -189,7 +189,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch):
node_other = MagicMock()
node_other.id = CODE_NODE_ID
node_other.title = "Other Node"
-node_other.node_type = NodeType.CODE
+node_other.node_type = BuiltinNodeTypes.CODE
node_other.status = "failed"
node_other.process_data = None
node_other.inputs = {"code": "print"}
@@ -641,7 +641,7 @@ def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch
node = MagicMock()
node.id = "88e8e918-472e-4b69-8051-12502c34fc07"
node.title = "LLM Node"
-node.node_type = NodeType.LLM
+node.node_type = BuiltinNodeTypes.LLM
node.status = "succeeded"
class BadDict(collections.UserDict):

View File

@@ -15,7 +15,7 @@ from core.ops.entities.trace_entity import (
)
from core.ops.tencent_trace.tencent_trace import TencentDataTrace
from dify_graph.entities import WorkflowNodeExecution
-from dify_graph.enums import NodeType
+from dify_graph.enums import BuiltinNodeTypes
from models import Account, App, TenantAccountJoin
logger = logging.getLogger(__name__)
@@ -320,10 +320,10 @@ class TestTencentDataTrace:
node1 = MagicMock(spec=WorkflowNodeExecution)
node1.id = "n1"
-node1.node_type = NodeType.LLM
+node1.node_type = BuiltinNodeTypes.LLM
node2 = MagicMock(spec=WorkflowNodeExecution)
node2.id = "n2"
-node2.node_type = NodeType.TOOL
+node2.node_type = BuiltinNodeTypes.TOOL
with patch.object(tencent_data_trace, "_get_workflow_node_executions", return_value=[node1, node2]):
with patch.object(tencent_data_trace, "_build_workflow_node_span", side_effect=["span1", "span2"]):
@@ -359,10 +359,10 @@ class TestTencentDataTrace:
trace_info = MagicMock(spec=WorkflowTraceInfo)
nodes = [
-(NodeType.LLM, mock_span_builder.build_workflow_llm_span),
-(NodeType.KNOWLEDGE_RETRIEVAL, mock_span_builder.build_workflow_retrieval_span),
-(NodeType.TOOL, mock_span_builder.build_workflow_tool_span),
-(NodeType.CODE, mock_span_builder.build_workflow_task_span),
+(BuiltinNodeTypes.LLM, mock_span_builder.build_workflow_llm_span),
+(BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL, mock_span_builder.build_workflow_retrieval_span),
+(BuiltinNodeTypes.TOOL, mock_span_builder.build_workflow_tool_span),
+(BuiltinNodeTypes.CODE, mock_span_builder.build_workflow_task_span),
]
for node_type, builder_method in nodes:
@@ -377,7 +377,7 @@ class TestTencentDataTrace:
def test_build_workflow_node_span_exception(self, tencent_data_trace, mock_span_builder):
node = MagicMock(spec=WorkflowNodeExecution)
-node.node_type = NodeType.LLM
+node.node_type = BuiltinNodeTypes.LLM
node.id = "n1"
mock_span_builder.build_workflow_llm_span.side_effect = Exception("error")

View File

@@ -1,29 +1,29 @@
from openinference.semconv.trace import OpenInferenceSpanKindValues
from core.ops.arize_phoenix_trace.arize_phoenix_trace import _NODE_TYPE_TO_SPAN_KIND, _get_node_span_kind
-from dify_graph.enums import NodeType
+from dify_graph.enums import BUILT_IN_NODE_TYPES, BuiltinNodeTypes
class TestGetNodeSpanKind:
"""Tests for _get_node_span_kind helper."""
def test_all_node_types_are_mapped_correctly(self):
-"""Ensure every NodeType enum member is mapped to the correct span kind."""
+"""Ensure every built-in node type is mapped to the correct span kind."""
# Mappings for node types that have a specialised span kind.
special_mappings = {
-NodeType.LLM: OpenInferenceSpanKindValues.LLM,
-NodeType.KNOWLEDGE_RETRIEVAL: OpenInferenceSpanKindValues.RETRIEVER,
-NodeType.TOOL: OpenInferenceSpanKindValues.TOOL,
-NodeType.AGENT: OpenInferenceSpanKindValues.AGENT,
+BuiltinNodeTypes.LLM: OpenInferenceSpanKindValues.LLM,
+BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: OpenInferenceSpanKindValues.RETRIEVER,
+BuiltinNodeTypes.TOOL: OpenInferenceSpanKindValues.TOOL,
+BuiltinNodeTypes.AGENT: OpenInferenceSpanKindValues.AGENT,
}
-# Test that every NodeType enum member is mapped to the correct span kind.
+# Test that every built-in node type is mapped to the correct span kind.
# Node types not in `special_mappings` should default to CHAIN.
-for node_type in NodeType:
+for node_type in BUILT_IN_NODE_TYPES:
expected_span_kind = special_mappings.get(node_type, OpenInferenceSpanKindValues.CHAIN)
actual_span_kind = _get_node_span_kind(node_type)
assert actual_span_kind == expected_span_kind, (
-f"NodeType.{node_type.name} was mapped to {actual_span_kind}, but {expected_span_kind} was expected."
+f"Node type {node_type!r} was mapped to {actual_span_kind}, but {expected_span_kind} was expected."
)
def test_unknown_string_defaults_to_chain(self):

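The test above fully determines the observable behaviour of _get_node_span_kind: four node types map to specialised OpenInference span kinds, and everything else, including unknown strings, falls back to CHAIN. A minimal sketch consistent with those assertions; the real helper in core.ops.arize_phoenix_trace may be written differently.

from openinference.semconv.trace import OpenInferenceSpanKindValues

# Mapping contents mirror special_mappings in the test above.
_NODE_TYPE_TO_SPAN_KIND = {
    BuiltinNodeTypes.LLM: OpenInferenceSpanKindValues.LLM,
    BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: OpenInferenceSpanKindValues.RETRIEVER,
    BuiltinNodeTypes.TOOL: OpenInferenceSpanKindValues.TOOL,
    BuiltinNodeTypes.AGENT: OpenInferenceSpanKindValues.AGENT,
}

def _get_node_span_kind(node_type) -> OpenInferenceSpanKindValues:
    # Unmapped node types (and unknown strings) default to CHAIN, per
    # test_unknown_string_defaults_to_chain.
    return _NODE_TYPE_TO_SPAN_KIND.get(node_type, OpenInferenceSpanKindValues.CHAIN)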
View File

@@ -22,7 +22,7 @@ from core.ops.entities.trace_entity import (
)
from core.ops.weave_trace.entities.weave_trace_entity import WeaveTraceModel
from core.ops.weave_trace.weave_trace import WeaveDataTrace
-from dify_graph.enums import NodeType, WorkflowNodeExecutionMetadataKey
+from dify_graph.enums import BuiltinNodeTypes, WorkflowNodeExecutionMetadataKey
# ── Helpers ──────────────────────────────────────────────────────────────────
@@ -173,7 +173,7 @@ def _make_node(**overrides):
defaults = {
"id": "node-1",
"title": "Node Title",
-"node_type": NodeType.CODE,
+"node_type": BuiltinNodeTypes.CODE,
"status": "succeeded",
"inputs": {"key": "value"},
"outputs": {"result": "ok"},
@@ -633,7 +633,7 @@ class TestWorkflowTrace:
"""Workflow trace iterates node executions and creates node runs."""
node = _make_node(
id="node-1",
-node_type=NodeType.CODE,
+node_type=BuiltinNodeTypes.CODE,
inputs={"k": "v"},
outputs={"r": "ok"},
elapsed_time=0.5,
@@ -655,7 +655,7 @@ class TestWorkflowTrace:
def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch):
"""LLM node uses process_data prompts as inputs."""
node = _make_node(
-node_type=NodeType.LLM,
+node_type=BuiltinNodeTypes.LLM,
process_data={
"prompts": [{"role": "user", "content": "hi"}],
"model_mode": "chat",
@@ -683,7 +683,7 @@ class TestWorkflowTrace:
def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch):
"""Non-LLM node uses node_execution.inputs directly."""
node = _make_node(
-node_type=NodeType.TOOL,
+node_type=BuiltinNodeTypes.TOOL,
inputs={"tool_input": "val"},
process_data=None,
)
@@ -743,7 +743,7 @@ class TestWorkflowTrace:
def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch):
"""Chat mode LLM node adds ls_provider and ls_model_name to attributes."""
node = _make_node(
-node_type=NodeType.LLM,
+node_type=BuiltinNodeTypes.LLM,
process_data={"model_mode": "chat", "model_provider": "openai", "model_name": "gpt-4", "prompts": []},
)
self._setup_repo(monkeypatch, nodes=[node])