feat: Human Input Node (#32060)

This commit adds the frontend and backend implementation for the human input node.

Co-authored-by: twwu <twwu@dify.ai>
Co-authored-by: JzoNg <jzongcode@gmail.com>
Co-authored-by: yyh <92089059+lyzno1@users.noreply.github.com>
Co-authored-by: zhsama <torvalds@linux.do>
This commit is contained in:
QuantumGhost
2026-02-09 14:57:23 +08:00
committed by GitHub
parent 56e3a55023
commit a1fc280102
474 changed files with 32667 additions and 2050 deletions

View File

@ -2309,6 +2309,12 @@ class TestRegisterService:
mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True
mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False
from extensions.ext_database import db
from models.model import DifySetup
db.session.query(DifySetup).delete()
db.session.commit()
# Execute setup
RegisterService.setup(
email=admin_email,
@ -2319,9 +2325,7 @@ class TestRegisterService:
)
# Verify account was created
from extensions.ext_database import db
from models import Account
from models.model import DifySetup
account = db.session.query(Account).filter_by(email=admin_email).first()
assert account is not None

View File

@ -1,5 +1,5 @@
import uuid
from unittest.mock import MagicMock, patch
from unittest.mock import ANY, MagicMock, patch
import pytest
from faker import Faker
@ -26,6 +26,7 @@ class TestAppGenerateService:
patch("services.app_generate_service.AgentChatAppGenerator") as mock_agent_chat_generator,
patch("services.app_generate_service.AdvancedChatAppGenerator") as mock_advanced_chat_generator,
patch("services.app_generate_service.WorkflowAppGenerator") as mock_workflow_generator,
patch("services.app_generate_service.MessageBasedAppGenerator") as mock_message_based_generator,
patch("services.account_service.FeatureService") as mock_account_feature_service,
patch("services.app_generate_service.dify_config") as mock_dify_config,
patch("configs.dify_config") as mock_global_dify_config,
@ -38,9 +39,13 @@ class TestAppGenerateService:
# Setup default mock returns for workflow service
mock_workflow_service_instance = mock_workflow_service.return_value
mock_workflow_service_instance.get_published_workflow.return_value = MagicMock(spec=Workflow)
mock_workflow_service_instance.get_draft_workflow.return_value = MagicMock(spec=Workflow)
mock_workflow_service_instance.get_published_workflow_by_id.return_value = MagicMock(spec=Workflow)
mock_published_workflow = MagicMock(spec=Workflow)
mock_published_workflow.id = str(uuid.uuid4())
mock_workflow_service_instance.get_published_workflow.return_value = mock_published_workflow
mock_draft_workflow = MagicMock(spec=Workflow)
mock_draft_workflow.id = str(uuid.uuid4())
mock_workflow_service_instance.get_draft_workflow.return_value = mock_draft_workflow
mock_workflow_service_instance.get_published_workflow_by_id.return_value = mock_published_workflow
# Setup default mock returns for rate limiting
mock_rate_limit_instance = mock_rate_limit.return_value
@ -66,6 +71,8 @@ class TestAppGenerateService:
mock_advanced_chat_generator_instance.generate.return_value = ["advanced_chat_response"]
mock_advanced_chat_generator_instance.single_iteration_generate.return_value = ["single_iteration_response"]
mock_advanced_chat_generator_instance.single_loop_generate.return_value = ["single_loop_response"]
mock_advanced_chat_generator_instance.retrieve_events.return_value = ["advanced_chat_events"]
mock_advanced_chat_generator_instance.convert_to_event_stream.return_value = ["advanced_chat_stream"]
mock_advanced_chat_generator.convert_to_event_stream.return_value = ["advanced_chat_stream"]
mock_workflow_generator_instance = mock_workflow_generator.return_value
@ -76,6 +83,8 @@ class TestAppGenerateService:
mock_workflow_generator_instance.single_loop_generate.return_value = ["workflow_single_loop_response"]
mock_workflow_generator.convert_to_event_stream.return_value = ["workflow_stream"]
mock_message_based_generator.retrieve_events.return_value = ["workflow_events"]
# Setup default mock returns for account service
mock_account_feature_service.get_system_features.return_value.is_allow_register = True
@ -88,6 +97,7 @@ class TestAppGenerateService:
mock_global_dify_config.BILLING_ENABLED = False
mock_global_dify_config.APP_MAX_ACTIVE_REQUESTS = 100
mock_global_dify_config.APP_DAILY_RATE_LIMIT = 1000
mock_global_dify_config.HOSTED_POOL_CREDITS = 1000
yield {
"billing_service": mock_billing_service,
@ -98,6 +108,7 @@ class TestAppGenerateService:
"agent_chat_generator": mock_agent_chat_generator,
"advanced_chat_generator": mock_advanced_chat_generator,
"workflow_generator": mock_workflow_generator,
"message_based_generator": mock_message_based_generator,
"account_feature_service": mock_account_feature_service,
"dify_config": mock_dify_config,
"global_dify_config": mock_global_dify_config,
@ -280,8 +291,10 @@ class TestAppGenerateService:
assert result == ["test_response"]
# Verify advanced chat generator was called
mock_external_service_dependencies["advanced_chat_generator"].return_value.generate.assert_called_once()
mock_external_service_dependencies["advanced_chat_generator"].convert_to_event_stream.assert_called_once()
mock_external_service_dependencies["advanced_chat_generator"].return_value.retrieve_events.assert_called_once()
mock_external_service_dependencies[
"advanced_chat_generator"
].return_value.convert_to_event_stream.assert_called_once()
def test_generate_workflow_mode_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
@ -304,7 +317,7 @@ class TestAppGenerateService:
assert result == ["test_response"]
# Verify workflow generator was called
mock_external_service_dependencies["workflow_generator"].return_value.generate.assert_called_once()
mock_external_service_dependencies["message_based_generator"].retrieve_events.assert_called_once()
mock_external_service_dependencies["workflow_generator"].convert_to_event_stream.assert_called_once()
def test_generate_with_specific_workflow_id(self, db_session_with_containers, mock_external_service_dependencies):
@ -970,14 +983,27 @@ class TestAppGenerateService:
}
# Execute the method under test
result = AppGenerateService.generate(
app_model=app, user=account, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=True
)
with patch("services.app_generate_service.AppExecutionParams") as mock_exec_params:
mock_payload = MagicMock()
mock_payload.workflow_run_id = fake.uuid4()
mock_payload.model_dump_json.return_value = "{}"
mock_exec_params.new.return_value = mock_payload
result = AppGenerateService.generate(
app_model=app, user=account, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=True
)
# Verify the result
assert result == ["test_response"]
# Verify workflow generator was called with complex args
mock_external_service_dependencies["workflow_generator"].return_value.generate.assert_called_once()
call_args = mock_external_service_dependencies["workflow_generator"].return_value.generate.call_args
assert call_args[1]["args"] == args
# Verify payload was built with complex args
mock_exec_params.new.assert_called_once()
call_kwargs = mock_exec_params.new.call_args.kwargs
assert call_kwargs["args"] == args
# Verify workflow streaming event retrieval was used
mock_external_service_dependencies["message_based_generator"].retrieve_events.assert_called_once_with(
ANY,
mock_payload.workflow_run_id,
on_subscribe=ANY,
)

View File

@ -0,0 +1,112 @@
import json
import uuid
from unittest.mock import MagicMock
import pytest
from core.workflow.enums import NodeType
from core.workflow.nodes.human_input.entities import (
EmailDeliveryConfig,
EmailDeliveryMethod,
EmailRecipients,
ExternalRecipient,
HumanInputNodeData,
)
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from models.model import App, AppMode
from models.workflow import Workflow, WorkflowType
from services.workflow_service import WorkflowService
def _create_app_with_draft_workflow(session, *, delivery_method_id: uuid.UUID) -> tuple[App, Account]:
    """Persist a tenant, owner account, app, and draft workflow with one human-input node.

    The workflow graph contains a single human-input node (id ``"human-node"``)
    whose email delivery method carries *delivery_method_id*, so tests can
    address that method directly. Everything is committed before returning
    the created ``(app, account)`` pair.
    """
    # Workspace plus the account that owns it.
    tenant = Tenant(name="Test Tenant")
    account = Account(name="Tester", email="tester@example.com")
    session.add(tenant)
    session.add(account)
    session.flush()

    membership = TenantAccountJoin(
        tenant_id=tenant.id,
        account_id=account.id,
        current=True,
        role=TenantAccountRole.OWNER.value,
    )
    session.add(membership)

    app = App(
        tenant_id=tenant.id,
        name="Test App",
        description="",
        mode=AppMode.WORKFLOW.value,
        icon_type="emoji",
        icon="app",
        icon_background="#ffffff",
        enable_site=True,
        enable_api=True,
        created_by=account.id,
        updated_by=account.id,
    )
    session.add(app)
    session.flush()

    # Email delivery aimed at a single external recipient; the subject/body
    # templates exercise placeholder substitution.
    delivery_method = EmailDeliveryMethod(
        id=delivery_method_id,
        enabled=True,
        config=EmailDeliveryConfig(
            recipients=EmailRecipients(
                whole_workspace=False,
                items=[ExternalRecipient(email="recipient@example.com")],
            ),
            subject="Test {{recipient_email}}",
            body="Body {{#url#}} {{form_content}}",
        ),
    )

    node_payload = HumanInputNodeData(
        title="Human Input",
        delivery_methods=[delivery_method],
        form_content="Hello Human Input",
        inputs=[],
        user_actions=[],
    ).model_dump(mode="json")
    # The node "type" field is not part of HumanInputNodeData, so stamp it in.
    node_payload["type"] = NodeType.HUMAN_INPUT.value

    graph_json = json.dumps({"nodes": [{"id": "human-node", "data": node_payload}], "edges": []})

    workflow = Workflow.new(
        tenant_id=tenant.id,
        app_id=app.id,
        type=WorkflowType.WORKFLOW.value,
        version=Workflow.VERSION_DRAFT,
        graph=graph_json,
        features=json.dumps({}),
        created_by=account.id,
        environment_variables=[],
        conversation_variables=[],
        rag_pipeline_variables=[],
    )
    session.add(workflow)
    session.commit()
    return app, account
def test_human_input_delivery_test_sends_email(
    db_session_with_containers,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """A human-input delivery test dispatches exactly one email to the configured recipient."""
    delivery_method_id = uuid.uuid4()
    app, account = _create_app_with_draft_workflow(db_session_with_containers, delivery_method_id=delivery_method_id)

    # Pretend the mail extension is initialised and capture outgoing sends.
    mail_send_spy = MagicMock()
    monkeypatch.setattr("services.human_input_delivery_test_service.mail.is_inited", lambda: True)
    monkeypatch.setattr("services.human_input_delivery_test_service.mail.send", mail_send_spy)

    WorkflowService().test_human_input_delivery(
        app_model=app,
        account=account,
        node_id="human-node",
        delivery_method_id=str(delivery_method_id),
    )

    assert mail_send_spy.call_count == 1
    assert mail_send_spy.call_args.kwargs["to"] == "recipient@example.com"

View File

@ -0,0 +1,38 @@
from __future__ import annotations
import pytest
from services.message_service import MessageService
from tests.test_containers_integration_tests.helpers.execution_extra_content import (
create_human_input_message_fixture,
)
@pytest.mark.usefixtures("flask_req_ctx_with_containers")
def test_pagination_returns_extra_contents(db_session_with_containers):
    """Paginated messages expose the human-input form submission via ``extra_contents``."""
    fixture = create_human_input_message_fixture(db_session_with_containers)

    page = MessageService.pagination_by_first_id(
        app_model=fixture.app,
        user=fixture.account,
        conversation_id=fixture.conversation.id,
        first_id=None,
        limit=10,
    )

    assert page.data
    first_message = page.data[0]

    expected_submission = {
        "node_id": fixture.form.node_id,
        "node_title": fixture.node_title,
        "rendered_content": fixture.form.rendered_content,
        "action_id": fixture.action_id,
        "action_text": fixture.action_text,
    }
    assert first_message.extra_contents == [
        {
            "type": "human_input",
            "workflow_run_id": fixture.message.workflow_run_id,
            "submitted": True,
            "form_submission_data": expected_submission,
        }
    ]

View File

@ -465,6 +465,27 @@ class TestWorkflowRunService:
db.session.add(node_execution)
node_executions.append(node_execution)
paused_node_execution = WorkflowNodeExecutionModel(
tenant_id=app.tenant_id,
app_id=app.id,
workflow_id=workflow_run.workflow_id,
triggered_from="workflow-run",
workflow_run_id=workflow_run.id,
index=99,
node_id="node_paused",
node_type="human_input",
title="Paused Node",
inputs=json.dumps({"input": "paused"}),
process_data=json.dumps({"process": "paused"}),
status="paused",
elapsed_time=0.5,
execution_metadata=json.dumps({"tokens": 0}),
created_by_role=CreatorUserRole.ACCOUNT,
created_by=account.id,
created_at=datetime.now(UTC),
)
db.session.add(paused_node_execution)
db.session.commit()
# Act: Execute the method under test
@ -473,16 +494,19 @@ class TestWorkflowRunService:
# Assert: Verify the expected outcomes
assert result is not None
assert len(result) == 3
assert len(result) == 4
# Verify node execution properties
statuses = [node_execution.status for node_execution in result]
assert "paused" in statuses
assert statuses.count("succeeded") == 3
assert statuses.count("paused") == 1
for node_execution in result:
assert node_execution.tenant_id == app.tenant_id
assert node_execution.app_id == app.id
assert node_execution.workflow_run_id == workflow_run.id
assert node_execution.index in [0, 1, 2] # Check that index is one of the expected values
assert node_execution.node_id.startswith("node_") # Check that node_id starts with "node_"
assert node_execution.status == "succeeded"
assert node_execution.node_id.startswith("node_")
def test_get_workflow_run_node_executions_empty(
self, db_session_with_containers, mock_external_service_dependencies

View File

@ -6,6 +6,7 @@ from faker import Faker
from pydantic import ValidationError
from core.tools.entities.tool_entities import WorkflowToolParameterConfiguration
from core.tools.errors import WorkflowToolHumanInputNotSupportedError
from models.tools import WorkflowToolProvider
from models.workflow import Workflow as WorkflowModel
from services.account_service import AccountService, TenantService
@ -513,6 +514,62 @@ class TestWorkflowToolManageService:
assert tool_count == 0
def test_create_workflow_tool_human_input_node_error(
    self, db_session_with_containers, mock_external_service_dependencies
):
    """
    Test workflow tool creation fails when workflow contains human input nodes.

    This test verifies:
    - Human input nodes prevent workflow tool publishing
    - Correct error message
    - No database changes when workflow is invalid
    """
    from extensions.ext_database import db

    fake = Faker()

    # Create test data
    app, account, workflow = self._create_test_app_and_account(
        db_session_with_containers, mock_external_service_dependencies
    )

    # Replace the graph with one whose only node is a human-input node.
    human_input_graph = {
        "nodes": [
            {
                "id": "human_input_node",
                "data": {"type": "human-input"},
            }
        ]
    }
    workflow.graph = json.dumps(human_input_graph)

    tool_parameters = self._create_test_workflow_tool_parameters()

    with pytest.raises(WorkflowToolHumanInputNotSupportedError) as exc_info:
        WorkflowToolManageService.create_workflow_tool(
            user_id=account.id,
            tenant_id=account.current_tenant.id,
            workflow_app_id=app.id,
            name=fake.word(),
            label=fake.word(),
            icon={"type": "emoji", "emoji": "🔧"},
            description=fake.text(max_nb_chars=200),
            parameters=tool_parameters,
        )

    assert exc_info.value.error_code == "workflow_tool_human_input_not_supported"

    # Publishing must not have created any provider rows for the tenant.
    provider_count = (
        db.session.query(WorkflowToolProvider)
        .where(
            WorkflowToolProvider.tenant_id == account.current_tenant.id,
        )
        .count()
    )
    assert provider_count == 0
def test_update_workflow_tool_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test successful workflow tool update with valid parameters.
@ -600,6 +657,80 @@ class TestWorkflowToolManageService:
mock_external_service_dependencies["tool_label_manager"].update_tool_labels.assert_called()
mock_external_service_dependencies["tool_transform_service"].workflow_provider_to_controller.assert_called()
def test_update_workflow_tool_human_input_node_error(
    self, db_session_with_containers, mock_external_service_dependencies
):
    """
    Test workflow tool update fails when workflow contains human input nodes.

    This test verifies:
    - Human input nodes prevent workflow tool updates
    - Correct error message
    - Existing tool data remains unchanged
    """
    from extensions.ext_database import db

    fake = Faker()

    # Create test data
    app, account, workflow = self._create_test_app_and_account(
        db_session_with_containers, mock_external_service_dependencies
    )

    # Publish an initial (valid) workflow tool so there is something to update.
    initial_tool_name = fake.word()
    initial_tool_parameters = self._create_test_workflow_tool_parameters()
    WorkflowToolManageService.create_workflow_tool(
        user_id=account.id,
        tenant_id=account.current_tenant.id,
        workflow_app_id=app.id,
        name=initial_tool_name,
        label=fake.word(),
        icon={"type": "emoji", "emoji": "🔧"},
        description=fake.text(max_nb_chars=200),
        parameters=initial_tool_parameters,
    )

    created_tool = (
        db.session.query(WorkflowToolProvider)
        .where(
            WorkflowToolProvider.tenant_id == account.current_tenant.id,
            WorkflowToolProvider.app_id == app.id,
        )
        .first()
    )
    original_name = created_tool.name

    # Now poison the workflow graph with a human-input node and persist it.
    human_input_graph = {
        "nodes": [
            {
                "id": "human_input_node",
                "data": {"type": "human-input"},
            }
        ]
    }
    workflow.graph = json.dumps(human_input_graph)
    db.session.commit()

    with pytest.raises(WorkflowToolHumanInputNotSupportedError) as exc_info:
        WorkflowToolManageService.update_workflow_tool(
            user_id=account.id,
            tenant_id=account.current_tenant.id,
            workflow_tool_id=created_tool.id,
            name=fake.word(),
            label=fake.word(),
            icon={"type": "emoji", "emoji": "⚙️"},
            description=fake.text(max_nb_chars=200),
            parameters=initial_tool_parameters,
        )

    assert exc_info.value.error_code == "workflow_tool_human_input_not_supported"

    # The failed update must leave the existing tool untouched.
    db.session.refresh(created_tool)
    assert created_tool.name == original_name
def test_update_workflow_tool_not_found_error(self, db_session_with_containers, mock_external_service_dependencies):
"""
Test workflow tool update fails when tool does not exist.