From be4c828214b4df661a474260382136fffb0958b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Fri, 24 Apr 2026 14:37:10 +0800 Subject: [PATCH] feat: add service api of HITL (#32826) Co-authored-by: Blackoutta Co-authored-by: QuantumGhost Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: QuantumGhost Co-authored-by: Yunlu Wen --- api/controllers/common/human_input.py | 6 + api/controllers/console/human_input_form.py | 18 +- api/controllers/service_api/__init__.py | 4 + .../service_api/app/human_input_form.py | 137 ++++ .../service_api/app/workflow_events.py | 142 ++++ api/controllers/web/human_input_form.py | 7 +- .../app/apps/advanced_chat/app_generator.py | 12 +- .../generate_response_converter.py | 39 +- .../advanced_chat/generate_task_pipeline.py | 72 +- .../agent_chat/generate_response_converter.py | 14 +- .../base_app_generate_response_converter.py | 24 +- .../apps/chat/generate_response_converter.py | 14 +- .../common/workflow_response_converter.py | 22 +- .../completion/generate_response_converter.py | 16 +- api/core/app/apps/message_generator.py | 5 +- .../pipeline/generate_response_converter.py | 8 +- .../app/apps/pipeline/pipeline_generator.py | 12 +- api/core/app/apps/streaming_utils.py | 2 +- api/core/app/apps/workflow/app_generator.py | 12 +- .../workflow/generate_response_converter.py | 17 +- .../apps/workflow/generate_task_pipeline.py | 59 +- api/core/app/entities/task_entities.py | 98 ++- api/core/workflow/human_input_forms.py | 50 +- api/core/workflow/human_input_policy.py | 73 ++ .../sqlalchemy_api_workflow_run_repository.py | 19 +- api/services/app_generate_service.py | 6 + .../workflow_event_snapshot_service.py | 161 +++- .../app_generate/workflow_execute_task.py | 28 +- ..._sqlalchemy_api_workflow_run_repository.py | 140 +++- .../console/test_human_input_form.py | 29 + .../service_api/app/test_hitl_service_api.py | 707 +++++++++++++++++ 
.../service_api/app/test_human_input_form.py | 184 +++++ .../service_api/app/test_workflow_events.py | 166 ++++ .../test_generate_response_converter.py | 37 +- .../test_generate_task_pipeline_core.py | 55 ++ ...st_base_app_generate_response_converter.py | 102 +++ .../core/app/apps/test_message_generator.py | 19 +- .../core/app/apps/test_streaming_utils.py | 22 + .../test_generate_task_pipeline_core.py | 48 +- .../core/workflow/test_human_input_forms.py | 50 +- .../core/workflow/test_human_input_policy.py | 50 ++ ..._sqlalchemy_api_workflow_run_repository.py | 64 ++ .../services/test_app_generate_service.py | 3 +- .../test_workflow_event_snapshot_service.py | 708 ++++++++++++++++-- .../tasks/test_workflow_execute_task.py | 229 +++++- .../template/template_advanced_chat.en.mdx | 507 +++++++++++++ .../template/template_advanced_chat.ja.mdx | 507 +++++++++++++ .../template/template_advanced_chat.zh.mdx | 507 +++++++++++++ .../develop/template/template_workflow.en.mdx | 511 +++++++++++++ .../develop/template/template_workflow.ja.mdx | 511 +++++++++++++ .../develop/template/template_workflow.zh.mdx | 511 +++++++++++++ 51 files changed, 6530 insertions(+), 214 deletions(-) create mode 100644 api/controllers/common/human_input.py create mode 100644 api/controllers/service_api/app/human_input_form.py create mode 100644 api/controllers/service_api/app/workflow_events.py create mode 100644 api/core/workflow/human_input_policy.py create mode 100644 api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py create mode 100644 api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py create mode 100644 api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py create mode 100644 api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py create mode 100644 api/tests/unit_tests/core/workflow/test_human_input_policy.py create mode 100644 api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py 
diff --git a/api/controllers/common/human_input.py b/api/controllers/common/human_input.py new file mode 100644 index 0000000000..5d6f4efb95 --- /dev/null +++ b/api/controllers/common/human_input.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel, JsonValue + + +class HumanInputFormSubmitPayload(BaseModel): + inputs: dict[str, JsonValue] + action: str diff --git a/api/controllers/console/human_input_form.py b/api/controllers/console/human_input_form.py index 845af37365..79b3e6cc9f 100644 --- a/api/controllers/console/human_input_form.py +++ b/api/controllers/console/human_input_form.py @@ -8,10 +8,10 @@ from collections.abc import Generator from flask import Response, jsonify, request from flask_restx import Resource -from pydantic import BaseModel from sqlalchemy import select from sqlalchemy.orm import Session, sessionmaker +from controllers.common.human_input import HumanInputFormSubmitPayload from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from controllers.web.error import InvalidArgumentError, NotFoundError @@ -20,11 +20,11 @@ from core.app.apps.base_app_generator import BaseAppGenerator from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter from core.app.apps.message_generator import MessageGenerator from core.app.apps.workflow.app_generator import WorkflowAppGenerator +from core.workflow.human_input_policy import HumanInputSurface, is_recipient_type_allowed_for_surface from extensions.ext_database import db from libs.login import current_account_with_tenant, login_required from models import App from models.enums import CreatorUserRole -from models.human_input import RecipientType from models.model import AppMode from models.workflow import WorkflowRun from repositories.factory import DifyAPIRepositoryFactory @@ -34,11 +34,6 @@ from services.workflow_event_snapshot_service import build_workflow_event_stream logger = logging.getLogger(__name__) 
-class HumanInputFormSubmitPayload(BaseModel): - inputs: dict - action: str - - def _jsonify_form_definition(form: Form) -> Response: payload = form.get_definition().model_dump() payload["expiration_time"] = int(form.expiration_time.timestamp()) @@ -56,6 +51,11 @@ class ConsoleHumanInputFormApi(Resource): if form.tenant_id != current_tenant_id: raise NotFoundError("App not found") + @staticmethod + def _ensure_console_recipient_type(form: Form) -> None: + if not is_recipient_type_allowed_for_surface(form.recipient_type, HumanInputSurface.CONSOLE): + raise NotFoundError("form not found") + @setup_required @login_required @account_initialization_required @@ -99,10 +99,8 @@ class ConsoleHumanInputFormApi(Resource): raise NotFoundError(f"form not found, token={form_token}") self._ensure_console_access(form) - + self._ensure_console_recipient_type(form) recipient_type = form.recipient_type - if recipient_type not in {RecipientType.CONSOLE, RecipientType.BACKSTAGE}: - raise NotFoundError(f"form not found, token={form_token}") # The type checker is not smart enought to validate the following invariant. # So we need to assert it manually. assert recipient_type is not None, "recipient_type cannot be None here." 
diff --git a/api/controllers/service_api/__init__.py b/api/controllers/service_api/__init__.py index 4f7f7d9a98..182631e8f5 100644 --- a/api/controllers/service_api/__init__.py +++ b/api/controllers/service_api/__init__.py @@ -23,9 +23,11 @@ from .app import ( conversation, file, file_preview, + human_input_form, message, site, workflow, + workflow_events, ) from .dataset import ( dataset, @@ -50,6 +52,7 @@ __all__ = [ "file", "file_preview", "hit_testing", + "human_input_form", "index", "message", "metadata", @@ -58,6 +61,7 @@ __all__ = [ "segment", "site", "workflow", + "workflow_events", ] api.add_namespace(service_api_ns) diff --git a/api/controllers/service_api/app/human_input_form.py b/api/controllers/service_api/app/human_input_form.py new file mode 100644 index 0000000000..8e5003dbbf --- /dev/null +++ b/api/controllers/service_api/app/human_input_form.py @@ -0,0 +1,137 @@ +""" +Service API human input form endpoints. + +This module exposes app-token authenticated APIs for fetching and submitting +paused human input forms in workflow/chatflow runs. 
+""" + +import json +import logging +from datetime import datetime + +from flask import Response +from flask_restx import Resource +from werkzeug.exceptions import BadRequest, NotFound + +from controllers.common.human_input import HumanInputFormSubmitPayload +from controllers.common.schema import register_schema_models +from controllers.service_api import service_api_ns +from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from core.workflow.human_input_policy import HumanInputSurface, is_recipient_type_allowed_for_surface +from extensions.ext_database import db +from models.model import App, EndUser +from services.human_input_service import Form, FormNotFoundError, HumanInputService + +logger = logging.getLogger(__name__) + + +register_schema_models(service_api_ns, HumanInputFormSubmitPayload) + + +def _stringify_default_values(values: dict[str, object]) -> dict[str, str]: + result: dict[str, str] = {} + for key, value in values.items(): + if value is None: + result[key] = "" + elif isinstance(value, (dict, list)): + result[key] = json.dumps(value, ensure_ascii=False) + else: + result[key] = str(value) + return result + + +def _to_timestamp(value: datetime) -> int: + return int(value.timestamp()) + + +def _jsonify_form_definition(form: Form) -> Response: + definition_payload = form.get_definition().model_dump() + payload = { + "form_content": definition_payload["rendered_content"], + "inputs": definition_payload["inputs"], + "resolved_default_values": _stringify_default_values(definition_payload["default_values"]), + "user_actions": definition_payload["user_actions"], + "expiration_time": _to_timestamp(form.expiration_time), + } + return Response(json.dumps(payload, ensure_ascii=False), mimetype="application/json") + + +def _ensure_form_belongs_to_app(form: Form, app_model: App) -> None: + if form.app_id != app_model.id or form.tenant_id != app_model.tenant_id: + raise NotFound("Form not found") + + +def 
_ensure_form_is_allowed_for_service_api(form: Form) -> None: + # Keep app-token callers scoped to the public web-form surface; internal HITL + # routes must continue to flow through console-only authentication. + if not is_recipient_type_allowed_for_surface(form.recipient_type, HumanInputSurface.SERVICE_API): + raise NotFound("Form not found") + + +@service_api_ns.route("/form/human_input/<string:form_token>") +class WorkflowHumanInputFormApi(Resource): + @service_api_ns.doc("get_human_input_form") + @service_api_ns.doc(description="Get a paused human input form by token") + @service_api_ns.doc(params={"form_token": "Human input form token"}) + @service_api_ns.doc( + responses={ + 200: "Form retrieved successfully", + 401: "Unauthorized - invalid API token", + 404: "Form not found", + 412: "Form already submitted or expired", + } + ) + @validate_app_token + def get(self, app_model: App, form_token: str): + service = HumanInputService(db.engine) + form = service.get_form_by_token(form_token) + if form is None: + raise NotFound("Form not found") + + _ensure_form_belongs_to_app(form, app_model) + _ensure_form_is_allowed_for_service_api(form) + service.ensure_form_active(form) + return _jsonify_form_definition(form) + + @service_api_ns.expect(service_api_ns.models[HumanInputFormSubmitPayload.__name__]) + @service_api_ns.doc("submit_human_input_form") + @service_api_ns.doc(description="Submit a paused human input form by token") + @service_api_ns.doc(params={"form_token": "Human input form token"}) + @service_api_ns.doc( + responses={ + 200: "Form submitted successfully", + 400: "Bad request - invalid submission data", + 401: "Unauthorized - invalid API token", + 404: "Form not found", + 412: "Form already submitted or expired", + } + ) + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def post(self, app_model: App, end_user: EndUser, form_token: str): + payload = HumanInputFormSubmitPayload.model_validate(service_api_ns.payload or {}) + 
service = HumanInputService(db.engine) + form = service.get_form_by_token(form_token) + if form is None: + raise NotFound("Form not found") + + _ensure_form_belongs_to_app(form, app_model) + _ensure_form_is_allowed_for_service_api(form) + + recipient_type = form.recipient_type + if recipient_type is None: + logger.warning("Recipient type is None for form, form_id=%s", form.id) + raise BadRequest("Form recipient type is invalid") + + try: + service.submit_form_by_token( + recipient_type=recipient_type, + form_token=form_token, + selected_action_id=payload.action, + form_data=payload.inputs, + submission_end_user_id=end_user.id, + ) + except FormNotFoundError: + raise NotFound("Form not found") + + return {}, 200 diff --git a/api/controllers/service_api/app/workflow_events.py b/api/controllers/service_api/app/workflow_events.py new file mode 100644 index 0000000000..b281b271c0 --- /dev/null +++ b/api/controllers/service_api/app/workflow_events.py @@ -0,0 +1,142 @@ +""" +Service API workflow resume event stream endpoints. 
+""" + +import json +from collections.abc import Generator + +from flask import Response, request +from flask_restx import Resource +from sqlalchemy.orm import sessionmaker +from werkzeug.exceptions import NotFound + +from controllers.service_api import service_api_ns +from controllers.service_api.app.error import NotWorkflowAppError +from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator +from core.app.apps.base_app_generator import BaseAppGenerator +from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter +from core.app.apps.message_generator import MessageGenerator +from core.app.apps.workflow.app_generator import WorkflowAppGenerator +from core.app.entities.task_entities import StreamEvent +from core.workflow.human_input_policy import HumanInputSurface +from extensions.ext_database import db +from models.enums import CreatorUserRole +from models.model import App, AppMode, EndUser +from repositories.factory import DifyAPIRepositoryFactory +from services.workflow_event_snapshot_service import build_workflow_event_stream + + +@service_api_ns.route("/workflow/<string:task_id>/events") +class WorkflowEventsApi(Resource): + """Service API for getting workflow execution events after resume.""" + + @service_api_ns.doc("get_workflow_events") + @service_api_ns.doc(description="Get workflow execution events stream after resume") + @service_api_ns.doc( + params={ + "task_id": "Workflow run ID", + "user": "End user identifier (query param)", + "include_state_snapshot": ( + "Whether to replay from persisted state snapshot, " + 'specify `"true"` to include a status snapshot of executed nodes' + ), + "continue_on_pause": ( + "Whether to keep the stream open across workflow_paused events, " + 'specify `"true"` to keep the stream open for `workflow_paused` events.'
+ ), + } + ) + @service_api_ns.doc( + responses={ + 200: "SSE event stream", + 401: "Unauthorized - invalid API token", + 404: "Workflow run not found", + } + ) + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY, required=True)) + def get(self, app_model: App, end_user: EndUser, task_id: str): + app_mode = AppMode.value_of(app_model.mode) + if app_mode not in {AppMode.WORKFLOW, AppMode.ADVANCED_CHAT}: + raise NotWorkflowAppError() + + session_maker = sessionmaker(db.engine) + repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + workflow_run = repo.get_workflow_run_by_id_and_tenant_id( + tenant_id=app_model.tenant_id, + run_id=task_id, + ) + + if workflow_run is None: + raise NotFound("Workflow run not found") + + if workflow_run.app_id != app_model.id: + raise NotFound("Workflow run not found") + + if workflow_run.created_by_role != CreatorUserRole.END_USER: + raise NotFound("Workflow run not found") + + if workflow_run.created_by != end_user.id: + raise NotFound("Workflow run not found") + + workflow_run_entity = workflow_run + + if workflow_run_entity.finished_at is not None: + response = WorkflowResponseConverter.workflow_run_result_to_finish_response( + task_id=workflow_run_entity.id, + workflow_run=workflow_run_entity, + creator_user=end_user, + ) + + payload = response.model_dump(mode="json") + payload["event"] = response.event.value + + def _generate_finished_events() -> Generator[str, None, None]: + yield f"data: {json.dumps(payload)}\n\n" + + event_generator = _generate_finished_events + else: + msg_generator = MessageGenerator() + generator: BaseAppGenerator + if app_mode == AppMode.ADVANCED_CHAT: + generator = AdvancedChatAppGenerator() + elif app_mode == AppMode.WORKFLOW: + generator = WorkflowAppGenerator() + else: + raise NotWorkflowAppError() + + include_state_snapshot = request.args.get("include_state_snapshot", "false").lower() == "true" + continue_on_pause = 
request.args.get("continue_on_pause", "false").lower() == "true" + terminal_events: list[StreamEvent] | None = [] if continue_on_pause else None + + def _generate_stream_events(): + if include_state_snapshot: + return generator.convert_to_event_stream( + build_workflow_event_stream( + app_mode=app_mode, + workflow_run=workflow_run_entity, + tenant_id=app_model.tenant_id, + app_id=app_model.id, + session_maker=session_maker, + human_input_surface=HumanInputSurface.SERVICE_API, + close_on_pause=not continue_on_pause, + ) + ) + return generator.convert_to_event_stream( + msg_generator.retrieve_events( + app_mode, + workflow_run_entity.id, + terminal_events=terminal_events, + ), + ) + + event_generator = _generate_stream_events + + return Response( + event_generator(), + mimetype="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + }, + ) diff --git a/api/controllers/web/human_input_form.py b/api/controllers/web/human_input_form.py index 44876f8303..1ddf2e0717 100644 --- a/api/controllers/web/human_input_form.py +++ b/api/controllers/web/human_input_form.py @@ -9,11 +9,11 @@ from typing import Any, NotRequired, TypedDict from flask import Response, request from flask_restx import Resource -from pydantic import BaseModel from sqlalchemy import select from werkzeug.exceptions import Forbidden from configs import dify_config +from controllers.common.human_input import HumanInputFormSubmitPayload from controllers.web import web_ns from controllers.web.error import NotFoundError, WebFormRateLimitExceededError from controllers.web.site import serialize_app_site_payload @@ -26,11 +26,6 @@ from services.human_input_service import Form, FormNotFoundError, HumanInputServ logger = logging.getLogger(__name__) -class HumanInputFormSubmitPayload(BaseModel): - inputs: dict - action: str - - _FORM_SUBMIT_RATE_LIMITER = RateLimiter( prefix="web_form_submit_rate_limit", max_attempts=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_MAX_ATTEMPTS, diff 
--git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 9e64b471cb..b79d5514b4 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -34,7 +34,11 @@ from core.app.apps.exc import GenerateTaskStoppedError from core.app.apps.message_based_app_generator import MessageBasedAppGenerator from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom -from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse +from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, + ChatbotAppBlockingResponse, + ChatbotAppStreamResponse, +) from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig, PauseStatePersistenceLayer from core.helper.trace_id_helper import extract_external_trace_id_from_args from core.ops.ops_trace_manager import TraceQueueManager @@ -655,7 +659,11 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> ChatbotAppBlockingResponse | Generator[ChatbotAppStreamResponse, None, None]: + ) -> ( + ChatbotAppBlockingResponse + | AdvancedChatPausedBlockingResponse + | Generator[ChatbotAppStreamResponse, None, None] + ): """ Handle response. 
:param application_generate_entity: application generate entity diff --git a/api/core/app/apps/advanced_chat/generate_response_converter.py b/api/core/app/apps/advanced_chat/generate_response_converter.py index fe2702ed69..7cb0c9a8d3 100644 --- a/api/core/app/apps/advanced_chat/generate_response_converter.py +++ b/api/core/app/apps/advanced_chat/generate_response_converter.py @@ -3,7 +3,7 @@ from typing import Any, cast from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( - AppBlockingResponse, + AdvancedChatPausedBlockingResponse, AppStreamResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, @@ -12,22 +12,40 @@ from core.app.entities.task_entities import ( NodeFinishStreamResponse, NodeStartStreamResponse, PingStreamResponse, + StreamEvent, ) -class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = ChatbotAppBlockingResponse - +class AdvancedChatAppGenerateResponseConverter( + AppGenerateResponseConverter[ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse] +): @classmethod - def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_full_response( + cls, blocking_response: ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking full response. 
:param blocking_response: blocking response :return: """ - blocking_response = cast(ChatbotAppBlockingResponse, blocking_response) + if isinstance(blocking_response, AdvancedChatPausedBlockingResponse): + paused_data = blocking_response.data.model_dump(mode="json") + return { + "event": StreamEvent.WORKFLOW_PAUSED.value, + "task_id": blocking_response.task_id, + "id": blocking_response.data.id, + "message_id": blocking_response.data.message_id, + "conversation_id": blocking_response.data.conversation_id, + "mode": blocking_response.data.mode, + "answer": blocking_response.data.answer, + "metadata": blocking_response.data.metadata, + "created_at": blocking_response.data.created_at, + "workflow_run_id": blocking_response.data.workflow_run_id, + "data": paused_data, + } + response = { - "event": "message", + "event": StreamEvent.MESSAGE.value, "task_id": blocking_response.task_id, "id": blocking_response.data.id, "message_id": blocking_response.data.message_id, @@ -41,7 +59,9 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_simple_response( + cls, blocking_response: ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking simple response. 
:param blocking_response: blocking response @@ -50,7 +70,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): response = cls.convert_blocking_full_response(blocking_response) metadata = response.get("metadata", {}) - response["metadata"] = cls._get_simple_metadata(metadata) + if isinstance(metadata, dict): + response["metadata"] = cls._get_simple_metadata(metadata) return response diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 78b582bdf5..82dbf5381d 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -53,14 +53,18 @@ from core.app.entities.queue_entities import ( WorkflowQueueMessage, ) from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, ErrorStreamResponse, + HumanInputRequiredPauseReasonPayload, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, MessageEndStreamResponse, PingStreamResponse, StreamResponse, + WorkflowPauseStreamResponse, WorkflowTaskState, ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline @@ -210,7 +214,13 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): if message.status == MessageStatus.PAUSED and message.answer: self._task_state.answer = message.answer - def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: + def process( + self, + ) -> Union[ + ChatbotAppBlockingResponse, + AdvancedChatPausedBlockingResponse, + Generator[ChatbotAppStreamResponse, None, None], + ]: """ Process generate task pipeline. 
:return: @@ -226,14 +236,39 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: return self._to_blocking_response(generator) - def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) -> ChatbotAppBlockingResponse: + def _to_blocking_response( + self, generator: Generator[StreamResponse, None, None] + ) -> Union[ChatbotAppBlockingResponse, AdvancedChatPausedBlockingResponse]: """ Process blocking response. :return: """ + human_input_responses: list[HumanInputRequiredResponse] = [] for stream_response in generator: if isinstance(stream_response, ErrorStreamResponse): raise stream_response.err + elif isinstance(stream_response, HumanInputRequiredResponse): + human_input_responses.append(stream_response) + elif isinstance(stream_response, WorkflowPauseStreamResponse): + return AdvancedChatPausedBlockingResponse( + task_id=stream_response.task_id, + data=AdvancedChatPausedBlockingResponse.Data( + id=self._message_id, + mode=self._conversation_mode, + conversation_id=self._conversation_id, + message_id=self._message_id, + workflow_run_id=stream_response.data.workflow_run_id, + answer=self._task_state.answer, + metadata=self._message_end_to_stream_response().metadata, + created_at=self._message_created_at, + paused_nodes=stream_response.data.paused_nodes, + reasons=stream_response.data.reasons, + status=stream_response.data.status, + elapsed_time=stream_response.data.elapsed_time, + total_tokens=stream_response.data.total_tokens, + total_steps=stream_response.data.total_steps, + ), + ) elif isinstance(stream_response, MessageEndStreamResponse): extras = {} if stream_response.metadata: @@ -254,8 +289,41 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: continue + if human_input_responses: + return self._build_paused_blocking_response_from_human_input(human_input_responses) + raise ValueError("queue listening stopped unexpectedly.") + def _build_paused_blocking_response_from_human_input( + self, 
human_input_responses: list[HumanInputRequiredResponse] + ) -> AdvancedChatPausedBlockingResponse: + runtime_state = self._resolve_graph_runtime_state() + paused_nodes = list(dict.fromkeys(response.data.node_id for response in human_input_responses)) + reasons = [ + HumanInputRequiredPauseReasonPayload.from_response_data(response.data).model_dump(mode="json") + for response in human_input_responses + ] + + return AdvancedChatPausedBlockingResponse( + task_id=self._application_generate_entity.task_id, + data=AdvancedChatPausedBlockingResponse.Data( + id=self._message_id, + mode=self._conversation_mode, + conversation_id=self._conversation_id, + message_id=self._message_id, + workflow_run_id=human_input_responses[-1].workflow_run_id, + answer=self._task_state.answer, + metadata=self._message_end_to_stream_response().metadata, + created_at=self._message_created_at, + paused_nodes=paused_nodes, + reasons=reasons, + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=time.perf_counter() - self._base_task_pipeline.start_at, + total_tokens=runtime_state.total_tokens, + total_steps=runtime_state.node_run_steps, + ), + ) + def _to_stream_response( self, generator: Generator[StreamResponse, None, None] ) -> Generator[ChatbotAppStreamResponse, Any, None]: diff --git a/api/core/app/apps/agent_chat/generate_response_converter.py b/api/core/app/apps/agent_chat/generate_response_converter.py index 731c6ee12e..03bc0a9108 100644 --- a/api/core/app/apps/agent_chat/generate_response_converter.py +++ b/api/core/app/apps/agent_chat/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,11 +14,9 @@ from core.app.entities.task_entities import ( ) -class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): - 
_blocking_response_type = ChatbotAppBlockingResponse - +class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter[ChatbotAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking full response. :param blocking_response: blocking response @@ -37,7 +37,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -70,7 +70,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, @@ -101,7 +101,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, diff --git a/api/core/app/apps/base_app_generate_response_converter.py b/api/core/app/apps/base_app_generate_response_converter.py index d5edfaeb25..abcbb2f943 100644 --- a/api/core/app/apps/base_app_generate_response_converter.py +++ b/api/core/app/apps/base_app_generate_response_converter.py @@ -1,7 +1,9 @@ import logging from abc import ABC, abstractmethod from collections.abc import Generator, Mapping -from typing import Any, Union +from typing import Any, Union, cast + +from pydantic import JsonValue from 
core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.task_entities import AppBlockingResponse, AppStreamResponse @@ -11,8 +13,10 @@ from graphon.model_runtime.errors.invoke import InvokeError logger = logging.getLogger(__name__) -class AppGenerateResponseConverter(ABC): - _blocking_response_type: type[AppBlockingResponse] +class AppGenerateResponseConverter[TBlockingResponse: AppBlockingResponse](ABC): + @classmethod + def _cast_blocking_response(cls, response: AppBlockingResponse) -> TBlockingResponse: + return cast(TBlockingResponse, response) @classmethod def convert( @@ -20,7 +24,7 @@ class AppGenerateResponseConverter(ABC): ) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]: if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}: if isinstance(response, AppBlockingResponse): - return cls.convert_blocking_full_response(response) + return cls.convert_blocking_full_response(cls._cast_blocking_response(response)) else: def _generate_full_response() -> Generator[dict[str, Any] | str, Any, None]: @@ -29,7 +33,7 @@ class AppGenerateResponseConverter(ABC): return _generate_full_response() else: if isinstance(response, AppBlockingResponse): - return cls.convert_blocking_simple_response(response) + return cls.convert_blocking_simple_response(cls._cast_blocking_response(response)) else: def _generate_simple_response() -> Generator[dict[str, Any] | str, Any, None]: @@ -39,12 +43,12 @@ class AppGenerateResponseConverter(ABC): @classmethod @abstractmethod - def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_full_response(cls, blocking_response: TBlockingResponse) -> dict[str, Any]: raise NotImplementedError @classmethod @abstractmethod - def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_simple_response(cls, blocking_response: TBlockingResponse) -> dict[str, Any]: raise 
NotImplementedError @classmethod @@ -106,13 +110,13 @@ class AppGenerateResponseConverter(ABC): return metadata @classmethod - def _error_to_stream_response(cls, e: Exception) -> dict[str, Any]: + def _error_to_stream_response(cls, e: Exception) -> dict[str, JsonValue]: """ Error to stream response. :param e: exception :return: """ - error_responses: dict[type[Exception], dict[str, Any]] = { + error_responses: dict[type[Exception], dict[str, JsonValue]] = { ValueError: {"code": "invalid_param", "status": 400}, ProviderTokenNotInitError: {"code": "provider_not_initialize", "status": 400}, QuotaExceededError: { @@ -126,7 +130,7 @@ class AppGenerateResponseConverter(ABC): } # Determine the response based on the type of exception - data: dict[str, Any] | None = None + data: dict[str, JsonValue] | None = None for k, v in error_responses.items(): if isinstance(e, k): data = v diff --git a/api/core/app/apps/chat/generate_response_converter.py b/api/core/app/apps/chat/generate_response_converter.py index 3d0375151d..26efcbfafd 100644 --- a/api/core/app/apps/chat/generate_response_converter.py +++ b/api/core/app/apps/chat/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,11 +14,9 @@ from core.app.entities.task_entities import ( ) -class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = ChatbotAppBlockingResponse - +class ChatAppGenerateResponseConverter(AppGenerateResponseConverter[ChatbotAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking full response. 
:param blocking_response: blocking response @@ -37,7 +37,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -70,7 +70,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, @@ -101,7 +101,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index bd685d5189..7bab3f7bff 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -52,6 +52,7 @@ from core.tools.tool_manager import ToolManager from core.trigger.constants import TRIGGER_PLUGIN_NODE_TYPE from core.trigger.trigger_manager import TriggerManager from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface, enrich_human_input_pause_reasons from core.workflow.system_variables import SystemVariableKey, system_variables_to_mapping from core.workflow.workflow_entry import WorkflowEntry from extensions.ext_database import db @@ -336,7 +337,26 @@ class WorkflowResponseConverter: except (TypeError, json.JSONDecodeError): definition_payload = {} 
display_in_ui_by_form_id[str(form_id)] = bool(definition_payload.get("display_in_ui")) - form_token_by_form_id = load_form_tokens_by_form_id(human_input_form_ids, session=session) + form_token_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=( + HumanInputSurface.SERVICE_API + if self._application_generate_entity.invoke_from == InvokeFrom.SERVICE_API + else None + ), + ) + + # Reconnect paths must preserve the same pause-reason contract as live streams; + # otherwise clients see schema drift after resume. + pause_reasons = enrich_human_input_pause_reasons( + pause_reasons, + form_tokens_by_form_id=form_token_by_form_id, + expiration_times_by_form_id={ + form_id: int(expiration_time.timestamp()) + for form_id, expiration_time in expiration_times_by_form_id.items() + }, + ) responses: list[StreamResponse] = [] diff --git a/api/core/app/apps/completion/generate_response_converter.py b/api/core/app/apps/completion/generate_response_converter.py index 71886b39ba..ad978f58e0 100644 --- a/api/core/app/apps/completion/generate_response_converter.py +++ b/api/core/app/apps/completion/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,17 +14,15 @@ from core.app.entities.task_entities import ( ) -class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = CompletionAppBlockingResponse - +class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter[CompletionAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: CompletionAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: CompletionAppBlockingResponse): """ Convert 
blocking full response. :param blocking_response: blocking response :return: """ - response = { + response: dict[str, Any] = { "event": "message", "task_id": blocking_response.task_id, "id": blocking_response.data.id, @@ -36,7 +36,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: CompletionAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: CompletionAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -69,7 +69,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "message_id": chunk.message_id, "created_at": chunk.created_at, @@ -99,7 +99,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "message_id": chunk.message_id, "created_at": chunk.created_at, diff --git a/api/core/app/apps/message_generator.py b/api/core/app/apps/message_generator.py index 68631bb230..c04f20c796 100644 --- a/api/core/app/apps/message_generator.py +++ b/api/core/app/apps/message_generator.py @@ -1,6 +1,7 @@ -from collections.abc import Callable, Generator, Mapping +from collections.abc import Callable, Generator, Iterable, Mapping from core.app.apps.streaming_utils import stream_topic_events +from core.app.entities.task_entities import StreamEvent from extensions.ext_redis import get_pubsub_broadcast_channel from libs.broadcast_channel.channel import Topic from models.model import AppMode @@ -26,6 +27,7 @@ class MessageGenerator: idle_timeout=300, ping_interval: float = 10.0, on_subscribe: Callable[[], None] | None = None, + terminal_events: 
Iterable[str | StreamEvent] | None = None, ) -> Generator[Mapping | str, None, None]: topic = cls.get_response_topic(app_mode, workflow_run_id) return stream_topic_events( @@ -33,4 +35,5 @@ class MessageGenerator: idle_timeout=idle_timeout, ping_interval=ping_interval, on_subscribe=on_subscribe, + terminal_events=terminal_events, ) diff --git a/api/core/app/apps/pipeline/generate_response_converter.py b/api/core/app/apps/pipeline/generate_response_converter.py index 02b3160b7c..3913657ae8 100644 --- a/api/core/app/apps/pipeline/generate_response_converter.py +++ b/api/core/app/apps/pipeline/generate_response_converter.py @@ -13,11 +13,9 @@ from core.app.entities.task_entities import ( ) -class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = WorkflowAppBlockingResponse - +class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter[WorkflowAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, Any]: # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: """ Convert blocking full response. :param blocking_response: blocking response @@ -26,7 +24,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): return dict(blocking_response.model_dump()) @classmethod - def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, Any]: # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: """ Convert blocking simple response. 
:param blocking_response: blocking response diff --git a/api/core/app/apps/pipeline/pipeline_generator.py b/api/core/app/apps/pipeline/pipeline_generator.py index 4b2f17189b..4a76d0809e 100644 --- a/api/core/app/apps/pipeline/pipeline_generator.py +++ b/api/core/app/apps/pipeline/pipeline_generator.py @@ -27,7 +27,11 @@ from core.app.apps.workflow.generate_response_converter import WorkflowAppGenera from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline from core.app.entities.app_invoke_entities import InvokeFrom, RagPipelineGenerateEntity from core.app.entities.rag_pipeline_invoke_entities import RagPipelineInvokeEntity -from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse +from core.app.entities.task_entities import ( + WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, + WorkflowAppStreamResponse, +) from core.datasource.entities.datasource_entities import ( DatasourceProviderType, OnlineDriveBrowseFilesRequest, @@ -627,7 +631,11 @@ class PipelineGenerator(BaseAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> WorkflowAppBlockingResponse | Generator[WorkflowAppStreamResponse, None, None]: + ) -> ( + WorkflowAppBlockingResponse + | WorkflowAppPausedBlockingResponse + | Generator[WorkflowAppStreamResponse, None, None] + ): """ Handle response. 
:param application_generate_entity: application generate entity diff --git a/api/core/app/apps/streaming_utils.py b/api/core/app/apps/streaming_utils.py index af3441aca3..5743bad4b6 100644 --- a/api/core/app/apps/streaming_utils.py +++ b/api/core/app/apps/streaming_utils.py @@ -59,7 +59,7 @@ def stream_topic_events( def _normalize_terminal_events(terminal_events: Iterable[str | StreamEvent] | None) -> set[str]: - if not terminal_events: + if terminal_events is None: return {StreamEvent.WORKFLOW_FINISHED.value, StreamEvent.WORKFLOW_PAUSED.value} values: set[str] = set() for item in terminal_events: diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 6937014a06..e811c2b2e0 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -25,7 +25,11 @@ from core.app.apps.workflow.app_runner import WorkflowAppRunner from core.app.apps.workflow.generate_response_converter import WorkflowAppGenerateResponseConverter from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity -from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse +from core.app.entities.task_entities import ( + WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, + WorkflowAppStreamResponse, +) from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig, PauseStatePersistenceLayer from core.db.session_factory import session_factory from core.helper.trace_id_helper import extract_external_trace_id_from_args @@ -612,7 +616,11 @@ class WorkflowAppGenerator(BaseAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> WorkflowAppBlockingResponse | Generator[WorkflowAppStreamResponse, None, None]: + ) -> ( + WorkflowAppBlockingResponse + | 
WorkflowAppPausedBlockingResponse + | Generator[WorkflowAppStreamResponse, None, None] + ): """ Handle response. :param application_generate_entity: application generate entity diff --git a/api/core/app/apps/workflow/generate_response_converter.py b/api/core/app/apps/workflow/generate_response_converter.py index c69826cbef..4037388798 100644 --- a/api/core/app/apps/workflow/generate_response_converter.py +++ b/api/core/app/apps/workflow/generate_response_converter.py @@ -9,24 +9,29 @@ from core.app.entities.task_entities import ( NodeStartStreamResponse, PingStreamResponse, WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, WorkflowAppStreamResponse, ) -class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = WorkflowAppBlockingResponse - +class WorkflowAppGenerateResponseConverter( + AppGenerateResponseConverter[WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse] +): @classmethod - def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response( + cls, blocking_response: WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking full response. :param blocking_response: blocking response :return: """ - return blocking_response.model_dump() + return dict(blocking_response.model_dump()) @classmethod - def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response( + cls, blocking_response: WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking simple response. 
:param blocking_response: blocking response diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 15645add57..87d9b73078 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -42,12 +42,15 @@ from core.app.entities.queue_entities import ( ) from core.app.entities.task_entities import ( ErrorStreamResponse, + HumanInputRequiredPauseReasonPayload, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, PingStreamResponse, StreamResponse, TextChunkStreamResponse, WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, WorkflowAppStreamResponse, WorkflowFinishStreamResponse, WorkflowPauseStreamResponse, @@ -118,7 +121,11 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): ) self._graph_runtime_state: GraphRuntimeState | None = self._base_task_pipeline.queue_manager.graph_runtime_state - def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: + def process( + self, + ) -> Union[ + WorkflowAppBlockingResponse, WorkflowAppPausedBlockingResponse, Generator[WorkflowAppStreamResponse, None, None] + ]: """ Process generate task pipeline. :return: @@ -129,19 +136,24 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: return self._to_blocking_response(generator) - def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) -> WorkflowAppBlockingResponse: + def _to_blocking_response( + self, generator: Generator[StreamResponse, None, None] + ) -> Union[WorkflowAppBlockingResponse, WorkflowAppPausedBlockingResponse]: """ To blocking response. 
:return: """ + human_input_responses: list[HumanInputRequiredResponse] = [] for stream_response in generator: if isinstance(stream_response, ErrorStreamResponse): raise stream_response.err + elif isinstance(stream_response, HumanInputRequiredResponse): + human_input_responses.append(stream_response) elif isinstance(stream_response, WorkflowPauseStreamResponse): - response = WorkflowAppBlockingResponse( + return WorkflowAppPausedBlockingResponse( task_id=self._application_generate_entity.task_id, workflow_run_id=stream_response.data.workflow_run_id, - data=WorkflowAppBlockingResponse.Data( + data=WorkflowAppPausedBlockingResponse.Data( id=stream_response.data.workflow_run_id, workflow_id=self._workflow.id, status=stream_response.data.status, @@ -152,12 +164,13 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): total_steps=stream_response.data.total_steps, created_at=stream_response.data.created_at, finished_at=None, + paused_nodes=stream_response.data.paused_nodes, + reasons=stream_response.data.reasons, ), ) - return response elif isinstance(stream_response, WorkflowFinishStreamResponse): - response = WorkflowAppBlockingResponse( + return WorkflowAppBlockingResponse( task_id=self._application_generate_entity.task_id, workflow_run_id=stream_response.data.id, data=WorkflowAppBlockingResponse.Data( @@ -174,12 +187,44 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): ), ) - return response else: continue + if human_input_responses: + return self._build_paused_blocking_response_from_human_input(human_input_responses) + raise ValueError("queue listening stopped unexpectedly.") + def _build_paused_blocking_response_from_human_input( + self, human_input_responses: list[HumanInputRequiredResponse] + ) -> WorkflowAppPausedBlockingResponse: + runtime_state = self._resolve_graph_runtime_state() + paused_nodes = list(dict.fromkeys(response.data.node_id for response in human_input_responses)) + created_at = int(runtime_state.start_at) + reasons 
= [ + HumanInputRequiredPauseReasonPayload.from_response_data(response.data).model_dump(mode="json") + for response in human_input_responses + ] + + return WorkflowAppPausedBlockingResponse( + task_id=self._application_generate_entity.task_id, + workflow_run_id=human_input_responses[-1].workflow_run_id, + data=WorkflowAppPausedBlockingResponse.Data( + id=human_input_responses[-1].workflow_run_id, + workflow_id=self._workflow.id, + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + error=None, + elapsed_time=time.perf_counter() - self._base_task_pipeline.start_at, + total_tokens=runtime_state.total_tokens, + total_steps=runtime_state.node_run_steps, + created_at=created_at, + finished_at=None, + paused_nodes=paused_nodes, + reasons=reasons, + ), + ) + def _to_stream_response( self, generator: Generator[StreamResponse, None, None] ) -> Generator[WorkflowAppStreamResponse, None, None]: diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 6e4ca69cf0..ad05566521 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -1,12 +1,13 @@ from collections.abc import Mapping, Sequence from enum import StrEnum -from typing import Any +from typing import Any, Literal -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, JsonValue from core.app.entities.agent_strategy import AgentStrategyInfo from core.rag.entities import RetrievalSourceMetadata from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from graphon.model_runtime.entities.llm_entities import LLMResult, LLMUsage from graphon.nodes.human_input.entities import FormInput, UserAction @@ -295,6 +296,40 @@ class HumanInputRequiredResponse(StreamResponse): data: Data +class HumanInputRequiredPauseReasonPayload(BaseModel): 
+ """ + Public pause-reason payload used by blocking responses when only + ``human_input_required`` events are available. + """ + + TYPE: Literal[PauseReasonType.HUMAN_INPUT_REQUIRED] = PauseReasonType.HUMAN_INPUT_REQUIRED + form_id: str + node_id: str + node_title: str + form_content: str + inputs: Sequence[FormInput] = Field(default_factory=list) + actions: Sequence[UserAction] = Field(default_factory=list) + display_in_ui: bool = False + form_token: str | None = None + resolved_default_values: Mapping[str, Any] = Field(default_factory=dict) + expiration_time: int + + @classmethod + def from_response_data(cls, data: HumanInputRequiredResponse.Data) -> "HumanInputRequiredPauseReasonPayload": + return cls( + form_id=data.form_id, + node_id=data.node_id, + node_title=data.node_title, + form_content=data.form_content, + inputs=data.inputs, + actions=data.actions, + display_in_ui=data.display_in_ui, + form_token=data.form_token, + resolved_default_values=data.resolved_default_values, + expiration_time=data.expiration_time, + ) + + class HumanInputFormFilledResponse(StreamResponse): class Data(BaseModel): """ @@ -355,7 +390,7 @@ class NodeStartStreamResponse(StreamResponse): workflow_run_id: str data: Data - def to_ignore_detail_dict(self): + def to_ignore_detail_dict(self) -> dict[str, JsonValue]: return { "event": self.event.value, "task_id": self.task_id, @@ -412,7 +447,7 @@ class NodeFinishStreamResponse(StreamResponse): workflow_run_id: str data: Data - def to_ignore_detail_dict(self): + def to_ignore_detail_dict(self) -> dict[str, JsonValue]: return { "event": self.event.value, "task_id": self.task_id, @@ -774,6 +809,34 @@ class ChatbotAppBlockingResponse(AppBlockingResponse): data: Data +class AdvancedChatPausedBlockingResponse(AppBlockingResponse): + """ + ChatbotAppPausedBlockingResponse entity + """ + + class Data(BaseModel): + """ + Data entity + """ + + id: str + mode: str + conversation_id: str + message_id: str + workflow_run_id: str + answer: str + 
metadata: Mapping[str, object] = Field(default_factory=dict) + created_at: int + paused_nodes: Sequence[str] = Field(default_factory=list) + reasons: Sequence[Mapping[str, Any]] = Field(default_factory=list[Mapping[str, Any]]) + status: WorkflowExecutionStatus + elapsed_time: float + total_tokens: int + total_steps: int + + data: Data + + class CompletionAppBlockingResponse(AppBlockingResponse): """ CompletionAppBlockingResponse entity @@ -819,6 +882,33 @@ class WorkflowAppBlockingResponse(AppBlockingResponse): data: Data +class WorkflowAppPausedBlockingResponse(AppBlockingResponse): + """ + WorkflowAppPausedBlockingResponse entity + """ + + class Data(BaseModel): + """ + Data entity + """ + + id: str + workflow_id: str + status: WorkflowExecutionStatus + outputs: Mapping[str, Any] | None = None + error: str | None = None + elapsed_time: float + total_tokens: int + total_steps: int + created_at: int + finished_at: int | None + paused_nodes: Sequence[str] = Field(default_factory=list) + reasons: Sequence[Mapping[str, Any]] = Field(default_factory=list) + + workflow_run_id: str + data: Data + + class AgentLogStreamResponse(StreamResponse): """ AgentLogStreamResponse entity diff --git a/api/core/workflow/human_input_forms.py b/api/core/workflow/human_input_forms.py index f124b321d4..b02f69ec33 100644 --- a/api/core/workflow/human_input_forms.py +++ b/api/core/workflow/human_input_forms.py @@ -12,20 +12,16 @@ from collections.abc import Sequence from sqlalchemy import select from sqlalchemy.orm import Session +from core.workflow.human_input_policy import HumanInputSurface, get_preferred_form_token from extensions.ext_database import db from models.human_input import HumanInputFormRecipient, RecipientType -_FORM_TOKEN_PRIORITY = { - RecipientType.BACKSTAGE: 0, - RecipientType.CONSOLE: 1, - RecipientType.STANDALONE_WEB_APP: 2, -} - def load_form_tokens_by_form_id( form_ids: Sequence[str], *, session: Session | None = None, + surface: HumanInputSurface | None = None, ) -> 
dict[str, str]: """Load the preferred access token for each human input form.""" unique_form_ids = list(dict.fromkeys(form_ids)) @@ -33,23 +29,43 @@ def load_form_tokens_by_form_id( return {} if session is not None: - return _load_form_tokens_by_form_id(session, unique_form_ids) + return _load_form_tokens_by_form_id(session, unique_form_ids, surface=surface) with Session(bind=db.engine, expire_on_commit=False) as new_session: - return _load_form_tokens_by_form_id(new_session, unique_form_ids) + return _load_form_tokens_by_form_id(new_session, unique_form_ids, surface=surface) -def _load_form_tokens_by_form_id(session: Session, form_ids: Sequence[str]) -> dict[str, str]: - tokens_by_form_id: dict[str, tuple[int, str]] = {} +def _load_form_tokens_by_form_id( + session: Session, + form_ids: Sequence[str], + *, + surface: HumanInputSurface | None = None, +) -> dict[str, str]: + recipients_by_form_id: dict[str, list[tuple[RecipientType, str]]] = {} stmt = select(HumanInputFormRecipient).where(HumanInputFormRecipient.form_id.in_(form_ids)) for recipient in session.scalars(stmt): - priority = _FORM_TOKEN_PRIORITY.get(recipient.recipient_type) - if priority is None or not recipient.access_token: + if not recipient.access_token: continue + recipients_by_form_id.setdefault(recipient.form_id, []).append( + (recipient.recipient_type, recipient.access_token) + ) - candidate = (priority, recipient.access_token) - current = tokens_by_form_id.get(recipient.form_id) - if current is None or candidate[0] < current[0]: - tokens_by_form_id[recipient.form_id] = candidate + tokens_by_form_id: dict[str, str] = {} + for form_id, recipients in recipients_by_form_id.items(): + token = _get_surface_form_token(recipients, surface=surface) + if token is not None: + tokens_by_form_id[form_id] = token + return tokens_by_form_id - return {form_id: token for form_id, (_, token) in tokens_by_form_id.items()} + +def _get_surface_form_token( + recipients: Sequence[tuple[RecipientType, str]], + *, + 
surface: HumanInputSurface | None, +) -> str | None: + if surface == HumanInputSurface.SERVICE_API: + for recipient_type, token in recipients: + if recipient_type == RecipientType.STANDALONE_WEB_APP and token: + return token + + return get_preferred_form_token(recipients) diff --git a/api/core/workflow/human_input_policy.py b/api/core/workflow/human_input_policy.py new file mode 100644 index 0000000000..798eb8723f --- /dev/null +++ b/api/core/workflow/human_input_policy.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from collections.abc import Mapping, Sequence +from enum import StrEnum +from typing import Any + +from graphon.entities.pause_reason import PauseReasonType +from models.human_input import RecipientType + + +class HumanInputSurface(StrEnum): + SERVICE_API = "service_api" + CONSOLE = "console" + + +# Service API is intentionally narrower than other surfaces: app-token callers +# should only be able to act on end-user web forms, not internal console flows. +_ALLOWED_RECIPIENT_TYPES_BY_SURFACE: dict[HumanInputSurface, frozenset[RecipientType]] = { + HumanInputSurface.SERVICE_API: frozenset({RecipientType.STANDALONE_WEB_APP}), + HumanInputSurface.CONSOLE: frozenset({RecipientType.CONSOLE, RecipientType.BACKSTAGE}), +} + +# A single HITL form can have multiple recipient records; this shared priority +# keeps every API surface consistent about which resume token to expose. 
+_RECIPIENT_TOKEN_PRIORITY: dict[RecipientType, int] = { + RecipientType.BACKSTAGE: 0, + RecipientType.CONSOLE: 1, + RecipientType.STANDALONE_WEB_APP: 2, +} + + +def is_recipient_type_allowed_for_surface( + recipient_type: RecipientType | None, + surface: HumanInputSurface, +) -> bool: + if recipient_type is None: + return False + return recipient_type in _ALLOWED_RECIPIENT_TYPES_BY_SURFACE[surface] + + +def get_preferred_form_token( + recipients: Sequence[tuple[RecipientType, str]], +) -> str | None: + chosen_token: str | None = None + chosen_priority: int | None = None + for recipient_type, token in recipients: + priority = _RECIPIENT_TOKEN_PRIORITY.get(recipient_type) + if priority is None or not token: + continue + if chosen_priority is None or priority < chosen_priority: + chosen_priority = priority + chosen_token = token + return chosen_token + + +def enrich_human_input_pause_reasons( + reasons: Sequence[Mapping[str, Any]], + *, + form_tokens_by_form_id: Mapping[str, str], + expiration_times_by_form_id: Mapping[str, int], +) -> list[dict[str, Any]]: + enriched: list[dict[str, Any]] = [] + for reason in reasons: + updated = dict(reason) + if updated.get("TYPE") == PauseReasonType.HUMAN_INPUT_REQUIRED: + form_id = updated.get("form_id") + if isinstance(form_id, str): + updated["form_token"] = form_tokens_by_form_id.get(form_id) + expiration_time = expiration_times_by_form_id.get(form_id) + if expiration_time is not None: + updated["expiration_time"] = expiration_time + enriched.append(updated) + return enriched diff --git a/api/repositories/sqlalchemy_api_workflow_run_repository.py b/api/repositories/sqlalchemy_api_workflow_run_repository.py index 474b200fc5..71a2554a60 100644 --- a/api/repositories/sqlalchemy_api_workflow_run_repository.py +++ b/api/repositories/sqlalchemy_api_workflow_run_repository.py @@ -42,7 +42,7 @@ from libs.helper import convert_datetime_to_date from libs.infinite_scroll_pagination import InfiniteScrollPagination from libs.time_parser 
import get_time_threshold from models.enums import WorkflowRunTriggeredFrom -from models.human_input import HumanInputForm +from models.human_input import HumanInputForm, HumanInputFormRecipient from models.workflow import WorkflowAppLog, WorkflowArchiveLog, WorkflowPause, WorkflowPauseReason, WorkflowRun from repositories.api_workflow_run_repository import APIWorkflowRunRepository, RunsWithRelatedCountsDict from repositories.entities.workflow_pause import WorkflowPauseEntity @@ -63,6 +63,7 @@ class _WorkflowRunError(Exception): def _build_human_input_required_reason( reason_model: WorkflowPauseReason, form_model: HumanInputForm | None, + recipients: Sequence[HumanInputFormRecipient] = (), ) -> HumanInputRequired: form_content = "" inputs = [] @@ -89,7 +90,7 @@ def _build_human_input_required_reason( resolved_default_values = dict(definition.default_values) node_title = definition.node_title or node_title - return HumanInputRequired( + reason = HumanInputRequired( form_id=form_id, form_content=form_content, inputs=inputs, @@ -98,6 +99,7 @@ def _build_human_input_required_reason( node_title=node_title, resolved_default_values=resolved_default_values, ) + return reason class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): @@ -804,12 +806,23 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): form_stmt = select(HumanInputForm).where(HumanInputForm.id.in_(form_ids)) for form in session.scalars(form_stmt).all(): form_models[form.id] = form + recipients_by_form_id: dict[str, list[HumanInputFormRecipient]] = {} + if form_ids: + recipient_stmt = select(HumanInputFormRecipient).where(HumanInputFormRecipient.form_id.in_(form_ids)) + for recipient in session.scalars(recipient_stmt).all(): + recipients_by_form_id.setdefault(recipient.form_id, []).append(recipient) pause_reasons: list[PauseReason] = [] for reason in pause_reason_models: if reason.type_ == PauseReasonType.HUMAN_INPUT_REQUIRED: form_model = form_models.get(reason.form_id) 
- pause_reasons.append(_build_human_input_required_reason(reason, form_model)) + pause_reasons.append( + _build_human_input_required_reason( + reason, + form_model, + recipients_by_form_id.get(reason.form_id, ()), + ) + ) else: pause_reasons.append(reason.to_entity()) return pause_reasons diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index 5e8c7aa337..8ff53d143b 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -162,6 +162,7 @@ class AppGenerateService: invoke_from=invoke_from, streaming=True, call_depth=0, + workflow_run_id=str(uuid.uuid4()), ) payload_json = payload.model_dump_json() @@ -183,6 +184,10 @@ class AppGenerateService: else: # Blocking mode: run synchronously and return JSON instead of SSE # Keep behaviour consistent with WORKFLOW blocking branch. + pause_config = PauseStateLayerConfig( + session_factory=session_factory.get_session_maker(), + state_owner_user_id=workflow.created_by, + ) advanced_generator = AdvancedChatAppGenerator() return rate_limit.generate( advanced_generator.convert_to_event_stream( @@ -194,6 +199,7 @@ class AppGenerateService: invoke_from=invoke_from, workflow_run_id=str(uuid.uuid4()), streaming=False, + pause_state_config=pause_config, ) ), request_id=request_id, diff --git a/api/services/workflow_event_snapshot_service.py b/api/services/workflow_event_snapshot_service.py index 5fca444723..94f88f8c49 100644 --- a/api/services/workflow_event_snapshot_service.py +++ b/api/services/workflow_event_snapshot_service.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import Session, sessionmaker from core.app.apps.message_generator import MessageGenerator from core.app.entities.task_entities import ( + HumanInputRequiredResponse, MessageReplaceStreamResponse, NodeFinishStreamResponse, NodeStartStreamResponse, @@ -22,10 +23,14 @@ from core.app.entities.task_entities import ( WorkflowStartStreamResponse, ) from core.app.layers.pause_state_persist_layer import 
WorkflowResumptionContext +from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface, enrich_human_input_pause_reasons from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus from graphon.runtime import GraphRuntimeState from graphon.workflow_type_encoder import WorkflowRuntimeTypeConverter +from models.human_input import HumanInputForm from models.model import AppMode, Message from models.workflow import WorkflowNodeExecutionTriggeredFrom, WorkflowRun from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot @@ -59,8 +64,10 @@ def build_workflow_event_stream( tenant_id: str, app_id: str, session_maker: sessionmaker[Session], + human_input_surface: HumanInputSurface | None = None, idle_timeout: float = 300, ping_interval: float = 10.0, + close_on_pause: bool = True, ) -> Generator[Mapping[str, Any] | str, None, None]: topic = MessageGenerator.get_response_topic(app_mode, workflow_run.id) workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) @@ -115,13 +122,15 @@ def build_workflow_event_stream( message_context=message_context, pause_entity=pause_entity, resumption_context=resumption_context, + session_maker=session_maker, + human_input_surface=human_input_surface, ) for event in snapshot_events: last_msg_time = time.time() last_ping_time = last_msg_time yield event - if _is_terminal_event(event, include_paused=True): + if _is_terminal_event(event, close_on_pause=close_on_pause): return while True: @@ -146,7 +155,7 @@ def build_workflow_event_stream( last_msg_time = time.time() last_ping_time = last_msg_time yield event - if _is_terminal_event(event, include_paused=True): + if _is_terminal_event(event, close_on_pause=close_on_pause): return finally: buffer_state.stop_event.set() @@ 
-207,6 +216,8 @@ def _build_snapshot_events( message_context: MessageContext | None, pause_entity: WorkflowPauseEntity | None, resumption_context: WorkflowResumptionContext | None, + session_maker: sessionmaker[Session] | None = None, + human_input_surface: HumanInputSurface | None = None, ) -> list[Mapping[str, Any]]: events: list[Mapping[str, Any]] = [] @@ -241,12 +252,24 @@ def _build_snapshot_events( events.append(node_finished) if workflow_run.status == WorkflowExecutionStatus.PAUSED and pause_entity is not None: + for human_input_event in _build_human_input_required_events( + workflow_run_id=workflow_run.id, + task_id=task_id, + pause_entity=pause_entity, + session_maker=session_maker, + human_input_surface=human_input_surface, + ): + _apply_message_context(human_input_event, message_context) + events.append(human_input_event) + pause_event = _build_pause_event( workflow_run=workflow_run, workflow_run_id=workflow_run.id, task_id=task_id, pause_entity=pause_entity, resumption_context=resumption_context, + session_maker=session_maker, + human_input_surface=human_input_surface, ) if pause_event is not None: _apply_message_context(pause_event, message_context) @@ -314,6 +337,97 @@ def _build_node_started_event( return response.to_ignore_detail_dict() +def _build_human_input_required_events( + *, + workflow_run_id: str, + task_id: str, + pause_entity: WorkflowPauseEntity, + session_maker: sessionmaker[Session] | None, + human_input_surface: HumanInputSurface | None, +) -> list[dict[str, Any]]: + reasons = [reason.model_dump(mode="json") for reason in pause_entity.get_pause_reasons()] + human_input_form_ids = [ + form_id + for reason in reasons + if reason.get("TYPE") == PauseReasonType.HUMAN_INPUT_REQUIRED + for form_id in [reason.get("form_id")] + if isinstance(form_id, str) + ] + + expiration_times_by_form_id: dict[str, int] = {} + display_in_ui_by_form_id: dict[str, bool] = {} + form_tokens_by_form_id: dict[str, str] = {} + if human_input_form_ids and 
session_maker is not None: + stmt = select(HumanInputForm.id, HumanInputForm.expiration_time, HumanInputForm.form_definition).where( + HumanInputForm.id.in_(human_input_form_ids) + ) + with session_maker() as session: + for form_id, expiration_time, form_definition in session.execute(stmt): + expiration_times_by_form_id[str(form_id)] = int(expiration_time.timestamp()) + try: + definition_payload = json.loads(form_definition) if form_definition else {} + except (TypeError, json.JSONDecodeError): + definition_payload = {} + display_in_ui_by_form_id[str(form_id)] = bool(definition_payload.get("display_in_ui")) + form_tokens_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=human_input_surface, + ) + + events: list[dict[str, Any]] = [] + for reason in reasons: + if reason.get("TYPE") != PauseReasonType.HUMAN_INPUT_REQUIRED: + continue + + form_id_raw = reason.get("form_id") + node_id_raw = reason.get("node_id") + node_title_raw = reason.get("node_title") + form_content_raw = reason.get("form_content") + if not isinstance(form_id_raw, str): + continue + if not isinstance(node_id_raw, str): + continue + if not isinstance(node_title_raw, str): + continue + if not isinstance(form_content_raw, str): + continue + form_id = form_id_raw + node_id = node_id_raw + node_title = node_title_raw + form_content = form_content_raw + + inputs = reason.get("inputs") + actions = reason.get("actions") + resolved_default_values = reason.get("resolved_default_values") + + expiration_time = expiration_times_by_form_id.get(form_id) + if expiration_time is None: + continue + + response = HumanInputRequiredResponse( + task_id=task_id, + workflow_run_id=workflow_run_id, + data=HumanInputRequiredResponse.Data( + form_id=form_id, + node_id=node_id, + node_title=node_title, + form_content=form_content, + inputs=inputs if isinstance(inputs, list) else [], + actions=actions if isinstance(actions, list) else [], + 
display_in_ui=display_in_ui_by_form_id.get(form_id, False), + form_token=form_tokens_by_form_id.get(form_id), + resolved_default_values=(resolved_default_values if isinstance(resolved_default_values, dict) else {}), + expiration_time=expiration_time, + ), + ) + payload = response.model_dump(mode="json") + payload["event"] = response.event.value + events.append(payload) + + return events + + def _build_node_finished_event( *, workflow_run_id: str, @@ -356,6 +470,8 @@ def _build_pause_event( task_id: str, pause_entity: WorkflowPauseEntity, resumption_context: WorkflowResumptionContext | None, + session_maker: sessionmaker[Session] | None, + human_input_surface: HumanInputSurface | None = None, ) -> dict[str, Any] | None: paused_nodes: list[str] = [] outputs: dict[str, Any] = {} @@ -365,6 +481,36 @@ def _build_pause_event( outputs = dict(WorkflowRuntimeTypeConverter().to_json_encodable(state.outputs or {})) reasons = [reason.model_dump(mode="json") for reason in pause_entity.get_pause_reasons()] + human_input_form_ids = [ + form_id + for reason in reasons + if reason.get("TYPE") == PauseReasonType.HUMAN_INPUT_REQUIRED + for form_id in [reason.get("form_id")] + if isinstance(form_id, str) + ] + form_tokens_by_form_id: dict[str, str] = {} + expiration_times_by_form_id: dict[str, int] = {} + if human_input_form_ids and session_maker is not None: + with session_maker() as session: + form_tokens_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=human_input_surface, + ) + stmt = select(HumanInputForm.id, HumanInputForm.expiration_time).where( + HumanInputForm.id.in_(human_input_form_ids) + ) + for row in session.execute(stmt): + form_id, expiration_time, *_rest = row + expiration_times_by_form_id[str(form_id)] = int(expiration_time.timestamp()) + # Reconnect paths must preserve the same pause-reason contract as live streams; + # otherwise clients see schema drift after resume. 
+ reasons = enrich_human_input_pause_reasons( + reasons, + form_tokens_by_form_id=form_tokens_by_form_id, + expiration_times_by_form_id=expiration_times_by_form_id, + ) + response = WorkflowPauseStreamResponse( task_id=task_id, workflow_run_id=workflow_run_id, @@ -449,12 +595,19 @@ def _parse_event_message(message: bytes) -> Mapping[str, Any] | None: return event -def _is_terminal_event(event: Mapping[str, Any] | str, include_paused=False) -> bool: +def _is_terminal_event( + event: Mapping[str, Any] | str, + close_on_pause: bool = True, + *, + include_paused: bool | None = None, +) -> bool: + if include_paused is not None: + close_on_pause = include_paused if not isinstance(event, Mapping): return False event_type = event.get("event") if event_type == StreamEvent.WORKFLOW_FINISHED.value: return True - if include_paused: + if close_on_pause: return event_type == StreamEvent.WORKFLOW_PAUSED.value return False diff --git a/api/tasks/app_generate/workflow_execute_task.py b/api/tasks/app_generate/workflow_execute_task.py index c22e7e9918..5ceeb302c8 100644 --- a/api/tasks/app_generate/workflow_execute_task.py +++ b/api/tasks/app_generate/workflow_execute_task.py @@ -399,6 +399,8 @@ def _resume_advanced_chat( workflow_run_id: str, workflow_run: WorkflowRun, ) -> None: + resumed_generate_entity = generate_entity.model_copy(update={"stream": True}) + try: triggered_from = WorkflowRunTriggeredFrom(workflow_run.triggered_from) except ValueError: @@ -426,7 +428,7 @@ def _resume_advanced_chat( user=user, conversation=conversation, message=message, - application_generate_entity=generate_entity, + application_generate_entity=resumed_generate_entity, workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, graph_runtime_state=graph_runtime_state, @@ -436,9 +438,8 @@ def _resume_advanced_chat( logger.exception("Failed to resume chatflow execution for workflow run %s", workflow_run_id) raise - if 
generate_entity.stream: - assert isinstance(response, Generator) - _publish_streaming_response(response, workflow_run_id, AppMode.ADVANCED_CHAT) + assert isinstance(response, Generator) + _publish_streaming_response(response, workflow_run_id, AppMode.ADVANCED_CHAT) def _resume_workflow( @@ -455,6 +456,8 @@ def _resume_workflow( workflow_run_repo, pause_entity, ) -> None: + resumed_generate_entity = generate_entity.model_copy(update={"stream": True}) + try: triggered_from = WorkflowRunTriggeredFrom(workflow_run.triggered_from) except ValueError: @@ -480,7 +483,7 @@ def _resume_workflow( app_model=app_model, workflow=workflow, user=user, - application_generate_entity=generate_entity, + application_generate_entity=resumed_generate_entity, graph_runtime_state=graph_runtime_state, workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, @@ -490,11 +493,18 @@ def _resume_workflow( logger.exception("Failed to resume workflow execution for workflow run %s", workflow_run_id) raise - if generate_entity.stream: - assert isinstance(response, Generator) - _publish_streaming_response(response, workflow_run_id, AppMode.WORKFLOW) + assert isinstance(response, Generator) + _publish_streaming_response(response, workflow_run_id, AppMode.WORKFLOW) - workflow_run_repo.delete_workflow_pause(pause_entity) + try: + workflow_run_repo.delete_workflow_pause(pause_entity) + except Exception as exc: + if exc.__class__.__name__ != "_WorkflowRunError" or "WorkflowPause not found" not in str(exc): + raise + logger.info( + "Skipped deleting workflow pause %s after resume because it was already replaced or removed", + pause_entity.id, + ) @shared_task(queue=WORKFLOW_BASED_APP_EXECUTION_QUEUE, name="resume_app_execution") diff --git a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py 
b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py index aebe87839c..d9828e19c5 100644 --- a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py +++ b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -2,6 +2,7 @@ from __future__ import annotations +import secrets from dataclasses import dataclass, field from datetime import datetime, timedelta from unittest.mock import Mock @@ -11,6 +12,7 @@ import pytest from sqlalchemy import Engine, delete, select from sqlalchemy.orm import Session, sessionmaker +from core.workflow.human_input_adapter import DeliveryMethodType from extensions.ext_storage import storage from graphon.entities import WorkflowExecution from graphon.entities.pause_reason import HumanInputRequired, PauseReasonType @@ -20,9 +22,11 @@ from graphon.nodes.human_input.enums import FormInputType, HumanInputFormStatus from libs.datetime_utils import naive_utc_now from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom from models.human_input import ( + BackstageRecipientPayload, HumanInputDelivery, HumanInputForm, HumanInputFormRecipient, + RecipientType, ) from models.workflow import WorkflowAppLog, WorkflowAppLogCreatedFrom, WorkflowPause, WorkflowPauseReason, WorkflowRun from repositories.entities.workflow_pause import WorkflowPauseEntity @@ -628,12 +632,12 @@ class TestPrivateWorkflowPauseEntity: class TestBuildHumanInputRequiredReason: """Integration tests for _build_human_input_required_reason using real DB models.""" - def test_builds_reason_from_form_definition( + def test_prefers_standalone_web_app_token_when_available( self, db_session_with_containers: Session, test_scope: _TestScope, ) -> None: - """Build the graph pause reason from the stored form definition.""" + """Use the public standalone web-app token for service API payloads.""" expiration_time = naive_utc_now() 
form_definition = FormDefinition( @@ -660,6 +664,40 @@ class TestBuildHumanInputRequiredReason: db_session_with_containers.add(form_model) db_session_with_containers.flush() + delivery = HumanInputDelivery( + form_id=form_model.id, + delivery_method_type=DeliveryMethodType.WEBAPP, + channel_payload="{}", + ) + db_session_with_containers.add(delivery) + db_session_with_containers.flush() + + backstage_access_token = secrets.token_urlsafe(8) + backstage_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.BACKSTAGE, + recipient_payload=BackstageRecipientPayload().model_dump_json(), + access_token=backstage_access_token, + ) + console_access_token = secrets.token_urlsafe(8) + console_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.CONSOLE, + recipient_payload="{}", + access_token=console_access_token, + ) + web_app_access_token = secrets.token_urlsafe(8) + web_app_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.STANDALONE_WEB_APP, + recipient_payload="{}", + access_token=web_app_access_token, + ) + db_session_with_containers.add_all([backstage_recipient, console_recipient, web_app_recipient]) + db_session_with_containers.flush() # Create a pause so the reason has a valid pause_id workflow_run = _create_workflow_run( db_session_with_containers, @@ -688,8 +726,15 @@ class TestBuildHumanInputRequiredReason: # Refresh to ensure we have DB-round-tripped objects db_session_with_containers.refresh(form_model) db_session_with_containers.refresh(reason_model) + db_session_with_containers.refresh(backstage_recipient) + db_session_with_containers.refresh(console_recipient) + db_session_with_containers.refresh(web_app_recipient) - reason = _build_human_input_required_reason(reason_model, form_model) + reason = _build_human_input_required_reason( + reason_model, + 
form_model, + [backstage_recipient, console_recipient, web_app_recipient], + ) assert isinstance(reason, HumanInputRequired) assert reason.node_title == "Ask Name" @@ -697,3 +742,92 @@ class TestBuildHumanInputRequiredReason: assert reason.inputs[0].output_variable_name == "name" assert reason.actions[0].id == "approve" assert reason.resolved_default_values == {"name": "Alice"} + assert not hasattr(reason, "form_token") + + def test_falls_back_to_console_token_when_web_app_token_missing( + self, + db_session_with_containers: Session, + test_scope: _TestScope, + ) -> None: + """Use the console token only when no standalone web-app token exists.""" + + expiration_time = naive_utc_now() + form_definition = FormDefinition( + form_content="content", + inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], + user_actions=[UserAction(id="approve", title="Approve")], + rendered_content="rendered", + expiration_time=expiration_time, + default_values={"name": "Alice"}, + node_title="Ask Name", + display_in_ui=True, + ) + + form_model = HumanInputForm( + tenant_id=test_scope.tenant_id, + app_id=test_scope.app_id, + workflow_run_id=str(uuid4()), + node_id="node-1", + form_definition=form_definition.model_dump_json(), + rendered_content="rendered", + status=HumanInputFormStatus.WAITING, + expiration_time=expiration_time, + ) + db_session_with_containers.add(form_model) + db_session_with_containers.flush() + + delivery = HumanInputDelivery( + form_id=form_model.id, + delivery_method_type=DeliveryMethodType.WEBAPP, + channel_payload="{}", + ) + db_session_with_containers.add(delivery) + db_session_with_containers.flush() + + backstage_access_token = secrets.token_urlsafe(8) + backstage_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.BACKSTAGE, + recipient_payload=BackstageRecipientPayload().model_dump_json(), + access_token=backstage_access_token, + ) + console_access_token = 
secrets.token_urlsafe(8) + console_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.CONSOLE, + recipient_payload="{}", + access_token=console_access_token, + ) + db_session_with_containers.add_all([backstage_recipient, console_recipient]) + db_session_with_containers.flush() + + workflow_run = _create_workflow_run( + db_session_with_containers, + test_scope, + status=WorkflowExecutionStatus.RUNNING, + ) + pause = WorkflowPause( + workflow_id=test_scope.workflow_id, + workflow_run_id=workflow_run.id, + state_object_key=f"workflow-state-{uuid4()}.json", + ) + db_session_with_containers.add(pause) + db_session_with_containers.flush() + test_scope.state_keys.add(pause.state_object_key) + + reason_model = WorkflowPauseReason( + pause_id=pause.id, + type_=PauseReasonType.HUMAN_INPUT_REQUIRED, + form_id=form_model.id, + node_id="node-1", + message="", + ) + db_session_with_containers.add(reason_model) + db_session_with_containers.commit() + + reason = _build_human_input_required_reason(reason_model, form_model, [backstage_recipient, console_recipient]) + + assert isinstance(reason, HumanInputRequired) + assert not hasattr(reason, "form_token") diff --git a/api/tests/unit_tests/controllers/console/test_human_input_form.py b/api/tests/unit_tests/controllers/console/test_human_input_form.py index 232b6eee79..ebf803cac9 100644 --- a/api/tests/unit_tests/controllers/console/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/console/test_human_input_form.py @@ -122,6 +122,35 @@ def test_post_form_invalid_recipient_type(app, monkeypatch: pytest.MonkeyPatch) handler(api, form_token="token") +def test_post_form_rejects_webapp_recipient_type(app, monkeypatch: pytest.MonkeyPatch) -> None: + form = SimpleNamespace(tenant_id="tenant-1", recipient_type=RecipientType.STANDALONE_WEB_APP) + + class _ServiceStub: + def __init__(self, *_args, **_kwargs): + pass + + def get_form_by_token(self, _token): + 
return form + + monkeypatch.setattr("controllers.console.human_input_form.HumanInputService", _ServiceStub) + monkeypatch.setattr( + "controllers.console.human_input_form.current_account_with_tenant", + lambda: (SimpleNamespace(id="user-1"), "tenant-1"), + ) + monkeypatch.setattr("controllers.console.human_input_form.db", SimpleNamespace(engine=object())) + + api = ConsoleHumanInputFormApi() + handler = _unwrap(api.post) + + with app.test_request_context( + "/console/api/form/human_input/token", + method="POST", + json={"inputs": {"content": "ok"}, "action": "approve"}, + ): + with pytest.raises(NotFoundError): + handler(api, form_token="token") + + def test_post_form_success(app, monkeypatch: pytest.MonkeyPatch) -> None: submit_mock = Mock() form = SimpleNamespace(tenant_id="tenant-1", recipient_type=RecipientType.CONSOLE) diff --git a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py new file mode 100644 index 0000000000..846d5368f3 --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py @@ -0,0 +1,707 @@ +"""Dedicated tests for HITL behavior exposed through the Service API.""" + +from __future__ import annotations + +import json +import sys +from collections.abc import Sequence +from dataclasses import dataclass +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import ANY, MagicMock, Mock + +import pytest + +import services.app_generate_service as ags_module +from controllers.service_api.app.workflow_events import WorkflowEventsApi +from core.app.app_config.entities import AppAdditionalFeatures, WorkflowUIBasedAppConfig +from core.app.apps.common import workflow_response_converter +from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom, WorkflowAppGenerateEntity 
+from core.app.entities.queue_entities import QueueWorkflowPausedEvent +from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, + HumanInputRequiredResponse, + WorkflowAppPausedBlockingResponse, + WorkflowPauseStreamResponse, +) +from core.app.layers.pause_state_persist_layer import WorkflowResumptionContext, _WorkflowGenerateEntityWrapper +from core.workflow.human_input_policy import HumanInputSurface +from core.workflow.system_variables import build_system_variables +from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import HumanInputRequired, PauseReasonType +from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus +from graphon.nodes.human_input.entities import FormInput, UserAction +from graphon.nodes.human_input.enums import FormInputType +from graphon.runtime import GraphRuntimeState, VariablePool +from models.account import Account +from models.enums import CreatorUserRole +from models.model import AppMode +from models.workflow import WorkflowRun +from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot +from repositories.entities.workflow_pause import WorkflowPauseEntity +from services.app_generate_service import AppGenerateService +from services.workflow_event_snapshot_service import _build_snapshot_events +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +class _DummyRateLimit: + @staticmethod + def gen_request_key() -> str: + return "dummy-request-id" + + def __init__(self, client_id: str, max_active_requests: int) -> None: + self.client_id = client_id + self.max_active_requests = max_active_requests + + def enter(self, request_id: str | None = None) -> str: + return request_id or "dummy-request-id" + + def exit(self, request_id: str) -> None: + return None + + def generate(self, generator, request_id: str): + return generator + + +def _mock_repo_for_run(monkeypatch: pytest.MonkeyPatch, workflow_run): + 
workflow_events_module = sys.modules["controllers.service_api.app.workflow_events"] + repo = SimpleNamespace(get_workflow_run_by_id_and_tenant_id=lambda **_kwargs: workflow_run) + monkeypatch.setattr( + workflow_events_module.DifyAPIRepositoryFactory, + "create_api_workflow_run_repository", + lambda *_args, **_kwargs: repo, + ) + monkeypatch.setattr(workflow_events_module, "db", SimpleNamespace(engine=object())) + return workflow_events_module + + +def _build_service_api_pause_converter() -> WorkflowResponseConverter: + application_generate_entity = SimpleNamespace( + inputs={}, + files=[], + invoke_from=InvokeFrom.SERVICE_API, + app_config=SimpleNamespace(app_id="app-id", tenant_id="tenant-id"), + ) + system_variables = build_system_variables( + user_id="user", + app_id="app-id", + workflow_id="workflow-id", + workflow_execution_id="run-id", + ) + user = MagicMock(spec=Account) + user.id = "account-id" + user.name = "Tester" + user.email = "tester@example.com" + return WorkflowResponseConverter( + application_generate_entity=application_generate_entity, + user=user, + system_variables=system_variables, + ) + + +def _build_advanced_chat_paused_blocking_response() -> AdvancedChatPausedBlockingResponse: + data = AdvancedChatPausedBlockingResponse.Data( + id="msg-1", + mode="chat", + conversation_id="c1", + message_id="m1", + workflow_run_id="run-1", + answer="partial", + metadata={"usage": {"total_tokens": 1}}, + created_at=1, + paused_nodes=["node-1"], + reasons=[ + { + "type": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "expiration_time": 100, + } + ], + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ) + return AdvancedChatPausedBlockingResponse(task_id="t1", data=data) + + +def _build_workflow_paused_blocking_response() -> WorkflowAppPausedBlockingResponse: + return WorkflowAppPausedBlockingResponse( + task_id="t1", + workflow_run_id="r1", + data=WorkflowAppPausedBlockingResponse.Data( + id="r1", 
+ workflow_id="wf-1", + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + error=None, + elapsed_time=0.5, + total_tokens=0, + total_steps=2, + created_at=1, + finished_at=None, + paused_nodes=["node-1"], + reasons=[{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 100}], + ), + ) + + +@dataclass(frozen=True) +class _FakePauseEntity(WorkflowPauseEntity): + pause_id: str + workflow_run_id: str + paused_at_value: datetime + pause_reasons: Sequence[HumanInputRequired] + + @property + def id(self) -> str: + return self.pause_id + + @property + def workflow_execution_id(self) -> str: + return self.workflow_run_id + + def get_state(self) -> bytes: + raise AssertionError("state is not required for snapshot tests") + + @property + def resumed_at(self) -> datetime | None: + return None + + @property + def paused_at(self) -> datetime: + return self.paused_at_value + + def get_pause_reasons(self) -> Sequence[HumanInputRequired]: + return self.pause_reasons + + +def _build_workflow_run(status: WorkflowExecutionStatus) -> WorkflowRun: + return WorkflowRun( + id="run-1", + tenant_id="tenant-1", + app_id="app-1", + workflow_id="workflow-1", + type="workflow", + triggered_from="app-run", + version="v1", + graph=None, + inputs=json.dumps({"input": "value"}), + status=status, + outputs=json.dumps({}), + error=None, + elapsed_time=0.0, + total_tokens=0, + total_steps=0, + created_by_role=CreatorUserRole.END_USER, + created_by="user-1", + created_at=datetime(2024, 1, 1, tzinfo=UTC), + ) + + +def _build_snapshot(status: WorkflowNodeExecutionStatus) -> WorkflowNodeExecutionSnapshot: + created_at = datetime(2024, 1, 1, tzinfo=UTC) + finished_at = datetime(2024, 1, 1, 0, 0, 5, tzinfo=UTC) + return WorkflowNodeExecutionSnapshot( + execution_id="exec-1", + node_id="node-1", + node_type="human-input", + title="Human Input", + index=1, + status=status.value, + elapsed_time=0.5, + created_at=created_at, + finished_at=finished_at, + iteration_id=None, + loop_id=None, 
+ ) + + +def _build_resumption_context(task_id: str) -> WorkflowResumptionContext: + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant-1", + app_id="app-1", + app_mode=AppMode.WORKFLOW, + workflow_id="workflow-1", + ) + generate_entity = WorkflowAppGenerateEntity( + task_id=task_id, + app_config=app_config, + inputs={}, + files=[], + user_id="user-1", + stream=True, + invoke_from=InvokeFrom.EXPLORE, + call_depth=0, + workflow_execution_id="run-1", + ) + runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0) + runtime_state.register_paused_node("node-1") + runtime_state.outputs = {"result": "value"} + wrapper = _WorkflowGenerateEntityWrapper(entity=generate_entity) + return WorkflowResumptionContext( + generate_entity=wrapper, + serialized_graph_runtime_state=runtime_state.dumps(), + ) + + +class TestHitlServiceApi: + # Service API event-stream continuation + def test_workflow_events_continue_on_pause_keeps_stream_open(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + msg_generator.retrieve_events.return_value = ["raw-event"] + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: streamed\n\n"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1&continue_on_pause=true", method="GET"): + response = handler(api, 
app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: streamed\n\n" + msg_generator.retrieve_events.assert_called_once_with( + AppMode.WORKFLOW, + "run-1", + terminal_events=[], + ) + workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) + + def test_workflow_events_snapshot_continue_on_pause_keeps_pause_open( + self, app, monkeypatch: pytest.MonkeyPatch + ) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: snapshot\n\n"]) + snapshot_builder = Mock(return_value=["snapshot-events"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + monkeypatch.setattr(workflow_events_module, "build_workflow_event_stream", snapshot_builder) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/workflow/run-1/events?user=u1&include_state_snapshot=true&continue_on_pause=true", + method="GET", + ): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: snapshot\n\n" + msg_generator.retrieve_events.assert_not_called() + snapshot_builder.assert_called_once_with( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=ANY, + human_input_surface=HumanInputSurface.SERVICE_API, + close_on_pause=False, + ) + 
workflow_generator.convert_to_event_stream.assert_called_once_with(["snapshot-events"]) + + def test_advanced_chat_blocking_injects_pause_state_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) + monkeypatch.setattr(ags_module, "RateLimit", _DummyRateLimit) + + workflow = MagicMock() + workflow.created_by = "owner-id" + monkeypatch.setattr(AppGenerateService, "_get_workflow", lambda *args, **kwargs: workflow) + monkeypatch.setattr(ags_module.session_factory, "get_session_maker", lambda: "session-maker") + + generator_instance = MagicMock() + generator_instance.generate.return_value = {"result": "advanced-blocking"} + generator_instance.convert_to_event_stream.side_effect = lambda payload: payload + monkeypatch.setattr(ags_module, "AdvancedChatAppGenerator", lambda: generator_instance) + + app_model = MagicMock() + app_model.mode = AppMode.ADVANCED_CHAT + app_model.id = "app-id" + app_model.tenant_id = "tenant-id" + app_model.max_active_requests = 0 + app_model.is_agent = False + + user = MagicMock() + user.id = "user-id" + + result = AppGenerateService.generate( + app_model=app_model, + user=user, + args={"workflow_id": None, "query": "hi", "inputs": {}}, + invoke_from=InvokeFrom.SERVICE_API, + streaming=False, + ) + + assert result == {"result": "advanced-blocking"} + call_kwargs = generator_instance.generate.call_args.kwargs + assert call_kwargs["streaming"] is False + assert call_kwargs["pause_state_config"] is not None + assert call_kwargs["pause_state_config"].session_factory == "session-maker" + assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" + + # Blocking payload contract + def test_advanced_chat_blocking_pause_payload_contract(self) -> None: + from core.app.apps.advanced_chat.generate_response_converter import AdvancedChatAppGenerateResponseConverter + + response = AdvancedChatAppGenerateResponseConverter.convert_blocking_full_response( + 
_build_advanced_chat_paused_blocking_response() + ) + + assert response["event"] == "workflow_paused" + assert response["workflow_run_id"] == "run-1" + assert response["answer"] == "partial" + assert response["data"]["reasons"][0]["type"] == PauseReasonType.HUMAN_INPUT_REQUIRED + assert response["data"]["reasons"][0]["expiration_time"] == 100 + assert "human_input_forms" not in response["data"] + + def test_workflow_blocking_pause_payload_contract(self) -> None: + from core.app.apps.workflow.generate_response_converter import WorkflowAppGenerateResponseConverter + + response = WorkflowAppGenerateResponseConverter.convert_blocking_full_response( + _build_workflow_paused_blocking_response() + ) + + assert response["workflow_run_id"] == "r1" + assert response["data"]["status"] == WorkflowExecutionStatus.PAUSED + assert response["data"]["paused_nodes"] == ["node-1"] + assert response["data"]["reasons"] == [ + {"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 100} + ] + assert "human_input_forms" not in response["data"] + + def test_advanced_chat_blocking_pipeline_pause_payload_contract(self) -> None: + from core.app.app_config.entities import AppAdditionalFeatures + from core.app.apps.advanced_chat.generate_task_pipeline import AdvancedChatAppGenerateTaskPipeline + from models.enums import MessageStatus + from models.model import EndUser + + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant", + app_id="app", + app_mode=AppMode.ADVANCED_CHAT, + additional_features=AppAdditionalFeatures(), + variables=[], + workflow_id="workflow-id", + ) + application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( + task_id="task", + app_config=app_config, + inputs={}, + query="hello", + files=[], + user_id="user", + stream=False, + invoke_from=InvokeFrom.WEB_APP, + extras={}, + trace_manager=None, + workflow_run_id="run-id", + ) + pipeline = AdvancedChatAppGenerateTaskPipeline( + application_generate_entity=application_generate_entity, + 
workflow=SimpleNamespace(id="workflow-id", tenant_id="tenant", features_dict={}), + queue_manager=SimpleNamespace(invoke_from=InvokeFrom.WEB_APP, graph_runtime_state=None), + conversation=SimpleNamespace(id="conv-id", mode=AppMode.ADVANCED_CHAT), + message=SimpleNamespace( + id="message-id", + query="hello", + created_at=datetime.utcnow(), + status=MessageStatus.NORMAL, + answer="", + ), + user=EndUser(tenant_id="tenant", type="session", name="tester", session_id="session"), + stream=False, + dialogue_count=1, + draft_var_saver_factory=lambda **kwargs: None, + ) + pipeline._task_state.answer = "partial answer" + pipeline._workflow_run_id = "run-id" + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Approval", + form_content="Need approval", + inputs=[], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + form_token="token-1", + resolved_default_values={}, + expiration_time=123, + ), + ) + yield WorkflowPauseStreamResponse( + task_id="task", + workflow_run_id="run-id", + data=WorkflowPauseStreamResponse.Data( + workflow_run_id="run-id", + paused_nodes=["node-1"], + outputs={}, + reasons=[ + { + "type": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "node_id": "node-1", + "expiration_time": 123, + }, + ], + status="paused", + created_at=1, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, AdvancedChatPausedBlockingResponse) + assert response.data.answer == "partial answer" + assert response.data.workflow_run_id == "run-id" + assert response.data.reasons[0]["form_id"] == "form-1" + assert response.data.reasons[0]["expiration_time"] == 123 + + def test_workflow_blocking_pipeline_pause_payload_contract(self, monkeypatch: pytest.MonkeyPatch) -> None: + from core.app.apps.workflow import 
generate_task_pipeline as workflow_pipeline_module + from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline + + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant", + app_id="app", + app_mode=AppMode.WORKFLOW, + additional_features=AppAdditionalFeatures(), + variables=[], + workflow_id="workflow-id", + ) + application_generate_entity = WorkflowAppGenerateEntity.model_construct( + task_id="task", + app_config=app_config, + inputs={}, + files=[], + user_id="user", + stream=False, + invoke_from=InvokeFrom.WEB_APP, + trace_manager=None, + workflow_execution_id="run-id", + extras={}, + call_depth=0, + ) + pipeline = WorkflowAppGenerateTaskPipeline( + application_generate_entity=application_generate_entity, + workflow=SimpleNamespace(id="workflow-id", tenant_id="tenant", features_dict={}), + queue_manager=SimpleNamespace(invoke_from=InvokeFrom.WEB_APP, graph_runtime_state=None), + user=SimpleNamespace(id="user", session_id="session"), + stream=False, + draft_var_saver_factory=lambda **kwargs: None, + ) + monkeypatch.setattr(workflow_pipeline_module.time, "time", lambda: 1700000000) + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Human Input", + form_content="content", + expiration_time=1, + ), + ) + yield WorkflowPauseStreamResponse( + task_id="task", + workflow_run_id="run", + data=WorkflowPauseStreamResponse.Data( + workflow_run_id="run", + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + paused_nodes=["node-1"], + reasons=[{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 1}], + created_at=1, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, WorkflowAppPausedBlockingResponse) + assert response.data.status == WorkflowExecutionStatus.PAUSED + assert 
response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 1}] + + def test_service_api_pause_event_serializes_hitl_reason(self, monkeypatch: pytest.MonkeyPatch) -> None: + converter = _build_service_api_pause_converter() + converter.workflow_start_to_stream_response( + task_id="task", + workflow_run_id="run-id", + workflow_id="workflow-id", + reason=WorkflowStartReason.INITIAL, + ) + + expiration_time = datetime(2024, 1, 1, tzinfo=UTC) + + class _FakeSession: + def execute(self, _stmt): + return [("form-1", expiration_time, '{"display_in_ui": true}')] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc, tb): + return False + + monkeypatch.setattr(workflow_response_converter, "Session", lambda **_: _FakeSession()) + monkeypatch.setattr(workflow_response_converter, "db", SimpleNamespace(engine=object())) + monkeypatch.setattr( + workflow_response_converter, + "load_form_tokens_by_form_id", + lambda form_ids, session=None, surface=None: {"form-1": "token"}, + ) + + reason = HumanInputRequired( + form_id="form-1", + form_content="Rendered", + inputs=[ + FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="field", default=None), + ], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + node_id="node-id", + node_title="Human Step", + form_token="token", + ) + queue_event = QueueWorkflowPausedEvent( + reasons=[reason], + outputs={"answer": "value"}, + paused_nodes=["node-id"], + ) + + runtime_state = SimpleNamespace(total_tokens=0, node_run_steps=0) + responses = converter.workflow_pause_to_stream_response( + event=queue_event, + task_id="task", + graph_runtime_state=runtime_state, + ) + + assert isinstance(responses[-1], WorkflowPauseStreamResponse) + pause_resp = responses[-1] + assert pause_resp.workflow_run_id == "run-id" + assert pause_resp.data.paused_nodes == ["node-id"] + assert pause_resp.data.outputs == {} + 
assert pause_resp.data.reasons[0]["TYPE"] == "human_input_required" + assert pause_resp.data.reasons[0]["form_id"] == "form-1" + assert pause_resp.data.reasons[0]["form_token"] == "token" + assert pause_resp.data.reasons[0]["expiration_time"] == int(expiration_time.timestamp()) + + assert isinstance(responses[0], HumanInputRequiredResponse) + hi_resp = responses[0] + assert hi_resp.data.form_id == "form-1" + assert hi_resp.data.node_id == "node-id" + assert hi_resp.data.node_title == "Human Step" + assert hi_resp.data.inputs[0].output_variable_name == "field" + assert hi_resp.data.actions[0].id == "approve" + assert hi_resp.data.display_in_ui is True + assert hi_resp.data.form_token == "token" + assert hi_resp.data.expiration_time == int(expiration_time.timestamp()) + + # Snapshot payload contract + def test_snapshot_events_include_pause_payload_contract(self, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) + snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) + resumption_context = _build_resumption_context("task-ctx") + monkeypatch.setattr( + "services.workflow_event_snapshot_service.load_form_tokens_by_form_id", + lambda form_ids, session=None, surface=None: {"form-1": "wtok"}, + ) + + class _SessionContext: + def __init__(self, session): + self._session = session + + def __enter__(self): + return self._session + + def __exit__(self, exc_type, exc, tb): + return False + + def session_maker() -> _SessionContext: + return _SessionContext( + SimpleNamespace( + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + ) + + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + form_content="content", + node_id="node-1", + node_title="Human Input", + form_token="wtok", + ) + ], + ) + + events = 
_build_snapshot_events( + workflow_run=workflow_run, + node_snapshots=[snapshot], + task_id="task-ctx", + message_context=None, + pause_entity=pause_entity, + resumption_context=resumption_context, + session_maker=session_maker, + ) + + assert [event["event"] for event in events] == [ + "workflow_started", + "node_started", + "node_finished", + "human_input_required", + "workflow_paused", + ] + assert events[2]["data"]["status"] == WorkflowNodeExecutionStatus.PAUSED.value + assert events[3]["data"]["form_token"] == "wtok" + assert events[3]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + pause_data = events[-1]["data"] + assert pause_data["paused_nodes"] == ["node-1"] + assert pause_data["outputs"] == {"result": "value"} + assert pause_data["reasons"][0]["TYPE"] == "human_input_required" + assert pause_data["reasons"][0]["form_token"] == "wtok" + assert pause_data["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + assert pause_data["status"] == WorkflowExecutionStatus.PAUSED.value + assert pause_data["created_at"] == int(workflow_run.created_at.timestamp()) + assert pause_data["elapsed_time"] == workflow_run.elapsed_time + assert pause_data["total_tokens"] == workflow_run.total_tokens + assert pause_data["total_steps"] == workflow_run.total_steps diff --git a/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py new file mode 100644 index 0000000000..531f722ceb --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py @@ -0,0 +1,184 @@ +"""Unit tests for Service API human input form endpoints.""" + +from __future__ import annotations + +import json +import sys +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import Mock + +import pytest +from werkzeug.exceptions import NotFound + +from 
controllers.service_api.app.human_input_form import WorkflowHumanInputFormApi +from models.human_input import RecipientType +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +class TestWorkflowHumanInputFormApi: + def test_get_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + definition = SimpleNamespace( + model_dump=lambda: { + "rendered_content": "Rendered form content", + "inputs": [{"output_variable_name": "name"}], + "default_values": {"name": "Alice", "age": 30, "meta": {"k": "v"}}, + "user_actions": [{"id": "approve", "title": "Approve"}], + } + ) + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + get_definition=lambda: definition, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with app.test_request_context("/form/human_input/token-1", method="GET"): + response = handler(api, app_model=app_model, form_token="token-1") + + payload = json.loads(response.get_data(as_text=True)) + assert payload == { + "form_content": "Rendered form content", + "inputs": [{"output_variable_name": "name"}], + "resolved_default_values": {"name": "Alice", "age": "30", "meta": '{"k": "v"}'}, + "user_actions": [{"id": "approve", "title": "Approve"}], + "expiration_time": int(form.expiration_time.timestamp()), + } + service_mock.get_form_by_token.assert_called_once_with("token-1") + service_mock.ensure_form_active.assert_called_once_with(form) + + def test_get_form_not_in_app(self, app, monkeypatch: 
pytest.MonkeyPatch) -> None: + form = SimpleNamespace( + app_id="another-app", + tenant_id="tenant-1", + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with app.test_request_context("/form/human_input/token-1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, form_token="token-1") + + @pytest.mark.parametrize( + "recipient_type", + [ + RecipientType.CONSOLE, + RecipientType.BACKSTAGE, + RecipientType.EMAIL_MEMBER, + RecipientType.EMAIL_EXTERNAL, + ], + ) + def test_get_rejects_non_service_api_recipient_types( + self, app, monkeypatch: pytest.MonkeyPatch, recipient_type: RecipientType + ) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=recipient_type, + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with app.test_request_context("/form/human_input/token-1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, form_token="token-1") + + service_mock.ensure_form_active.assert_not_called() + + def test_post_success(self, app, monkeypatch: 
pytest.MonkeyPatch) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.post) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/form/human_input/token-1", + method="POST", + json={"inputs": {"name": "Alice"}, "action": "approve", "user": "external-1"}, + ): + response, status = handler(api, app_model=app_model, end_user=end_user, form_token="token-1") + + assert response == {} + assert status == 200 + service_mock.submit_form_by_token.assert_called_once_with( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token-1", + selected_action_id="approve", + form_data={"name": "Alice"}, + submission_end_user_id="end-user-1", + ) + + @pytest.mark.parametrize( + "recipient_type", + [ + RecipientType.CONSOLE, + RecipientType.BACKSTAGE, + RecipientType.EMAIL_MEMBER, + RecipientType.EMAIL_EXTERNAL, + ], + ) + def test_post_rejects_non_service_api_recipient_types( + self, app, monkeypatch: pytest.MonkeyPatch, recipient_type: RecipientType + ) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=recipient_type, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = 
WorkflowHumanInputFormApi() + handler = _unwrap(api.post) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/form/human_input/token-1", + method="POST", + json={"inputs": {"name": "Alice"}, "action": "approve", "user": "external-1"}, + ): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, form_token="token-1") + + service_mock.submit_form_by_token.assert_not_called() diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py new file mode 100644 index 0000000000..f45a7f9632 --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py @@ -0,0 +1,166 @@ +"""Unit tests for Service API workflow event stream endpoints.""" + +from __future__ import annotations + +import json +import sys +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import Mock + +import pytest +from werkzeug.exceptions import NotFound + +from controllers.service_api.app.error import NotWorkflowAppError +from controllers.service_api.app.workflow_events import WorkflowEventsApi +from models.enums import CreatorUserRole +from models.model import AppMode +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +def _mock_repo_for_run(monkeypatch: pytest.MonkeyPatch, workflow_run): + workflow_events_module = sys.modules["controllers.service_api.app.workflow_events"] + repo = SimpleNamespace(get_workflow_run_by_id_and_tenant_id=lambda **_kwargs: workflow_run) + monkeypatch.setattr( + workflow_events_module.DifyAPIRepositoryFactory, + "create_api_workflow_run_repository", + lambda *_args, **_kwargs: repo, + ) + monkeypatch.setattr(workflow_events_module, "db", SimpleNamespace(engine=object())) + return workflow_events_module + + +class TestWorkflowEventsApi: + def 
test_wrong_app_mode(self, app) -> None: + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(mode=AppMode.CHAT.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotWorkflowAppError): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_workflow_run_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + _mock_repo_for_run(monkeypatch, workflow_run=None) + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_workflow_run_permission_denied(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.ACCOUNT, + created_by="another-user", + finished_at=None, + ) + _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_finished_run_returns_sse(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=datetime(2099, 1, 1, tzinfo=UTC), + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + 
monkeypatch.setattr( + workflow_events_module.WorkflowResponseConverter, + "workflow_run_result_to_finish_response", + lambda **_kwargs: SimpleNamespace( + model_dump=lambda mode="json": {"task_id": "run-1", "status": "succeeded"}, + event=SimpleNamespace(value="workflow_finished"), + ), + ) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.mimetype == "text/event-stream" + body = response.get_data(as_text=True).strip() + assert body.startswith("data: ") + payload = json.loads(body[len("data: ") :]) + assert payload["task_id"] == "run-1" + assert payload["event"] == "workflow_finished" + + def test_running_run_streams_events(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + msg_generator.retrieve_events.return_value = ["raw-event"] + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: streamed\n\n"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + response = handler(api, app_model=app_model, 
end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: streamed\n\n" + msg_generator.retrieve_events.assert_called_once_with( + AppMode.WORKFLOW, + "run-1", + terminal_events=None, + ) + workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) + + def test_running_run_with_snapshot(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: snapshot\n\n"]) + snapshot_builder = Mock(return_value=["snapshot-events"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + monkeypatch.setattr(workflow_events_module, "build_workflow_event_stream", snapshot_builder) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1&include_state_snapshot=true", method="GET"): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: snapshot\n\n" + msg_generator.retrieve_events.assert_not_called() + snapshot_builder.assert_called_once() + workflow_generator.convert_to_event_stream.assert_called_once_with(["snapshot-events"]) diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py index f2df35d7d0..6debeb4fdd 100644 --- 
a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py @@ -1,7 +1,10 @@ from collections.abc import Generator +import pytest + from core.app.apps.advanced_chat.generate_response_converter import AdvancedChatAppGenerateResponseConverter from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, ErrorStreamResponse, @@ -10,7 +13,8 @@ from core.app.entities.task_entities import ( NodeStartStreamResponse, PingStreamResponse, ) -from graphon.enums import WorkflowNodeExecutionStatus +from graphon.entities.pause_reason import PauseReasonType +from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus class TestAdvancedChatGenerateResponseConverter: @@ -28,6 +32,37 @@ class TestAdvancedChatGenerateResponseConverter: response = AdvancedChatAppGenerateResponseConverter.convert_blocking_simple_response(blocking) assert "usage" not in response["metadata"] + def test_blocking_full_response_derives_pause_data_from_model_dump(self, monkeypatch: pytest.MonkeyPatch): + data = AdvancedChatPausedBlockingResponse.Data( + id="msg-1", + mode="chat", + conversation_id="c1", + message_id="m1", + workflow_run_id="run-1", + answer="partial", + metadata={"usage": {"total_tokens": 1}}, + created_at=1, + paused_nodes=["node-1"], + reasons=[{"type": PauseReasonType.HUMAN_INPUT_REQUIRED, "form_id": "form-1"}], + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ) + original_model_dump = type(data).model_dump + + def _model_dump_with_future_field(self, *args, **kwargs): + payload = original_model_dump(self, *args, **kwargs) + payload["future_field"] = "future-value" + return payload + + monkeypatch.setattr(type(data), "model_dump", _model_dump_with_future_field) + blocking = AdvancedChatPausedBlockingResponse(task_id="t1", data=data) 
+ + response = AdvancedChatAppGenerateResponseConverter.convert_blocking_full_response(blocking) + + assert response["data"]["future_field"] == "future-value" + def test_stream_simple_response_includes_node_events(self): node_start = NodeStartStreamResponse( task_id="t1", diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 29fd63c063..64bcfa9a18 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -39,15 +39,19 @@ from core.app.entities.queue_entities import ( QueueWorkflowSucceededEvent, ) from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, AnnotationReply, AnnotationReplyAccount, + HumanInputRequiredResponse, MessageAudioStreamResponse, MessageEndStreamResponse, PingStreamResponse, ) from core.base.tts.app_generator_tts_publisher import AudioTrunk from core.workflow.system_variables import build_system_variables +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import BuiltinNodeTypes +from graphon.nodes.human_input.entities import UserAction from graphon.runtime import GraphRuntimeState, VariablePool from libs.datetime_utils import naive_utc_now from models.enums import MessageStatus @@ -123,6 +127,57 @@ class TestAdvancedChatGenerateTaskPipeline: assert response.data.answer == "done" assert response.data.metadata == {"k": "v"} + def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): + pipeline = _make_pipeline() + pipeline._task_state.answer = "partial answer" + pipeline._workflow_run_id = "run-id" + pipeline._graph_runtime_state = GraphRuntimeState( + variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + start_at=0.0, + total_tokens=7, + 
node_run_steps=3, + ) + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Approval", + form_content="Need approval", + inputs=[], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + form_token="token-1", + resolved_default_values={}, + expiration_time=123, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, AdvancedChatPausedBlockingResponse) + assert response.data.workflow_run_id == "run-id" + assert response.data.status == "paused" + assert response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [ + { + "TYPE": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Approval", + "form_content": "Need approval", + "inputs": [], + "actions": [{"id": "approve", "title": "Approve", "button_style": "default"}], + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {}, + "expiration_time": 123, + } + ] + def test_handle_text_chunk_event_updates_state(self): pipeline = _make_pipeline() pipeline._message_cycle_manager = SimpleNamespace( diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py b/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py new file mode 100644 index 0000000000..560652f8cb --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from collections.abc import Generator + +from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter +from core.app.entities.app_invoke_entities import InvokeFrom +from core.app.entities.task_entities import ( + AppStreamResponse, + PingStreamResponse, + WorkflowAppBlockingResponse, + WorkflowAppStreamResponse, +) +from graphon.enums 
import WorkflowExecutionStatus + + +class _DummyConverter(AppGenerateResponseConverter[WorkflowAppBlockingResponse]): + blocking_full_calls: list[WorkflowAppBlockingResponse] = [] + blocking_simple_calls: list[WorkflowAppBlockingResponse] = [] + stream_full_calls: list[Generator[AppStreamResponse, None, None]] = [] + stream_simple_calls: list[Generator[AppStreamResponse, None, None]] = [] + + @classmethod + def reset(cls) -> None: + cls.blocking_full_calls = [] + cls.blocking_simple_calls = [] + cls.stream_full_calls = [] + cls.stream_simple_calls = [] + + @classmethod + def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: + cls.blocking_full_calls.append(blocking_response) + return {"kind": "blocking-full", "task_id": blocking_response.task_id} + + @classmethod + def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: + cls.blocking_simple_calls.append(blocking_response) + return {"kind": "blocking-simple", "task_id": blocking_response.task_id} + + @classmethod + def convert_stream_full_response( + cls, stream_response: Generator[AppStreamResponse, None, None] + ) -> Generator[dict | str, None, None]: + cls.stream_full_calls.append(stream_response) + yield {"kind": "stream-full"} + + @classmethod + def convert_stream_simple_response( + cls, stream_response: Generator[AppStreamResponse, None, None] + ) -> Generator[dict | str, None, None]: + cls.stream_simple_calls.append(stream_response) + yield {"kind": "stream-simple"} + + +def _build_blocking_response() -> WorkflowAppBlockingResponse: + return WorkflowAppBlockingResponse( + task_id="task-1", + workflow_run_id="run-1", + data=WorkflowAppBlockingResponse.Data( + id="run-1", + workflow_id="workflow-1", + status=WorkflowExecutionStatus.SUCCEEDED, + outputs={"ok": True}, + error=None, + elapsed_time=0.1, + total_tokens=0, + total_steps=1, + created_at=1, + finished_at=2, + ), + ) + + +def 
_build_stream_response() -> Generator[AppStreamResponse, None, None]: + yield WorkflowAppStreamResponse( + workflow_run_id="run-1", + stream_response=PingStreamResponse(task_id="task-1"), + ) + + +def test_convert_routes_blocking_response_by_invoke_from() -> None: + _DummyConverter.reset() + blocking_response = _build_blocking_response() + + full_result = _DummyConverter.convert(blocking_response, InvokeFrom.SERVICE_API) + simple_result = _DummyConverter.convert(blocking_response, InvokeFrom.WEB_APP) + + assert full_result == {"kind": "blocking-full", "task_id": "task-1"} + assert simple_result == {"kind": "blocking-simple", "task_id": "task-1"} + assert _DummyConverter.blocking_full_calls == [blocking_response] + assert _DummyConverter.blocking_simple_calls == [blocking_response] + + +def test_convert_routes_stream_response_by_invoke_from() -> None: + _DummyConverter.reset() + + full_result = list(_DummyConverter.convert(_build_stream_response(), InvokeFrom.SERVICE_API)) + simple_result = list(_DummyConverter.convert(_build_stream_response(), InvokeFrom.WEB_APP)) + + assert full_result == [{"kind": "stream-full"}] + assert simple_result == [{"kind": "stream-simple"}] + assert len(_DummyConverter.stream_full_calls) == 1 + assert len(_DummyConverter.stream_simple_calls) == 1 diff --git a/api/tests/unit_tests/core/app/apps/test_message_generator.py b/api/tests/unit_tests/core/app/apps/test_message_generator.py index 25377e633e..90c9abf35c 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_generator.py @@ -1,6 +1,7 @@ from unittest.mock import Mock, patch from core.app.apps.message_generator import MessageGenerator +from core.app.entities.task_entities import StreamEvent from models.model import AppMode @@ -23,7 +24,21 @@ class TestMessageGenerator: "core.app.apps.message_generator.stream_topic_events", return_value=iter([{"event": "ping"}]) ) as mock_stream, ): - events = 
list(MessageGenerator.retrieve_events(AppMode.WORKFLOW, "run-1", idle_timeout=1, ping_interval=2)) + events = list( + MessageGenerator.retrieve_events( + AppMode.WORKFLOW, + "run-1", + idle_timeout=1, + ping_interval=2, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) + ) assert events == [{"event": "ping"}] - mock_stream.assert_called_once() + mock_stream.assert_called_once_with( + topic="topic", + idle_timeout=1, + ping_interval=2, + on_subscribe=None, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index a7714c56ce..58f0e47a4b 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -88,6 +88,10 @@ def test_normalize_terminal_events_defaults(): } +def test_normalize_terminal_events_empty_values(): + assert _normalize_terminal_events([]) == set({}) + + def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] @@ -106,3 +110,21 @@ def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): assert next(generator) == StreamEvent.PING.value # next receive yields None -> ping interval triggers assert next(generator) == StreamEvent.PING.value + + +def test_stream_topic_events_can_continue_past_pause(): + topic = FakeTopic() + topic.publish(json.dumps({"event": StreamEvent.WORKFLOW_PAUSED.value}).encode()) + topic.publish(json.dumps({"event": StreamEvent.WORKFLOW_FINISHED.value}).encode()) + + generator = stream_topic_events( + topic=topic, + idle_timeout=1.0, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) + + assert next(generator) == StreamEvent.PING.value + assert next(generator)["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert next(generator)["event"] == StreamEvent.WORKFLOW_FINISHED.value + with 
pytest.raises(StopIteration): + next(generator) diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 99433478d3..0bcc1029b0 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -36,11 +36,12 @@ from core.app.entities.queue_entities import ( ) from core.app.entities.task_entities import ( ErrorStreamResponse, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, PingStreamResponse, + WorkflowAppPausedBlockingResponse, WorkflowFinishStreamResponse, - WorkflowPauseStreamResponse, WorkflowStartStreamResponse, ) from core.base.tts.app_generator_tts_publisher import AudioTrunk @@ -91,27 +92,50 @@ def _make_pipeline(): class TestWorkflowGenerateTaskPipeline: - def test_to_blocking_response_handles_pause(self): + def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): pipeline = _make_pipeline() + pipeline._graph_runtime_state = GraphRuntimeState( + variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + start_at=0.0, + total_tokens=5, + node_run_steps=2, + ) def _gen(): - yield WorkflowPauseStreamResponse( + yield HumanInputRequiredResponse( task_id="task", - workflow_run_id="run", - data=WorkflowPauseStreamResponse.Data( - workflow_run_id="run", - status=WorkflowExecutionStatus.PAUSED, - outputs={}, - created_at=1, - elapsed_time=0.1, - total_tokens=0, - total_steps=0, + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Human Input", + form_content="content", + expiration_time=1, ), ) response = pipeline._to_blocking_response(_gen()) + assert isinstance(response, WorkflowAppPausedBlockingResponse) + assert 
response.workflow_run_id == "run-id" assert response.data.status == WorkflowExecutionStatus.PAUSED + assert response.data.created_at == 0 + assert response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [ + { + "TYPE": "human_input_required", + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "content", + "inputs": [], + "actions": [], + "display_in_ui": False, + "form_token": None, + "resolved_default_values": {}, + "expiration_time": 1, + } + ] def test_to_blocking_response_handles_finish(self): pipeline = _make_pipeline() diff --git a/api/tests/unit_tests/core/workflow/test_human_input_forms.py b/api/tests/unit_tests/core/workflow/test_human_input_forms.py index 6071a95a57..e508815b35 100644 --- a/api/tests/unit_tests/core/workflow/test_human_input_forms.py +++ b/api/tests/unit_tests/core/workflow/test_human_input_forms.py @@ -1,6 +1,7 @@ from types import SimpleNamespace -from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_forms import _load_form_tokens_by_form_id, load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface from models.human_input import RecipientType @@ -53,3 +54,50 @@ def test_load_form_tokens_by_form_id_ignores_unsupported_recipients() -> None: ) assert load_form_tokens_by_form_id(["form-1"], session=session) == {} + + +def test_load_form_tokens_by_form_id_uses_shared_priority() -> None: + session = _FakeSession( + recipients=[ + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + access_token="web-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.CONSOLE, + access_token="console-token", + ), + ] + ) + + assert _load_form_tokens_by_form_id(session, ["form-1"]) == {"form-1": "console-token"} + + +def test_load_form_tokens_by_form_id_uses_web_token_for_service_api_surface() -> None: + session = _FakeSession( + recipients=[ + 
SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + access_token="web-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.CONSOLE, + access_token="console-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.BACKSTAGE, + access_token="backstage-token", + ), + ] + ) + + assert load_form_tokens_by_form_id( + ["form-1"], + session=session, + surface=HumanInputSurface.SERVICE_API, + ) == {"form-1": "web-token"} diff --git a/api/tests/unit_tests/core/workflow/test_human_input_policy.py b/api/tests/unit_tests/core/workflow/test_human_input_policy.py new file mode 100644 index 0000000000..e6d0366af5 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/test_human_input_policy.py @@ -0,0 +1,50 @@ +from core.workflow.human_input_policy import ( + HumanInputSurface, + get_preferred_form_token, + is_recipient_type_allowed_for_surface, +) +from models.human_input import RecipientType + + +def test_service_api_only_allows_public_webapp_forms() -> None: + assert is_recipient_type_allowed_for_surface( + RecipientType.STANDALONE_WEB_APP, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.CONSOLE, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.BACKSTAGE, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.EMAIL_MEMBER, + HumanInputSurface.SERVICE_API, + ) + + +def test_console_only_allows_internal_console_surfaces() -> None: + assert is_recipient_type_allowed_for_surface( + RecipientType.CONSOLE, + HumanInputSurface.CONSOLE, + ) + assert is_recipient_type_allowed_for_surface( + RecipientType.BACKSTAGE, + HumanInputSurface.CONSOLE, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.STANDALONE_WEB_APP, + HumanInputSurface.CONSOLE, + ) + + +def test_preferred_form_token_uses_shared_priority_order() -> 
None: + recipients = [ + (RecipientType.STANDALONE_WEB_APP, "web-token"), + (RecipientType.CONSOLE, "console-token"), + (RecipientType.BACKSTAGE, "backstage-token"), + ] + + assert get_preferred_form_token(recipients) == "backstage-token" diff --git a/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py new file mode 100644 index 0000000000..ac4b087b91 --- /dev/null +++ b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from datetime import UTC, datetime +from types import SimpleNamespace + +from graphon.nodes.human_input.entities import FormDefinition, FormInput, UserAction +from graphon.nodes.human_input.enums import FormInputType +from models.human_input import RecipientType +from repositories.sqlalchemy_api_workflow_run_repository import _build_human_input_required_reason + + +def _build_form_model() -> SimpleNamespace: + expiration_time = datetime(2024, 1, 1, tzinfo=UTC) + definition = FormDefinition( + form_content="content", + inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], + user_actions=[UserAction(id="approve", title="Approve")], + rendered_content="rendered", + expiration_time=expiration_time, + default_values={"name": "Alice"}, + node_title="Ask Name", + display_in_ui=True, + ) + return SimpleNamespace( + id="form-1", + node_id="node-1", + form_definition=definition.model_dump_json(), + expiration_time=expiration_time, + ) + + +def _build_reason_model() -> SimpleNamespace: + return SimpleNamespace(form_id="form-1", node_id="node-1") + + +def test_build_human_input_required_reason_prefers_standalone_web_app_token() -> None: + reason = _build_human_input_required_reason( + _build_reason_model(), + _build_form_model(), + [ + SimpleNamespace(recipient_type=RecipientType.BACKSTAGE, access_token="btok"), + 
SimpleNamespace(recipient_type=RecipientType.CONSOLE, access_token="ctok"), + SimpleNamespace(recipient_type=RecipientType.STANDALONE_WEB_APP, access_token="wtok"), + ], + ) + + assert reason.node_title == "Ask Name" + assert reason.resolved_default_values == {"name": "Alice"} + assert not hasattr(reason, "form_token") + + +def test_build_human_input_required_reason_falls_back_to_console_token() -> None: + reason = _build_human_input_required_reason( + _build_reason_model(), + _build_form_model(), + [ + SimpleNamespace(recipient_type=RecipientType.BACKSTAGE, access_token="btok"), + SimpleNamespace(recipient_type=RecipientType.CONSOLE, access_token="ctok"), + ], + ) + + assert reason.node_id == "node-1" + assert reason.actions[0].id == "approve" + assert not hasattr(reason, "form_token") diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index c2b430c551..119a7adc45 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -327,7 +327,8 @@ class TestGenerate: streaming=False, ) assert result == {"result": "advanced-blocking"} - assert gen_spy.call_args.kwargs.get("streaming") is False + call_kwargs = gen_spy.call_args.kwargs + assert call_kwargs.get("streaming") is False retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index d570dce107..dfdbd9acd6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -1,14 +1,20 @@ import json import queue -from collections.abc import Sequence +from collections.abc import Mapping, Sequence from dataclasses import 
dataclass from datetime import UTC, datetime +from itertools import cycle from threading import Event +from types import SimpleNamespace +from typing import Any, cast +from unittest.mock import MagicMock import pytest +from sqlalchemy.orm import Session, sessionmaker from core.app.app_config.entities import WorkflowUIBasedAppConfig from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity +from core.app.entities.task_entities import StreamEvent from core.app.layers.pause_state_persist_layer import WorkflowResumptionContext, _WorkflowGenerateEntityWrapper from graphon.entities.pause_reason import HumanInputRequired from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus @@ -18,11 +24,14 @@ from models.model import AppMode from models.workflow import WorkflowRun from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot from repositories.entities.workflow_pause import WorkflowPauseEntity +from services import workflow_event_snapshot_service as service_module from services.workflow_event_snapshot_service import ( BufferState, MessageContext, _build_snapshot_events, + _is_terminal_event, _resolve_task_id, + build_workflow_event_stream, ) @@ -125,50 +134,6 @@ def _build_resumption_context(task_id: str) -> WorkflowResumptionContext: ) -def test_build_snapshot_events_includes_pause_event() -> None: - workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) - snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) - resumption_context = _build_resumption_context("task-ctx") - pause_entity = _FakePauseEntity( - pause_id="pause-1", - workflow_run_id="run-1", - paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), - pause_reasons=[ - HumanInputRequired( - form_id="form-1", - form_content="content", - node_id="node-1", - node_title="Human Input", - ) - ], - ) - - events = _build_snapshot_events( - workflow_run=workflow_run, - node_snapshots=[snapshot], - task_id="task-ctx", - 
message_context=None, - pause_entity=pause_entity, - resumption_context=resumption_context, - ) - - assert [event["event"] for event in events] == [ - "workflow_started", - "node_started", - "node_finished", - "workflow_paused", - ] - assert events[2]["data"]["status"] == WorkflowNodeExecutionStatus.PAUSED.value - pause_data = events[-1]["data"] - assert pause_data["paused_nodes"] == ["node-1"] - assert pause_data["outputs"] == {"result": "value"} - assert pause_data["status"] == WorkflowExecutionStatus.PAUSED.value - assert pause_data["created_at"] == int(workflow_run.created_at.timestamp()) - assert pause_data["elapsed_time"] == workflow_run.elapsed_time - assert pause_data["total_tokens"] == workflow_run.total_tokens - assert pause_data["total_steps"] == workflow_run.total_steps - - def test_build_snapshot_events_applies_message_context() -> None: workflow_run = _build_workflow_run(WorkflowExecutionStatus.RUNNING) snapshot = _build_snapshot(WorkflowNodeExecutionStatus.SUCCEEDED) @@ -222,3 +187,656 @@ def test_resolve_task_id_priority(context_task_id, buffered_task_id, expected) - buffer_state.task_id_ready.set() task_id = _resolve_task_id(resumption_context, buffer_state, "run-1", wait_timeout=0.0) assert task_id == expected + + +def _build_workflow_run_additional(status: WorkflowExecutionStatus = WorkflowExecutionStatus.RUNNING) -> WorkflowRun: + return WorkflowRun( + id="run-1", + tenant_id="tenant-1", + app_id="app-1", + workflow_id="workflow-1", + type="workflow", + triggered_from="app-run", + version="v1", + graph=None, + inputs=json.dumps({"query": "hello"}), + status=status, + outputs=json.dumps({}), + error=None, + elapsed_time=1.2, + total_tokens=5, + total_steps=2, + created_by_role=CreatorUserRole.END_USER, + created_by="user-1", + created_at=datetime(2024, 1, 1, tzinfo=UTC), + ) + + +def _build_resumption_context_additional(task_id: str) -> WorkflowResumptionContext: + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant-1", + app_id="app-1", + 
app_mode=AppMode.WORKFLOW, + workflow_id="workflow-1", + ) + generate_entity = WorkflowAppGenerateEntity( + task_id=task_id, + app_config=app_config, + inputs={}, + files=[], + user_id="user-1", + stream=True, + invoke_from=InvokeFrom.EXPLORE, + call_depth=0, + workflow_execution_id="run-1", + ) + runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0) + runtime_state.outputs = {"answer": "ok"} + wrapper = _WorkflowGenerateEntityWrapper(entity=generate_entity) + return WorkflowResumptionContext( + generate_entity=wrapper, + serialized_graph_runtime_state=runtime_state.dumps(), + ) + + +class _SessionContext: + def __init__(self, session: Any) -> None: + self._session = session + + def __enter__(self) -> Any: + return self._session + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> bool: + return False + + +class _SessionMaker: + def __init__(self, session: Any) -> None: + self._session = session + + def __call__(self) -> _SessionContext: + return _SessionContext(self._session) + + +class _SubscriptionContext: + def __init__(self, subscription: Any) -> None: + self._subscription = subscription + + def __enter__(self) -> Any: + return self._subscription + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> bool: + return False + + +class _Topic: + def __init__(self, subscription: Any) -> None: + self._subscription = subscription + + def subscribe(self) -> _SubscriptionContext: + return _SubscriptionContext(self._subscription) + + +class _StaticSubscription: + def receive(self, timeout: int = 1) -> None: + return None + + +@dataclass(frozen=True) +class _PauseEntity(WorkflowPauseEntity): + state: bytes + + @property + def id(self) -> str: + return "pause-1" + + @property + def workflow_execution_id(self) -> str: + return "run-1" + + @property + def resumed_at(self) -> datetime | None: + return None + + @property + def paused_at(self) -> datetime: + return datetime(2024, 1, 1, tzinfo=UTC) + + def get_state(self) -> bytes: + return 
self.state + + def get_pause_reasons(self) -> list[Any]: + return [] + + +def test_get_message_context_should_return_none_when_no_message() -> None: + # Arrange + session = SimpleNamespace(scalar=MagicMock(return_value=None)) + session_maker = _SessionMaker(session) + + # Act + result = service_module._get_message_context(cast(sessionmaker[Session], session_maker), "run-1") + + # Assert + assert result is None + + +def test_get_message_context_should_default_created_at_to_zero_when_message_has_no_timestamp() -> None: + # Arrange + message = SimpleNamespace( + id="msg-1", + conversation_id="conv-1", + created_at=None, + answer="answer", + ) + session = SimpleNamespace(scalar=MagicMock(return_value=message)) + session_maker = _SessionMaker(session) + + # Act + result = service_module._get_message_context(cast(sessionmaker[Session], session_maker), "run-1") + + # Assert + assert result is not None + assert result.created_at == 0 + assert result.message_id == "msg-1" + assert result.conversation_id == "conv-1" + assert result.answer == "answer" + + +def test_load_resumption_context_should_return_none_when_pause_entity_missing() -> None: + # Arrange + + # Act + result = service_module._load_resumption_context(None) + + # Assert + assert result is None + + +def test_load_resumption_context_should_return_none_when_pause_entity_state_is_invalid() -> None: + # Arrange + pause_entity = _PauseEntity(state=b"not-a-valid-state") + + # Act + result = service_module._load_resumption_context(pause_entity) + + # Assert + assert result is None + + +def test_load_resumption_context_should_parse_valid_state_into_context() -> None: + # Arrange + context = _build_resumption_context_additional(task_id="task-ctx") + pause_entity = _PauseEntity(state=context.dumps().encode()) + + # Act + result = service_module._load_resumption_context(pause_entity) + + # Assert + assert result is not None + assert result.get_generate_entity().task_id == "task-ctx" + + +def 
test_resolve_task_id_should_return_workflow_run_id_when_buffer_state_is_missing() -> None: + # Arrange + + # Act + result = service_module._resolve_task_id( + resumption_context=None, + buffer_state=None, + workflow_run_id="run-1", + ) + + # Assert + assert result == "run-1" + + +@pytest.mark.parametrize( + ("payload", "expected"), + [ + (b'{"event":"node_started"}', {"event": "node_started"}), + (b"invalid-json", None), + (b"[]", None), + ], +) +def test_parse_event_message_should_parse_only_json_object( + payload: bytes, + expected: dict[str, Any] | None, +) -> None: + # Arrange + + # Act + result = service_module._parse_event_message(payload) + + # Assert + assert result == expected + + +def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: + # Arrange + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + + # Act + is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) + paused_without_flag = service_module._is_terminal_event(paused_event, close_on_pause=False) + paused_with_flag = service_module._is_terminal_event(paused_event, close_on_pause=True) + + # Assert + assert is_finished is True + assert paused_without_flag is False + assert paused_with_flag is True + assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + + +def test_apply_message_context_should_update_payload_when_context_exists() -> None: + # Arrange + payload: dict[str, Any] = {"event": "workflow_started"} + context = MessageContext(conversation_id="conv-1", message_id="msg-1", created_at=1700000000) + + # Act + service_module._apply_message_context(payload, context) + + # Assert + assert payload["conversation_id"] == "conv-1" + assert payload["message_id"] == "msg-1" + assert payload["created_at"] == 1700000000 + + +def test_start_buffering_should_capture_task_id_and_enqueue_event() -> None: + # Arrange + class 
Subscription: + def __init__(self) -> None: + self._calls = 0 + + def receive(self, timeout: int = 1) -> bytes | None: + self._calls += 1 + if self._calls == 1: + return b'{"event":"node_started","task_id":"task-1"}' + return None + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + ready = buffer_state.task_id_ready.wait(timeout=1) + event = buffer_state.queue.get(timeout=1) + buffer_state.stop_event.set() + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert ready is True + assert finished is True + assert buffer_state.task_id_hint == "task-1" + assert event["event"] == "node_started" + + +def test_start_buffering_should_drop_old_event_when_queue_is_full( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + class QueueWithSingleFull: + def __init__(self) -> None: + self._first_put = True + self.items: list[dict[str, Any]] = [{"event": "old"}] + + def put_nowait(self, item: dict[str, Any]) -> None: + if self._first_put: + self._first_put = False + raise queue.Full + self.items.append(item) + + def get_nowait(self) -> dict[str, Any]: + if not self.items: + raise queue.Empty + return self.items.pop(0) + + def empty(self) -> bool: + return len(self.items) == 0 + + fake_queue = QueueWithSingleFull() + monkeypatch.setattr(service_module.queue, "Queue", lambda maxsize=2048: fake_queue) + + class Subscription: + def __init__(self) -> None: + self._calls = 0 + + def receive(self, timeout: int = 1) -> bytes | None: + self._calls += 1 + if self._calls == 1: + return b'{"event":"node_started","task_id":"task-2"}' + return None + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + ready = buffer_state.task_id_ready.wait(timeout=1) + buffer_state.stop_event.set() + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert ready is True + assert finished is True + assert fake_queue.items[-1]["task_id"] == "task-2" + + +def 
test_start_buffering_should_set_done_event_when_subscription_raises() -> None: + # Arrange + class Subscription: + def receive(self, timeout: int = 1) -> bytes | None: + raise RuntimeError("subscription failure") + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert finished is True + + +def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr( + service_module, + "_get_message_context", + MagicMock(return_value=MessageContext("conv-1", "msg-1", 1700000000)), + ) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + monkeypatch.setattr( + service_module, + "_build_snapshot_events", + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + ) + + # Act + 
events = list( + build_workflow_event_stream( + app_mode=AppMode.ADVANCED_CHAT, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + # Assert + assert events[0] == StreamEvent.PING.value + finished_event = cast(Mapping[str, Any], events[1]) + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert buffer_state.stop_event.is_set() is True + node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() + called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs + assert called_kwargs["workflow_run_id"] == "run-1" + + +def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_idle_timeout( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_build_snapshot_events", MagicMock(return_value=[])) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + + class AlwaysEmptyQueue: + def empty(self) -> bool: + return False + + def get(self, timeout: int = 1) -> None: + raise queue.Empty + + buffer_state = BufferState( + queue=AlwaysEmptyQueue(), # type: ignore[arg-type] + stop_event=Event(), + 
done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + time_values = cycle([0.0, 6.0, 21.0, 26.0]) + monkeypatch.setattr(service_module.time, "time", lambda: next(time_values)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + idle_timeout=20.0, + ping_interval=5.0, + ) + ) + + # Assert + assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert buffer_state.stop_event.is_set() is True + + +def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_build_snapshot_events", MagicMock(return_value=[])) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + buffer_state.done_event.set() + monkeypatch.setattr(service_module, "_start_buffering", 
MagicMock(return_value=buffer_state)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + # Assert + assert events == [StreamEvent.PING.value] + assert buffer_state.stop_event.is_set() is True + + +def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.PAUSED) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock(side_effect=RuntimeError("boom"))) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + 
# Assert + assert events[0] == StreamEvent.PING.value + assert snapshot_builder.call_args.kwargs["pause_entity"] is None + + +def test_is_terminal_event_respects_close_on_pause_flag() -> None: + pause_event = {"event": "workflow_paused"} + finish_event = {"event": "workflow_finished"} + + assert _is_terminal_event(pause_event, close_on_pause=True) is True + assert _is_terminal_event(pause_event, close_on_pause=False) is False + assert _is_terminal_event(finish_event, close_on_pause=False) is True + + +def test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) + snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) + resumption_context = _build_resumption_context("task-ctx") + monkeypatch.setattr( + service_module, "load_form_tokens_by_form_id", lambda form_ids, session=None, surface=None: {"form-1": "wtok"} + ) + session_maker = _SessionMaker( + SimpleNamespace( + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + ) + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + form_content="content", + node_id="node-1", + node_title="Human Input", + form_token="wtok", + ) + ], + ) + + events = _build_snapshot_events( + workflow_run=workflow_run, + node_snapshots=[snapshot], + task_id="task-ctx", + message_context=None, + pause_entity=pause_entity, + resumption_context=resumption_context, + session_maker=cast(sessionmaker[Session], session_maker), + ) + + assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["data"]["form_token"] == "wtok" + assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + pause_data = events[-1]["data"] + assert pause_data["reasons"][0]["form_token"] == "wtok" + 
assert pause_data["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + + +def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_context( + monkeypatch: pytest.MonkeyPatch, +) -> None: + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.PAUSED) + topic = _Topic(_StaticSubscription()) + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + form_content="content", + node_id="node-1", + node_title="Human Input", + ) + ], + ) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock(return_value=pause_entity)) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr( + service_module, "_load_resumption_context", MagicMock(return_value=_build_resumption_context("task-1")) + ) + monkeypatch.setattr( + service_module, "load_form_tokens_by_form_id", lambda form_ids, session=None, surface=None: {"form-1": "wtok"} + ) + + session = SimpleNamespace( + scalar=MagicMock(return_value=None), + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + session_maker = _SessionMaker(session) + + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=cast(sessionmaker[Session], session_maker), + ) + ) + + pause_event = cast(Mapping[str, Any], events[-1]) + assert 
pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" + assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index d3cf632b47..72508bef52 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -7,11 +7,17 @@ from unittest.mock import MagicMock import pytest -from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom, WorkflowAppGenerateEntity from models.enums import CreatorUserRole from models.model import App, AppMode, Conversation from models.workflow import Workflow, WorkflowRun -from tasks.app_generate.workflow_execute_task import _publish_streaming_response, _resume_app_execution +from repositories.sqlalchemy_api_workflow_run_repository import _WorkflowRunError +from tasks.app_generate.workflow_execute_task import ( + _publish_streaming_response, + _resume_advanced_chat, + _resume_app_execution, + _resume_workflow, +) class _FakeSessionContext: @@ -38,12 +44,28 @@ def _build_advanced_chat_generate_entity(conversation_id: str | None) -> Advance ) +def _build_workflow_generate_entity(stream: bool) -> WorkflowAppGenerateEntity: + return WorkflowAppGenerateEntity( + task_id="task-id", + inputs={}, + files=[], + user_id="user-id", + stream=stream, + invoke_from=InvokeFrom.WEB_APP, + workflow_execution_id="workflow-run-id", + ) + + +def _single_event_generator(payload): + yield payload + + @pytest.fixture -def mock_topic(mocker) -> MagicMock: +def mock_topic(monkeypatch: pytest.MonkeyPatch) -> MagicMock: topic = MagicMock() - mocker.patch( + monkeypatch.setattr( 
"tasks.app_generate.workflow_execute_task.MessageBasedAppGenerator.get_response_topic", - return_value=topic, + lambda *_args, **_kwargs: topic, ) return topic @@ -67,31 +89,35 @@ def test_publish_streaming_response_coerces_string_uuid(mock_topic: MagicMock): mock_topic.publish.assert_called_once_with(json.dumps({"event": "bar"}).encode()) -def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(mocker): +def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(monkeypatch: pytest.MonkeyPatch): workflow_run_id = "run-id" conversation_id = "conversation-id" message = MagicMock() - mocker.patch("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) pause_entity = MagicMock() pause_entity.get_state.return_value = b"state" workflow_run_repo = MagicMock() workflow_run_repo.get_workflow_pause.return_value = pause_entity - mocker.patch( + monkeypatch.setattr( "tasks.app_generate.workflow_execute_task.DifyAPIRepositoryFactory.create_api_workflow_run_repository", - return_value=workflow_run_repo, + lambda *_args, **_kwargs: workflow_run_repo, ) generate_entity = _build_advanced_chat_generate_entity(conversation_id) resumption_context = MagicMock() resumption_context.serialized_graph_runtime_state = "{}" resumption_context.get_generate_entity.return_value = generate_entity - mocker.patch( - "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", return_value=resumption_context + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", + lambda *_args, **_kwargs: resumption_context, + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", + lambda *_args, **_kwargs: MagicMock(), ) - mocker.patch("tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", return_value=MagicMock()) 
workflow_run = SimpleNamespace( workflow_id="wf-id", @@ -120,10 +146,15 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m session.get.side_effect = _session_get session.scalar.return_value = message - mocker.patch("tasks.app_generate.workflow_execute_task.Session", return_value=_FakeSessionContext(session)) - mocker.patch("tasks.app_generate.workflow_execute_task._resolve_user_for_run", return_value=MagicMock()) - resume_advanced_chat = mocker.patch("tasks.app_generate.workflow_execute_task._resume_advanced_chat") - mocker.patch("tasks.app_generate.workflow_execute_task._resume_workflow") + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.Session", lambda *_args, **_kwargs: _FakeSessionContext(session) + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._resolve_user_for_run", lambda *_args, **_kwargs: MagicMock() + ) + resume_advanced_chat = MagicMock() + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_advanced_chat", resume_advanced_chat) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_workflow", MagicMock()) _resume_app_execution({"workflow_run_id": workflow_run_id}) @@ -144,29 +175,35 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m assert resume_advanced_chat.call_args.kwargs["message"] is message -def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversation_id(mocker): +def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversation_id( + monkeypatch: pytest.MonkeyPatch, +): workflow_run_id = "run-id" - mocker.patch("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) pause_entity = MagicMock() pause_entity.get_state.return_value = b"state" workflow_run_repo = MagicMock() workflow_run_repo.get_workflow_pause.return_value = pause_entity - 
mocker.patch( + monkeypatch.setattr( "tasks.app_generate.workflow_execute_task.DifyAPIRepositoryFactory.create_api_workflow_run_repository", - return_value=workflow_run_repo, + lambda *_args, **_kwargs: workflow_run_repo, ) generate_entity = _build_advanced_chat_generate_entity(conversation_id=None) resumption_context = MagicMock() resumption_context.serialized_graph_runtime_state = "{}" resumption_context.get_generate_entity.return_value = generate_entity - mocker.patch( - "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", return_value=resumption_context + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", + lambda *_args, **_kwargs: resumption_context, + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", + lambda *_args, **_kwargs: MagicMock(), ) - mocker.patch("tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", return_value=MagicMock()) workflow_run = SimpleNamespace( workflow_id="wf-id", @@ -191,12 +228,152 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat session.get.side_effect = _session_get - mocker.patch("tasks.app_generate.workflow_execute_task.Session", return_value=_FakeSessionContext(session)) - mocker.patch("tasks.app_generate.workflow_execute_task._resolve_user_for_run", return_value=MagicMock()) - resume_advanced_chat = mocker.patch("tasks.app_generate.workflow_execute_task._resume_advanced_chat") + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.Session", lambda *_args, **_kwargs: _FakeSessionContext(session) + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._resolve_user_for_run", lambda *_args, **_kwargs: MagicMock() + ) + resume_advanced_chat = MagicMock() + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_advanced_chat", resume_advanced_chat) _resume_app_execution({"workflow_run_id": workflow_run_id}) 
session.scalar.assert_not_called() workflow_run_repo.resume_workflow_pause.assert_not_called() resume_advanced_chat.assert_not_called() + + +def test_resume_advanced_chat_publishes_events_for_originally_blocking_runs(monkeypatch: pytest.MonkeyPatch): + generate_entity = _build_advanced_chat_generate_entity(conversation_id="conversation-id") + generate_entity.stream = False + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "message"}) + generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.AdvancedChatAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + + _resume_advanced_chat( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + conversation=SimpleNamespace(id="conversation-id"), + message=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + ) + + resumed_entity = generator_instance.resume.call_args.kwargs["application_generate_entity"] + assert resumed_entity.stream is True + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.ADVANCED_CHAT) + + +def test_resume_workflow_publishes_events_for_originally_blocking_runs(monkeypatch: pytest.MonkeyPatch): + 
generate_entity = _build_workflow_generate_entity(stream=False) + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "workflow_finished"}) + generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + workflow_run_repo = MagicMock() + pause_entity = MagicMock() + + _resume_workflow( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + workflow_run_repo=workflow_run_repo, + pause_entity=pause_entity, + ) + + resumed_entity = generator_instance.resume.call_args.kwargs["application_generate_entity"] + assert resumed_entity.stream is True + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.WORKFLOW) + workflow_run_repo.delete_workflow_pause.assert_called_once_with(pause_entity) + + +def test_resume_workflow_ignores_missing_old_pause_after_repause(monkeypatch: pytest.MonkeyPatch): + generate_entity = _build_workflow_generate_entity(stream=False) + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "workflow_paused"}) + 
generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + workflow_run_repo = MagicMock() + workflow_run_repo.delete_workflow_pause.side_effect = _WorkflowRunError("WorkflowPause not found: old-pause") + pause_entity = MagicMock() + + _resume_workflow( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + workflow_run_repo=workflow_run_repo, + pause_entity=pause_entity, + ) + + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.WORKFLOW) + workflow_run_repo.delete_workflow_pause.assert_called_once_with(pause_entity) diff --git a/web/app/components/develop/template/template_advanced_chat.en.mdx b/web/app/components/develop/template/template_advanced_chat.en.mdx index bdfe7a41c1..d9ee9bcc1e 100644 --- a/web/app/components/develop/template/template_advanced_chat.en.mdx +++ b/web/app/components/develop/template/template_advanced_chat.en.mdx @@ -191,6 +191,24 @@ Chat applications support session persistence, allowing previous chat history to - `total_price` (decimal) optional Total cost - `currency` (string) 
optional e.g. `USD` / `RMB` - `created_at` (timestamp) timestamp of start, e.g., 1705395332 + - `event: human_input_required` Workflow paused and requires Human-in-the-Loop input + - `task_id` (string) Task ID, used for request tracking + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `human_input_required` + - `data` (object) detail + - `form_id` (string) Human input form ID + - `node_id` (string) Human input node ID + - `node_title` (string) Human input node title + - `form_content` (string) Rendered form content + - `inputs` (array[object]) Input field definitions + - `actions` (array[object]) User action buttons + - `id` (string) Action ID + - `title` (string) Button text + - `button_style` (string) Button style + - `display_in_ui` (bool) Whether this form should be shown in UI + - `form_token` (string) Token used by `/form/human_input/:form_token` APIs + - `resolved_default_values` (object) Runtime-resolved default values + - `expiration_time` (timestamp) Form expiration time (Unix seconds) - `event: workflow_finished` workflow execution ends, success or failure in different states in the same event - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API - `workflow_run_id` (string) Unique ID of workflow execution @@ -254,6 +272,12 @@ Chat applications support session persistence, allowing previous chat history to }'`} /> ### Blocking Mode + Blocking mode can return a normal chat message or a paused workflow response. + + When advanced chat pauses for Human-in-the-Loop, `event` becomes `workflow_paused`. + The payload still includes `message_id`, `conversation_id`, `answer`, and `workflow_run_id`, and `data` adds `paused_nodes` plus `reasons`. + For `human_input_required`, each reason contains the `form_id` and its `expiration_time`. 
+ ```json {{ title: 'Response' }} { @@ -296,6 +320,83 @@ Chat applications support session persistence, allowing previous chat history to } ``` + + ```json {{ title: 'Paused Response Example' }} + { + "event": "workflow_paused", + "task_id": "8a9cbfcf-e7e0-4b17-aeef-24de57a2659a", + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "mode": "advanced-chat", + "answer": "", + "metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "data": { + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "mode": "advanced-chat", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "answer": "", + "metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "paused_nodes": [ + "1775724080699" + ], + "reasons": [ + { + "form_id": "019d864d-6f55-752c-9f4c-feee67508d5b", + "form_content": "this is form 2:\n\n{{#$output.some_field_2#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field_2", + "default": { + "type": "constant", + "selector": [], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "yes", + "button_style": "default" + }, + { + "id": "reject", + "title": "no", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775724080699", + "node_title": "Human Input 2", + "resolved_default_values": {}, + "form_token": "0dvwTdpTFXgCZmAo2FoiJ5", + "type": "human_input_required", + "expiration_time": 1776333914 + } + ], + "status": "paused", + "elapsed_time": 0.034081, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + ### Streaming Mode ```streaming {{ title: 'Response' }} @@ -314,6 
+415,220 @@ Chat applications support session persistence, allowing previous chat history to data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + Streaming mode can also pause for Human-in-the-Loop. In that case, the SSE stream emits `human_input_required` first and then `workflow_paused`. + + + ```streaming {{ title: 'Paused Streaming Response Example' }} + event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { 
+ "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + 
"button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + @@ -578,6 +893,198 @@ Chat applications support session persistence, allowing previous chat history to --- + + + + Retrieve a pending Human-in-the-Loop form by `form_token`. + + Use this endpoint when streaming returns `human_input_required` with a `form_token`. + + ### Path + - `form_token` (string) Required, token returned by the pause event. 
+ + ### Response + - `form_content` (string) Rendered form content (markdown/plain text) + - `inputs` (array[object]) Form input definitions + - `resolved_default_values` (object) Default values resolved to strings + - `user_actions` (array[object]) Action buttons + - `expiration_time` (timestamp) Form expiration time (Unix seconds) + + ### Errors + - 404, form not found or does not belong to current app + - 412, `human_input_form_submitted`, form already submitted + - 412, `human_input_form_expired`, form expired + + + + + + ```json {{ title: 'Response' }} + { + "form_content": "Please confirm the final answer: {{#$output.answer#}}", + "inputs": [ + { + "label": "Answer", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "Initial value" + }, + "user_actions": [ + { "id": "approve", "title": "Approve", "button_style": "primary" }, + { "id": "reject", "title": "Reject", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + Submit a pending Human-in-the-Loop form. + + ### Path + - `form_token` (string) Required, token returned by the pause event. + + ### Request Body + - `inputs` (object) Required, key/value pairs for form fields. + - `action` (string) Required, selected action ID from `user_actions`. + - `user` (string) Required, end-user identifier. + + ### Response + Returns an empty object on success. + + ### Errors + - 400, `invalid_form_data`, submitted data does not match the form schema + - 404, form not found or does not belong to current app + - 412, `human_input_form_submitted`, form already submitted + - 412, `human_input_form_expired`, form expired + + + + + + ```json {{ title: 'Response' }} + {} + ``` + + + + +--- + + + + + Continue receiving workflow events after submitting a human input form. + + This endpoint returns `text/event-stream` and can be used to observe resumed execution until completion. 
+ + ### Path + - `task_id` (string) Required, workflow run ID (`workflow_run_id`). + + ### Query + - `user` (string) Required, end-user identifier. + - `include_state_snapshot` (bool) Optional, set to `true` to replay from persisted state snapshot before continuing with live events. + - `continue_on_pause` (bool) Optional, set to `true` to keep the stream open across `workflow_paused` events until `workflow_finished`. + + ### Response + Server-Sent Events stream (`text/event-stream`). + Typical events include `workflow_paused`, `node_started`, `node_finished`, `human_input_form_filled`, `human_input_form_timeout`, and `workflow_finished`. + If the workflow has already finished when you call this endpoint, the server returns a single finished event immediately. + + + + + + ```streaming {{ title: 'Response' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: 
{"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 
1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: {"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 
filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- + ### ブロッキングモード + ブロッキングモードでは、通常のチャット応答、または一時停止したワークフロー応答のいずれかが返されます。 + + Advanced Chat が Human-in-the-Loop で一時停止すると、`event` は `workflow_paused` になります。 
+ それでもペイロードには `message_id`、`conversation_id`、`answer`、`workflow_run_id` が含まれ、`data` には `paused_nodes` と `reasons` が追加されます。 + `human_input_required` の各 reason には `form_id` と `expiration_time` が含まれます。 + ```json {{ title: '応答' }} { @@ -296,6 +320,83 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' } ``` + + ```json {{ title: '一時停止レスポンス例' }} + { + "event": "workflow_paused", + "task_id": "8a9cbfcf-e7e0-4b17-aeef-24de57a2659a", + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "mode": "advanced-chat", + "answer": "", + "metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "data": { + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "mode": "advanced-chat", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "answer": "", + "metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "paused_nodes": [ + "1775724080699" + ], + "reasons": [ + { + "form_id": "019d864d-6f55-752c-9f4c-feee67508d5b", + "form_content": "this is form 2:\n\n{{#$output.some_field_2#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field_2", + "default": { + "type": "constant", + "selector": [], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "yes", + "button_style": "default" + }, + { + "id": "reject", + "title": "no", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775724080699", + "node_title": "Human Input 2", + "resolved_default_values": {}, + "form_token": "0dvwTdpTFXgCZmAo2FoiJ5", + "type": "human_input_required", + "expiration_time": 1776333914 
+ } + ], + "status": "paused", + "elapsed_time": 0.034081, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + ### ストリーミングモード ```streaming {{ title: '応答' }} @@ -314,6 +415,220 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + ストリーミングモードでも Human-in-the-Loop により一時停止する場合があります。その場合、SSE ストリームではまず `human_input_required` が送られ、その後に `workflow_paused` が送られます。 + + + ```streaming {{ title: '一時停止ストリーミングレスポンス例' }} + event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + 
"node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + 
"selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + @@ -579,6 +894,198 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' --- + + + + `form_token` から保留中の Human-in-the-Loop フォームを取得します。 + + ストリーミングイベントで `human_input_required`(`form_token` を含む)が返された際に使用します。 + + ### パス + - `form_token` (string) 必須、一時停止イベントで返されたフォームトークン + + ### 応答 + - `form_content` (string) 
レンダリング済みフォーム内容(markdown/plain text) + - `inputs` (array[object]) 入力項目定義 + - `resolved_default_values` (object) 解決済みデフォルト値(文字列) + - `user_actions` (array[object]) アクションボタン一覧 + - `expiration_time` (timestamp) フォーム有効期限(Unix 秒) + + ### エラー + - 404, フォームが存在しない、または現在のアプリに属していない + - 412, `human_input_form_submitted`, 既に送信済み + - 412, `human_input_form_expired`, 期限切れ + + + + + + ```json {{ title: '応答' }} + { + "form_content": "最終回答を確認してください: {{#$output.answer#}}", + "inputs": [ + { + "label": "回答", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "初期値" + }, + "user_actions": [ + { "id": "approve", "title": "承認", "button_style": "primary" }, + { "id": "reject", "title": "却下", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + 保留中の Human-in-the-Loop フォームを送信します。 + + ### パス + - `form_token` (string) 必須、一時停止イベントで返されたフォームトークン + + ### リクエストボディ + - `inputs` (object) 必須、フォーム項目の key/value + - `action` (string) 必須、`user_actions` から選択したアクション ID + - `user` (string) 必須、エンドユーザー識別子 + + ### 応答 + 成功時は空オブジェクトを返します。 + + ### エラー + - 400, `invalid_form_data`, 送信データがフォームスキーマに一致しない + - 404, フォームが存在しない、または現在のアプリに属していない + - 412, `human_input_form_submitted`, 既に送信済み + - 412, `human_input_form_expired`, 期限切れ + + + + + + ```json {{ title: '応答' }} + {} + ``` + + + + +--- + + + + + Human Input フォーム送信後に、ワークフロー再開後のイベントを継続受信します。 + + このエンドポイントは `text/event-stream` を返し、完了までイベントを購読できます。 + + ### パス + - `task_id` (string) 必須、workflow 実行 ID(`workflow_run_id`) + + ### クエリ + - `user` (string) 必須、エンドユーザー識別子 + - `include_state_snapshot` (bool) 任意、`true` の場合は永続化済み状態スナップショットを先に再生してからリアルタイムイベントへ移行 + - `continue_on_pause` (bool) 任意、`true` にすると `workflow_paused` イベントをまたいでもストリームを維持し、`workflow_finished` で終了します + + ### 応答 + Server-Sent Events ストリーム(`text/event-stream`)。 + 主なイベントは 
`workflow_paused`、`node_started`、`node_finished`、`human_input_form_filled`、`human_input_form_timeout`、`workflow_finished` です。 + 呼び出し時点でワークフローがすでに完了している場合、このエンドポイントは完了イベントを 1 件だけ即座に返します。 + + + + + + ```streaming {{ title: '応答' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human 
Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: 
{"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 
filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- + ### 阻塞模式 + 阻塞模式可能返回普通聊天响应,也可能返回暂停中的工作流响应。 + + 当 Advanced Chat 因 Human-in-the-Loop 暂停时,`event` 会变为 `workflow_paused`。 + 响应仍然包含 `message_id`、`conversation_id`、`answer` 和 `workflow_run_id`,并且 `data` 中会新增 `paused_nodes` 和 `reasons`。 + 对于 `human_input_required`,每个 reason 都会包含 `form_id` 和 `expiration_time`。 + ```json {{ title: 'Response' }} { @@ -295,6 +319,83 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' } ``` + + ```json {{ title: 'Paused Response Example' }} + { + "event": "workflow_paused", + "task_id": "8a9cbfcf-e7e0-4b17-aeef-24de57a2659a", + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "mode": "advanced-chat", + "answer": "", + 
"metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "data": { + "id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "mode": "advanced-chat", + "conversation_id": "098e19be-356a-435d-9ec3-a406f4f1a97a", + "message_id": "31714374-88cb-485f-9fa4-e3ab2a9ed95e", + "workflow_run_id": "7a4d6509-8a65-4c7d-a4fd-cf081dcf169f", + "answer": "", + "metadata": { + "annotation_reply": null, + "retriever_resources": [], + "usage": null + }, + "created_at": 1776074715, + "paused_nodes": [ + "1775724080699" + ], + "reasons": [ + { + "form_id": "019d864d-6f55-752c-9f4c-feee67508d5b", + "form_content": "this is form 2:\n\n{{#$output.some_field_2#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field_2", + "default": { + "type": "constant", + "selector": [], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "yes", + "button_style": "default" + }, + { + "id": "reject", + "title": "no", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775724080699", + "node_title": "Human Input 2", + "resolved_default_values": {}, + "form_token": "0dvwTdpTFXgCZmAo2FoiJ5", + "type": "human_input_required", + "expiration_time": 1776333914 + } + ], + "status": "paused", + "elapsed_time": 0.034081, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + ### 流式模式 ```streaming {{ title: 'Response' }} @@ -313,6 +414,220 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + 流式模式同样可能因为 Human-in-the-Loop 而暂停。此时 SSE 流会先返回 `human_input_required`,随后返回 `workflow_paused`。 + + + ```streaming {{ title: 'Paused Streaming Response Example' }} + 
event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + 
"sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", 
+ "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + @@ -572,6 +887,198 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' --- + + + + 通过 `form_token` 获取待处理的 Human-in-the-Loop 表单。 + + 当流式事件返回 `human_input_required`(包含 `form_token`)时,可调用此接口拉取表单详情。 + + ### Path + - `form_token` (string) 必填,暂停事件返回的表单 token + + ### Response + - `form_content` (string) 已渲染的表单内容(markdown/plain text) + - `inputs` (array[object]) 表单输入项定义 + - `resolved_default_values` (object) 已解析的默认值(字符串) + - `user_actions` (array[object]) 操作按钮列表 + - `expiration_time` (timestamp) 表单过期时间(Unix 秒) + + ### Errors + - 404,表单不存在或不属于当前应用 + - 412,`human_input_form_submitted`,表单已被提交 + - 412,`human_input_form_expired`,表单已过期 + + + + + + ```json {{ title: 'Response' }} + { + "form_content": "请确认最终结果:{{#$output.answer#}}", + "inputs": [ + { + "label": "答案", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "初始值" + }, + "user_actions": [ + { "id": "approve", "title": "通过", "button_style": 
"primary" }, + { "id": "reject", "title": "拒绝", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + 提交待处理的 Human-in-the-Loop 表单。 + + ### Path + - `form_token` (string) 必填,暂停事件返回的表单 token + + ### Request Body + - `inputs` (object) 必填,表单字段的 key/value + - `action` (string) 必填,从 `user_actions` 中选择的动作 ID + - `user` (string) 必填,终端用户标识 + + ### Response + 成功时返回空对象。 + + ### Errors + - 400,`invalid_form_data`,提交数据与表单 schema 不匹配 + - 404,表单不存在或不属于当前应用 + - 412,`human_input_form_submitted`,表单已被提交 + - 412,`human_input_form_expired`,表单已过期 + + + + + + ```json {{ title: 'Response' }} + {} + ``` + + + + +--- + + + + + 在提交人工输入表单后,继续订阅工作流后续执行事件。 + + 返回 `text/event-stream`,可持续接收直到工作流结束。 + + ### Path + - `task_id` (string) 必填,workflow 运行 ID(`workflow_run_id`) + + ### Query + - `user` (string) 必填,终端用户标识 + - `include_state_snapshot` (bool) 可选,设为 `true` 时会先回放持久化状态快照,再继续实时事件 + - `continue_on_pause` (bool) 可选,设为 `true` 时,流会在 `workflow_paused` 事件之间保持连接,直到 `workflow_finished` 才结束 + + ### Response + Server-Sent Events 流(`text/event-stream`)。 + 常见事件包括 `workflow_paused`、`node_started`、`node_finished`、`human_input_form_filled`、`human_input_form_timeout`、`workflow_finished`。 + 如果调用该接口时工作流已经结束,服务端会立即返回单个完成事件。 + + + + + + ```streaming {{ title: 'Response' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: 
{"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: 
{"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: {"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: 
{"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: 
{"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- + ### Blocking Mode + Blocking mode can return either a completed workflow result or a paused workflow result. + + When execution pauses for Human-in-the-Loop, the response still includes `workflow_run_id` and `task_id`, but `data.status` becomes `paused`. + The paused payload also includes `paused_nodes` and `reasons`. For `human_input_required`, each reason contains the `form_id` and its `expiration_time`. + ```json {{ title: 'Response' }} { @@ -236,6 +259,70 @@ Workflow applications offers non-session support and is ideal for translation, a } ``` + + ```json {{ title: 'Paused Response Example' }} + { + "task_id": "3938b985-f4c6-4806-87b6-215e0aca9d81", + "workflow_run_id": "4a80f375-682b-49c5-b199-e950aac4968f", + "data": { + "id": "4a80f375-682b-49c5-b199-e950aac4968f", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "status": "paused", + "outputs": {}, + "error": null, + "elapsed_time": 0.035667, + "total_tokens": 0, + "total_steps": 2, + "created_at": 1776074783, + "finished_at": null, + "paused_nodes": [ + "1775717346519" + ], + "reasons": [ + { + "form_id": "019d864e-7a36-74a2-b94e-e5660c47f5a7", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + 
"button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "4a80f375-682b-49c5-b199-e950aac4968f" + }, + "form_token": "SZwvfmL47fTIsZynP2Jr9i", + "type": "human_input_required", + "expiration_time": 1776333983 + } + ] + } + } + ``` + ### Streaming Mode ```streaming {{ title: 'Response' }} @@ -247,6 +334,220 @@ Workflow applications offers non-session support and is ideal for translation, a data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + Streaming mode can also pause for Human-in-the-Loop. In that case, the SSE stream emits `human_input_required` first and then `workflow_paused`. + + + ```streaming {{ title: 'Paused Streaming Response Example' }} + event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + 
"inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", 
+ "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 
+ } + } + ``` + ```json {{ title: 'File upload sample code' }} import requests @@ -457,6 +758,24 @@ Workflow applications offers non-session support and is ideal for translation, a - `total_price` (decimal) optional total cost - `currency` (string) optional currency, such as `USD` / `RMB` - `created_at` (timestamp) timestamp of start, e.g., 1705395332 + - `event: human_input_required` Workflow paused and requires Human-in-the-Loop input + - `task_id` (string) Task ID, used for request tracking + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `human_input_required` + - `data` (object) detail + - `form_id` (string) Human input form ID + - `node_id` (string) Human input node ID + - `node_title` (string) Human input node title + - `form_content` (string) Rendered form content + - `inputs` (array[object]) Input field definitions + - `actions` (array[object]) User action buttons + - `id` (string) Action ID + - `title` (string) Button text + - `button_style` (string) Button style + - `display_in_ui` (bool) Whether this form should be shown in UI + - `form_token` (string) Token used by `/form/human_input/:form_token` APIs + - `resolved_default_values` (object) Runtime-resolved default values + - `expiration_time` (timestamp) Form expiration time (Unix seconds) - `event: workflow_finished` workflow execution finished, success and failure are different states in the same event - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API - `workflow_run_id` (string) Unique ID of workflow execution @@ -666,6 +985,198 @@ Workflow applications offers non-session support and is ideal for translation, a --- + + + + Retrieve a pending Human-in-the-Loop form by `form_token`. + + Use this endpoint when a workflow pauses with `human_input_required` and returns a `form_token`. + + ### Path + - `form_token` (string) Required, token returned by the pause event. 
+ + ### Response + - `form_content` (string) Rendered form content (markdown/plain text) + - `inputs` (array[object]) Form input definitions + - `resolved_default_values` (object) Default values resolved to strings + - `user_actions` (array[object]) Action buttons + - `expiration_time` (timestamp) Form expiration time (Unix seconds) + + ### Errors + - 404, form not found or does not belong to current app + - 412, `human_input_form_submitted`, form already submitted + - 412, `human_input_form_expired`, form expired + + + + + + ```json {{ title: 'Response' }} + { + "form_content": "Please confirm the final answer: {{#$output.answer#}}", + "inputs": [ + { + "label": "Answer", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "Initial value" + }, + "user_actions": [ + { "id": "approve", "title": "Approve", "button_style": "primary" }, + { "id": "reject", "title": "Reject", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + Submit a pending Human-in-the-Loop form. + + ### Path + - `form_token` (string) Required, token returned by the pause event. + + ### Request Body + - `inputs` (object) Required, key/value pairs for form fields. + - `action` (string) Required, selected action ID from `user_actions`. + - `user` (string) Required, end-user identifier. + + ### Response + Returns an empty object on success. + + ### Errors + - 400, `invalid_form_data`, submitted data does not match the form schema + - 404, form not found or does not belong to current app + - 412, `human_input_form_submitted`, form already submitted + - 412, `human_input_form_expired`, form expired + + + + + + ```json {{ title: 'Response' }} + {} + ``` + + + + +--- + + + + + Continue receiving workflow events after submitting a human input form. + + This endpoint returns `text/event-stream` and can be used to observe the resumed run until completion. 
+ + ### Path + - `task_id` (string) Required, workflow run ID (`workflow_run_id`). + + ### Query + - `user` (string) Required, end-user identifier. + - `include_state_snapshot` (bool) Optional, set to `true` to replay from persisted state snapshot before continuing with live events. + - `continue_on_pause` (bool) Optional, set to `true` to keep the stream open across `workflow_paused` events until `workflow_finished`. + + ### Response + Server-Sent Events stream (`text/event-stream`). + Typical events include `workflow_paused`, `node_started`, `node_finished`, `human_input_form_filled`, `human_input_form_timeout`, and `workflow_finished`. + If the workflow has already finished when you call this endpoint, the server returns a single finished event immediately. + + + + + + ```streaming {{ title: 'Response' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: 
{"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 
1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: {"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 
filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- + ### ブロッキングモード + ブロッキングモードでは、完了済みのワークフロー結果、または一時停止中のワークフロー結果のいずれかが返されます。 + + Human-in-the-Loop で実行が一時停止した場合も、レスポンスには `workflow_run_id` と `task_id` 
が含まれますが、`data.status` は `paused` になります。 + 一時停止レスポンスには `paused_nodes` と `reasons` も含まれます。`human_input_required` の各 reason には `form_id` と `expiration_time` が含まれます。 + ```json {{ title: '応答' }} { @@ -236,6 +259,70 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' } ``` + + ```json {{ title: '一時停止レスポンス例' }} + { + "task_id": "3938b985-f4c6-4806-87b6-215e0aca9d81", + "workflow_run_id": "4a80f375-682b-49c5-b199-e950aac4968f", + "data": { + "id": "4a80f375-682b-49c5-b199-e950aac4968f", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "status": "paused", + "outputs": {}, + "error": null, + "elapsed_time": 0.035667, + "total_tokens": 0, + "total_steps": 2, + "created_at": 1776074783, + "finished_at": null, + "paused_nodes": [ + "1775717346519" + ], + "reasons": [ + { + "form_id": "019d864e-7a36-74a2-b94e-e5660c47f5a7", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "4a80f375-682b-49c5-b199-e950aac4968f" + }, + "form_token": "SZwvfmL47fTIsZynP2Jr9i", + "type": "human_input_required", + "expiration_time": 1776333983 + } + ] + } + } + ``` + ### ストリーミングモード ```streaming {{ title: '応答' }} @@ -247,6 +334,220 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + 
ストリーミングモードでも Human-in-the-Loop により一時停止する場合があります。その場合、SSE ストリームではまず `human_input_required` が送られ、その後に `workflow_paused` が送られます。 + + + ```streaming {{ title: '一時停止ストリーミングレスポンス例' }} + event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + 
"sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": 
"a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + ```json {{ title: 'ファイルアップロードのサンプルコード' }} import requests @@ -452,6 +753,24 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' - `total_price` (decimal) オプション 総費用 - `currency` (string) オプション 通貨、例:`USD` / `RMB` - `created_at` (timestamp) 開始時間 + - `event: human_input_required` ワークフローが一時停止し、Human-in-the-Loop 入力が必要 + - `task_id` (string) タスク ID、リクエスト追跡に使用 + - `workflow_run_id` (string) ワークフロー実行 ID + - `event` (string) `human_input_required` に固定 + - `data` (object) 詳細内容 + - `form_id` (string) ヒューマン入力フォーム ID + - `node_id` (string) Human Input ノード ID + - `node_title` (string) Human Input ノードタイトル + - `form_content` (string) レンダリング済みフォーム内容 + - `inputs` (array[object]) フォーム入力項目の定義 + - `actions` (array[object]) ユーザーが選択できるアクションボタン + - `id` (string) アクション ID + - `title` (string) ボタンラベル + - `button_style` (string) 
ボタンスタイル + - `display_in_ui` (bool) UI にこのフォームを表示するかどうか + - `form_token` (string) `/form/human_input/:form_token` API で使用するトークン + - `resolved_default_values` (object) 実行時に解決されたデフォルト値 + - `expiration_time` (timestamp) フォームの有効期限(Unix 秒) - `event: workflow_finished` ワークフロー実行終了、成功と失敗は同じイベント内の異なる状態 - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 - `workflow_run_id` (string) ワークフロー実行ID @@ -661,6 +980,198 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' --- + + + + `form_token` から保留中の Human-in-the-Loop フォームを取得します。 + + Workflow が `human_input_required`(`form_token` を含む)で一時停止した際に使用します。 + + ### パス + - `form_token` (string) 必須、一時停止イベントで返されたフォームトークン + + ### 応答 + - `form_content` (string) レンダリング済みフォーム内容(markdown/plain text) + - `inputs` (array[object]) 入力項目定義 + - `resolved_default_values` (object) 解決済みデフォルト値(文字列) + - `user_actions` (array[object]) アクションボタン一覧 + - `expiration_time` (timestamp) フォーム有効期限(Unix 秒) + + ### エラー + - 404, フォームが存在しない、または現在のアプリに属していない + - 412, `human_input_form_submitted`, 既に送信済み + - 412, `human_input_form_expired`, 期限切れ + + + + + + ```json {{ title: '応答' }} + { + "form_content": "最終回答を確認してください: {{#$output.answer#}}", + "inputs": [ + { + "label": "回答", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "初期値" + }, + "user_actions": [ + { "id": "approve", "title": "承認", "button_style": "primary" }, + { "id": "reject", "title": "却下", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + 保留中の Human-in-the-Loop フォームを送信します。 + + ### パス + - `form_token` (string) 必須、一時停止イベントで返されたフォームトークン + + ### リクエストボディ + - `inputs` (object) 必須、フォーム項目の key/value + - `action` (string) 必須、`user_actions` から選択したアクション ID + - `user` (string) 必須、エンドユーザー識別子 + + ### 応答 + 成功時は空オブジェクトを返します。 + + ### エラー + - 400, `invalid_form_data`, 送信データがフォームスキーマに一致しない + - 404, フォームが存在しない、または現在のアプリに属していない + - 412, `human_input_form_submitted`, 
既に送信済み + - 412, `human_input_form_expired`, 期限切れ + + + + + + ```json {{ title: '応答' }} + {} + ``` + + + + +--- + + + + + Human Input フォーム送信後に、ワークフロー再開後のイベントを継続受信します。 + + このエンドポイントは `text/event-stream` を返し、完了までイベントを購読できます。 + + ### パス + - `task_id` (string) 必須、workflow 実行 ID(`workflow_run_id`) + + ### クエリ + - `user` (string) 必須、エンドユーザー識別子 + - `include_state_snapshot` (bool) 任意、`true` の場合は永続化済み状態スナップショットを先に再生してからリアルタイムイベントへ移行 + - `continue_on_pause` (bool) 任意、`true` にすると `workflow_paused` イベントをまたいでもストリームを維持し、`workflow_finished` で終了します + + ### 応答 + Server-Sent Events ストリーム(`text/event-stream`)。 + 主なイベントは `workflow_paused`、`node_started`、`node_finished`、`human_input_form_filled`、`human_input_form_timeout`、`workflow_finished` です。 + 呼び出し時点でワークフローがすでに完了している場合、このエンドポイントは完了イベントを 1 件だけ即座に返します。 + + + + + + ```streaming {{ title: '応答' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: 
{"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 
1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: {"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 
filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- + ### Blocking Mode + 阻塞模式可能返回已完成的工作流结果,也可能返回暂停中的工作流结果。 + + 当执行因 Human-in-the-Loop 暂停时,响应仍然会包含 `workflow_run_id` 和 `task_id`,但 `data.status` 会变为 
`paused`。 + 暂停响应还会包含 `paused_nodes` 和 `reasons`。对于 `human_input_required`,每个 reason 都会包含 `form_id` 和 `expiration_time`。 + ```json {{ title: 'Response' }} { @@ -226,6 +249,70 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 } ``` + + ```json {{ title: 'Paused Response Example' }} + { + "task_id": "3938b985-f4c6-4806-87b6-215e0aca9d81", + "workflow_run_id": "4a80f375-682b-49c5-b199-e950aac4968f", + "data": { + "id": "4a80f375-682b-49c5-b199-e950aac4968f", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "status": "paused", + "outputs": {}, + "error": null, + "elapsed_time": 0.035667, + "total_tokens": 0, + "total_steps": 2, + "created_at": 1776074783, + "finished_at": null, + "paused_nodes": [ + "1775717346519" + ], + "reasons": [ + { + "form_id": "019d864e-7a36-74a2-b94e-e5660c47f5a7", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "4a80f375-682b-49c5-b199-e950aac4968f" + }, + "form_token": "SZwvfmL47fTIsZynP2Jr9i", + "type": "human_input_required", + "expiration_time": 1776333983 + } + ] + } + } + ``` + ### Streaming Mode ```streaming {{ title: 'Response' }} @@ -237,6 +324,220 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + 流式模式同样可能因为 Human-in-the-Loop 而暂停。此时 SSE 流会先返回 `human_input_required`,随后返回 `workflow_paused`。 + + + 
```streaming {{ title: 'Paused Streaming Response Example' }} + event: ping + + data: { + "event": "workflow_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "created_at": 1776129228, + "reason": "initial" + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "node_finished", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "7d9bb041-5ecb-497f-a674-d8706eed0ab1", + "node_id": "1775717266623", + "node_type": "start", + "title": "User Input", + "index": 1, + "predecessor_node_id": null, + "inputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": "e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "inputs_truncated": false, + "process_data": {}, + "process_data_truncated": false, + "outputs": { + "sys.files": [], + "sys.user_id": "abc-123", + "sys.app_id": "d1074979-f67e-4114-8691-e35878df9a89", + "sys.workflow_id": 
"e46514f1-c008-41ff-94b0-4f33d4b97d36", + "sys.workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "sys.timestamp": 1776129228 + }, + "outputs_truncated": false, + "status": "succeeded", + "error": null, + "elapsed_time": 0.000097, + "execution_metadata": null, + "created_at": 1776129228, + "finished_at": 1776129228, + "files": [], + "iteration_id": null, + "loop_id": null + } + } + + data: { + "event": "node_started", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "id": "c09ff568-1d55-4f0d-9a07-512bcbfeb289", + "node_id": "1775717346519", + "node_type": "human-input", + "title": "Human Input", + "index": 1, + "predecessor_node_id": null, + "inputs": null, + "inputs_truncated": false, + "created_at": 1776129228, + "extras": {}, + "iteration_id": null, + "loop_id": null, + "agent_strategy": null + } + } + + data: { + "event": "human_input_required", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "node_id": "1775717346519", + "node_title": "Human Input", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "expiration_time": 1776388428 + } + } + + data: { + "event": "workflow_paused", + "workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "task_id": "0399c5c2-181b-4493-a78e-1421914e8a25", + "data": { + 
"workflow_run_id": "a4959eb4-c852-4e0c-ac7a-348233f7f345", + "paused_nodes": [ + "1775717346519" + ], + "outputs": {}, + "reasons": [ + { + "form_id": "019d898d-3d80-7105-b920-9899ead4ff3e", + "form_content": "this is form 1:\n{{#$output.some_field#}}\n", + "inputs": [ + { + "type": "paragraph", + "output_variable_name": "some_field", + "default": { + "type": "variable", + "selector": [ + "sys", + "workflow_run_id" + ], + "value": "" + } + } + ], + "actions": [ + { + "id": "approve", + "title": "YES", + "button_style": "default" + }, + { + "id": "reject", + "title": "NO", + "button_style": "default" + } + ], + "display_in_ui": true, + "node_id": "1775717346519", + "node_title": "Human Input", + "resolved_default_values": { + "some_field": "a4959eb4-c852-4e0c-ac7a-348233f7f345" + }, + "form_token": "0Tb1nXYe4hzQUD706nHB4y", + "type": "human_input_required", + "expiration_time": 1776388428 + } + ], + "status": "paused", + "created_at": 1776129228, + "elapsed_time": 0.070478, + "total_tokens": 0, + "total_steps": 2 + } + } + ``` + ```json {{ title: 'File upload sample code' }} import requests @@ -445,6 +746,24 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 - `total_price` (decimal) optional 总费用 - `currency` (string) optional 货币,如 `USD` / `RMB` - `created_at` (timestamp) 开始时间 + - `event: human_input_required` Workflow 已暂停,等待 Human-in-the-Loop 输入 + - `task_id` (string) 任务 ID,用于请求跟踪 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `human_input_required` + - `data` (object) 详细内容 + - `form_id` (string) 人工输入表单 ID + - `node_id` (string) Human Input 节点 ID + - `node_title` (string) Human Input 节点标题 + - `form_content` (string) 渲染后的表单内容 + - `inputs` (array[object]) 表单输入项定义 + - `actions` (array[object]) 用户可选动作按钮 + - `id` (string) 动作 ID + - `title` (string) 按钮文案 + - `button_style` (string) 按钮样式 + - `display_in_ui` (bool) 是否需要在 UI 展示该表单 + - `form_token` (string) 用于 `/form/human_input/:form_token` 接口的令牌 + - `resolved_default_values` (object) 运行时解析后的默认值 + - 
`expiration_time` (timestamp) 表单过期时间(Unix 秒级时间戳) - `event: workflow_finished` workflow 执行结束,成功失败同一事件中不同状态 - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 - `workflow_run_id` (string) workflow 执行 ID @@ -654,6 +973,198 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 --- + + + + 通过 `form_token` 获取待处理的 Human-in-the-Loop 表单。 + + 当 Workflow 在流式事件中返回 `human_input_required`(包含 `form_token`)时,可调用此接口拉取表单详情。 + + ### Path + - `form_token` (string) 必填,暂停事件返回的表单 token + + ### Response + - `form_content` (string) 已渲染的表单内容(markdown/plain text) + - `inputs` (array[object]) 表单输入项定义 + - `resolved_default_values` (object) 已解析的默认值(字符串) + - `user_actions` (array[object]) 操作按钮列表 + - `expiration_time` (timestamp) 表单过期时间(Unix 秒) + + ### Errors + - 404,表单不存在或不属于当前应用 + - 412,`human_input_form_submitted`,表单已被提交 + - 412,`human_input_form_expired`,表单已过期 + + + + + + ```json {{ title: 'Response' }} + { + "form_content": "请确认最终结果:{{#$output.answer#}}", + "inputs": [ + { + "label": "答案", + "type": "text-input", + "required": true, + "output_variable_name": "answer" + } + ], + "resolved_default_values": { + "answer": "初始值" + }, + "user_actions": [ + { "id": "approve", "title": "通过", "button_style": "primary" }, + { "id": "reject", "title": "拒绝", "button_style": "warning" } + ], + "expiration_time": 1735689600 + } + ``` + + + + +--- + + + + + 提交待处理的 Human-in-the-Loop 表单。 + + ### Path + - `form_token` (string) 必填,暂停事件返回的表单 token + + ### Request Body + - `inputs` (object) 必填,表单字段的 key/value + - `action` (string) 必填,从 `user_actions` 中选择的动作 ID + - `user` (string) 必填,终端用户标识 + + ### Response + 成功时返回空对象。 + + ### Errors + - 400,`invalid_form_data`,提交数据与表单 schema 不匹配 + - 404,表单不存在或不属于当前应用 + - 412,`human_input_form_submitted`,表单已被提交 + - 412,`human_input_form_expired`,表单已过期 + + + + + + ```json {{ title: 'Response' }} + {} + ``` + + + + +--- + + + + + 在提交人工输入表单后,继续订阅工作流后续执行事件。 + + 返回 `text/event-stream`,可持续接收直到工作流结束。 + + ### Path + - `task_id` (string) 必填,workflow 运行 ID(`workflow_run_id`) + + ### Query + - `user` (string) 
必填,终端用户标识 + - `include_state_snapshot` (bool) 可选,设为 `true` 时会先回放持久化状态快照,再继续实时事件 + - `continue_on_pause` (bool) 可选,设为 `true` 时,流会在 `workflow_paused` 事件之间保持连接,直到 `workflow_finished` 才结束 + + ### Response + Server-Sent Events 流(`text/event-stream`)。 + 常见事件包括 `workflow_paused`、`node_started`、`node_finished`、`human_input_form_filled`、`human_input_form_timeout`、`workflow_finished`。 + 如果调用该接口时工作流已经结束,服务端会立即返回单个完成事件。 + + + + + + ```streaming {{ title: 'Response' }} + event: ping + + data: {"event":"workflow_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","sys.timestamp":1776087863},"created_at":1776087863,"reason":"initial"}} + + data: {"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"b552d685-1119-4e6a-9a81-e91a23e5324b","node_id":"1775717266623","node_type":"start","title":"User Input","index":1,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"succeeded","error":null,"elapsed_time":0.00032,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: 
{"event":"node_started","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"created_at":1776087863,"extras":{},"iteration_id":null,"loop_id":null}} + + data: {"event":"node_finished","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":2,"predecessor_node_id":null,"inputs":null,"process_data":null,"outputs":null,"status":"paused","error":null,"elapsed_time":0.007381,"execution_metadata":null,"created_at":1776087863,"finished_at":1776087863,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_paused","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","data":{"workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","paused_nodes":["1775717346519"],"outputs":{},"reasons":[{"form_id":"019d8716-0fde-75da-8207-1458ccde76e5","form_content":"this is form 1:\n{{#$output.some_field#}}\n","inputs":[{"type":"paragraph","output_variable_name":"some_field","default":{"type":"variable","selector":["sys","workflow_run_id"],"value":""}}],"actions":[{"id":"approve","title":"YES","button_style":"default"},{"id":"reject","title":"NO","button_style":"default"}],"display_in_ui":true,"node_id":"1775717346519","node_title":"Human Input","resolved_default_values":{"some_field":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"form_token":"n7hFG4ZDYdGcgZ5VDc7EGM","type":"human_input_required"}],"status":"paused","created_at":1776087863,"elapsed_time":0.0,"total_tokens":0,"total_steps":2}} + + data: 
{"event":"workflow_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","inputs":{"sys.files":[],"sys.user_id":"abc-123","sys.app_id":"d1074979-f67e-4114-8691-e35878df9a89","sys.workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","sys.workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c"},"created_at":1776087877,"reason":"resumption"}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"human_input_form_filled","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"node_id":"1775717346519","node_title":"Human Input","rendered_content":"this is form 1:\nfield 1 filled!\n","action_id":"approve","action_text":"YES"}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"8d7e8e01-5159-4089-a4b6-3aa394992cc2","node_id":"1775717346519","node_type":"human-input","title":"Human Input","index":1,"predecessor_node_id":null,"inputs":{},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"some_field":"field 1 filled!","some_field_2":"from bruno with love","__action_id":"approve","__rendered_content":"this is form 1:\nfield 1 
filled!\n"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.004431,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: {"event":"text_chunk","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"text":"field 1 filled!","from_variable_selector":["1775717350710","output"]}} + + data: {"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"6d8fc3cb-19f7-440b-b83e-eed4e847a332","node_id":"1775717350710","node_type":"template-transform","title":"Template","index":1,"predecessor_node_id":null,"inputs":{"some_field":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.264614,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"node_started","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":null,"inputs_truncated":false,"created_at":1776087877,"extras":{},"iteration_id":null,"loop_id":null,"agent_strategy":null}} + + data: 
{"event":"node_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"e88dec7e-aa2c-41f7-8d73-032b749e23f5","node_id":"1775717354177","node_type":"end","title":"Output","index":1,"predecessor_node_id":null,"inputs":{"output":"field 1 filled!"},"inputs_truncated":false,"process_data":{},"process_data_truncated":false,"outputs":{"output":"field 1 filled!"},"outputs_truncated":false,"status":"succeeded","error":null,"elapsed_time":0.00003,"execution_metadata":null,"created_at":1776087877,"finished_at":1776087877,"files":[],"iteration_id":null,"loop_id":null}} + + data: {"event":"workflow_finished","workflow_run_id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","task_id":"1784c3dd-20eb-4919-bd5d-a8d800b74ada","data":{"id":"5d7ef348-e1c1-4f6d-bb9b-62cc2fb2ef3c","workflow_id":"e46514f1-c008-41ff-94b0-4f33d4b97d36","status":"succeeded","outputs":{"output":"field 1 filled!"},"error":null,"elapsed_time":0.364935,"total_tokens":0,"total_steps":5,"created_by":{"id":"7932d34c-dcf4-4fba-b770-f2a9de88c0a0","user":"abc-123"},"created_at":1776087877,"finished_at":1776087877,"exceptions_count":0,"files":[]}} + ``` + + + + +--- +