feat: introduce trigger functionality (#27644)

Signed-off-by: lyzno1 <yuanyouhuilyz@gmail.com>
Co-authored-by: Stream <Stream_2@qq.com>
Co-authored-by: lyzno1 <92089059+lyzno1@users.noreply.github.com>
Co-authored-by: zhsama <torvalds@linux.do>
Co-authored-by: Harry <xh001x@hotmail.com>
Co-authored-by: lyzno1 <yuanyouhuilyz@gmail.com>
Co-authored-by: yessenia <yessenia.contact@gmail.com>
Co-authored-by: hjlarry <hjlarry@163.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: WTW0313 <twwu@dify.ai>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Authored by: Yeuoly
Date: 2025-11-12 17:59:37 +08:00 (committed by GitHub)
Parent: ca7794305b
Commit: b76e17b25d
785 changed files with 41186 additions and 3725 deletions

View File

@@ -0,0 +1,186 @@
"""
Celery tasks for async workflow execution.
These tasks handle workflow execution for different subscription tiers,
routing each tier to its own queue with shared error handling.
"""
from datetime import UTC, datetime
from typing import Any
from celery import shared_task
from sqlalchemy import select
from sqlalchemy.orm import Session, sessionmaker
from configs import dify_config
from core.app.apps.workflow.app_generator import WorkflowAppGenerator
from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.layers.timeslice_layer import TimeSliceLayer
from core.app.layers.trigger_post_layer import TriggerPostLayer
from extensions.ext_database import db
from models.account import Account
from models.enums import CreatorUserRole, WorkflowTriggerStatus
from models.model import App, EndUser, Tenant
from models.trigger import WorkflowTriggerLog
from models.workflow import Workflow
from repositories.sqlalchemy_workflow_trigger_log_repository import SQLAlchemyWorkflowTriggerLogRepository
from services.errors.app import WorkflowNotFoundError
from services.workflow.entities import (
TriggerData,
WorkflowTaskData,
)
from tasks.workflow_cfs_scheduler.cfs_scheduler import AsyncWorkflowCFSPlanEntity, AsyncWorkflowCFSPlanScheduler
from tasks.workflow_cfs_scheduler.entities import AsyncWorkflowQueue, AsyncWorkflowSystemStrategy
@shared_task(queue=AsyncWorkflowQueue.PROFESSIONAL_QUEUE)
def execute_workflow_professional(task_data_dict: dict[str, Any]):
"""Execute workflow for professional tier with highest priority"""
task_data = WorkflowTaskData.model_validate(task_data_dict)
cfs_plan_scheduler_entity = AsyncWorkflowCFSPlanEntity(
queue=AsyncWorkflowQueue.PROFESSIONAL_QUEUE,
schedule_strategy=AsyncWorkflowSystemStrategy,
granularity=dify_config.ASYNC_WORKFLOW_SCHEDULER_GRANULARITY,
)
_execute_workflow_common(
task_data,
AsyncWorkflowCFSPlanScheduler(plan=cfs_plan_scheduler_entity),
cfs_plan_scheduler_entity,
)
@shared_task(queue=AsyncWorkflowQueue.TEAM_QUEUE)
def execute_workflow_team(task_data_dict: dict[str, Any]):
"""Execute workflow for team tier"""
task_data = WorkflowTaskData.model_validate(task_data_dict)
cfs_plan_scheduler_entity = AsyncWorkflowCFSPlanEntity(
queue=AsyncWorkflowQueue.TEAM_QUEUE,
schedule_strategy=AsyncWorkflowSystemStrategy,
granularity=dify_config.ASYNC_WORKFLOW_SCHEDULER_GRANULARITY,
)
_execute_workflow_common(
task_data,
AsyncWorkflowCFSPlanScheduler(plan=cfs_plan_scheduler_entity),
cfs_plan_scheduler_entity,
)
@shared_task(queue=AsyncWorkflowQueue.SANDBOX_QUEUE)
def execute_workflow_sandbox(task_data_dict: dict[str, Any]):
"""Execute workflow for free tier with lower retry limit"""
task_data = WorkflowTaskData.model_validate(task_data_dict)
cfs_plan_scheduler_entity = AsyncWorkflowCFSPlanEntity(
queue=AsyncWorkflowQueue.SANDBOX_QUEUE,
schedule_strategy=AsyncWorkflowSystemStrategy,
granularity=dify_config.ASYNC_WORKFLOW_SCHEDULER_GRANULARITY,
)
_execute_workflow_common(
task_data,
AsyncWorkflowCFSPlanScheduler(plan=cfs_plan_scheduler_entity),
cfs_plan_scheduler_entity,
)
def _execute_workflow_common(
task_data: WorkflowTaskData,
cfs_plan_scheduler: AsyncWorkflowCFSPlanScheduler,
cfs_plan_scheduler_entity: AsyncWorkflowCFSPlanEntity,
):
"""Execute workflow with common logic and trigger log updates."""
# Create a new session for this task
session_factory = sessionmaker(bind=db.engine, expire_on_commit=False)
with session_factory() as session:
trigger_log_repo = SQLAlchemyWorkflowTriggerLogRepository(session)
# Get trigger log
trigger_log = trigger_log_repo.get_by_id(task_data.workflow_trigger_log_id)
if not trigger_log:
# This should not happen, but handle gracefully
return
# Reconstruct execution data from trigger log
trigger_data = TriggerData.model_validate_json(trigger_log.trigger_data)
# Update status to running
trigger_log.status = WorkflowTriggerStatus.RUNNING
trigger_log_repo.update(trigger_log)
session.commit()
start_time = datetime.now(UTC)
try:
# Get app and workflow models
app_model = session.scalar(select(App).where(App.id == trigger_log.app_id))
if not app_model:
raise WorkflowNotFoundError(f"App not found: {trigger_log.app_id}")
workflow = session.scalar(select(Workflow).where(Workflow.id == trigger_log.workflow_id))
if not workflow:
raise WorkflowNotFoundError(f"Workflow not found: {trigger_log.workflow_id}")
user = _get_user(session, trigger_log)
# Execute workflow using WorkflowAppGenerator
generator = WorkflowAppGenerator()
# Prepare args matching AppGenerateService.generate format
args: dict[str, Any] = {"inputs": dict(trigger_data.inputs), "files": list(trigger_data.files)}
# If workflow_id was specified, add it to args
if trigger_data.workflow_id:
args["workflow_id"] = str(trigger_data.workflow_id)
# Execute the workflow with the trigger type
generator.generate(
app_model=app_model,
workflow=workflow,
user=user,
args=args,
invoke_from=InvokeFrom.SERVICE_API,
streaming=False,
call_depth=0,
triggered_from=trigger_data.trigger_from,
root_node_id=trigger_data.root_node_id,
graph_engine_layers=[
TimeSliceLayer(cfs_plan_scheduler),
TriggerPostLayer(cfs_plan_scheduler_entity, start_time, trigger_log.id, session_factory),
],
)
except Exception as e:
# Calculate elapsed time for failed execution
elapsed_time = (datetime.now(UTC) - start_time).total_seconds()
# Update trigger log with failure
trigger_log.status = WorkflowTriggerStatus.FAILED
trigger_log.error = str(e)
trigger_log.finished_at = datetime.now(UTC)
trigger_log.elapsed_time = elapsed_time
trigger_log_repo.update(trigger_log)
# Final failure - no retry logic (simplified like RAG tasks)
session.commit()
def _get_user(session: Session, trigger_log: WorkflowTriggerLog) -> Account | EndUser:
"""Compose user from trigger log"""
tenant = session.scalar(select(Tenant).where(Tenant.id == trigger_log.tenant_id))
if not tenant:
raise ValueError(f"Tenant not found: {trigger_log.tenant_id}")
# Get user from trigger log
if trigger_log.created_by_role == CreatorUserRole.ACCOUNT:
user = session.scalar(select(Account).where(Account.id == trigger_log.created_by))
if user:
user.current_tenant = tenant
else: # CreatorUserRole.END_USER
user = session.scalar(select(EndUser).where(EndUser.id == trigger_log.created_by))
if not user:
raise ValueError(f"User not found: {trigger_log.created_by} (role: {trigger_log.created_by_role})")
return user
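A producer enqueues one of these tier tasks with a JSON-serializable WorkflowTaskData. A minimal sketch, assuming the module path and that workflow_trigger_log_id (the one field this file reads) is enough to construct the model:

# Hypothetical producer-side sketch; module path and constructor fields are assumptions.
from services.workflow.entities import WorkflowTaskData
from tasks.async_workflow_tasks import execute_workflow_sandbox  # assumed path

task_data = WorkflowTaskData(workflow_trigger_log_id=trigger_log.id)
# Celery serializes the argument to JSON; the task re-validates it via model_validate.
execute_workflow_sandbox.delay(task_data.model_dump(mode="json"))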

View File

@ -17,6 +17,7 @@ from models import (
AppDatasetJoin,
AppMCPServer,
AppModelConfig,
AppTrigger,
Conversation,
EndUser,
InstalledApp,
@ -30,8 +31,10 @@ from models import (
Site,
TagBinding,
TraceAppConfig,
WorkflowSchedulePlan,
)
from models.tools import WorkflowToolProvider
from models.trigger import WorkflowPluginTrigger, WorkflowTriggerLog, WorkflowWebhookTrigger
from models.web import PinnedConversation, SavedMessage
from models.workflow import (
ConversationVariable,
@@ -69,6 +72,11 @@ def remove_app_and_related_data_task(self, tenant_id: str, app_id: str):
_delete_trace_app_configs(tenant_id, app_id)
_delete_conversation_variables(app_id=app_id)
_delete_draft_variables(app_id)
_delete_app_triggers(tenant_id, app_id)
_delete_workflow_plugin_triggers(tenant_id, app_id)
_delete_workflow_webhook_triggers(tenant_id, app_id)
_delete_workflow_schedule_plans(tenant_id, app_id)
_delete_workflow_trigger_logs(tenant_id, app_id)
end_at = time.perf_counter()
logger.info(click.style(f"App and related data deleted: {app_id} latency: {end_at - start_at}", fg="green"))
@@ -484,6 +492,72 @@ def _delete_draft_variable_offload_data(conn, file_ids: list[str]) -> int:
return files_deleted
def _delete_app_triggers(tenant_id: str, app_id: str):
def del_app_trigger(trigger_id: str):
db.session.query(AppTrigger).where(AppTrigger.id == trigger_id).delete(synchronize_session=False)
_delete_records(
"""select id from app_triggers where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
{"tenant_id": tenant_id, "app_id": app_id},
del_app_trigger,
"app trigger",
)
def _delete_workflow_plugin_triggers(tenant_id: str, app_id: str):
def del_plugin_trigger(trigger_id: str):
db.session.query(WorkflowPluginTrigger).where(WorkflowPluginTrigger.id == trigger_id).delete(
synchronize_session=False
)
_delete_records(
"""select id from workflow_plugin_triggers where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
{"tenant_id": tenant_id, "app_id": app_id},
del_plugin_trigger,
"workflow plugin trigger",
)
def _delete_workflow_webhook_triggers(tenant_id: str, app_id: str):
def del_webhook_trigger(trigger_id: str):
db.session.query(WorkflowWebhookTrigger).where(WorkflowWebhookTrigger.id == trigger_id).delete(
synchronize_session=False
)
_delete_records(
"""select id from workflow_webhook_triggers where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
{"tenant_id": tenant_id, "app_id": app_id},
del_webhook_trigger,
"workflow webhook trigger",
)
def _delete_workflow_schedule_plans(tenant_id: str, app_id: str):
def del_schedule_plan(plan_id: str):
db.session.query(WorkflowSchedulePlan).where(WorkflowSchedulePlan.id == plan_id).delete(
synchronize_session=False
)
_delete_records(
"""select id from workflow_schedule_plans where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
{"tenant_id": tenant_id, "app_id": app_id},
del_schedule_plan,
"workflow schedule plan",
)
def _delete_workflow_trigger_logs(tenant_id: str, app_id: str):
def del_trigger_log(log_id: str):
db.session.query(WorkflowTriggerLog).where(WorkflowTriggerLog.id == log_id).delete(synchronize_session=False)
_delete_records(
"""select id from workflow_trigger_logs where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
{"tenant_id": tenant_id, "app_id": app_id},
del_trigger_log,
"workflow trigger log",
)
def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: str) -> None:
while True:
with db.engine.begin() as conn:
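The hunk ends before the body of _delete_records; judging from its call sites above, it pages through ids 1000 at a time until the select comes back empty. A sketch of that inferred pattern (not the actual implementation):

import sqlalchemy as sa
from collections.abc import Callable
from extensions.ext_database import db

def _delete_records_sketch(query_sql: str, params: dict, delete_func: Callable, name: str) -> None:
    # Inferred batching loop: fetch one 1000-id page, delete each row, repeat until empty.
    while True:
        with db.engine.begin() as conn:
            rows = conn.execute(sa.text(query_sql), params).fetchall()
        if not rows:
            break
        for (record_id,) in rows:
            delete_func(str(record_id))
        db.session.commit()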

View File

@@ -0,0 +1,492 @@
"""
Celery tasks for async trigger processing.
These tasks handle trigger workflow execution asynchronously
to avoid blocking the main request thread.
"""
import json
import logging
from collections.abc import Mapping, Sequence
from datetime import UTC, datetime
from typing import Any
from celery import shared_task
from sqlalchemy import func, select
from sqlalchemy.orm import Session
from core.app.entities.app_invoke_entities import InvokeFrom
from core.plugin.entities.plugin_daemon import CredentialType
from core.plugin.entities.request import TriggerInvokeEventResponse
from core.plugin.impl.exc import PluginInvokeError
from core.trigger.debug.event_bus import TriggerDebugEventBus
from core.trigger.debug.events import PluginTriggerDebugEvent, build_plugin_pool_key
from core.trigger.entities.entities import TriggerProviderEntity
from core.trigger.provider import PluginTriggerProviderController
from core.trigger.trigger_manager import TriggerManager
from core.workflow.enums import NodeType, WorkflowExecutionStatus
from core.workflow.nodes.trigger_plugin.entities import TriggerEventNodeData
from extensions.ext_database import db
from models.enums import AppTriggerType, CreatorUserRole, WorkflowRunTriggeredFrom, WorkflowTriggerStatus
from models.model import EndUser
from models.provider_ids import TriggerProviderID
from models.trigger import TriggerSubscription, WorkflowPluginTrigger, WorkflowTriggerLog
from models.workflow import Workflow, WorkflowAppLog, WorkflowAppLogCreatedFrom, WorkflowRun
from services.async_workflow_service import AsyncWorkflowService
from services.end_user_service import EndUserService
from services.trigger.trigger_provider_service import TriggerProviderService
from services.trigger.trigger_request_service import TriggerHttpRequestCachingService
from services.trigger.trigger_subscription_operator_service import TriggerSubscriptionOperatorService
from services.workflow.entities import PluginTriggerData, PluginTriggerDispatchData, PluginTriggerMetadata
from services.workflow.queue_dispatcher import QueueDispatcherManager
logger = logging.getLogger(__name__)
# Dedicated queue for dispatching triggered workflows
TRIGGER_QUEUE = "triggered_workflow_dispatcher"
def dispatch_trigger_debug_event(
events: list[str],
user_id: str,
timestamp: int,
request_id: str,
subscription: TriggerSubscription,
) -> int:
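"""Fan out each event to any active plugin-trigger debug sessions and return the number of sessions notified."""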
debug_dispatched = 0
try:
for event_name in events:
pool_key: str = build_plugin_pool_key(
name=event_name,
tenant_id=subscription.tenant_id,
subscription_id=subscription.id,
provider_id=subscription.provider_id,
)
trigger_debug_event: PluginTriggerDebugEvent = PluginTriggerDebugEvent(
timestamp=timestamp,
user_id=user_id,
name=event_name,
request_id=request_id,
subscription_id=subscription.id,
provider_id=subscription.provider_id,
)
debug_dispatched += TriggerDebugEventBus.dispatch(
tenant_id=subscription.tenant_id,
event=trigger_debug_event,
pool_key=pool_key,
)
logger.debug(
"Trigger debug dispatched %d sessions to pool %s for event %s for subscription %s provider %s",
debug_dispatched,
pool_key,
event_name,
subscription.id,
subscription.provider_id,
)
return debug_dispatched
except Exception:
logger.exception("Failed to dispatch to debug sessions")
return 0
def _get_latest_workflows_by_app_ids(
session: Session, subscribers: Sequence[WorkflowPluginTrigger]
) -> Mapping[str, Workflow]:
"""Get the latest workflows by app_ids"""
workflow_query = (
select(Workflow.app_id, func.max(Workflow.created_at).label("max_created_at"))
.where(
Workflow.app_id.in_({t.app_id for t in subscribers}),
Workflow.version != Workflow.VERSION_DRAFT,
)
.group_by(Workflow.app_id)
.subquery()
)
workflows = session.scalars(
select(Workflow).join(
workflow_query,
(Workflow.app_id == workflow_query.c.app_id) & (Workflow.created_at == workflow_query.c.max_created_at),
)
).all()
return {w.app_id: w for w in workflows}
def _record_trigger_failure_log(
*,
session: Session,
workflow: Workflow,
plugin_trigger: WorkflowPluginTrigger,
subscription: TriggerSubscription,
trigger_metadata: PluginTriggerMetadata,
end_user: EndUser | None,
error_message: str,
event_name: str,
request_id: str,
) -> None:
"""
Persist a workflow run, workflow app log, and trigger log entry for failed trigger invocations.
"""
now = datetime.now(UTC)
if end_user:
created_by_role = CreatorUserRole.END_USER
created_by = end_user.id
else:
created_by_role = CreatorUserRole.ACCOUNT
created_by = subscription.user_id
failure_inputs = {
"event_name": event_name,
"subscription_id": subscription.id,
"request_id": request_id,
"plugin_trigger_id": plugin_trigger.id,
}
workflow_run = WorkflowRun(
tenant_id=workflow.tenant_id,
app_id=workflow.app_id,
workflow_id=workflow.id,
type=workflow.type,
triggered_from=WorkflowRunTriggeredFrom.PLUGIN.value,
version=workflow.version,
graph=workflow.graph,
inputs=json.dumps(failure_inputs),
status=WorkflowExecutionStatus.FAILED.value,
outputs="{}",
error=error_message,
elapsed_time=0.0,
total_tokens=0,
total_steps=0,
created_by_role=created_by_role.value,
created_by=created_by,
created_at=now,
finished_at=now,
exceptions_count=0,
)
session.add(workflow_run)
session.flush()
workflow_app_log = WorkflowAppLog(
tenant_id=workflow.tenant_id,
app_id=workflow.app_id,
workflow_id=workflow.id,
workflow_run_id=workflow_run.id,
created_from=WorkflowAppLogCreatedFrom.SERVICE_API.value,
created_by_role=created_by_role.value,
created_by=created_by,
)
session.add(workflow_app_log)
dispatcher = QueueDispatcherManager.get_dispatcher(subscription.tenant_id)
queue_name = dispatcher.get_queue_name()
trigger_data = PluginTriggerData(
app_id=plugin_trigger.app_id,
tenant_id=subscription.tenant_id,
workflow_id=workflow.id,
root_node_id=plugin_trigger.node_id,
inputs={},
trigger_metadata=trigger_metadata,
plugin_id=subscription.provider_id,
endpoint_id=subscription.endpoint_id,
)
trigger_log = WorkflowTriggerLog(
tenant_id=workflow.tenant_id,
app_id=workflow.app_id,
workflow_id=workflow.id,
workflow_run_id=workflow_run.id,
root_node_id=plugin_trigger.node_id,
trigger_metadata=trigger_metadata.model_dump_json(),
trigger_type=AppTriggerType.TRIGGER_PLUGIN,
trigger_data=trigger_data.model_dump_json(),
inputs=json.dumps({}),
status=WorkflowTriggerStatus.FAILED,
error=error_message,
queue_name=queue_name,
retry_count=0,
created_by_role=created_by_role.value,
created_by=created_by,
triggered_at=now,
finished_at=now,
elapsed_time=0.0,
total_tokens=0,
)
session.add(trigger_log)
session.commit()
def dispatch_triggered_workflow(
user_id: str,
subscription: TriggerSubscription,
event_name: str,
request_id: str,
) -> int:
"""Process triggered workflows.
Args:
subscription: The trigger subscription
event: The trigger entity that was activated
request_id: The ID of the stored request in storage system
"""
request = TriggerHttpRequestCachingService.get_request(request_id)
payload = TriggerHttpRequestCachingService.get_payload(request_id)
subscribers: list[WorkflowPluginTrigger] = TriggerSubscriptionOperatorService.get_subscriber_triggers(
tenant_id=subscription.tenant_id, subscription_id=subscription.id, event_name=event_name
)
if not subscribers:
logger.warning(
"No workflows found for trigger event '%s' in subscription '%s'",
event_name,
subscription.id,
)
return 0
dispatched_count = 0
provider_controller: PluginTriggerProviderController = TriggerManager.get_trigger_provider(
tenant_id=subscription.tenant_id, provider_id=TriggerProviderID(subscription.provider_id)
)
trigger_entity: TriggerProviderEntity = provider_controller.entity
with Session(db.engine) as session:
workflows: Mapping[str, Workflow] = _get_latest_workflows_by_app_ids(session, subscribers)
end_users: Mapping[str, EndUser] = EndUserService.create_end_user_batch(
type=InvokeFrom.TRIGGER,
tenant_id=subscription.tenant_id,
app_ids=[plugin_trigger.app_id for plugin_trigger in subscribers],
user_id=user_id,
)
for plugin_trigger in subscribers:
# Get workflow from mapping
workflow: Workflow | None = workflows.get(plugin_trigger.app_id)
if not workflow:
logger.error(
"Workflow not found for app %s",
plugin_trigger.app_id,
)
continue
# Find the trigger node in the workflow
event_node = None
for node_id, node_config in workflow.walk_nodes(NodeType.TRIGGER_PLUGIN):
if node_id == plugin_trigger.node_id:
event_node = node_config
break
if not event_node:
logger.error("Trigger event node not found for app %s", plugin_trigger.app_id)
continue
# Build trigger metadata and invoke the trigger event
trigger_metadata = PluginTriggerMetadata(
plugin_unique_identifier=provider_controller.plugin_unique_identifier or "",
endpoint_id=subscription.endpoint_id,
provider_id=subscription.provider_id,
event_name=event_name,
icon_filename=trigger_entity.identity.icon or "",
icon_dark_filename=trigger_entity.identity.icon_dark or "",
)
node_data: TriggerEventNodeData = TriggerEventNodeData.model_validate(event_node)
invoke_response: TriggerInvokeEventResponse | None = None
try:
invoke_response = TriggerManager.invoke_trigger_event(
tenant_id=subscription.tenant_id,
user_id=user_id,
provider_id=TriggerProviderID(subscription.provider_id),
event_name=event_name,
parameters=node_data.resolve_parameters(
parameter_schemas=provider_controller.get_event_parameters(event_name=event_name)
),
credentials=subscription.credentials,
credential_type=CredentialType.of(subscription.credential_type),
subscription=subscription.to_entity(),
request=request,
payload=payload,
)
except PluginInvokeError as e:
error_message = e.to_user_friendly_error(plugin_name=trigger_entity.identity.name)
try:
end_user = end_users.get(plugin_trigger.app_id)
_record_trigger_failure_log(
session=session,
workflow=workflow,
plugin_trigger=plugin_trigger,
subscription=subscription,
trigger_metadata=trigger_metadata,
end_user=end_user,
error_message=error_message,
event_name=event_name,
request_id=request_id,
)
except Exception:
logger.exception(
"Failed to record trigger failure log for app %s",
plugin_trigger.app_id,
)
continue
except Exception:
logger.exception(
"Failed to invoke trigger event for app %s",
plugin_trigger.app_id,
)
continue
if invoke_response is not None and invoke_response.cancelled:
logger.info(
"Trigger ignored for app %s with trigger event %s",
plugin_trigger.app_id,
event_name,
)
continue
# Create trigger data for async execution
trigger_data = PluginTriggerData(
app_id=plugin_trigger.app_id,
tenant_id=subscription.tenant_id,
workflow_id=workflow.id,
root_node_id=plugin_trigger.node_id,
plugin_id=subscription.provider_id,
endpoint_id=subscription.endpoint_id,
inputs=invoke_response.variables,
trigger_metadata=trigger_metadata,
)
# Trigger async workflow
try:
end_user = end_users.get(plugin_trigger.app_id)
if not end_user:
raise ValueError(f"End user not found for app {plugin_trigger.app_id}")
AsyncWorkflowService.trigger_workflow_async(session=session, user=end_user, trigger_data=trigger_data)
dispatched_count += 1
logger.info(
"Triggered workflow for app %s with trigger event %s",
plugin_trigger.app_id,
event_name,
)
except Exception:
logger.exception(
"Failed to trigger workflow for app %s",
plugin_trigger.app_id,
)
return dispatched_count
def dispatch_triggered_workflows(
user_id: str,
events: list[str],
subscription: TriggerSubscription,
request_id: str,
) -> int:
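"""Dispatch every event name for the subscription, continuing past per-event failures; returns the total dispatched."""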
dispatched_count = 0
for event_name in events:
try:
dispatched_count += dispatch_triggered_workflow(
user_id=user_id,
subscription=subscription,
event_name=event_name,
request_id=request_id,
)
except Exception:
logger.exception(
"Failed to dispatch trigger '%s' for subscription %s and provider %s. Continuing...",
event_name,
subscription.id,
subscription.provider_id,
)
# Continue processing other triggers even if one fails
continue
logger.info(
"Completed async trigger dispatching: processed %d/%d triggers for subscription %s and provider %s",
dispatched_count,
len(events),
subscription.id,
subscription.provider_id,
)
return dispatched_count
@shared_task(queue=TRIGGER_QUEUE)
def dispatch_triggered_workflows_async(
dispatch_data: Mapping[str, Any],
) -> Mapping[str, Any]:
"""
Dispatch triggers asynchronously.
Args:
endpoint_id: Endpoint ID
provider_id: Provider ID
subscription_id: Subscription ID
timestamp: Timestamp of the event
triggers: List of triggers to dispatch
request_id: Unique ID of the stored request
Returns:
dict: Execution result with status and dispatched trigger count
"""
dispatch_params: PluginTriggerDispatchData = PluginTriggerDispatchData.model_validate(dispatch_data)
user_id = dispatch_params.user_id
tenant_id = dispatch_params.tenant_id
endpoint_id = dispatch_params.endpoint_id
provider_id = dispatch_params.provider_id
subscription_id = dispatch_params.subscription_id
timestamp = dispatch_params.timestamp
events = dispatch_params.events
request_id = dispatch_params.request_id
try:
logger.info(
"Starting trigger dispatching uid=%s, endpoint=%s, events=%s, req_id=%s, sub_id=%s, provider_id=%s",
user_id,
endpoint_id,
events,
request_id,
subscription_id,
provider_id,
)
subscription: TriggerSubscription | None = TriggerProviderService.get_subscription_by_id(
tenant_id=tenant_id,
subscription_id=subscription_id,
)
if not subscription:
logger.error("Subscription not found: %s", subscription_id)
return {"status": "failed", "error": "Subscription not found"}
workflow_dispatched = dispatch_triggered_workflows(
user_id=user_id,
events=events,
subscription=subscription,
request_id=request_id,
)
debug_dispatched = dispatch_trigger_debug_event(
events=events,
user_id=user_id,
timestamp=timestamp,
request_id=request_id,
subscription=subscription,
)
return {
"status": "completed",
"total_count": len(events),
"workflows": workflow_dispatched,
"debug_events": debug_dispatched,
}
except Exception as e:
logger.exception(
"Error in async trigger dispatching for endpoint %s data %s for subscription %s and provider %s",
endpoint_id,
dispatch_data,
subscription_id,
provider_id,
)
return {
"status": "failed",
"error": str(e),
}
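On the producer side, the payload is a serialized PluginTriggerDispatchData; the field names below mirror how dispatch_params is unpacked above, while the enqueue site itself (e.g. a webhook endpoint handler) is an assumption:

import time
from services.workflow.entities import PluginTriggerDispatchData

# Hypothetical enqueue site; the surrounding variables come from the request context.
dispatch_data = PluginTriggerDispatchData(
    user_id=user_id,
    tenant_id=tenant_id,
    endpoint_id=endpoint_id,
    provider_id=provider_id,
    subscription_id=subscription.id,
    timestamp=int(time.time()),
    events=event_names,
    request_id=request_id,
)
dispatch_triggered_workflows_async.delay(dispatch_data.model_dump(mode="json"))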

View File

@@ -0,0 +1,115 @@
import logging
import time
from collections.abc import Mapping
from typing import Any
from celery import shared_task
from sqlalchemy.orm import Session
from core.plugin.entities.plugin_daemon import CredentialType
from core.trigger.utils.locks import build_trigger_refresh_lock_key
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.trigger import TriggerSubscription
from services.trigger.trigger_provider_service import TriggerProviderService
logger = logging.getLogger(__name__)
def _now_ts() -> int:
return int(time.time())
def _load_subscription(session: Session, tenant_id: str, subscription_id: str) -> TriggerSubscription | None:
return session.query(TriggerSubscription).filter_by(tenant_id=tenant_id, id=subscription_id).first()
def _refresh_oauth_if_expired(tenant_id: str, subscription: TriggerSubscription, now: int) -> None:
if (
subscription.credential_expires_at != -1
and int(subscription.credential_expires_at) <= now
and CredentialType.of(subscription.credential_type) == CredentialType.OAUTH2
):
logger.info(
"Refreshing OAuth token: tenant=%s subscription_id=%s expires_at=%s now=%s",
tenant_id,
subscription.id,
subscription.credential_expires_at,
now,
)
try:
result: Mapping[str, Any] = TriggerProviderService.refresh_oauth_token(
tenant_id=tenant_id, subscription_id=subscription.id
)
logger.info(
"OAuth token refreshed: tenant=%s subscription_id=%s result=%s", tenant_id, subscription.id, result
)
except Exception:
logger.exception("OAuth refresh failed: tenant=%s subscription_id=%s", tenant_id, subscription.id)
def _refresh_subscription_if_expired(
tenant_id: str,
subscription: TriggerSubscription,
now: int,
) -> None:
if subscription.expires_at == -1 or int(subscription.expires_at) > now:
logger.debug(
"Subscription not due: tenant=%s subscription_id=%s expires_at=%s now=%s",
tenant_id,
subscription.id,
subscription.expires_at,
now,
)
return
try:
result: Mapping[str, Any] = TriggerProviderService.refresh_subscription(
tenant_id=tenant_id, subscription_id=subscription.id, now=now
)
logger.info(
"Subscription refreshed: tenant=%s subscription_id=%s result=%s",
tenant_id,
subscription.id,
result.get("result"),
)
except Exception:
logger.exception("Subscription refresh failed: tenant=%s id=%s", tenant_id, subscription.id)
@shared_task(queue="trigger_refresh_executor")
def trigger_subscription_refresh(tenant_id: str, subscription_id: str) -> None:
"""Refresh a trigger subscription if needed, guarded by a Redis in-flight lock."""
lock_key: str = build_trigger_refresh_lock_key(tenant_id, subscription_id)
if not redis_client.get(lock_key):
logger.debug("Refresh lock missing, skip: %s", lock_key)
return
logger.info("Begin subscription refresh: tenant=%s id=%s", tenant_id, subscription_id)
try:
now: int = _now_ts()
with Session(db.engine) as session:
subscription: TriggerSubscription | None = _load_subscription(session, tenant_id, subscription_id)
if not subscription:
logger.warning("Subscription not found: tenant=%s id=%s", tenant_id, subscription_id)
return
logger.debug(
"Loaded subscription: tenant=%s id=%s cred_exp=%s sub_exp=%s now=%s",
tenant_id,
subscription.id,
subscription.credential_expires_at,
subscription.expires_at,
now,
)
_refresh_oauth_if_expired(tenant_id=tenant_id, subscription=subscription, now=now)
_refresh_subscription_if_expired(tenant_id=tenant_id, subscription=subscription, now=now)
finally:
try:
redis_client.delete(lock_key)
logger.debug("Lock released: %s", lock_key)
except Exception:
# Best-effort lock cleanup
logger.warning("Failed to release lock: %s", lock_key, exc_info=True)
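Because the task bails out when the lock key is absent, the producer must set the key before enqueueing. A sketch of that handshake; the TTL value and enqueue site are assumptions:

# Hypothetical producer: claim the in-flight lock, then enqueue the refresh.
lock_key = build_trigger_refresh_lock_key(tenant_id, subscription_id)
if redis_client.set(lock_key, "1", nx=True, ex=300):  # NX: only one refresh in flight
    trigger_subscription_refresh.delay(tenant_id, subscription_id)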

View File

@@ -0,0 +1,32 @@
from services.workflow.entities import WorkflowScheduleCFSPlanEntity
from services.workflow.scheduler import CFSPlanScheduler, SchedulerCommand
from tasks.workflow_cfs_scheduler.entities import AsyncWorkflowQueue
class AsyncWorkflowCFSPlanEntity(WorkflowScheduleCFSPlanEntity):
"""
CFS plan entity for async triggered workflows.
"""
queue: AsyncWorkflowQueue
class AsyncWorkflowCFSPlanScheduler(CFSPlanScheduler):
"""
CFS plan scheduler for async triggered workflows.
"""
plan: AsyncWorkflowCFSPlanEntity
def can_schedule(self) -> SchedulerCommand:
"""
Check if the workflow can be scheduled.
"""
if self.plan.queue in [AsyncWorkflowQueue.PROFESSIONAL_QUEUE, AsyncWorkflowQueue.TEAM_QUEUE]:
"""
permitted all paid users to schedule the workflow any time
"""
return SchedulerCommand.NONE
# FIXME: prevent a sandbox user's workflow from staying in the running state forever
return SchedulerCommand.RESOURCE_LIMIT_REACHED
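Tying this back to the worker tasks in the first file, which construct the same plan entity per queue, a minimal usage sketch (imports as in that file):

plan = AsyncWorkflowCFSPlanEntity(
    queue=AsyncWorkflowQueue.SANDBOX_QUEUE,
    schedule_strategy=AsyncWorkflowSystemStrategy,
    granularity=dify_config.ASYNC_WORKFLOW_SCHEDULER_GRANULARITY,
)
scheduler = AsyncWorkflowCFSPlanScheduler(plan=plan)
# Paid queues yield SchedulerCommand.NONE; sandbox yields RESOURCE_LIMIT_REACHED.
command = scheduler.can_schedule()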

View File

@@ -0,0 +1,25 @@
from enum import StrEnum
from configs import dify_config
from services.workflow.entities import WorkflowScheduleCFSPlanEntity
# Determine queue names based on edition
if dify_config.EDITION == "CLOUD":
# Cloud edition: separate queues for different tiers
_professional_queue = "workflow_professional"
_team_queue = "workflow_team"
_sandbox_queue = "workflow_sandbox"
AsyncWorkflowSystemStrategy = WorkflowScheduleCFSPlanEntity.Strategy.TimeSlice
else:
# Community edition: all tiers share the single "workflow" queue
_professional_queue = "workflow"
_team_queue = "workflow"
_sandbox_queue = "workflow"
AsyncWorkflowSystemStrategy = WorkflowScheduleCFSPlanEntity.Strategy.Nop
class AsyncWorkflowQueue(StrEnum):
# Members resolve to edition-specific queue names chosen at import time
PROFESSIONAL_QUEUE = _professional_queue
TEAM_QUEUE = _team_queue
SANDBOX_QUEUE = _sandbox_queue
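One subtlety worth noting: when EDITION != "CLOUD", all three members share the value "workflow", and Python enums turn duplicate values into aliases of the first member. For illustration:

# Community edition: values collapse and the members become enum aliases.
assert AsyncWorkflowQueue.PROFESSIONAL_QUEUE == "workflow"
assert AsyncWorkflowQueue.TEAM_QUEUE is AsyncWorkflowQueue.PROFESSIONAL_QUEUE
# Cloud edition: workflow_professional / workflow_team / workflow_sandbox instead.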

View File

@@ -0,0 +1,60 @@
import logging
from celery import shared_task
from sqlalchemy.orm import sessionmaker
from core.workflow.nodes.trigger_schedule.exc import (
ScheduleExecutionError,
ScheduleNotFoundError,
TenantOwnerNotFoundError,
)
from extensions.ext_database import db
from models.trigger import WorkflowSchedulePlan
from services.async_workflow_service import AsyncWorkflowService
from services.trigger.schedule_service import ScheduleService
from services.workflow.entities import ScheduleTriggerData
logger = logging.getLogger(__name__)
@shared_task(queue="schedule_executor")
def run_schedule_trigger(schedule_id: str) -> None:
"""
Execute a scheduled workflow trigger.
Note: No retry logic is needed; the schedule will fire again at its next interval.
The execution result is tracked via WorkflowTriggerLog.
Raises:
ScheduleNotFoundError: If schedule doesn't exist
TenantOwnerNotFoundError: If no owner/admin for tenant
ScheduleExecutionError: If workflow trigger fails
"""
session_factory = sessionmaker(bind=db.engine, expire_on_commit=False)
with session_factory() as session:
schedule = session.get(WorkflowSchedulePlan, schedule_id)
if not schedule:
raise ScheduleNotFoundError(f"Schedule {schedule_id} not found")
tenant_owner = ScheduleService.get_tenant_owner(session, schedule.tenant_id)
if not tenant_owner:
raise TenantOwnerNotFoundError(f"No owner or admin found for tenant {schedule.tenant_id}")
try:
# Production dispatch: Trigger the workflow normally
response = AsyncWorkflowService.trigger_workflow_async(
session=session,
user=tenant_owner,
trigger_data=ScheduleTriggerData(
app_id=schedule.app_id,
root_node_id=schedule.node_id,
inputs={},
tenant_id=schedule.tenant_id,
),
)
logger.info("Schedule %s triggered workflow: %s", schedule_id, response.workflow_trigger_log_id)
except Exception as e:
raise ScheduleExecutionError(
f"Failed to trigger workflow for schedule {schedule_id}, app {schedule.app_id}"
) from e
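The poller that fires this task is not part of this hunk; presumably something enumerates due WorkflowSchedulePlan rows and enqueues one task per id. A hypothetical sketch:

# Hypothetical producer; the selection of due plans is not shown in this diff.
for plan_id in due_schedule_plan_ids:
    run_schedule_trigger.delay(plan_id)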