Compare commits

..

16 Commits

Author SHA1 Message Date
d5104a4268 test: remove dataset permission mock tests superseded by testcontainers (#34936) 2026-04-11 04:29:39 +00:00
9069c01f9c refactor: replace inline api.model with register_schema_models in billing (#34928)
Co-authored-by: ai-hpc <ai-hpc@users.noreply.github.com>
2026-04-11 04:01:03 +00:00
0ff41a1127 test: remove dataset metadata mock tests superseded by testcontainers (#34931) 2026-04-11 03:37:20 +00:00
7192af41e4 test: remove dataset service update/delete mock tests superseded by testcontainers (#34937) 2026-04-11 00:54:58 +00:00
5ec387b644 refactor: replace inline api.model with Pydantic BaseModel in model_config (#34930)
Co-authored-by: ai-hpc <ai-hpc@users.noreply.github.com>
2026-04-11 00:53:13 +00:00
4be479fa06 refactor(api): type SQLALCHEMY_ENGINE_OPTIONS with TypedDict (#34941) 2026-04-11 00:39:37 +00:00
e0d69204cd refactor(api): type DatasourceInvokeMeta.to_dict with TypedDict (#34940) 2026-04-11 00:39:06 +00:00
f2d6275da4 refactor(api): type get_prompt_template with TypedDict (#34943) 2026-04-11 00:38:16 +00:00
992ac38d0d refactor(api): type ToolInvokeMeta.to_dict with TypedDict (#34942) 2026-04-11 00:37:10 +00:00
f962e61315 chore(deps): bump pypdf from 6.9.2 to 6.10.0 in /api (#34946)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-11 00:36:28 +00:00
b3aebb71ff refactor(api): type Document.to_dict with DocumentDict TypedDict (#34924)
Co-authored-by: bittoby <bittoby@users.noreply.github.com>
2026-04-10 17:36:50 +00:00
98d3bcd079 test: migrate SQLAlchemyWorkflowNodeExecutionRepository tests to testcontainers (#34926)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-04-10 17:35:52 +00:00
1703df5c00 test: add unit tests for workflow components including tools and inspect vars (#34843)
Co-authored-by: CodingOnStar <hanxujiang@dify.com>
2026-04-10 13:11:36 +00:00
674495680d refactor(api): type Redis connection param builder functions with TypedDicts (#34875) 2026-04-10 11:36:39 +00:00
04f5fe5e38 fix: fix outputs share same name var (#34604) 2026-04-10 11:30:21 +00:00
1b7d0bd4e6 chore: should hide change action when node is undeletable (#34592) 2026-04-10 11:29:29 +00:00
294 changed files with 27087 additions and 6110 deletions

View File

@ -1,5 +1,5 @@
import os
from typing import Any, Literal
from typing import Any, Literal, TypedDict
from urllib.parse import parse_qsl, quote_plus
from pydantic import Field, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, computed_field
@ -107,6 +107,17 @@ class KeywordStoreConfig(BaseSettings):
)
class SQLAlchemyEngineOptionsDict(TypedDict):
    """Typed shape of the engine-options mapping built by
    DatabaseConfig.SQLALCHEMY_ENGINE_OPTIONS and handed to SQLAlchemy's
    create_engine().
    """

    pool_size: int  # base number of pooled connections
    max_overflow: int  # extra connections allowed beyond pool_size
    pool_recycle: int  # seconds before a pooled connection is recycled
    pool_pre_ping: bool  # liveness-check connections before handing them out
    connect_args: dict[str, str]  # driver connect args (e.g. postgres "options" string)
    pool_use_lifo: bool  # LIFO checkout order for the pool
    pool_reset_on_return: None  # always None in this config (reset-on-return disabled)
    pool_timeout: int  # seconds to wait for a free pooled connection
class DatabaseConfig(BaseSettings):
# Database type selector
DB_TYPE: Literal["postgresql", "mysql", "oceanbase", "seekdb"] = Field(
@ -209,11 +220,11 @@ class DatabaseConfig(BaseSettings):
@computed_field # type: ignore[prop-decorator]
@property
def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
def SQLALCHEMY_ENGINE_OPTIONS(self) -> SQLAlchemyEngineOptionsDict:
# Parse DB_EXTRAS for 'options'
db_extras_dict = dict(parse_qsl(self.DB_EXTRAS))
options = db_extras_dict.get("options", "")
connect_args = {}
connect_args: dict[str, str] = {}
# Use the dynamic SQLALCHEMY_DATABASE_URI_SCHEME property
if self.SQLALCHEMY_DATABASE_URI_SCHEME.startswith("postgresql"):
timezone_opt = "-c timezone=UTC"
@ -223,7 +234,7 @@ class DatabaseConfig(BaseSettings):
merged_options = timezone_opt
connect_args = {"options": merged_options}
return {
result: SQLAlchemyEngineOptionsDict = {
"pool_size": self.SQLALCHEMY_POOL_SIZE,
"max_overflow": self.SQLALCHEMY_MAX_OVERFLOW,
"pool_recycle": self.SQLALCHEMY_POOL_RECYCLE,
@ -233,6 +244,7 @@ class DatabaseConfig(BaseSettings):
"pool_reset_on_return": None,
"pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT,
}
return result
class CeleryConfig(DatabaseConfig):

View File

@ -1,9 +1,11 @@
import json
from typing import cast
from typing import Any, cast
from flask import request
from flask_restx import Resource, fields
from flask_restx import Resource
from pydantic import BaseModel, Field
from controllers.common.schema import register_schema_models
from controllers.console import console_ns
from controllers.console.app.wraps import get_app_model
from controllers.console.wraps import account_initialization_required, edit_permission_required, setup_required
@ -18,30 +20,30 @@ from models.model import AppMode, AppModelConfig
from services.app_model_config_service import AppModelConfigService
class ModelConfigRequest(BaseModel):
    """Request payload for updating an application's model configuration.

    Registered on the console namespace via register_schema_models so the
    OpenAPI schema is generated from this Pydantic model instead of an
    inline flask_restx api.model. Every field is optional (defaults to
    None); only the provided keys are applied.
    """

    provider: str | None = Field(default=None, description="Model provider")
    model: str | None = Field(default=None, description="Model name")
    configs: dict[str, Any] | None = Field(default=None, description="Model configuration parameters")
    opening_statement: str | None = Field(default=None, description="Opening statement")
    suggested_questions: list[str] | None = Field(default=None, description="Suggested questions")
    more_like_this: dict[str, Any] | None = Field(default=None, description="More like this configuration")
    speech_to_text: dict[str, Any] | None = Field(default=None, description="Speech to text configuration")
    text_to_speech: dict[str, Any] | None = Field(default=None, description="Text to speech configuration")
    retrieval_model: dict[str, Any] | None = Field(default=None, description="Retrieval model configuration")
    tools: list[dict[str, Any]] | None = Field(default=None, description="Available tools")
    dataset_configs: dict[str, Any] | None = Field(default=None, description="Dataset configurations")
    agent_mode: dict[str, Any] | None = Field(default=None, description="Agent mode configuration")
register_schema_models(console_ns, ModelConfigRequest)
@console_ns.route("/apps/<uuid:app_id>/model-config")
class ModelConfigResource(Resource):
@console_ns.doc("update_app_model_config")
@console_ns.doc(description="Update application model configuration")
@console_ns.doc(params={"app_id": "Application ID"})
@console_ns.expect(
console_ns.model(
"ModelConfigRequest",
{
"provider": fields.String(description="Model provider"),
"model": fields.String(description="Model name"),
"configs": fields.Raw(description="Model configuration parameters"),
"opening_statement": fields.String(description="Opening statement"),
"suggested_questions": fields.List(fields.String(), description="Suggested questions"),
"more_like_this": fields.Raw(description="More like this configuration"),
"speech_to_text": fields.Raw(description="Speech to text configuration"),
"text_to_speech": fields.Raw(description="Text to speech configuration"),
"retrieval_model": fields.Raw(description="Retrieval model configuration"),
"tools": fields.List(fields.Raw(), description="Available tools"),
"dataset_configs": fields.Raw(description="Dataset configurations"),
"agent_mode": fields.Raw(description="Agent mode configuration"),
},
)
)
@console_ns.expect(console_ns.models[ModelConfigRequest.__name__])
@console_ns.response(200, "Model configuration updated successfully")
@console_ns.response(400, "Invalid configuration")
@console_ns.response(404, "App not found")

View File

@ -2,18 +2,17 @@ import base64
from typing import Literal
from flask import request
from flask_restx import Resource, fields
from flask_restx import Resource
from pydantic import BaseModel, Field
from werkzeug.exceptions import BadRequest
from controllers.common.schema import register_schema_models
from controllers.console import console_ns
from controllers.console.wraps import account_initialization_required, only_edition_cloud, setup_required
from enums.cloud_plan import CloudPlan
from libs.login import current_account_with_tenant, login_required
from services.billing_service import BillingService
DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
class SubscriptionQuery(BaseModel):
plan: Literal[CloudPlan.PROFESSIONAL, CloudPlan.TEAM] = Field(..., description="Subscription plan")
@ -24,8 +23,7 @@ class PartnerTenantsPayload(BaseModel):
click_id: str = Field(..., description="Click Id from partner referral link")
for model in (SubscriptionQuery, PartnerTenantsPayload):
console_ns.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0))
register_schema_models(console_ns, SubscriptionQuery, PartnerTenantsPayload)
@console_ns.route("/billing/subscription")
@ -58,12 +56,7 @@ class PartnerTenants(Resource):
@console_ns.doc("sync_partner_tenants_bindings")
@console_ns.doc(description="Sync partner tenants bindings")
@console_ns.doc(params={"partner_key": "Partner key"})
@console_ns.expect(
console_ns.model(
"SyncPartnerTenantsBindingsRequest",
{"click_id": fields.String(required=True, description="Click Id from partner referral link")},
)
)
@console_ns.expect(console_ns.models[PartnerTenantsPayload.__name__])
@console_ns.response(200, "Tenants synced to partner successfully")
@console_ns.response(400, "Invalid partner information")
@setup_required

View File

@ -2,7 +2,7 @@ from __future__ import annotations
import enum
from enum import StrEnum
from typing import Any
from typing import Any, TypedDict
from pydantic import BaseModel, Field, ValidationInfo, field_validator
from yarl import URL
@ -179,6 +179,12 @@ class DatasourceProviderEntityWithPlugin(DatasourceProviderEntity):
datasources: list[DatasourceEntity] = Field(default_factory=list)
class DatasourceInvokeMetaDict(TypedDict):
    """Plain-dict form of DatasourceInvokeMeta, as produced by its to_dict()."""

    time_cost: float  # time cost reported for the datasource invocation
    error: str | None  # error message, or None on success
    tool_config: dict[str, Any] | None  # configuration snapshot of the invoked datasource
class DatasourceInvokeMeta(BaseModel):
"""
Datasource invoke meta
@ -202,12 +208,13 @@ class DatasourceInvokeMeta(BaseModel):
"""
return cls(time_cost=0.0, error=error, tool_config={})
def to_dict(self) -> dict:
return {
def to_dict(self) -> DatasourceInvokeMetaDict:
result: DatasourceInvokeMetaDict = {
"time_cost": self.time_cost,
"error": self.error,
"tool_config": self.tool_config,
}
return result
class DatasourceLabel(BaseModel):

View File

@ -2,7 +2,7 @@ import json
import os
from collections.abc import Mapping, Sequence
from enum import StrEnum, auto
from typing import TYPE_CHECKING, Any, cast
from typing import TYPE_CHECKING, Any, TypedDict, cast
from graphon.file import file_manager
from graphon.model_runtime.entities.message_entities import (
@ -34,6 +34,13 @@ class ModelMode(StrEnum):
prompt_file_contents: dict[str, Any] = {}
class PromptTemplateConfigDict(TypedDict):
    """Return shape of SimplePromptTransform's prompt-template builder.

    Bundles the parsed template with the variable keys it references and
    the raw prompt rules used to assemble it.
    """

    prompt_template: PromptTemplateParser  # parser wrapping the assembled prompt text
    custom_variable_keys: list[str]  # user-defined variable keys found in the template
    special_variable_keys: list[str]  # built-in keys such as "#query#"
    prompt_rules: dict[str, Any]  # provider/model-specific rules the template was built from
class SimplePromptTransform(PromptTransform):
"""
Simple Prompt Transform for Chatbot App Basic Mode.
@ -105,18 +112,13 @@ class SimplePromptTransform(PromptTransform):
with_memory_prompt=histories is not None,
)
custom_variable_keys_obj = prompt_template_config["custom_variable_keys"]
special_variable_keys_obj = prompt_template_config["special_variable_keys"]
custom_variable_keys = prompt_template_config["custom_variable_keys"]
if not isinstance(custom_variable_keys, list):
raise TypeError(f"Expected list for custom_variable_keys, got {type(custom_variable_keys)}")
# Type check for custom_variable_keys
if not isinstance(custom_variable_keys_obj, list):
raise TypeError(f"Expected list for custom_variable_keys, got {type(custom_variable_keys_obj)}")
custom_variable_keys = cast(list[str], custom_variable_keys_obj)
# Type check for special_variable_keys
if not isinstance(special_variable_keys_obj, list):
raise TypeError(f"Expected list for special_variable_keys, got {type(special_variable_keys_obj)}")
special_variable_keys = cast(list[str], special_variable_keys_obj)
special_variable_keys = prompt_template_config["special_variable_keys"]
if not isinstance(special_variable_keys, list):
raise TypeError(f"Expected list for special_variable_keys, got {type(special_variable_keys)}")
variables = {k: inputs[k] for k in custom_variable_keys if k in inputs}
@ -150,7 +152,7 @@ class SimplePromptTransform(PromptTransform):
has_context: bool,
query_in_prompt: bool,
with_memory_prompt: bool = False,
) -> dict[str, object]:
) -> PromptTemplateConfigDict:
prompt_rules = self._get_prompt_rule(app_mode=app_mode, provider=provider, model=model)
custom_variable_keys: list[str] = []
@ -173,12 +175,13 @@ class SimplePromptTransform(PromptTransform):
prompt += prompt_rules.get("query_prompt", "{{#query#}}")
special_variable_keys.append("#query#")
return {
result: PromptTemplateConfigDict = {
"prompt_template": PromptTemplateParser(template=prompt),
"custom_variable_keys": custom_variable_keys,
"special_variable_keys": special_variable_keys,
"prompt_rules": prompt_rules,
}
return result
def _get_chat_model_prompt_messages(
self,

View File

@ -450,6 +450,12 @@ class WorkflowToolParameterConfiguration(BaseModel):
form: ToolParameter.ToolParameterForm = Field(..., description="The form of the parameter")
class ToolInvokeMetaDict(TypedDict):
    """Plain-dict form of ToolInvokeMeta, as produced by its to_dict()."""

    time_cost: float  # time cost reported for the tool invocation
    error: str | None  # error message, or None on success
    tool_config: dict[str, Any] | None  # configuration snapshot of the invoked tool
class ToolInvokeMeta(BaseModel):
"""
Tool invoke meta
@ -473,12 +479,13 @@ class ToolInvokeMeta(BaseModel):
"""
return cls(time_cost=0.0, error=error, tool_config={})
def to_dict(self):
return {
def to_dict(self) -> ToolInvokeMetaDict:
result: ToolInvokeMetaDict = {
"time_cost": self.time_cost,
"error": self.error,
"tool_config": self.tool_config,
}
return result
class ToolLabel(BaseModel):

View File

@ -14,6 +14,7 @@ from redis.cluster import ClusterNode, RedisCluster
from redis.connection import Connection, SSLConnection
from redis.retry import Retry
from redis.sentinel import Sentinel
from typing_extensions import TypedDict
from configs import dify_config
from dify_app import DifyApp
@ -126,6 +127,35 @@ redis_client: RedisClientWrapper = RedisClientWrapper()
_pubsub_redis_client: redis.Redis | RedisCluster | None = None
class RedisSSLParamsDict(TypedDict):
    """SSL/TLS keyword arguments for a Redis connection."""

    ssl_cert_reqs: int  # certificate-verification mode (ssl.CERT_* value)
    ssl_ca_certs: str | None  # path to the CA bundle, if any
    ssl_certfile: str | None  # client certificate path, if any
    ssl_keyfile: str | None  # client private-key path, if any
class RedisHealthParamsDict(TypedDict):
    """Connection health/retry keyword arguments shared by standalone and
    Sentinel Redis clients (built by _get_connection_health_params)."""

    retry: Retry  # retry policy applied to failed commands/connections
    socket_timeout: float | None  # per-operation socket timeout
    socket_connect_timeout: float | None  # timeout for establishing the connection
    health_check_interval: int | None  # seconds between background health checks
class RedisBaseParamsDict(TypedDict):
    """Base keyword arguments for constructing a Redis client.

    Includes the health/retry fields inline because _get_base_redis_params
    merges in **_get_connection_health_params().
    """

    username: str | None  # auth username, if configured
    password: str | None  # auth password, or None when unset
    db: int  # logical database index
    encoding: str  # response encoding ("utf-8")
    encoding_errors: str  # decode error policy ("strict")
    decode_responses: bool  # False: responses stay as bytes
    protocol: int  # RESP protocol version
    cache_config: CacheConfig | None  # client-side cache config, if enabled
    retry: Retry  # retry policy (from health params)
    socket_timeout: float | None  # per-operation socket timeout (from health params)
    socket_connect_timeout: float | None  # connect timeout (from health params)
    health_check_interval: int | None  # health-check cadence (from health params)
def _get_ssl_configuration() -> tuple[type[Union[Connection, SSLConnection]], dict[str, Any]]:
"""Get SSL configuration for Redis connection."""
if not dify_config.REDIS_USE_SSL:
@ -171,14 +201,14 @@ def _get_retry_policy() -> Retry:
)
def _get_connection_health_params() -> dict[str, Any]:
def _get_connection_health_params() -> RedisHealthParamsDict:
"""Get connection health and retry parameters for standalone and Sentinel Redis clients."""
return {
"retry": _get_retry_policy(),
"socket_timeout": dify_config.REDIS_SOCKET_TIMEOUT,
"socket_connect_timeout": dify_config.REDIS_SOCKET_CONNECT_TIMEOUT,
"health_check_interval": dify_config.REDIS_HEALTH_CHECK_INTERVAL,
}
return RedisHealthParamsDict(
retry=_get_retry_policy(),
socket_timeout=dify_config.REDIS_SOCKET_TIMEOUT,
socket_connect_timeout=dify_config.REDIS_SOCKET_CONNECT_TIMEOUT,
health_check_interval=dify_config.REDIS_HEALTH_CHECK_INTERVAL,
)
def _get_cluster_connection_health_params() -> dict[str, Any]:
@ -189,26 +219,26 @@ def _get_cluster_connection_health_params() -> dict[str, Any]:
here. Only ``retry``, ``socket_timeout``, and ``socket_connect_timeout``
are passed through.
"""
params = _get_connection_health_params()
params: dict[str, Any] = dict(_get_connection_health_params())
return {k: v for k, v in params.items() if k != "health_check_interval"}
def _get_base_redis_params() -> dict[str, Any]:
def _get_base_redis_params() -> RedisBaseParamsDict:
"""Get base Redis connection parameters including retry and health policy."""
return {
"username": dify_config.REDIS_USERNAME,
"password": dify_config.REDIS_PASSWORD or None,
"db": dify_config.REDIS_DB,
"encoding": "utf-8",
"encoding_errors": "strict",
"decode_responses": False,
"protocol": dify_config.REDIS_SERIALIZATION_PROTOCOL,
"cache_config": _get_cache_configuration(),
return RedisBaseParamsDict(
username=dify_config.REDIS_USERNAME,
password=dify_config.REDIS_PASSWORD or None,
db=dify_config.REDIS_DB,
encoding="utf-8",
encoding_errors="strict",
decode_responses=False,
protocol=dify_config.REDIS_SERIALIZATION_PROTOCOL,
cache_config=_get_cache_configuration(),
**_get_connection_health_params(),
}
)
def _create_sentinel_client(redis_params: dict[str, Any]) -> Union[redis.Redis, RedisCluster]:
def _create_sentinel_client(redis_params: RedisBaseParamsDict) -> Union[redis.Redis, RedisCluster]:
"""Create Redis client using Sentinel configuration."""
if not dify_config.REDIS_SENTINELS:
raise ValueError("REDIS_SENTINELS must be set when REDIS_USE_SENTINEL is True")
@ -232,7 +262,8 @@ def _create_sentinel_client(redis_params: dict[str, Any]) -> Union[redis.Redis,
sentinel_kwargs=sentinel_kwargs,
)
master: redis.Redis = sentinel.master_for(dify_config.REDIS_SENTINEL_SERVICE_NAME, **redis_params)
params: dict[str, Any] = {**redis_params}
master: redis.Redis = sentinel.master_for(dify_config.REDIS_SENTINEL_SERVICE_NAME, **params)
return master
@ -259,18 +290,16 @@ def _create_cluster_client() -> Union[redis.Redis, RedisCluster]:
return cluster
def _create_standalone_client(redis_params: dict[str, Any]) -> Union[redis.Redis, RedisCluster]:
def _create_standalone_client(redis_params: RedisBaseParamsDict) -> Union[redis.Redis, RedisCluster]:
"""Create standalone Redis client."""
connection_class, ssl_kwargs = _get_ssl_configuration()
params = {**redis_params}
params.update(
{
"host": dify_config.REDIS_HOST,
"port": dify_config.REDIS_PORT,
"connection_class": connection_class,
}
)
params: dict[str, Any] = {
**redis_params,
"host": dify_config.REDIS_HOST,
"port": dify_config.REDIS_PORT,
"connection_class": connection_class,
}
if dify_config.REDIS_MAX_CONNECTIONS:
params["max_connections"] = dify_config.REDIS_MAX_CONNECTIONS
@ -293,8 +322,8 @@ def _create_pubsub_client(pubsub_url: str, use_clusters: bool) -> redis.Redis |
kwargs["max_connections"] = max_conns
return RedisCluster.from_url(pubsub_url, **kwargs)
health_params = _get_connection_health_params()
kwargs = {**health_params}
standalone_health_params: dict[str, Any] = dict(_get_connection_health_params())
kwargs = {**standalone_health_params}
if max_conns:
kwargs["max_connections"] = max_conns
return redis.Redis.from_url(pubsub_url, **kwargs)

View File

@ -108,6 +108,56 @@ class ExternalKnowledgeApiDict(TypedDict):
created_at: str
class DocumentDict(TypedDict):
    """Plain-dict serialization of a Document row, produced by Document.to_dict()."""

    # --- identity and ownership ---
    id: str
    tenant_id: str
    dataset_id: str
    position: int
    # --- data source / creation ---
    data_source_type: str
    data_source_info: str | None
    dataset_process_rule_id: str | None
    batch: str
    name: str
    created_from: str
    created_by: str
    created_api_request_id: str | None
    created_at: datetime
    # --- indexing pipeline progress ---
    processing_started_at: datetime | None
    file_id: str | None
    word_count: int | None
    parsing_completed_at: datetime | None
    cleaning_completed_at: datetime | None
    splitting_completed_at: datetime | None
    tokens: int | None
    indexing_latency: float | None
    completed_at: datetime | None
    # --- pause / error / archive state ---
    is_paused: bool | None
    paused_by: str | None
    paused_at: datetime | None
    error: str | None
    stopped_at: datetime | None
    indexing_status: str
    enabled: bool
    disabled_at: datetime | None
    disabled_by: str | None
    archived: bool
    archived_reason: str | None
    archived_by: str | None
    archived_at: datetime | None
    updated_at: datetime
    # --- document metadata ---
    doc_type: str | None
    doc_metadata: Any
    doc_form: IndexStructureType
    doc_language: str | None
    display_status: str | None
    # --- derived/computed fields ---
    data_source_info_dict: dict[str, Any]
    average_segment_length: int
    dataset_process_rule: ProcessRuleDict | None
    dataset: None  # always None: Dataset has no to_dict serialization
    segment_count: int | None
    hit_count: int | None
class DatasetPermissionEnum(enum.StrEnum):
ONLY_ME = "only_me"
ALL_TEAM = "all_team_members"
@ -675,8 +725,8 @@ class Document(Base):
)
return built_in_fields
def to_dict(self) -> dict[str, Any]:
return {
def to_dict(self) -> DocumentDict:
result: DocumentDict = {
"id": self.id,
"tenant_id": self.tenant_id,
"dataset_id": self.dataset_id,
@ -721,10 +771,11 @@ class Document(Base):
"data_source_info_dict": self.data_source_info_dict,
"average_segment_length": self.average_segment_length,
"dataset_process_rule": self.dataset_process_rule.to_dict() if self.dataset_process_rule else None,
"dataset": None, # Dataset class doesn't have a to_dict method
"dataset": None,
"segment_count": self.segment_count,
"hit_count": self.hit_count,
}
return result
@classmethod
def from_dict(cls, data: dict[str, Any]):

View File

@ -0,0 +1,395 @@
"""Testcontainers integration tests for SQLAlchemyWorkflowNodeExecutionRepository."""
from __future__ import annotations
import json
from datetime import datetime
from decimal import Decimal
from uuid import uuid4
from graphon.entities import WorkflowNodeExecution
from graphon.enums import (
BuiltinNodeTypes,
WorkflowNodeExecutionMetadataKey,
WorkflowNodeExecutionStatus,
)
from graphon.model_runtime.utils.encoders import jsonable_encoder
from sqlalchemy import Engine
from sqlalchemy.orm import Session, sessionmaker
from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository
from core.repositories.factory import OrderConfig
from models.account import Account, Tenant
from models.enums import CreatorUserRole
from models.workflow import WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom
def _create_account_with_tenant(session: Session) -> Account:
    """Persist a fresh tenant plus an account bound to it, returning the account."""
    workspace = Tenant(name="Test Workspace")
    session.add(workspace)
    session.flush()

    user = Account(name="test", email=f"test-{uuid4()}@example.com")
    session.add(user)
    session.flush()

    # Bind the tenant directly so account.current_tenant_id resolves in tests.
    user._current_tenant = workspace
    return user
def _make_repo(session: Session, account: Account, app_id: str) -> SQLAlchemyWorkflowNodeExecutionRepository:
    """Build a repository wired to the test session's engine for the given account/app."""
    bind = session.get_bind()
    assert isinstance(bind, Engine)
    factory = sessionmaker(bind=bind, expire_on_commit=False)
    return SQLAlchemyWorkflowNodeExecutionRepository(
        session_factory=factory,
        user=account,
        app_id=app_id,
        triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN,
    )
def _create_node_execution_model(
    session: Session,
    *,
    tenant_id: str,
    app_id: str,
    workflow_id: str,
    workflow_run_id: str,
    index: int = 1,
    status: WorkflowNodeExecutionStatus = WorkflowNodeExecutionStatus.RUNNING,
) -> WorkflowNodeExecutionModel:
    """Insert a WorkflowNodeExecutionModel row directly (bypassing the
    repository) and return it after a flush.

    Fixed JSON payloads are used for inputs/process_data/outputs; node_id
    and title are derived from *index* so multiple rows stay distinct.
    """
    model = WorkflowNodeExecutionModel(
        id=str(uuid4()),
        tenant_id=tenant_id,
        app_id=app_id,
        workflow_id=workflow_id,
        triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN,
        workflow_run_id=workflow_run_id,
        index=index,
        predecessor_node_id=None,
        node_execution_id=str(uuid4()),
        node_id=f"node-{index}",
        node_type=BuiltinNodeTypes.START,
        title=f"Test Node {index}",
        inputs='{"input_key": "input_value"}',
        process_data='{"process_key": "process_value"}',
        outputs='{"output_key": "output_value"}',
        status=status,
        error=None,
        elapsed_time=1.5,
        execution_metadata="{}",
        created_at=datetime.now(),
        created_by_role=CreatorUserRole.ACCOUNT,
        created_by=str(uuid4()),
        finished_at=None,
    )
    session.add(model)
    session.flush()
    return model
class TestSave:
    """Integration tests for SQLAlchemyWorkflowNodeExecutionRepository.save()."""

    def test_save_new_record(self, db_session_with_containers: Session) -> None:
        """save() inserts a new row carrying the repository's tenant/app context."""
        account = _create_account_with_tenant(db_session_with_containers)
        app_id = str(uuid4())
        repo = _make_repo(db_session_with_containers, account, app_id)
        execution = WorkflowNodeExecution(
            id=str(uuid4()),
            workflow_id=str(uuid4()),
            node_execution_id=str(uuid4()),
            workflow_execution_id=str(uuid4()),
            index=1,
            predecessor_node_id=None,
            node_id="node-1",
            node_type=BuiltinNodeTypes.START,
            title="Test Node",
            inputs={"input_key": "input_value"},
            process_data={"process_key": "process_value"},
            outputs={"result": "success"},
            status=WorkflowNodeExecutionStatus.RUNNING,
            error=None,
            elapsed_time=1.5,
            metadata={WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100},
            created_at=datetime.now(),
            finished_at=None,
        )
        repo.save(execution)
        # Read back through an independent session to avoid identity-map reuse.
        engine = db_session_with_containers.get_bind()
        assert isinstance(engine, Engine)
        with sessionmaker(bind=engine, expire_on_commit=False)() as verify_session:
            saved = verify_session.get(WorkflowNodeExecutionModel, execution.id)
            assert saved is not None
            assert saved.tenant_id == account.current_tenant_id
            assert saved.app_id == app_id
            assert saved.node_id == "node-1"
            assert saved.status == WorkflowNodeExecutionStatus.RUNNING

    def test_save_updates_existing_record(self, db_session_with_containers: Session) -> None:
        """Saving the same execution id twice updates the existing row in place."""
        account = _create_account_with_tenant(db_session_with_containers)
        repo = _make_repo(db_session_with_containers, account, str(uuid4()))
        execution = WorkflowNodeExecution(
            id=str(uuid4()),
            workflow_id=str(uuid4()),
            node_execution_id=str(uuid4()),
            workflow_execution_id=str(uuid4()),
            index=1,
            predecessor_node_id=None,
            node_id="node-1",
            node_type=BuiltinNodeTypes.START,
            title="Test Node",
            inputs=None,
            process_data=None,
            outputs=None,
            status=WorkflowNodeExecutionStatus.RUNNING,
            error=None,
            elapsed_time=0.0,
            metadata=None,
            created_at=datetime.now(),
            finished_at=None,
        )
        repo.save(execution)
        # Mutate and save again: the second save must update, not duplicate.
        execution.status = WorkflowNodeExecutionStatus.SUCCEEDED
        execution.elapsed_time = 2.5
        repo.save(execution)
        engine = db_session_with_containers.get_bind()
        assert isinstance(engine, Engine)
        with sessionmaker(bind=engine, expire_on_commit=False)() as verify_session:
            saved = verify_session.get(WorkflowNodeExecutionModel, execution.id)
            assert saved is not None
            assert saved.status == WorkflowNodeExecutionStatus.SUCCEEDED
            assert saved.elapsed_time == 2.5
class TestGetByWorkflowExecution:
    """Integration tests for repository.get_by_workflow_execution()."""

    def test_returns_executions_ordered(self, db_session_with_containers: Session) -> None:
        """Rows are returned as domain models in the order given by OrderConfig."""
        account = _create_account_with_tenant(db_session_with_containers)
        tenant_id = account.current_tenant_id
        app_id = str(uuid4())
        workflow_id = str(uuid4())
        workflow_run_id = str(uuid4())
        repo = _make_repo(db_session_with_containers, account, app_id)
        _create_node_execution_model(
            db_session_with_containers,
            tenant_id=tenant_id,
            app_id=app_id,
            workflow_id=workflow_id,
            workflow_run_id=workflow_run_id,
            index=1,
            status=WorkflowNodeExecutionStatus.SUCCEEDED,
        )
        _create_node_execution_model(
            db_session_with_containers,
            tenant_id=tenant_id,
            app_id=app_id,
            workflow_id=workflow_id,
            workflow_run_id=workflow_run_id,
            index=2,
            status=WorkflowNodeExecutionStatus.SUCCEEDED,
        )
        db_session_with_containers.commit()
        # Descending by index: the second-created row must come first.
        order_config = OrderConfig(order_by=["index"], order_direction="desc")
        result = repo.get_by_workflow_execution(
            workflow_execution_id=workflow_run_id,
            order_config=order_config,
        )
        assert len(result) == 2
        assert result[0].index == 2
        assert result[1].index == 1
        assert all(isinstance(r, WorkflowNodeExecution) for r in result)

    def test_excludes_paused_executions(self, db_session_with_containers: Session) -> None:
        """PAUSED executions are filtered out of the result set."""
        account = _create_account_with_tenant(db_session_with_containers)
        tenant_id = account.current_tenant_id
        app_id = str(uuid4())
        workflow_id = str(uuid4())
        workflow_run_id = str(uuid4())
        repo = _make_repo(db_session_with_containers, account, app_id)
        _create_node_execution_model(
            db_session_with_containers,
            tenant_id=tenant_id,
            app_id=app_id,
            workflow_id=workflow_id,
            workflow_run_id=workflow_run_id,
            index=1,
            status=WorkflowNodeExecutionStatus.RUNNING,
        )
        _create_node_execution_model(
            db_session_with_containers,
            tenant_id=tenant_id,
            app_id=app_id,
            workflow_id=workflow_id,
            workflow_run_id=workflow_run_id,
            index=2,
            status=WorkflowNodeExecutionStatus.PAUSED,
        )
        db_session_with_containers.commit()
        result = repo.get_by_workflow_execution(workflow_execution_id=workflow_run_id)
        # Only the RUNNING row (index 1) should remain.
        assert len(result) == 1
        assert result[0].index == 1
class TestToDbModel:
    """Field-by-field checks of repository._to_db_model (domain -> DB row)."""

    def test_converts_domain_to_db_model(self, db_session_with_containers: Session) -> None:
        """Every domain field maps onto the DB model; tenant/app/creator come from the repo."""
        account = _create_account_with_tenant(db_session_with_containers)
        app_id = str(uuid4())
        repo = _make_repo(db_session_with_containers, account, app_id)
        domain_model = WorkflowNodeExecution(
            id="test-id",
            workflow_id="test-workflow-id",
            node_execution_id="test-node-execution-id",
            workflow_execution_id="test-workflow-run-id",
            index=1,
            predecessor_node_id="test-predecessor-id",
            node_id="test-node-id",
            node_type=BuiltinNodeTypes.START,
            title="Test Node",
            inputs={"input_key": "input_value"},
            process_data={"process_key": "process_value"},
            outputs={"output_key": "output_value"},
            status=WorkflowNodeExecutionStatus.RUNNING,
            error=None,
            elapsed_time=1.5,
            metadata={
                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100,
                WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: Decimal("0.0"),
            },
            created_at=datetime.now(),
            finished_at=None,
        )
        db_model = repo._to_db_model(domain_model)
        assert isinstance(db_model, WorkflowNodeExecutionModel)
        assert db_model.id == domain_model.id
        # tenant/app/triggered_from are injected by the repository, not the domain model
        assert db_model.tenant_id == account.current_tenant_id
        assert db_model.app_id == app_id
        assert db_model.workflow_id == domain_model.workflow_id
        assert db_model.triggered_from == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN
        assert db_model.workflow_run_id == domain_model.workflow_execution_id
        assert db_model.index == domain_model.index
        assert db_model.predecessor_node_id == domain_model.predecessor_node_id
        assert db_model.node_execution_id == domain_model.node_execution_id
        assert db_model.node_id == domain_model.node_id
        assert db_model.node_type == domain_model.node_type
        assert db_model.title == domain_model.title
        assert db_model.inputs_dict == domain_model.inputs
        assert db_model.process_data_dict == domain_model.process_data
        assert db_model.outputs_dict == domain_model.outputs
        # metadata is JSON-encoded on the DB side; compare via jsonable_encoder
        assert db_model.execution_metadata_dict == jsonable_encoder(domain_model.metadata)
        assert db_model.status == domain_model.status
        assert db_model.error == domain_model.error
        assert db_model.elapsed_time == domain_model.elapsed_time
        assert db_model.created_at == domain_model.created_at
        assert db_model.created_by_role == CreatorUserRole.ACCOUNT
        assert db_model.created_by == account.id
        assert db_model.finished_at == domain_model.finished_at
class TestToDomainModel:
def test_converts_db_to_domain_model(self, db_session_with_containers: Session) -> None:
account = _create_account_with_tenant(db_session_with_containers)
app_id = str(uuid4())
repo = _make_repo(db_session_with_containers, account, app_id)
inputs_dict = {"input_key": "input_value"}
process_data_dict = {"process_key": "process_value"}
outputs_dict = {"output_key": "output_value"}
metadata_dict = {str(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS): 100}
now = datetime.now()
db_model = WorkflowNodeExecutionModel()
db_model.id = "test-id"
db_model.tenant_id = account.current_tenant_id
db_model.app_id = app_id
db_model.workflow_id = "test-workflow-id"
db_model.triggered_from = "workflow-run"
db_model.workflow_run_id = "test-workflow-run-id"
db_model.index = 1
db_model.predecessor_node_id = "test-predecessor-id"
db_model.node_execution_id = "test-node-execution-id"
db_model.node_id = "test-node-id"
db_model.node_type = BuiltinNodeTypes.START
db_model.title = "Test Node"
db_model.inputs = json.dumps(inputs_dict)
db_model.process_data = json.dumps(process_data_dict)
db_model.outputs = json.dumps(outputs_dict)
db_model.status = WorkflowNodeExecutionStatus.RUNNING
db_model.error = None
db_model.elapsed_time = 1.5
db_model.execution_metadata = json.dumps(metadata_dict)
db_model.created_at = now
db_model.created_by_role = "account"
db_model.created_by = account.id
db_model.finished_at = None
domain_model = repo._to_domain_model(db_model)
assert isinstance(domain_model, WorkflowNodeExecution)
assert domain_model.id == "test-id"
assert domain_model.workflow_id == "test-workflow-id"
assert domain_model.workflow_execution_id == "test-workflow-run-id"
assert domain_model.index == 1
assert domain_model.predecessor_node_id == "test-predecessor-id"
assert domain_model.node_execution_id == "test-node-execution-id"
assert domain_model.node_id == "test-node-id"
assert domain_model.node_type == BuiltinNodeTypes.START
assert domain_model.title == "Test Node"
assert domain_model.inputs == inputs_dict
assert domain_model.process_data == process_data_dict
assert domain_model.outputs == outputs_dict
assert domain_model.status == WorkflowNodeExecutionStatus.RUNNING
assert domain_model.error is None
assert domain_model.elapsed_time == 1.5
assert domain_model.metadata == {WorkflowNodeExecutionMetadataKey(k): v for k, v in metadata_dict.items()}
assert domain_model.created_at == now
assert domain_model.finished_at is None
def test_domain_model_without_offload_data(self, db_session_with_containers: Session) -> None:
    """A DB row whose payloads live inline (no offload record) converts cleanly.

    The resulting domain model must expose process_data unchanged and must
    report that nothing was truncated.
    """
    account = _create_account_with_tenant(db_session_with_containers)
    repo = _make_repo(db_session_with_containers, account, str(uuid4()))
    expected_process_data = {"normal": "data"}

    row = WorkflowNodeExecutionModel()
    row_values = {
        "id": str(uuid4()),
        "tenant_id": account.current_tenant_id,
        "app_id": str(uuid4()),
        "workflow_id": str(uuid4()),
        "triggered_from": "workflow-run",
        "workflow_run_id": None,
        "index": 1,
        "predecessor_node_id": None,
        "node_execution_id": str(uuid4()),
        "node_id": "test-node-id",
        "node_type": "llm",
        "title": "Test Node",
        "inputs": None,
        "process_data": json.dumps(expected_process_data),
        "outputs": None,
        "status": "succeeded",
        "error": None,
        "elapsed_time": 1.5,
        "execution_metadata": "{}",
        "created_at": datetime.now(),
        "created_by_role": "account",
        "created_by": account.id,
        "finished_at": None,
    }
    for field, value in row_values.items():
        setattr(row, field, value)

    converted = repo._to_domain_model(row)

    assert converted.process_data == expected_process_data
    # No offload record -> nothing was truncated on the way in.
    assert converted.process_data_truncated is False
    assert converted.get_truncated_process_data() is None

View File

@ -1,3 +0,0 @@
"""
Unit tests for workflow_node_execution repositories.
"""

View File

@ -1,340 +0,0 @@
"""
Unit tests for the SQLAlchemy implementation of WorkflowNodeExecutionRepository.
"""
import json
import uuid
from datetime import datetime
from decimal import Decimal
from unittest.mock import MagicMock, PropertyMock
import pytest
from graphon.entities import (
WorkflowNodeExecution,
)
from graphon.enums import (
BuiltinNodeTypes,
WorkflowNodeExecutionMetadataKey,
WorkflowNodeExecutionStatus,
)
from graphon.model_runtime.utils.encoders import jsonable_encoder
from pytest_mock import MockerFixture
from sqlalchemy.orm import Session, sessionmaker
from core.repositories import SQLAlchemyWorkflowNodeExecutionRepository
from core.repositories.factory import OrderConfig
from models.account import Account, Tenant
from models.workflow import WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom
def configure_mock_execution(mock_execution):
    """Prime a mocked DB execution row with JSON-serializable payloads.

    The repository deserializes inputs/outputs/process_data/execution_metadata,
    so the mocked properties must yield valid JSON strings; status and
    triggered_from must be legal enum values.
    """
    json_payloads = {
        "inputs": '{"key": "value"}',
        "outputs": '{"result": "success"}',
        "process_data": '{"process": "data"}',
        "execution_metadata": '{"metadata": "info"}',
    }
    # PropertyMock must live on the type, not the instance, to act as a property.
    for field, payload in json_payloads.items():
        setattr(type(mock_execution), field, PropertyMock(return_value=payload))
    mock_execution.status = "running"
    mock_execution.triggered_from = "workflow-run"
    return mock_execution
@pytest.fixture
def session():
    """Yield a (mock session, mock sessionmaker) pair wired for `with` usage."""
    mock_session = MagicMock(spec=Session)
    # Support `with factory() as s:` by making the session its own context manager.
    mock_session.__enter__ = MagicMock(return_value=mock_session)
    mock_session.__exit__ = MagicMock(return_value=None)
    factory = MagicMock(spec=sessionmaker)
    factory.return_value = mock_session
    return mock_session, factory
@pytest.fixture
def mock_user():
    """Build an Account whose current tenant is mocked to a fixed id."""
    account = Account(name="test", email="test@example.com")
    account.id = "test-user-id"
    workspace = Tenant(name="Test Workspace")
    workspace.id = "test-tenant"
    # The repository only reads the tenant id, so a MagicMock suffices here.
    account._current_tenant = MagicMock()
    account._current_tenant.id = "test-tenant"
    return account
@pytest.fixture
def repository(session, mock_user):
    """Construct the repository under test against the mocked session factory."""
    _, factory = session
    return SQLAlchemyWorkflowNodeExecutionRepository(
        session_factory=factory,
        user=mock_user,
        app_id="test-app",
        triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN,
    )
def test_save(repository, session):
    """Saving a brand-new execution must take the insert path and commit once."""
    mock_session, _ = session

    # Domain execution with no tenant/app context yet.
    domain_execution = MagicMock(spec=WorkflowNodeExecution)
    domain_execution.id = "test-id"
    domain_execution.node_execution_id = "test-node-execution-id"
    for attr in ("tenant_id", "app_id", "inputs", "process_data", "outputs", "metadata"):
        setattr(domain_execution, attr, None)
    domain_execution.workflow_id = str(uuid.uuid4())

    # Stub the domain->DB conversion so we control what gets persisted.
    stored_model = MagicMock(spec=WorkflowNodeExecutionModel)
    stored_model.id = "test-id"
    stored_model.node_execution_id = "test-node-execution-id"
    repository._to_db_model = MagicMock(return_value=stored_model)

    # No pre-existing row -> save must insert rather than update.
    mock_session.get.return_value = None

    repository.save(domain_execution)

    repository._to_db_model.assert_called_once_with(domain_execution)
    mock_session.get.assert_called_once_with(WorkflowNodeExecutionModel, stored_model.id)
    mock_session.add.assert_called_once_with(stored_model)
    mock_session.commit.assert_called_once()
def test_save_with_existing_tenant_id(repository, session):
    """Saving over an existing row must update in place, never re-insert."""
    mock_session, _ = session

    # Incoming execution already carries a tenant_id that must survive the save.
    incoming = MagicMock(spec=WorkflowNodeExecutionModel)
    incoming.id = "existing-id"
    incoming.node_execution_id = "existing-node-execution-id"
    incoming.tenant_id = "existing-tenant"
    for attr in ("app_id", "inputs", "process_data", "outputs", "metadata"):
        setattr(incoming, attr, None)

    # What _to_db_model is expected to produce: tenant untouched, app_id filled in.
    converted = MagicMock(spec=WorkflowNodeExecutionModel)
    converted.id = "existing-id"
    converted.node_execution_id = "existing-node-execution-id"
    converted.tenant_id = "existing-tenant"  # must not change
    converted.app_id = repository._app_id  # must be set by the repository
    # The repository copies attributes from __dict__ onto the existing row,
    # so simulate that attribute surface explicitly.
    converted.__dict__ = {
        "id": "existing-id",
        "node_execution_id": "existing-node-execution-id",
        "tenant_id": "existing-tenant",
        "app_id": repository._app_id,
    }
    repository._to_db_model = MagicMock(return_value=converted)

    # session.get finds a record -> update path.
    existing_row = MagicMock(spec=WorkflowNodeExecutionModel)
    mock_session.get.return_value = existing_row

    repository.save(incoming)

    repository._to_db_model.assert_called_once_with(incoming)
    mock_session.get.assert_called_once_with(WorkflowNodeExecutionModel, converted.id)
    # Updating an existing row must not add a new one.
    mock_session.add.assert_not_called()
    mock_session.commit.assert_called_once()
def test_get_by_workflow_execution(repository, session, mocker: MockerFixture):
    """Test get_by_workflow_execution method.

    Patches the query-building helpers (select/asc/desc) and the model's
    preload hook so the test can verify how the repository composes the
    statement, then checks that every returned row is converted through
    _to_domain_model.
    """
    session_obj, _ = session
    # Set up mock
    mock_select = mocker.patch("core.repositories.sqlalchemy_workflow_node_execution_repository.select")
    mock_asc = mocker.patch("core.repositories.sqlalchemy_workflow_node_execution_repository.asc")
    mock_desc = mocker.patch("core.repositories.sqlalchemy_workflow_node_execution_repository.desc")
    mock_WorkflowNodeExecutionModel = mocker.patch(
        "core.repositories.sqlalchemy_workflow_node_execution_repository.WorkflowNodeExecutionModel"
    )
    # The statement mock returns itself from where/order_by (and from asc/desc)
    # so the repository's chained query construction keeps working.
    mock_stmt = mocker.MagicMock()
    mock_select.return_value = mock_stmt
    mock_stmt.where.return_value = mock_stmt
    mock_stmt.order_by.return_value = mock_stmt
    mock_asc.return_value = mock_stmt
    mock_desc.return_value = mock_stmt
    mock_WorkflowNodeExecutionModel.preload_offload_data_and_files.return_value = mock_stmt
    # Create a properly configured mock execution
    mock_execution = mocker.MagicMock(spec=WorkflowNodeExecutionModel)
    configure_mock_execution(mock_execution)
    session_obj.scalars.return_value.all.return_value = [mock_execution]
    # Create a mock domain model to be returned by _to_domain_model
    mock_domain_model = mocker.MagicMock()
    # Mock the _to_domain_model method to return our mock domain model
    repository._to_domain_model = mocker.MagicMock(return_value=mock_domain_model)
    # Call method
    order_config = OrderConfig(order_by=["index"], order_direction="desc")
    result = repository.get_by_workflow_execution(
        workflow_execution_id="test-workflow-run-id",
        order_config=order_config,
    )
    # Assert select was called with correct parameters
    mock_select.assert_called_once()
    session_obj.scalars.assert_called_once_with(mock_stmt)
    mock_WorkflowNodeExecutionModel.preload_offload_data_and_files.assert_called_once_with(mock_stmt)
    # Assert _to_domain_model was called with the mock execution
    repository._to_domain_model.assert_called_once_with(mock_execution)
    # Assert the result contains our mock domain model
    assert len(result) == 1
    assert result[0] is mock_domain_model
def test_to_db_model(repository):
    """Converting a domain execution to a DB model must preserve every field."""
    domain_model = WorkflowNodeExecution(
        id="test-id",
        workflow_id="test-workflow-id",
        node_execution_id="test-node-execution-id",
        workflow_execution_id="test-workflow-run-id",
        index=1,
        predecessor_node_id="test-predecessor-id",
        node_id="test-node-id",
        node_type=BuiltinNodeTypes.START,
        title="Test Node",
        inputs={"input_key": "input_value"},
        process_data={"process_key": "process_value"},
        outputs={"output_key": "output_value"},
        status=WorkflowNodeExecutionStatus.RUNNING,
        error=None,
        elapsed_time=1.5,
        metadata={
            WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 100,
            WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: Decimal("0.0"),
        },
        created_at=datetime.now(),
        finished_at=None,
    )

    db_model = repository._to_db_model(domain_model)

    assert isinstance(db_model, WorkflowNodeExecutionModel)
    # Context fields supplied by the repository itself.
    assert db_model.tenant_id == repository._tenant_id
    assert db_model.app_id == repository._app_id
    assert db_model.triggered_from == repository._triggered_from
    assert db_model.created_by_role == repository._creator_user_role
    assert db_model.created_by == repository._creator_user_id
    # Fields mirrored directly from the domain model: (db attr, domain attr).
    mirrored = [
        ("id", "id"),
        ("workflow_id", "workflow_id"),
        ("workflow_run_id", "workflow_execution_id"),
        ("index", "index"),
        ("predecessor_node_id", "predecessor_node_id"),
        ("node_execution_id", "node_execution_id"),
        ("node_id", "node_id"),
        ("node_type", "node_type"),
        ("title", "title"),
        ("inputs_dict", "inputs"),
        ("process_data_dict", "process_data"),
        ("outputs_dict", "outputs"),
        ("status", "status"),
        ("error", "error"),
        ("elapsed_time", "elapsed_time"),
        ("created_at", "created_at"),
        ("finished_at", "finished_at"),
    ]
    for db_attr, domain_attr in mirrored:
        assert getattr(db_model, db_attr) == getattr(domain_model, domain_attr)
    # Metadata is run through the JSON encoder on the way into the DB model.
    assert db_model.execution_metadata_dict == jsonable_encoder(domain_model.metadata)
def test_to_domain_model(repository):
    """Converting a DB row back to the domain model must preserve every field."""
    inputs_dict = {"input_key": "input_value"}
    process_data_dict = {"process_key": "process_value"}
    outputs_dict = {"output_key": "output_value"}
    metadata_dict = {str(WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS): 100}

    db_model = WorkflowNodeExecutionModel()
    row_values = {
        "id": "test-id",
        "tenant_id": "test-tenant-id",
        "app_id": "test-app-id",
        "workflow_id": "test-workflow-id",
        "triggered_from": "workflow-run",
        "workflow_run_id": "test-workflow-run-id",
        "index": 1,
        "predecessor_node_id": "test-predecessor-id",
        "node_execution_id": "test-node-execution-id",
        "node_id": "test-node-id",
        "node_type": BuiltinNodeTypes.START,
        "title": "Test Node",
        # Payload columns are stored as JSON text in the DB.
        "inputs": json.dumps(inputs_dict),
        "process_data": json.dumps(process_data_dict),
        "outputs": json.dumps(outputs_dict),
        "status": WorkflowNodeExecutionStatus.RUNNING,
        "error": None,
        "elapsed_time": 1.5,
        "execution_metadata": json.dumps(metadata_dict),
        "created_at": datetime.now(),
        "created_by_role": "account",
        "created_by": "test-user-id",
        "finished_at": None,
    }
    for field, value in row_values.items():
        setattr(db_model, field, value)

    domain_model = repository._to_domain_model(db_model)

    assert isinstance(domain_model, WorkflowNodeExecution)
    assert domain_model.id == db_model.id
    assert domain_model.workflow_id == db_model.workflow_id
    assert domain_model.workflow_execution_id == db_model.workflow_run_id
    assert domain_model.index == db_model.index
    assert domain_model.predecessor_node_id == db_model.predecessor_node_id
    assert domain_model.node_execution_id == db_model.node_execution_id
    assert domain_model.node_id == db_model.node_id
    assert domain_model.node_type == db_model.node_type
    assert domain_model.title == db_model.title
    # JSON payloads come back as the original dictionaries.
    assert domain_model.inputs == inputs_dict
    assert domain_model.process_data == process_data_dict
    assert domain_model.outputs == outputs_dict
    assert domain_model.status == WorkflowNodeExecutionStatus(db_model.status)
    assert domain_model.error == db_model.error
    assert domain_model.elapsed_time == db_model.elapsed_time
    assert domain_model.metadata == metadata_dict
    assert domain_model.created_at == db_model.created_at
    assert domain_model.finished_at == db_model.finished_at

View File

@ -1,106 +0,0 @@
"""
Unit tests for SQLAlchemyWorkflowNodeExecutionRepository, focusing on process_data truncation functionality.
"""
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock, Mock
from graphon.entities import WorkflowNodeExecution
from graphon.enums import BuiltinNodeTypes
from sqlalchemy.orm import sessionmaker
from core.repositories.sqlalchemy_workflow_node_execution_repository import (
SQLAlchemyWorkflowNodeExecutionRepository,
)
from models import Account, WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom
class TestSQLAlchemyWorkflowNodeExecutionRepositoryProcessData:
    """Exercise process_data truncation handling in the node-execution repository."""

    def create_mock_account(self) -> Account:
        """Return an Account mock with fixed user and tenant ids."""
        fake_account = Mock(spec=Account)
        fake_account.id = "test-user-id"
        fake_account.tenant_id = "test-tenant-id"
        return fake_account

    def create_mock_session_factory(self) -> sessionmaker:
        """Return a sessionmaker mock whose sessions work as context managers."""
        inner_session = MagicMock()
        factory = MagicMock(spec=sessionmaker)
        factory.return_value.__enter__.return_value = inner_session
        factory.return_value.__exit__.return_value = None
        return factory

    def create_repository(self, mock_file_service=None) -> SQLAlchemyWorkflowNodeExecutionRepository:
        """Assemble a repository against mocked collaborators.

        Optionally swaps in a file-service mock for truncation/offload tests.
        """
        repo = SQLAlchemyWorkflowNodeExecutionRepository(
            session_factory=self.create_mock_session_factory(),
            user=self.create_mock_account(),
            app_id="test-app-id",
            triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN,
        )
        if mock_file_service:
            repo._file_service = mock_file_service
        return repo

    def create_workflow_node_execution(
        self,
        process_data: dict[str, Any] | None = None,
        execution_id: str = "test-execution-id",
    ) -> WorkflowNodeExecution:
        """Build a minimal domain execution carrying the given process_data."""
        return WorkflowNodeExecution(
            id=execution_id,
            workflow_id="test-workflow-id",
            index=1,
            node_id="test-node-id",
            node_type=BuiltinNodeTypes.LLM,
            title="Test Node",
            process_data=process_data,
            created_at=datetime.now(),
        )

    def test_to_domain_model_without_offload_data(self):
        """A row with no offload record keeps inline process_data, untruncated."""
        repo = self.create_repository()

        row = Mock(spec=WorkflowNodeExecutionModel)
        scalar_fields = {
            "id": "test-execution-id",
            "node_execution_id": "test-node-execution-id",
            "workflow_id": "test-workflow-id",
            "workflow_run_id": None,
            "index": 1,
            "predecessor_node_id": None,
            "node_id": "test-node-id",
            "node_type": "llm",
            "title": "Test Node",
            "status": "succeeded",
            "error": None,
            "elapsed_time": 1.5,
            "created_at": datetime.now(),
            "finished_at": None,
        }
        for field, value in scalar_fields.items():
            setattr(row, field, value)
        inline_process_data = {"normal": "data"}
        row.process_data_dict = inline_process_data
        row.inputs_dict = None
        row.outputs_dict = None
        row.execution_metadata_dict = {}
        # No offload record attached to this row.
        row.offload_data = None

        converted = repo._to_domain_model(row)

        # The inline payload survives conversion unchanged.
        assert converted.process_data == inline_process_data
        # And the model reports no truncation took place.
        assert converted.process_data_truncated is False
        assert converted.get_truncated_process_data() is None

File diff suppressed because it is too large Load Diff

View File

@ -1,825 +0,0 @@
"""
Comprehensive unit tests for DatasetPermissionService and DatasetService permission methods.
This module contains extensive unit tests for dataset permission management,
including partial member list operations, permission validation, and permission
enum handling.
The DatasetPermissionService provides methods for:
- Retrieving partial member permissions (get_dataset_partial_member_list)
- Updating partial member lists (update_partial_member_list)
- Validating permissions before operations (check_permission)
- Clearing partial member lists (clear_partial_member_list)
The DatasetService provides permission checking methods:
- check_dataset_permission - validates user access to dataset
- check_dataset_operator_permission - validates operator permissions
These operations are critical for dataset access control and security, ensuring
that users can only access datasets they have permission to view or modify.
This test suite ensures:
- Correct retrieval of partial member lists
- Proper update of partial member permissions
- Accurate permission validation logic
- Proper handling of permission enums (only_me, all_team_members, partial_members)
- Security boundaries are maintained
- Error conditions are handled correctly
================================================================================
ARCHITECTURE OVERVIEW
================================================================================
The Dataset permission system is a multi-layered access control mechanism
that provides fine-grained control over who can access and modify datasets.
1. Permission Levels:
- only_me: Only the dataset creator can access
- all_team_members: All members of the tenant can access
- partial_members: Only specific users listed in DatasetPermission can access
2. Permission Storage:
- Dataset.permission: Stores the permission level enum
- DatasetPermission: Stores individual user permissions for partial_members
- Each DatasetPermission record links a dataset to a user account
3. Permission Validation:
- Tenant-level checks: Users must be in the same tenant
- Role-based checks: OWNER role bypasses some restrictions
- Explicit permission checks: For partial_members, explicit DatasetPermission
records are required
4. Permission Operations:
- Partial member list management: Add/remove users from partial access
- Permission validation: Check before allowing operations
- Permission clearing: Remove all partial members when changing permission level
================================================================================
TESTING STRATEGY
================================================================================
This test suite follows a comprehensive testing strategy that covers:
1. Partial Member List Operations:
- Retrieving member lists
- Adding new members
- Updating existing members
- Removing members
- Empty list handling
2. Permission Validation:
- Dataset editor permissions
- Dataset operator restrictions
- Permission enum validation
- Partial member list validation
- Tenant isolation
3. Permission Enum Handling:
- only_me permission behavior
- all_team_members permission behavior
- partial_members permission behavior
- Permission transitions
- Edge cases for each enum value
4. Security and Access Control:
- Tenant boundary enforcement
- Role-based access control
- Creator privilege validation
- Explicit permission requirement
5. Error Handling:
- Invalid permission changes
- Missing required data
- Database transaction failures
- Permission denial scenarios
================================================================================
"""
from unittest.mock import Mock, create_autospec, patch
import pytest
from models import Account, TenantAccountRole
from models.dataset import (
Dataset,
DatasetPermission,
DatasetPermissionEnum,
)
from services.dataset_service import DatasetPermissionService, DatasetService
from services.errors.account import NoPermissionError
# ============================================================================
# Test Data Factory
# ============================================================================
# The Test Data Factory pattern is used here to centralize the creation of
# test objects and mock instances. This approach provides several benefits:
#
# 1. Consistency: All test objects are created using the same factory methods,
# ensuring consistent structure across all tests.
#
# 2. Maintainability: If the structure of models or services changes, we only
# need to update the factory methods rather than every individual test.
#
# 3. Reusability: Factory methods can be reused across multiple test classes,
# reducing code duplication.
#
# 4. Readability: Tests become more readable when they use descriptive factory
# method calls instead of complex object construction logic.
#
# ============================================================================
class DatasetPermissionTestDataFactory:
    """
    Factory helpers producing mock Datasets, users, DatasetPermissions, and
    partial-member lists for dataset-permission tests.

    Centralizing construction keeps tests consistent, avoids duplicated
    mock-building code, and means model-shape changes only touch this class.
    """

    @staticmethod
    def create_dataset_mock(
        dataset_id: str = "dataset-123",
        tenant_id: str = "tenant-123",
        permission: DatasetPermissionEnum = DatasetPermissionEnum.ONLY_ME,
        created_by: str = "user-123",
        name: str = "Test Dataset",
        **kwargs,
    ) -> Mock:
        """Return a Dataset mock; extra attributes arrive via **kwargs.

        Args:
            dataset_id: Unique identifier for the dataset.
            tenant_id: Tenant identifier.
            permission: Permission level enum.
            created_by: ID of the creating user.
            name: Dataset name.
            **kwargs: Additional attributes set on the mock (override base ones).
        """
        mock_dataset = Mock(spec=Dataset)
        attributes = {
            "id": dataset_id,
            "tenant_id": tenant_id,
            "permission": permission,
            "created_by": created_by,
            "name": name,
            **kwargs,
        }
        for attr, value in attributes.items():
            setattr(mock_dataset, attr, value)
        return mock_dataset

    @staticmethod
    def create_user_mock(
        user_id: str = "user-123",
        tenant_id: str = "tenant-123",
        role: TenantAccountRole = TenantAccountRole.NORMAL,
        is_dataset_editor: bool = True,
        is_dataset_operator: bool = False,
        **kwargs,
    ) -> Mock:
        """Return an autospecced Account mock with the given role flags.

        Args:
            user_id: Unique identifier for the user.
            tenant_id: Tenant identifier (becomes current_tenant_id).
            role: Tenant role (becomes current_role).
            is_dataset_editor: Whether the user may edit datasets.
            is_dataset_operator: Whether the user is a dataset operator.
            **kwargs: Additional attributes set on the mock.
        """
        mock_user = create_autospec(Account, instance=True)
        attributes = {
            "id": user_id,
            "current_tenant_id": tenant_id,
            "current_role": role,
            "is_dataset_editor": is_dataset_editor,
            "is_dataset_operator": is_dataset_operator,
            **kwargs,
        }
        for attr, value in attributes.items():
            setattr(mock_user, attr, value)
        return mock_user

    @staticmethod
    def create_dataset_permission_mock(
        permission_id: str = "permission-123",
        dataset_id: str = "dataset-123",
        account_id: str = "user-456",
        tenant_id: str = "tenant-123",
        has_permission: bool = True,
        **kwargs,
    ) -> Mock:
        """Return a DatasetPermission mock linking a dataset to an account.

        Args:
            permission_id: Unique identifier for the permission record.
            dataset_id: Dataset ID the permission applies to.
            account_id: User account ID granted (or denied) access.
            tenant_id: Tenant identifier.
            has_permission: Whether access is granted.
            **kwargs: Additional attributes set on the mock.
        """
        mock_permission = Mock(spec=DatasetPermission)
        attributes = {
            "id": permission_id,
            "dataset_id": dataset_id,
            "account_id": account_id,
            "tenant_id": tenant_id,
            "has_permission": has_permission,
            **kwargs,
        }
        for attr, value in attributes.items():
            setattr(mock_permission, attr, value)
        return mock_permission

    @staticmethod
    def create_user_list_mock(user_ids: list[str]) -> list[dict[str, str]]:
        """Wrap each user id in the {"user_id": ...} shape the service expects."""
        wrapped: list[dict[str, str]] = []
        for uid in user_ids:
            wrapped.append({"user_id": uid})
        return wrapped
# ============================================================================
# Tests for check_permission
# ============================================================================
class TestDatasetPermissionServiceCheckPermission:
"""
Comprehensive unit tests for DatasetPermissionService.check_permission method.
This test class covers the permission validation logic that ensures
users have the appropriate permissions to modify dataset permissions.
The check_permission method:
1. Validates user is a dataset editor
2. Checks if dataset operator is trying to change permissions
3. Validates partial member list when setting to partial_members
4. Ensures dataset operators cannot change permission levels
5. Ensures dataset operators cannot modify partial member lists
Test scenarios include:
- Valid permission changes by dataset editors
- Dataset operator restrictions
- Partial member list validation
- Missing dataset editor permissions
- Invalid permission changes
"""
@pytest.fixture
def mock_get_partial_member_list(self):
"""
Mock get_dataset_partial_member_list method.
Provides a mocked version of the get_dataset_partial_member_list
method for testing permission validation logic.
"""
with patch.object(DatasetPermissionService, "get_dataset_partial_member_list") as mock_get_list:
yield mock_get_list
def test_check_permission_dataset_editor_success(self, mock_get_partial_member_list):
"""
Test successful permission check for dataset editor.
Verifies that when a dataset editor (not operator) tries to
change permissions, the check passes.
This test ensures:
- Dataset editors can change permissions
- No errors are raised for valid changes
- Partial member list validation is skipped for non-operators
"""
# Arrange
user = DatasetPermissionTestDataFactory.create_user_mock(is_dataset_editor=True, is_dataset_operator=False)
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.ONLY_ME)
requested_permission = DatasetPermissionEnum.ALL_TEAM
requested_partial_member_list = None
# Act (should not raise)
DatasetPermissionService.check_permission(user, dataset, requested_permission, requested_partial_member_list)
# Assert
# Verify get_partial_member_list was not called (not needed for non-operators)
mock_get_partial_member_list.assert_not_called()
def test_check_permission_not_dataset_editor_error(self):
"""
Test error when user is not a dataset editor.
Verifies that when a user without dataset editor permissions
tries to change permissions, a NoPermissionError is raised.
This test ensures:
- Non-editors cannot change permissions
- Error message is clear
- Error type is correct
"""
# Arrange
user = DatasetPermissionTestDataFactory.create_user_mock(is_dataset_editor=False)
dataset = DatasetPermissionTestDataFactory.create_dataset_mock()
requested_permission = DatasetPermissionEnum.ALL_TEAM
requested_partial_member_list = None
# Act & Assert
with pytest.raises(NoPermissionError, match="User does not have permission to edit this dataset"):
DatasetPermissionService.check_permission(
user, dataset, requested_permission, requested_partial_member_list
)
def test_check_permission_operator_cannot_change_permission_error(self):
"""
Test error when dataset operator tries to change permission level.
Verifies that when a dataset operator tries to change the permission
level, a NoPermissionError is raised.
This test ensures:
- Dataset operators cannot change permission levels
- Error message is clear
- Current permission is preserved
"""
# Arrange
user = DatasetPermissionTestDataFactory.create_user_mock(is_dataset_editor=True, is_dataset_operator=True)
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.ONLY_ME)
requested_permission = DatasetPermissionEnum.ALL_TEAM # Trying to change
requested_partial_member_list = None
# Act & Assert
with pytest.raises(NoPermissionError, match="Dataset operators cannot change the dataset permissions"):
DatasetPermissionService.check_permission(
user, dataset, requested_permission, requested_partial_member_list
)
def test_check_permission_operator_partial_members_missing_list_error(self, mock_get_partial_member_list):
"""
Test error when operator sets partial_members without providing list.
Verifies that when a dataset operator tries to set permission to
partial_members without providing a member list, a ValueError is raised.
This test ensures:
- Partial member list is required for partial_members permission
- Error message is clear
- Error type is correct
"""
# Arrange
user = DatasetPermissionTestDataFactory.create_user_mock(is_dataset_editor=True, is_dataset_operator=True)
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.PARTIAL_TEAM)
requested_permission = "partial_members"
requested_partial_member_list = None # Missing list
# Act & Assert
with pytest.raises(ValueError, match="Partial member list is required when setting to partial members"):
DatasetPermissionService.check_permission(
user, dataset, requested_permission, requested_partial_member_list
)
def test_check_permission_operator_cannot_modify_partial_list_error(self, mock_get_partial_member_list):
"""
Test error when operator tries to modify partial member list.
Verifies that when a dataset operator tries to change the partial
member list, a ValueError is raised.
This test ensures:
- Dataset operators cannot modify partial member lists
- Error message is clear
- Current member list is preserved
"""
# Arrange
user = DatasetPermissionTestDataFactory.create_user_mock(is_dataset_editor=True, is_dataset_operator=True)
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.PARTIAL_TEAM)
requested_permission = "partial_members"
# Current member list
current_member_list = ["user-456", "user-789"]
mock_get_partial_member_list.return_value = current_member_list
# Requested member list (different from current)
requested_partial_member_list = DatasetPermissionTestDataFactory.create_user_list_mock(
["user-456", "user-999"] # Different list
)
# Act & Assert
with pytest.raises(ValueError, match="Dataset operators cannot change the dataset permissions"):
DatasetPermissionService.check_permission(
user, dataset, requested_permission, requested_partial_member_list
)
def test_check_permission_operator_can_keep_same_partial_list(self, mock_get_partial_member_list):
    """
    A dataset operator may resubmit the unchanged partial member list.

    When the requested list matches the stored one exactly, the check
    passes without raising and the stored list is fetched for comparison.
    """
    operator = DatasetPermissionTestDataFactory.create_user_mock(
        is_dataset_editor=True, is_dataset_operator=True
    )
    partial_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
        permission=DatasetPermissionEnum.PARTIAL_TEAM
    )
    unchanged_ids = ["user-456", "user-789"]
    mock_get_partial_member_list.return_value = unchanged_ids
    same_members = DatasetPermissionTestDataFactory.create_user_list_mock(list(unchanged_ids))

    # No exception expected for an identical list.
    DatasetPermissionService.check_permission(operator, partial_dataset, "partial_members", same_members)

    # The service must have loaded the stored list to perform the comparison.
    mock_get_partial_member_list.assert_called_once_with(partial_dataset.id)
# ============================================================================
# Tests for DatasetService.check_dataset_permission
# ============================================================================
class TestDatasetServiceCheckDatasetPermission:
    """
    Unit tests for DatasetService.check_dataset_permission.

    The method enforces dataset access rules by:
    1. Requiring the user and dataset to share a tenant
    2. Letting OWNER-role users through unconditionally
    3. Restricting only_me datasets to their creator
    4. Requiring creator status (or an explicit grant) for partial_members
    5. Allowing every tenant member for all_team_members

    The tests below cover each rule plus the corresponding denial paths.
    """

    @pytest.fixture
    def mock_db_session(self):
        """Patch the service-level DB session so permission queries can be asserted on."""
        with patch("services.dataset_service.db.session") as patched_session:
            yield patched_session

    def test_check_dataset_permission_owner_bypass(self, mock_db_session):
        """An OWNER accesses any same-tenant dataset, with zero DB lookups."""
        owner = DatasetPermissionTestDataFactory.create_user_mock(
            role=TenantAccountRole.OWNER, tenant_id="tenant-123"
        )
        locked_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="other-user-123",  # deliberately not the acting user
        )

        # Should complete without raising despite the ONLY_ME restriction.
        DatasetService.check_dataset_permission(locked_dataset, owner)

        # OWNER short-circuits before any permission query is issued.
        mock_db_session.query.assert_not_called()

    def test_check_dataset_permission_tenant_mismatch_error(self):
        """Access across tenant boundaries is denied outright."""
        outsider = DatasetPermissionTestDataFactory.create_user_mock(tenant_id="tenant-123")
        foreign_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(tenant_id="tenant-456")

        with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset"):
            DatasetService.check_dataset_permission(foreign_dataset, outsider)

    def test_check_dataset_permission_only_me_creator_success(self):
        """The creator of an only_me dataset may access it."""
        creator = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        private_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="user-123",  # same id as the acting user
        )

        # No exception expected for the creator.
        DatasetService.check_dataset_permission(private_dataset, creator)

    def test_check_dataset_permission_only_me_non_creator_error(self):
        """A non-creator is denied access to an only_me dataset."""
        stranger = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        private_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="other-user-456",  # someone else created it
        )

        with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset"):
            DatasetService.check_dataset_permission(private_dataset, stranger)

    def test_check_dataset_permission_partial_members_creator_success(self, mock_db_session):
        """The creator of a partial_members dataset needs no explicit grant."""
        creator = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        shared_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.PARTIAL_TEAM,
            created_by="user-123",  # acting user is the creator
        )

        # Should pass without consulting explicit permission records.
        DatasetService.check_dataset_permission(shared_dataset, creator)

        # Creator status short-circuits the permission-record query.
        mock_db_session.query.assert_not_called()

    def test_check_dataset_permission_all_team_members_success(self):
        """Any same-tenant member may access an all_team_members dataset."""
        teammate = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        team_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ALL_TEAM,
            created_by="other-user-456",  # creator is irrelevant here
        )

        # No exception expected for a tenant member.
        DatasetService.check_dataset_permission(team_dataset, teammate)
# ============================================================================
# Tests for DatasetService.check_dataset_operator_permission
# ============================================================================
class TestDatasetServiceCheckDatasetOperatorPermission:
    """
    Unit tests for DatasetService.check_dataset_operator_permission.

    The method guards dataset access for dataset operators by:
    1. Rejecting a missing dataset or user with ValueError
    2. Letting OWNER-role users through unconditionally
    3. Restricting only_me datasets to their creator
    4. Requiring an explicit grant for partial_members datasets
    """

    @pytest.fixture
    def mock_db_session(self):
        """Patch the service-level DB session so permission queries can be asserted on."""
        with patch("services.dataset_service.db.session") as patched_session:
            yield patched_session

    def test_check_dataset_operator_permission_dataset_not_found_error(self):
        """A None dataset is rejected with a clear ValueError."""
        some_user = DatasetPermissionTestDataFactory.create_user_mock()

        with pytest.raises(ValueError, match="Dataset not found"):
            DatasetService.check_dataset_operator_permission(user=some_user, dataset=None)

    def test_check_dataset_operator_permission_user_not_found_error(self):
        """A None user is rejected with a clear ValueError."""
        some_dataset = DatasetPermissionTestDataFactory.create_dataset_mock()

        with pytest.raises(ValueError, match="User not found"):
            DatasetService.check_dataset_operator_permission(user=None, dataset=some_dataset)

    def test_check_dataset_operator_permission_owner_bypass(self):
        """An OWNER accesses any same-tenant dataset regardless of permission level."""
        owner = DatasetPermissionTestDataFactory.create_user_mock(
            role=TenantAccountRole.OWNER, tenant_id="tenant-123"
        )
        locked_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="other-user-123",  # deliberately not the acting user
        )

        # Should complete without raising.
        DatasetService.check_dataset_operator_permission(user=owner, dataset=locked_dataset)

    def test_check_dataset_operator_permission_only_me_creator_success(self):
        """The creator of an only_me dataset may access it."""
        creator = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        private_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="user-123",  # same id as the acting user
        )

        # No exception expected for the creator.
        DatasetService.check_dataset_operator_permission(user=creator, dataset=private_dataset)

    def test_check_dataset_operator_permission_only_me_non_creator_error(self):
        """A non-creator is denied access to an only_me dataset."""
        stranger = DatasetPermissionTestDataFactory.create_user_mock(
            user_id="user-123", role=TenantAccountRole.NORMAL
        )
        private_dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
            tenant_id="tenant-123",
            permission=DatasetPermissionEnum.ONLY_ME,
            created_by="other-user-456",  # someone else created it
        )

        with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset"):
            DatasetService.check_dataset_operator_permission(user=stranger, dataset=private_dataset)
# ============================================================================
# Additional Documentation and Notes
# ============================================================================
#
# This test suite covers the core permission management operations for datasets.
# Additional test scenarios that could be added:
#
# 1. Permission Enum Transitions:
# - Testing transitions between permission levels
# - Testing validation during transitions
# - Testing partial member list updates during transitions
#
# 2. Bulk Operations:
# - Testing bulk permission updates
# - Testing bulk partial member list updates
# - Testing performance with large member lists
#
# 3. Edge Cases:
# - Testing with very large partial member lists
# - Testing with special characters in user IDs
# - Testing with deleted users
# - Testing with inactive permissions
#
# 4. Integration Scenarios:
# - Testing permission changes followed by access attempts
# - Testing concurrent permission updates
# - Testing permission inheritance
#
# These scenarios are not currently implemented but could be added if needed
# based on real-world usage patterns or discovered edge cases.
#
# ============================================================================

View File

@ -1,818 +0,0 @@
"""
Comprehensive unit tests for DatasetService update and delete operations.
This module contains extensive unit tests for the DatasetService class,
specifically focusing on update and delete operations for datasets.
The DatasetService provides methods for:
- Updating dataset configuration and settings (update_dataset)
- Deleting datasets with proper cleanup (delete_dataset)
- Updating RAG pipeline dataset settings (update_rag_pipeline_dataset_settings)
- Checking if dataset is in use (dataset_use_check)
- Updating dataset API access status (update_dataset_api_status)
These operations are critical for dataset lifecycle management and require
careful handling of permissions, dependencies, and data integrity.
This test suite ensures:
- Correct update of dataset properties
- Proper permission validation before updates/deletes
- Cascade deletion handling
- Event signaling for cleanup operations
- RAG pipeline dataset configuration updates
- API status management
- Use check validation
================================================================================
ARCHITECTURE OVERVIEW
================================================================================
The DatasetService update and delete operations are part of the dataset
lifecycle management system. These operations interact with multiple
components:
1. Permission System: All update/delete operations require proper
permission validation to ensure users can only modify datasets they
have access to.
2. Event System: Dataset deletion triggers the dataset_was_deleted event,
which notifies other components to clean up related data (documents,
segments, vector indices, etc.).
3. Dependency Checking: Before deletion, the system checks if the dataset
is in use by any applications (via AppDatasetJoin).
4. RAG Pipeline Integration: RAG pipeline datasets have special update
logic that handles chunk structure, indexing techniques, and embedding
model configuration.
5. API Status Management: Datasets can have their API access enabled or
disabled, which affects whether they can be accessed via the API.
================================================================================
TESTING STRATEGY
================================================================================
This test suite follows a comprehensive testing strategy that covers:
1. Update Operations:
- Internal dataset updates
- External dataset updates
- RAG pipeline dataset updates
- Permission validation
- Name duplicate checking
- Configuration validation
2. Delete Operations:
- Successful deletion
- Permission validation
- Event signaling
- Database cleanup
- Not found handling
3. Use Check Operations:
- Dataset in use detection
- Dataset not in use detection
- AppDatasetJoin query validation
4. API Status Operations:
- Enable API access
- Disable API access
- Permission validation
- Current user validation
5. RAG Pipeline Operations:
- Unpublished dataset updates
- Published dataset updates
- Chunk structure validation
- Indexing technique changes
- Embedding model configuration
================================================================================
"""
import datetime
from unittest.mock import Mock, create_autospec, patch
import pytest
from sqlalchemy.orm import Session
from core.rag.index_processor.constant.index_type import IndexTechniqueType
from models import Account, TenantAccountRole
from models.dataset import (
AppDatasetJoin,
Dataset,
DatasetPermissionEnum,
)
from services.dataset_service import DatasetService
from services.errors.account import NoPermissionError
# ============================================================================
# Test Data Factory
# ============================================================================
# The Test Data Factory pattern is used here to centralize the creation of
# test objects and mock instances. This approach provides several benefits:
#
# 1. Consistency: All test objects are created using the same factory methods,
# ensuring consistent structure across all tests.
#
# 2. Maintainability: If the structure of models or services changes, we only
# need to update the factory methods rather than every individual test.
#
# 3. Reusability: Factory methods can be reused across multiple test classes,
# reducing code duplication.
#
# 4. Readability: Tests become more readable when they use descriptive factory
# method calls instead of complex object construction logic.
#
# ============================================================================
class DatasetUpdateDeleteTestDataFactory:
    """
    Factory for the mock objects used by dataset update/delete tests.

    Centralizes construction of Dataset, Account, KnowledgeConfiguration and
    AppDatasetJoin mocks so individual tests stay short and all scenarios
    share a consistent object shape. In every factory method, extra keyword
    arguments are applied last and may override the named defaults.
    """

    @staticmethod
    def create_dataset_mock(
        dataset_id: str = "dataset-123",
        provider: str = "vendor",
        name: str = "Test Dataset",
        description: str = "Test description",
        tenant_id: str = "tenant-123",
        indexing_technique: str = IndexTechniqueType.HIGH_QUALITY,
        embedding_model_provider: str | None = "openai",
        embedding_model: str | None = "text-embedding-ada-002",
        collection_binding_id: str | None = "binding-123",
        enable_api: bool = True,
        permission: DatasetPermissionEnum = DatasetPermissionEnum.ONLY_ME,
        created_by: str = "user-123",
        chunk_structure: str | None = None,
        runtime_mode: str = "general",
        **kwargs,
    ) -> Mock:
        """
        Build a Mock speced as a Dataset with the given attribute values.

        Returns:
            Mock configured as a Dataset instance, with retrieval_model
            defaulting to an empty dict and keyword_number to 10.
        """
        attributes = {
            "id": dataset_id,
            "provider": provider,
            "name": name,
            "description": description,
            "tenant_id": tenant_id,
            "indexing_technique": indexing_technique,
            "embedding_model_provider": embedding_model_provider,
            "embedding_model": embedding_model,
            "collection_binding_id": collection_binding_id,
            "enable_api": enable_api,
            "permission": permission,
            "created_by": created_by,
            "chunk_structure": chunk_structure,
            "runtime_mode": runtime_mode,
            "retrieval_model": {},
            "keyword_number": 10,
        }
        attributes.update(kwargs)  # caller-supplied extras win over defaults
        dataset = Mock(spec=Dataset)
        for attr_name, attr_value in attributes.items():
            setattr(dataset, attr_name, attr_value)
        return dataset

    @staticmethod
    def create_user_mock(
        user_id: str = "user-123",
        tenant_id: str = "tenant-123",
        role: TenantAccountRole = TenantAccountRole.NORMAL,
        is_dataset_editor: bool = True,
        **kwargs,
    ) -> Mock:
        """
        Build an autospec'd Account mock with tenant context and role set.

        Returns:
            Mock configured as an Account instance.
        """
        account = create_autospec(Account, instance=True)
        account.id = user_id
        account.current_tenant_id = tenant_id
        account.current_role = role
        account.is_dataset_editor = is_dataset_editor
        for attr_name, attr_value in kwargs.items():
            setattr(account, attr_name, attr_value)
        return account

    @staticmethod
    def create_knowledge_configuration_mock(
        chunk_structure: str = "tree",
        indexing_technique: str = IndexTechniqueType.HIGH_QUALITY,
        embedding_model_provider: str = "openai",
        embedding_model: str = "text-embedding-ada-002",
        keyword_number: int = 10,
        retrieval_model: dict | None = None,
        **kwargs,
    ) -> Mock:
        """
        Build a KnowledgeConfiguration mock for RAG pipeline update tests.

        The nested retrieval_model mock's ``model_dump()`` returns either
        the supplied dict or a semantic-search default.
        """
        retrieval_mock = Mock()
        retrieval_mock.model_dump.return_value = retrieval_model or {
            "search_method": "semantic_search",
            "top_k": 2,
        }
        config = Mock()
        config.chunk_structure = chunk_structure
        config.indexing_technique = indexing_technique
        config.embedding_model_provider = embedding_model_provider
        config.embedding_model = embedding_model
        config.keyword_number = keyword_number
        config.retrieval_model = retrieval_mock
        for attr_name, attr_value in kwargs.items():
            setattr(config, attr_name, attr_value)
        return config

    @staticmethod
    def create_app_dataset_join_mock(
        app_id: str = "app-123",
        dataset_id: str = "dataset-123",
        **kwargs,
    ) -> Mock:
        """
        Build a Mock speced as an AppDatasetJoin linking an app to a dataset.

        Returns:
            Mock configured as an AppDatasetJoin instance.
        """
        link = Mock(spec=AppDatasetJoin)
        link.app_id = app_id
        link.dataset_id = dataset_id
        for attr_name, attr_value in kwargs.items():
            setattr(link, attr_name, attr_value)
        return link
# ============================================================================
# Tests for update_dataset
# ============================================================================
class TestDatasetServiceUpdateDataset:
    """
    Unit tests for DatasetService.update_dataset.

    update_dataset looks the dataset up by id, rejects duplicate names,
    validates the caller's permission, then dispatches to the internal or
    external update handler depending on the dataset's provider. The tests
    cover both happy paths and the not-found / duplicate-name / permission
    failure modes.
    """

    @pytest.fixture
    def mock_dataset_service_dependencies(self):
        """
        Patch the collaborators update_dataset relies on.

        Yields a dict of the patched objects plus the frozen "current time"
        so tests can configure return values and assert on calls.
        """
        with (
            patch("services.dataset_service.DatasetService.get_dataset") as patched_get_dataset,
            patch("services.dataset_service.DatasetService.check_dataset_permission") as patched_check_perm,
            patch("services.dataset_service.DatasetService._has_dataset_same_name") as patched_same_name,
            patch("extensions.ext_database.db.session") as patched_db,
            patch("services.dataset_service.naive_utc_now") as patched_now,
        ):
            frozen_time = datetime.datetime(2023, 1, 1, 12, 0, 0)
            patched_now.return_value = frozen_time
            yield {
                "get_dataset": patched_get_dataset,
                "check_permission": patched_check_perm,
                "has_same_name": patched_same_name,
                "db_session": patched_db,
                "naive_utc_now": patched_now,
                "current_time": frozen_time,
            }

    def test_update_dataset_internal_success(self, mock_dataset_service_dependencies):
        """A vendor dataset is routed through _update_internal_dataset."""
        deps = mock_dataset_service_dependencies
        target_id = "dataset-123"
        vendor_dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(
            dataset_id=target_id, provider="vendor", name="Old Name"
        )
        editor = DatasetUpdateDeleteTestDataFactory.create_user_mock()
        payload = {"name": "New Name", "description": "New Description"}
        deps["get_dataset"].return_value = vendor_dataset
        deps["has_same_name"].return_value = False

        with patch("services.dataset_service.DatasetService._update_internal_dataset") as patched_internal:
            patched_internal.return_value = vendor_dataset

            updated = DatasetService.update_dataset(target_id, payload, editor)

            assert updated == vendor_dataset
            # Lookup, permission check and duplicate-name check all ran once.
            deps["get_dataset"].assert_called_once_with(target_id)
            deps["check_permission"].assert_called_once_with(vendor_dataset, editor)
            deps["has_same_name"].assert_called_once()
            # The vendor provider routes to the internal handler.
            patched_internal.assert_called_once()

    def test_update_dataset_external_success(self, mock_dataset_service_dependencies):
        """An external dataset is routed through _update_external_dataset."""
        deps = mock_dataset_service_dependencies
        target_id = "dataset-123"
        external_dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(
            dataset_id=target_id, provider="external", name="Old Name"
        )
        editor = DatasetUpdateDeleteTestDataFactory.create_user_mock()
        payload = {"name": "New Name", "external_knowledge_id": "new-knowledge-id"}
        deps["get_dataset"].return_value = external_dataset
        deps["has_same_name"].return_value = False

        with patch("services.dataset_service.DatasetService._update_external_dataset") as patched_external:
            patched_external.return_value = external_dataset

            updated = DatasetService.update_dataset(target_id, payload, editor)

            assert updated == external_dataset
            patched_external.assert_called_once()

    def test_update_dataset_not_found_error(self, mock_dataset_service_dependencies):
        """An unknown dataset id raises ValueError before any validation runs."""
        deps = mock_dataset_service_dependencies
        editor = DatasetUpdateDeleteTestDataFactory.create_user_mock()
        deps["get_dataset"].return_value = None  # lookup misses

        with pytest.raises(ValueError, match="Dataset not found"):
            DatasetService.update_dataset("non-existent-dataset", {"name": "New Name"}, editor)

        # Neither validation step should have been reached.
        deps["check_permission"].assert_not_called()
        deps["has_same_name"].assert_not_called()

    def test_update_dataset_duplicate_name_error(self, mock_dataset_service_dependencies):
        """A name collision raises ValueError before the permission check."""
        deps = mock_dataset_service_dependencies
        target_id = "dataset-123"
        existing_dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(dataset_id=target_id)
        editor = DatasetUpdateDeleteTestDataFactory.create_user_mock()
        deps["get_dataset"].return_value = existing_dataset
        deps["has_same_name"].return_value = True  # simulate a same-named dataset in the tenant

        with pytest.raises(ValueError, match="Dataset name already exists"):
            DatasetService.update_dataset(target_id, {"name": "Existing Name"}, editor)

        # The duplicate-name failure happens before permissions are examined.
        deps["check_permission"].assert_not_called()

    def test_update_dataset_permission_denied_error(self, mock_dataset_service_dependencies):
        """A permission failure propagates as NoPermissionError."""
        deps = mock_dataset_service_dependencies
        target_id = "dataset-123"
        guarded_dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(dataset_id=target_id)
        editor = DatasetUpdateDeleteTestDataFactory.create_user_mock()
        deps["get_dataset"].return_value = guarded_dataset
        deps["has_same_name"].return_value = False
        deps["check_permission"].side_effect = NoPermissionError("No permission")

        with pytest.raises(NoPermissionError):
            DatasetService.update_dataset(target_id, {"name": "New Name"}, editor)
# ============================================================================
# Tests for update_rag_pipeline_dataset_settings
# ============================================================================
class TestDatasetServiceUpdateRagPipelineDatasetSettings:
"""
Comprehensive unit tests for DatasetService.update_rag_pipeline_dataset_settings method.
This test class covers the RAG pipeline dataset settings update functionality,
including chunk structure, indexing technique, and embedding model configuration.
The update_rag_pipeline_dataset_settings method:
1. Validates current_user and tenant
2. Merges dataset into session
3. Handles unpublished vs published datasets differently
4. Updates chunk structure, indexing technique, and retrieval model
5. Configures embedding model for high_quality indexing
6. Updates keyword_number for economy indexing
7. Commits transaction
8. Triggers index update tasks if needed
Test scenarios include:
- Unpublished dataset updates
- Published dataset updates
- Chunk structure validation
- Indexing technique changes
- Embedding model configuration
- Error handling
"""
@pytest.fixture
def mock_session(self):
    """Provide a Mock speced as a SQLAlchemy Session for session-level calls."""
    fake_session = Mock(spec=Session)
    return fake_session
@pytest.fixture
def mock_dataset_service_dependencies(self):
    """
    Patch the collaborators of update_rag_pipeline_dataset_settings.

    Replaces the current_user context with an autospec'd Account (tenant
    "tenant-123", id "user-123"), plus the model manager, the collection
    binding lookup and the index-update task. Yields the patched objects
    in a dict keyed by role.
    """
    with (
        patch(
            "services.dataset_service.current_user", create_autospec(Account, instance=True)
        ) as fake_user,
        patch("services.dataset_service.ModelManager.for_tenant") as fake_model_manager,
        patch(
            "services.dataset_service.DatasetCollectionBindingService.get_dataset_collection_binding"
        ) as fake_get_binding,
        patch("services.dataset_service.deal_dataset_index_update_task") as fake_task,
    ):
        fake_user.current_tenant_id = "tenant-123"
        fake_user.id = "user-123"
        yield {
            "current_user": fake_user,
            "model_manager": fake_model_manager,
            "get_binding": fake_get_binding,
            "task": fake_task,
        }
def test_update_rag_pipeline_dataset_settings_unpublished_success(
self, mock_session, mock_dataset_service_dependencies
):
"""
Test successful update of unpublished RAG pipeline dataset.
Verifies that when a dataset is not published, all settings can
be updated including chunk structure and indexing technique.
This test ensures:
- Current user validation passes
- Dataset is merged into session
- Chunk structure is updated
- Indexing technique is updated
- Embedding model is configured for high_quality
- Retrieval model is updated
- Dataset is added to session
"""
# Arrange
dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(
dataset_id="dataset-123",
runtime_mode="rag_pipeline",
chunk_structure="tree",
indexing_technique=IndexTechniqueType.HIGH_QUALITY,
)
knowledge_config = DatasetUpdateDeleteTestDataFactory.create_knowledge_configuration_mock(
chunk_structure="list",
indexing_technique=IndexTechniqueType.HIGH_QUALITY,
embedding_model_provider="openai",
embedding_model="text-embedding-ada-002",
)
# Mock embedding model
mock_embedding_model = Mock()
mock_embedding_model.model_name = "text-embedding-ada-002"
mock_embedding_model.provider = "openai"
mock_embedding_model.credentials = {}
mock_model_schema = Mock()
mock_model_schema.features = []
mock_text_embedding_model = Mock()
mock_text_embedding_model.get_model_schema.return_value = mock_model_schema
mock_embedding_model.model_type_instance = mock_text_embedding_model
mock_model_instance = Mock()
mock_model_instance.get_model_instance.return_value = mock_embedding_model
mock_dataset_service_dependencies["model_manager"].return_value = mock_model_instance
# Mock collection binding
mock_binding = Mock()
mock_binding.id = "binding-123"
mock_dataset_service_dependencies["get_binding"].return_value = mock_binding
mock_session.merge.return_value = dataset
# Act
DatasetService.update_rag_pipeline_dataset_settings(
mock_session, dataset, knowledge_config, has_published=False
)
# Assert
assert dataset.chunk_structure == "list"
assert dataset.indexing_technique == IndexTechniqueType.HIGH_QUALITY
assert dataset.embedding_model == "text-embedding-ada-002"
assert dataset.embedding_model_provider == "openai"
assert dataset.collection_binding_id == "binding-123"
# Verify dataset was added to session
mock_session.add.assert_called_once_with(dataset)
def test_update_rag_pipeline_dataset_settings_published_chunk_structure_error(
self, mock_session, mock_dataset_service_dependencies
):
"""
Test error handling when trying to update chunk structure of published dataset.
Verifies that when a dataset is published and has an existing chunk structure,
attempting to change it raises a ValueError.
This test ensures:
- Chunk structure change is detected
- ValueError is raised with appropriate message
- No updates are committed
"""
# Arrange
dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(
dataset_id="dataset-123",
runtime_mode="rag_pipeline",
chunk_structure="tree", # Existing structure
indexing_technique=IndexTechniqueType.HIGH_QUALITY,
)
knowledge_config = DatasetUpdateDeleteTestDataFactory.create_knowledge_configuration_mock(
chunk_structure="list", # Different structure
indexing_technique=IndexTechniqueType.HIGH_QUALITY,
)
mock_session.merge.return_value = dataset
# Act & Assert
with pytest.raises(ValueError, match="Chunk structure is not allowed to be updated"):
DatasetService.update_rag_pipeline_dataset_settings(
mock_session, dataset, knowledge_config, has_published=True
)
# Verify no commit was attempted
mock_session.commit.assert_not_called()
def test_update_rag_pipeline_dataset_settings_published_economy_error(
self, mock_session, mock_dataset_service_dependencies
):
"""
Test error handling when trying to change to economy indexing on published dataset.
Verifies that when a dataset is published, changing indexing technique to
economy is not allowed and raises a ValueError.
This test ensures:
- Economy indexing change is detected
- ValueError is raised with appropriate message
- No updates are committed
"""
# Arrange
dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock(
dataset_id="dataset-123",
runtime_mode="rag_pipeline",
indexing_technique=IndexTechniqueType.HIGH_QUALITY, # Current technique
)
knowledge_config = DatasetUpdateDeleteTestDataFactory.create_knowledge_configuration_mock(
indexing_technique=IndexTechniqueType.ECONOMY, # Trying to change to economy
)
mock_session.merge.return_value = dataset
# Act & Assert
with pytest.raises(
ValueError, match="Knowledge base indexing technique is not allowed to be updated to economy"
):
DatasetService.update_rag_pipeline_dataset_settings(
mock_session, dataset, knowledge_config, has_published=True
)
def test_update_rag_pipeline_dataset_settings_missing_current_user_error(
self, mock_session, mock_dataset_service_dependencies
):
"""
Test error handling when current_user is missing.
Verifies that when current_user is None or has no tenant ID, a ValueError
is raised.
This test ensures:
- Current user validation works correctly
- Error message is clear
- No updates are performed
"""
# Arrange
dataset = DatasetUpdateDeleteTestDataFactory.create_dataset_mock()
knowledge_config = DatasetUpdateDeleteTestDataFactory.create_knowledge_configuration_mock()
mock_dataset_service_dependencies["current_user"].current_tenant_id = None # Missing tenant
# Act & Assert
with pytest.raises(ValueError, match="Current user or current tenant not found"):
DatasetService.update_rag_pipeline_dataset_settings(
mock_session, dataset, knowledge_config, has_published=False
)
# ============================================================================
# Additional Documentation and Notes
# ============================================================================
#
# This test suite covers the core update and delete operations for datasets.
# Additional test scenarios that could be added:
#
# 1. Update Operations:
# - Testing with different indexing techniques
# - Testing embedding model provider changes
# - Testing retrieval model updates
# - Testing icon_info updates
# - Testing partial_member_list updates
#
# 2. Delete Operations:
# - Testing cascade deletion of related data
# - Testing event handler execution
# - Testing with datasets that have documents
# - Testing with datasets that have segments
#
# 3. RAG Pipeline Operations:
# - Testing economy indexing technique updates
# - Testing embedding model provider errors
# - Testing keyword_number updates
# - Testing index update task triggering
#
# 4. Integration Scenarios:
# - Testing update followed by delete
# - Testing multiple updates in sequence
# - Testing concurrent update attempts
# - Testing with different user roles
#
# These scenarios are not currently implemented but could be added if needed
# based on real-world usage patterns or discovered edge cases.
#
# ============================================================================

6
api/uv.lock generated
View File

@ -4763,11 +4763,11 @@ wheels = [
[[package]]
name = "pypdf"
version = "6.9.2"
version = "6.10.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/31/83/691bdb309306232362503083cb15777491045dd54f45393a317dc7d8082f/pypdf-6.9.2.tar.gz", hash = "sha256:7f850faf2b0d4ab936582c05da32c52214c2b089d61a316627b5bfb5b0dab46c", size = 5311837, upload-time = "2026-03-23T14:53:27.983Z" }
sdist = { url = "https://files.pythonhosted.org/packages/b8/9f/ca96abf18683ca12602065e4ed2bec9050b672c87d317f1079abc7b6d993/pypdf-6.10.0.tar.gz", hash = "sha256:4c5a48ba258c37024ec2505f7e8fd858525f5502784a2e1c8d415604af29f6ef", size = 5314833, upload-time = "2026-04-10T09:34:57.102Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a5/7e/c85f41243086a8fe5d1baeba527cb26a1918158a565932b41e0f7c0b32e9/pypdf-6.9.2-py3-none-any.whl", hash = "sha256:662cf29bcb419a36a1365232449624ab40b7c2d0cfc28e54f42eeecd1fd7e844", size = 333744, upload-time = "2026-03-23T14:53:26.573Z" },
{ url = "https://files.pythonhosted.org/packages/55/f2/7ebe366f633f30a6ad105f650f44f24f98cb1335c4157d21ae47138b3482/pypdf-6.10.0-py3-none-any.whl", hash = "sha256:90005e959e1596c6e6c84c8b0ad383285b3e17011751cedd17f2ce8fcdfc86de", size = 334459, upload-time = "2026-04-10T09:34:54.966Z" },
]
[[package]]

View File

@ -165,137 +165,3 @@ Open the HTML report locally with:
```bash
open cucumber-report/report.html
```
## Writing new scenarios
### Workflow
1. Create a `.feature` file under `features/<capability>/`
2. Add step definitions under `features/step-definitions/<capability>/`
3. Reuse existing steps from `common/` and other definition files before writing new ones
4. Run with `pnpm -C e2e e2e -- --tags @your-tag` to verify
5. Run `pnpm -C e2e check` before committing
### Feature file conventions
Tag every feature with a capability tag and an auth tag:
```gherkin
@datasets @authenticated
Feature: Create dataset
Scenario: Create a new empty dataset
Given I am signed in as the default E2E admin
When I open the datasets page
...
```
- Capability tags (`@apps`, `@auth`, `@datasets`, …) group related scenarios for selective runs
- Auth tags control the `Before` hook behavior:
- `@authenticated` — injects the shared auth storageState into the BrowserContext
- `@unauthenticated` — uses a clean BrowserContext with no cookies or storage
- `@fresh` — only runs in `e2e:full` mode (requires uninitialized instance)
- `@skip` — excluded from all runs
Keep scenarios short and declarative. Each step should describe **what** the user does, not **how** the UI works.
### Step definition conventions
```typescript
import { When, Then } from '@cucumber/cucumber'
import { expect } from '@playwright/test'
import type { DifyWorld } from '../../support/world'
When('I open the datasets page', async function (this: DifyWorld) {
await this.getPage().goto('/datasets')
})
```
Rules:
- Always type `this` as `DifyWorld` for proper context access
- Use `async function` (not arrow functions — Cucumber sets `this` to the World instance, and arrow functions would discard that binding)
- One step = one user-visible action or one assertion
- Keep steps stateless across scenarios; use `DifyWorld` properties for in-scenario state
### Locator priority
Follow the Playwright recommended locator strategy, in order of preference:
| Priority | Locator | Example | When to use |
|---|---|---|---|
| 1 | `getByRole` | `getByRole('button', { name: 'Create' })` | Default choice — accessible and resilient |
| 2 | `getByLabel` | `getByLabel('App name')` | Form inputs with visible labels |
| 3 | `getByPlaceholder` | `getByPlaceholder('Enter name')` | Inputs without visible labels |
| 4 | `getByText` | `getByText('Welcome')` | Static text content |
| 5 | `getByTestId` | `getByTestId('workflow-canvas')` | Only when no semantic locator works |
Avoid raw CSS/XPath selectors. They break when the DOM structure changes.
### Assertions
Use `@playwright/test` `expect` — it auto-waits and retries until the condition is met or the timeout expires:
```typescript
// URL assertion
await expect(page).toHaveURL(/\/datasets\/[a-f0-9-]+\/documents/)
// Element visibility
await expect(page.getByRole('button', { name: 'Save' })).toBeVisible()
// Element state
await expect(page.getByRole('button', { name: 'Submit' })).toBeEnabled()
// Negation
await expect(page.getByText('Loading')).not.toBeVisible()
```
Do not use manual `waitForTimeout` or polling loops. If you need a longer wait for a specific assertion, pass `{ timeout: 30_000 }` to the assertion.
### Cucumber expressions
Use Cucumber expression parameter types to extract values from Gherkin steps:
| Type | Pattern | Example step |
|---|---|---|
| `{string}` | Quoted string | `I select the "Workflow" app type` |
| `{int}` | Integer | `I should see {int} items` |
| `{float}` | Decimal | `the progress is {float} percent` |
| `{word}` | Single word | `I click the {word} tab` |
Prefer `{string}` for UI labels, names, and text content — it maps naturally to Gherkin's quoted values.
### Scoping locators
When the page has multiple similar elements, scope locators to a container:
```typescript
When('I fill in the app name in the dialog', async function (this: DifyWorld) {
const dialog = this.getPage().getByRole('dialog')
await dialog.getByPlaceholder('Give your app a name').fill('My App')
})
```
### Failure diagnostics
The `After` hook automatically captures on failure:
- Full-page screenshot (PNG)
- Page HTML dump
- Console errors and page errors
Artifacts are saved to `cucumber-report/artifacts/` and attached to the HTML report. No extra code needed in step definitions.
## Reusing existing steps
Before writing a new step definition, check what already exists. Steps in `common/` are designed for broad reuse across all features.
List all registered step patterns:
```bash
grep -rn "Given\|When\|Then" e2e/features/step-definitions/ --include='*.ts' | grep -oP "'[^']+'"
```
Or browse the step definition files directly:
- `features/step-definitions/common/` — auth guards and navigation assertions shared by all features
- `features/step-definitions/<capability>/` — domain-specific steps scoped to a single feature area

View File

@ -0,0 +1,46 @@
import { render } from '@testing-library/react'
import { API_PREFIX } from '@/config'
import BlockIcon, { VarBlockIcon } from '../block-icon'
import { BlockEnum } from '../types'
describe('BlockIcon', () => {
  it('renders the default workflow icon container for regular nodes', () => {
    const { container } = render(<BlockIcon type={BlockEnum.Start} size="xs" className="extra-class" />)
    const iconContainer = container.firstElementChild
    // Size/background utility classes and the caller-supplied class all land on the wrapper.
    expect(iconContainer).toHaveClass('w-4', 'h-4', 'bg-util-colors-blue-brand-blue-brand-500', 'extra-class')
    expect(iconContainer?.querySelector('svg')).toBeInTheDocument()
  })

  it('normalizes protected plugin icon urls for tool-like nodes', () => {
    const { container } = render(
      <BlockIcon
        type={BlockEnum.Tool}
        toolIcon="/foo/workspaces/current/plugin/icon/plugin-tool.png"
      />,
    )
    const iconContainer = container.firstElementChild as HTMLElement
    const backgroundIcon = iconContainer.querySelector('div') as HTMLElement
    expect(iconContainer).not.toHaveClass('bg-util-colors-blue-blue-500')
    // The plugin icon path must be rewritten onto the API origin, not used verbatim.
    expect(backgroundIcon.style.backgroundImage).toContain(
      `${API_PREFIX}/workspaces/current/plugin/icon/plugin-tool.png`,
    )
  })
})

describe('VarBlockIcon', () => {
  it('renders the compact icon variant without the default container wrapper', () => {
    const { container } = render(
      <VarBlockIcon
        type={BlockEnum.Answer}
        className="custom-var-icon"
      />,
    )
    expect(container.querySelector('.custom-var-icon')).toBeInTheDocument()
    expect(container.querySelector('svg')).toBeInTheDocument()
    // The compact variant must not render the colored container used by BlockIcon.
    expect(container.querySelector('.bg-util-colors-warning-warning-500')).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,39 @@
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { WorkflowContextProvider } from '../context'
import { useStore, useWorkflowStore } from '../store'
// Minimal consumer that reads and toggles one store flag, so the test can
// observe both the subscription (useStore) and the imperative store handle.
const StoreConsumer = () => {
  const showSingleRunPanel = useStore(s => s.showSingleRunPanel)
  const store = useWorkflowStore()
  return (
    <button onClick={() => store.getState().setShowSingleRunPanel(!showSingleRunPanel)}>
      {showSingleRunPanel ? 'open' : 'closed'}
    </button>
  )
}

describe('WorkflowContextProvider', () => {
  it('provides the workflow store to descendants and keeps the same store across rerenders', async () => {
    const user = userEvent.setup()
    const { rerender } = render(
      <WorkflowContextProvider>
        <StoreConsumer />
      </WorkflowContextProvider>,
    )
    expect(screen.getByRole('button', { name: 'closed' })).toBeInTheDocument()
    await user.click(screen.getByRole('button', { name: 'closed' }))
    expect(screen.getByRole('button', { name: 'open' })).toBeInTheDocument()
    // Rerendering the provider must reuse the same store instance:
    // the toggled state survives instead of resetting to 'closed'.
    rerender(
      <WorkflowContextProvider>
        <StoreConsumer />
      </WorkflowContextProvider>,
    )
    expect(screen.getByRole('button', { name: 'open' })).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,67 @@
import type { Edge, Node } from '../types'
import { render, screen } from '@testing-library/react'
import { useStoreApi } from 'reactflow'
import { useDatasetsDetailStore } from '../datasets-detail-store/store'
import WorkflowWithDefaultContext from '../index'
import { BlockEnum } from '../types'
import { useWorkflowHistoryStore } from '../workflow-history-store'
// Fixture graph: a single start node and one edge out of it.
const nodes: Node[] = [
  {
    id: 'node-start',
    type: 'custom',
    position: { x: 0, y: 0 },
    data: {
      title: 'Start',
      desc: '',
      type: BlockEnum.Start,
    },
  },
]
const edges: Edge[] = [
  {
    id: 'edge-1',
    source: 'node-start',
    target: 'node-end',
    sourceHandle: null,
    targetHandle: null,
    type: 'custom',
    data: {
      sourceType: BlockEnum.Start,
      targetType: BlockEnum.End,
    },
  },
]

// Reads from every provider the wrapper is expected to install and renders
// the observations as a single text string for one assertion.
const ContextConsumer = () => {
  const { store, shortcutsEnabled } = useWorkflowHistoryStore()
  const datasetCount = useDatasetsDetailStore(state => Object.keys(state.datasetsDetail).length)
  const reactFlowStore = useStoreApi()
  return (
    <div>
      {`history:${store.getState().nodes.length}`}
      {` shortcuts:${String(shortcutsEnabled)}`}
      {` datasets:${datasetCount}`}
      {` reactflow:${String(!!reactFlowStore)}`}
    </div>
  )
}

describe('WorkflowWithDefaultContext', () => {
  it('wires the ReactFlow, workflow history, and datasets detail providers around its children', () => {
    render(
      <WorkflowWithDefaultContext
        nodes={nodes}
        edges={edges}
      >
        <ContextConsumer />
      </WorkflowWithDefaultContext>,
    )
    expect(
      screen.getByText('history:1 shortcuts:true datasets:0 reactflow:true'),
    ).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,51 @@
import { render, screen } from '@testing-library/react'
import ShortcutsName from '../shortcuts-name'
describe('ShortcutsName', () => {
  // The component branches on navigator.userAgent, so each test installs a
  // fake navigator and afterEach restores the real one.
  const originalNavigator = globalThis.navigator

  afterEach(() => {
    Object.defineProperty(globalThis, 'navigator', {
      value: originalNavigator,
      writable: true,
      configurable: true,
    })
  })

  it('renders mac-friendly key labels and style variants', () => {
    Object.defineProperty(globalThis, 'navigator', {
      value: { userAgent: 'Macintosh' },
      writable: true,
      configurable: true,
    })
    const { container } = render(
      <ShortcutsName
        keys={['ctrl', 'shift', 's']}
        bgColor="white"
        textColor="secondary"
      />,
    )
    // On mac UAs, 'ctrl' and 'shift' are displayed as their symbol glyphs.
    expect(screen.getByText('⌘')).toBeInTheDocument()
    expect(screen.getByText('⇧')).toBeInTheDocument()
    expect(screen.getByText('s')).toBeInTheDocument()
    expect(container.querySelector('.system-kbd')).toHaveClass(
      'bg-components-kbd-bg-white',
      'text-text-tertiary',
    )
  })

  it('keeps raw key names on non-mac systems', () => {
    Object.defineProperty(globalThis, 'navigator', {
      value: { userAgent: 'Windows NT' },
      writable: true,
      configurable: true,
    })
    render(<ShortcutsName keys={['ctrl', 'alt']} />)
    expect(screen.getByText('ctrl')).toBeInTheDocument()
    expect(screen.getByText('alt')).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,97 @@
import type { Edge, Node } from '../types'
import type { WorkflowHistoryState } from '../workflow-history-store'
import { render, renderHook, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '../types'
import { useWorkflowHistoryStore, WorkflowHistoryProvider } from '../workflow-history-store'
// Fixture graph with selected flags deliberately set to true so the
// sanitization test below can verify the provider clears them.
const nodes: Node[] = [
  {
    id: 'node-1',
    type: 'custom',
    position: { x: 0, y: 0 },
    data: {
      title: 'Start',
      desc: '',
      type: BlockEnum.Start,
      selected: true,
    },
    selected: true,
  },
]
const edges: Edge[] = [
  {
    id: 'edge-1',
    source: 'node-1',
    target: 'node-2',
    sourceHandle: null,
    targetHandle: null,
    type: 'custom',
    selected: true,
    data: {
      sourceType: BlockEnum.Start,
      targetType: BlockEnum.End,
    },
  },
]

// Renders the history node count and the shortcuts flag, and toggles the
// flag on click, so one button covers both read and write paths.
const HistoryConsumer = () => {
  const { store, shortcutsEnabled, setShortcutsEnabled } = useWorkflowHistoryStore()
  return (
    <button onClick={() => setShortcutsEnabled(!shortcutsEnabled)}>
      {`nodes:${store.getState().nodes.length} shortcuts:${String(shortcutsEnabled)}`}
    </button>
  )
}

describe('WorkflowHistoryProvider', () => {
  it('provides workflow history state and shortcut toggles', async () => {
    const user = userEvent.setup()
    render(
      <WorkflowHistoryProvider
        nodes={nodes}
        edges={edges}
      >
        <HistoryConsumer />
      </WorkflowHistoryProvider>,
    )
    expect(screen.getByRole('button', { name: 'nodes:1 shortcuts:true' })).toBeInTheDocument()
    await user.click(screen.getByRole('button', { name: 'nodes:1 shortcuts:true' }))
    expect(screen.getByRole('button', { name: 'nodes:1 shortcuts:false' })).toBeInTheDocument()
  })

  it('sanitizes selected flags when history state is replaced through the exposed store api', () => {
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <WorkflowHistoryProvider
        nodes={nodes}
        edges={edges}
      >
        {children}
      </WorkflowHistoryProvider>
    )
    const { result } = renderHook(() => useWorkflowHistoryStore(), { wrapper })
    const nextState: WorkflowHistoryState = {
      workflowHistoryEvent: undefined,
      workflowHistoryEventMeta: undefined,
      nodes,
      edges,
    }
    result.current.store.setState(nextState)
    // Even though the fixtures carry selected:true, the store must strip it.
    expect(result.current.store.getState().nodes[0].data.selected).toBe(false)
    expect(result.current.store.getState().edges[0].selected).toBe(false)
  })

  it('throws when consumed outside the provider', () => {
    expect(() => renderHook(() => useWorkflowHistoryStore())).toThrow(
      'useWorkflowHistoryStoreApi must be used within a WorkflowHistoryProvider',
    )
  })
})

View File

@ -0,0 +1,140 @@
import { render, screen, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { useMarketplacePlugins } from '@/app/components/plugins/marketplace/hooks'
import { useGlobalPublicStore } from '@/context/global-public-context'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import AllTools from '../all-tools'
import { createGlobalPublicStoreState, createToolProvider } from './factories'
// Module mocks are hoisted by Vitest; each test configures behavior via the
// vi.mocked(...) handles captured below.
vi.mock('@/context/global-public-context', () => ({
  useGlobalPublicStore: vi.fn(),
}))
vi.mock('@/context/i18n', () => ({
  useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
  default: vi.fn(),
}))
vi.mock('@/app/components/plugins/marketplace/hooks', () => ({
  useMarketplacePlugins: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
  useMCPToolAvailability: () => ({
    allowed: true,
  }),
}))
vi.mock('@/utils/var', async importOriginal => ({
  ...(await importOriginal<typeof import('@/utils/var')>()),
  getMarketplaceUrl: () => 'https://marketplace.test/tools',
}))

const mockUseMarketplacePlugins = vi.mocked(useMarketplacePlugins)
const mockUseGlobalPublicStore = vi.mocked(useGlobalPublicStore)
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)

// Default return shape for useMarketplacePlugins: an empty, idle result set.
const createMarketplacePluginsMock = () => ({
  plugins: [],
  total: 0,
  resetPlugins: vi.fn(),
  queryPlugins: vi.fn(),
  queryPluginsWithDebounced: vi.fn(),
  cancelQueryPluginsWithDebounced: vi.fn(),
  isLoading: false,
  isFetchingNextPage: false,
  hasNextPage: false,
  fetchNextPage: vi.fn(),
  page: 0,
})

describe('AllTools', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Marketplace disabled by default; individual tests override if needed.
    mockUseGlobalPublicStore.mockImplementation(selector => selector(createGlobalPublicStoreState(false)))
    mockUseGetLanguage.mockReturnValue('en_US')
    mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
    mockUseMarketplacePlugins.mockReturnValue(createMarketplacePluginsMock())
  })

  it('filters tools by the active tab', async () => {
    const user = userEvent.setup()
    render(
      <AllTools
        searchText=""
        tags={[]}
        onSelect={vi.fn()}
        buildInTools={[createToolProvider({
          id: 'provider-built-in',
          label: { en_US: 'Built In Provider', zh_Hans: 'Built In Provider' },
        })]}
        customTools={[createToolProvider({
          id: 'provider-custom',
          type: 'custom',
          label: { en_US: 'Custom Provider', zh_Hans: 'Custom Provider' },
        })]}
        workflowTools={[]}
        mcpTools={[]}
      />,
    )
    // The default tab shows every provider; the custom tab narrows the list.
    expect(screen.getByText('Built In Provider')).toBeInTheDocument()
    expect(screen.getByText('Custom Provider')).toBeInTheDocument()
    await user.click(screen.getByText('workflow.tabs.customTool'))
    expect(screen.getByText('Custom Provider')).toBeInTheDocument()
    expect(screen.queryByText('Built In Provider')).not.toBeInTheDocument()
  })

  it('filters the rendered tools by the search text', () => {
    render(
      <AllTools
        searchText="report"
        tags={[]}
        onSelect={vi.fn()}
        buildInTools={[
          createToolProvider({
            id: 'provider-report',
            label: { en_US: 'Report Toolkit', zh_Hans: 'Report Toolkit' },
          }),
          createToolProvider({
            id: 'provider-other',
            label: { en_US: 'Other Toolkit', zh_Hans: 'Other Toolkit' },
          }),
        ]}
        customTools={[]}
        workflowTools={[]}
        mcpTools={[]}
      />,
    )
    expect(screen.getByText('Report Toolkit')).toBeInTheDocument()
    expect(screen.queryByText('Other Toolkit')).not.toBeInTheDocument()
  })

  it('shows the empty state when no tool matches the current filter', async () => {
    render(
      <AllTools
        searchText="missing"
        tags={[]}
        onSelect={vi.fn()}
        buildInTools={[]}
        customTools={[]}
        workflowTools={[]}
        mcpTools={[]}
      />,
    )
    await waitFor(() => {
      expect(screen.getByText('workflow.tabs.noPluginsFound')).toBeInTheDocument()
    })
  })
})

View File

@ -0,0 +1,79 @@
import type { NodeDefault } from '../../types'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '../../types'
import Blocks from '../blocks'
import { BlockClassificationEnum } from '../types'
// vi.hoisted lets the mocked reactflow module below reference this mutable
// state even though vi.mock factories are hoisted above the const.
const runtimeState = vi.hoisted(() => ({
  nodes: [] as Array<{ data: { type?: BlockEnum } }>,
}))

vi.mock('reactflow', () => ({
  useStoreApi: () => ({
    getState: () => ({
      getNodes: () => runtimeState.nodes,
    }),
  }),
}))

// Builds a minimal NodeDefault for the block list; classification defaults
// to the Default group unless the test says otherwise.
const createBlock = (type: BlockEnum, title: string, classification = BlockClassificationEnum.Default): NodeDefault => ({
  metaData: {
    classification,
    sort: 0,
    type,
    title,
    author: 'Dify',
    description: `${title} description`,
  },
  defaultValue: {},
  checkValid: () => ({ isValid: true }),
})

describe('Blocks', () => {
  beforeEach(() => {
    runtimeState.nodes = []
  })

  it('renders grouped blocks, filters duplicate knowledge-base nodes, and selects a block', async () => {
    const user = userEvent.setup()
    const onSelect = vi.fn()
    // A knowledge-base node already on the canvas should suppress the
    // Knowledge Retrieval entry from the selector.
    runtimeState.nodes = [{ data: { type: BlockEnum.KnowledgeBase } }]
    render(
      <Blocks
        searchText=""
        onSelect={onSelect}
        availableBlocksTypes={[BlockEnum.LLM, BlockEnum.LoopEnd, BlockEnum.KnowledgeBase]}
        blocks={[
          createBlock(BlockEnum.LLM, 'LLM'),
          createBlock(BlockEnum.LoopEnd, 'Exit Loop', BlockClassificationEnum.Logic),
          createBlock(BlockEnum.KnowledgeBase, 'Knowledge Retrieval'),
        ]}
      />,
    )
    expect(screen.getByText('LLM')).toBeInTheDocument()
    expect(screen.getByText('Exit Loop')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.loop.loopNode')).toBeInTheDocument()
    expect(screen.queryByText('Knowledge Retrieval')).not.toBeInTheDocument()
    await user.click(screen.getByText('LLM'))
    expect(onSelect).toHaveBeenCalledWith(BlockEnum.LLM)
  })

  it('shows the empty state when no block matches the search', () => {
    render(
      <Blocks
        searchText="missing"
        onSelect={vi.fn()}
        availableBlocksTypes={[BlockEnum.LLM]}
        blocks={[createBlock(BlockEnum.LLM, 'LLM')]}
      />,
    )
    expect(screen.getByText('workflow.tabs.noResult')).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,101 @@
import type { ToolWithProvider } from '../../types'
import type { Plugin } from '@/app/components/plugins/types'
import type { Tool } from '@/app/components/tools/types'
import { PluginCategoryEnum } from '@/app/components/plugins/types'
import { CollectionType } from '@/app/components/tools/types'
import { defaultSystemFeatures } from '@/types/feature'
// Test-data factory: builds a minimal Tool whose label/description carry the
// same text for both supported locales. The description defaults to
// "<label> description" when not supplied.
export const createTool = (
  name: string,
  label: string,
  description = `${label} description`,
): Tool => {
  const localizedLabel = { en_US: label, zh_Hans: label }
  const localizedDescription = { en_US: description, zh_Hans: description }
  return {
    name,
    author: 'author',
    label: localizedLabel,
    description: localizedDescription,
    parameters: [],
    labels: [],
    output_schema: {},
  }
}
// Test-data factory: a fully-populated built-in ToolWithProvider. Fields can
// be selectively replaced via `overrides`, which is spread last so it wins
// over every default below.
export const createToolProvider = (
  overrides: Partial<ToolWithProvider> = {},
): ToolWithProvider => ({
  id: 'provider-1',
  name: 'provider-one',
  author: 'Provider Author',
  description: {
    en_US: 'Provider description',
    zh_Hans: 'Provider description',
  },
  icon: 'icon',
  icon_dark: 'icon-dark',
  label: {
    en_US: 'Provider One',
    zh_Hans: 'Provider One',
  },
  type: CollectionType.builtIn,
  team_credentials: {},
  is_team_authorization: false,
  allow_delete: false,
  labels: [],
  plugin_id: 'plugin-1',
  tools: [createTool('tool-a', 'Tool A')],
  // Only the version field of meta matters to the tests; cast keeps it terse.
  meta: { version: '1.0.0' } as ToolWithProvider['meta'],
  plugin_unique_identifier: 'plugin-1@1.0.0',
  ...overrides,
})
// Test-data factory: a verified marketplace tool Plugin with all required
// fields filled in. `overrides` is spread last and replaces any default.
export const createPlugin = (overrides: Partial<Plugin> = {}): Plugin => ({
  type: 'plugin',
  org: 'org',
  author: 'author',
  name: 'Plugin One',
  plugin_id: 'plugin-1',
  version: '1.0.0',
  latest_version: '1.0.0',
  latest_package_identifier: 'plugin-1@1.0.0',
  icon: 'icon',
  verified: true,
  label: {
    en_US: 'Plugin One',
    zh_Hans: 'Plugin One',
  },
  brief: {
    en_US: 'Plugin description',
    zh_Hans: 'Plugin description',
  },
  description: {
    en_US: 'Plugin description',
    zh_Hans: 'Plugin description',
  },
  introduction: 'Plugin introduction',
  repository: 'https://example.com/plugin',
  category: PluginCategoryEnum.tool,
  tags: [],
  badges: [],
  install_count: 0,
  endpoint: {
    settings: [],
  },
  verification: {
    authorized_category: 'community',
  },
  from: 'github',
  ...overrides,
})
export const createGlobalPublicStoreState = (enableMarketplace: boolean) => ({
systemFeatures: {
...defaultSystemFeatures,
enable_marketplace: enableMarketplace,
},
setSystemFeatures: vi.fn(),
})

View File

@ -0,0 +1,101 @@
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import FeaturedTools from '../featured-tools'
import { createPlugin, createToolProvider } from './factories'
// Module mocks are hoisted by Vitest; the vi.mocked handles below are used
// by beforeEach to set per-test return values.
vi.mock('@/context/i18n', () => ({
  useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
  useMCPToolAvailability: () => ({
    allowed: true,
  }),
}))
vi.mock('@/utils/var', async importOriginal => ({
  ...(await importOriginal<typeof import('@/utils/var')>()),
  getMarketplaceUrl: () => 'https://marketplace.test/tools',
}))

const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)

describe('FeaturedTools', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // The component persists its collapsed state in localStorage; reset it
    // so tests don't leak state into each other.
    localStorage.clear()
    mockUseGetLanguage.mockReturnValue('en_US')
    mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
  })

  it('shows more featured tools when the list exceeds the initial quota', async () => {
    const user = userEvent.setup()
    const plugins = Array.from({ length: 6 }, (_, index) =>
      createPlugin({
        plugin_id: `plugin-${index + 1}`,
        latest_package_identifier: `plugin-${index + 1}@1.0.0`,
        label: { en_US: `Plugin ${index + 1}`, zh_Hans: `Plugin ${index + 1}` },
      }))
    const providers = plugins.map((plugin, index) =>
      createToolProvider({
        id: `provider-${index + 1}`,
        plugin_id: plugin.plugin_id,
        label: { en_US: `Provider ${index + 1}`, zh_Hans: `Provider ${index + 1}` },
      }),
    )
    const providerMap = new Map(providers.map(provider => [provider.plugin_id!, provider]))
    render(
      <FeaturedTools
        plugins={plugins}
        providerMap={providerMap}
        onSelect={vi.fn()}
      />,
    )
    // Only the initial quota is visible until "show more" is clicked.
    expect(screen.getByText('Provider 1')).toBeInTheDocument()
    expect(screen.queryByText('Provider 6')).not.toBeInTheDocument()
    await user.click(screen.getByText('workflow.tabs.showMoreFeatured'))
    expect(screen.getByText('Provider 6')).toBeInTheDocument()
  })

  it('honors the persisted collapsed state', () => {
    localStorage.setItem('workflow_tools_featured_collapsed', 'true')
    render(
      <FeaturedTools
        plugins={[createPlugin()]}
        providerMap={new Map([[
          'plugin-1',
          createToolProvider(),
        ]])}
        onSelect={vi.fn()}
      />,
    )
    // Header stays visible, but the provider list is collapsed away.
    expect(screen.getByText('workflow.tabs.featuredTools')).toBeInTheDocument()
    expect(screen.queryByText('Provider One')).not.toBeInTheDocument()
  })

  it('shows the marketplace empty state when no featured tools are available', () => {
    render(
      <FeaturedTools
        plugins={[]}
        providerMap={new Map()}
        onSelect={vi.fn()}
      />,
    )
    expect(screen.getByText('workflow.tabs.noFeaturedPlugins')).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,52 @@
import { act, renderHook } from '@testing-library/react'
import { useTabs, useToolTabs } from '../hooks'
import { TabsEnum, ToolTypeEnum } from '../types'
// Unit tests for the block-selector tab hooks (useTabs / useToolTabs).
describe('block-selector hooks', () => {
  it('falls back to the first valid tab when the preferred start tab is disabled', () => {
    // NOTE(review): Start is requested as the default tab but ends up disabled —
    // presumably because hasUserInputNode disables it; confirm against useTabs.
    const { result } = renderHook(() => useTabs({
      noStart: false,
      hasUserInputNode: true,
      defaultActiveTab: TabsEnum.Start,
    }))
    expect(result.current.tabs.find(tab => tab.key === TabsEnum.Start)?.disabled).toBe(true)
    // The hook must not land on a disabled tab, so it falls back to Blocks.
    expect(result.current.activeTab).toBe(TabsEnum.Blocks)
  })
  it('keeps the start tab enabled when forcing it on and resets to a valid tab after disabling blocks', () => {
    const props: Parameters<typeof useTabs>[0] = {
      noBlocks: false,
      noStart: false,
      hasUserInputNode: true,
      forceEnableStartTab: true,
    }
    const { result, rerender } = renderHook(nextProps => useTabs(nextProps), {
      initialProps: props,
    })
    // forceEnableStartTab overrides the hasUserInputNode-based disabling.
    expect(result.current.tabs.find(tab => tab.key === TabsEnum.Start)?.disabled).toBeFalsy()
    act(() => {
      result.current.setActiveTab(TabsEnum.Blocks)
    })
    // Disabling every non-Start tab forces the active tab back to Start.
    rerender({
      ...props,
      noBlocks: true,
      noSources: true,
      noTools: true,
    })
    expect(result.current.activeTab).toBe(TabsEnum.Start)
  })
  it('returns the MCP tab only when it is not hidden', () => {
    // useToolTabs(true) hides the MCP tab; the default keeps it.
    const { result: visible } = renderHook(() => useToolTabs())
    const { result: hidden } = renderHook(() => useToolTabs(true))
    expect(visible.current.some(tab => tab.key === ToolTypeEnum.MCP)).toBe(true)
    expect(hidden.current.some(tab => tab.key === ToolTypeEnum.MCP)).toBe(false)
  })
})

View File

@ -0,0 +1,90 @@
import type { NodeDefault, ToolWithProvider } from '../../types'
import { screen } from '@testing-library/react'
import { renderWorkflowComponent } from '../../__tests__/workflow-test-env'
import { BlockEnum } from '../../types'
import NodeSelectorWrapper from '../index'
import { BlockClassificationEnum } from '../types'
vi.mock('reactflow', async () =>
(await import('../../__tests__/reactflow-mock-state')).createReactFlowModuleMock())
vi.mock('@/service/use-plugins', () => ({
useFeaturedToolsRecommendations: () => ({
plugins: [],
isLoading: false,
}),
}))
vi.mock('@/service/use-tools', () => ({
useAllBuiltInTools: () => ({ data: [] }),
useAllCustomTools: () => ({ data: [] }),
useAllWorkflowTools: () => ({ data: [] }),
useAllMCPTools: () => ({ data: [] }),
useInvalidateAllBuiltInTools: () => vi.fn(),
}))
vi.mock('@/context/global-public-context', () => ({
useGlobalPublicStore: (selector: (state: { systemFeatures: { enable_marketplace: boolean } }) => unknown) => selector({
systemFeatures: { enable_marketplace: false },
}),
}))
// Builds a minimal NodeDefault stub whose metaData carries the given
// type/title; validity checks always pass.
const createBlock = (type: BlockEnum, title: string): NodeDefault => {
  const metaData = {
    type,
    title,
    sort: 0,
    classification: BlockClassificationEnum.Default,
    author: 'Dify',
    description: `${title} description`,
  }
  return {
    metaData,
    defaultValue: {},
    checkValid: () => ({ isValid: true }),
  }
}
// A single datasource-typed provider fixture, forwarded to the selector via
// the store's dataSourceList in the test below.
const dataSource: ToolWithProvider = {
  id: 'datasource-1',
  name: 'datasource',
  author: 'Dify',
  description: { en_US: 'Data source', zh_Hans: '数据源' },
  icon: 'icon',
  label: { en_US: 'Data Source', zh_Hans: 'Data Source' },
  // Casts narrow the literal to the project enum/meta types without
  // importing their declarations here.
  type: 'datasource' as ToolWithProvider['type'],
  team_credentials: {},
  is_team_authorization: false,
  allow_delete: false,
  labels: [],
  tools: [],
  meta: { version: '1.0.0' } as ToolWithProvider['meta'],
}
describe('NodeSelectorWrapper', () => {
it('filters hidden block types from hooks store and forwards data sources', async () => {
renderWorkflowComponent(
<NodeSelectorWrapper
open
onSelect={vi.fn()}
availableBlocksTypes={[BlockEnum.Code]}
/>,
{
hooksStoreProps: {
availableNodesMetaData: {
nodes: [
createBlock(BlockEnum.Start, 'Start'),
createBlock(BlockEnum.Tool, 'Tool'),
createBlock(BlockEnum.Code, 'Code'),
createBlock(BlockEnum.DataSource, 'Data Source'),
],
},
},
initialStoreState: {
dataSourceList: [dataSource],
},
},
)
expect(await screen.findByText('Code')).toBeInTheDocument()
expect(screen.queryByText('Start')).not.toBeInTheDocument()
expect(screen.queryByText('Tool')).not.toBeInTheDocument()
})
})

View File

@ -0,0 +1,95 @@
import type { NodeDefault } from '../../types'
import { screen, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { renderWorkflowComponent } from '../../__tests__/workflow-test-env'
import { BlockEnum } from '../../types'
import NodeSelector from '../main'
import { BlockClassificationEnum } from '../types'
vi.mock('reactflow', () => ({
useStoreApi: () => ({
getState: () => ({
getNodes: () => [],
}),
}),
}))
vi.mock('@/context/global-public-context', () => ({
useGlobalPublicStore: (selector: (state: { systemFeatures: { enable_marketplace: boolean } }) => unknown) => selector({
systemFeatures: { enable_marketplace: false },
}),
}))
vi.mock('@/service/use-plugins', () => ({
useFeaturedToolsRecommendations: () => ({
plugins: [],
isLoading: false,
}),
}))
vi.mock('@/service/use-tools', () => ({
useAllBuiltInTools: () => ({ data: [] }),
useAllCustomTools: () => ({ data: [] }),
useAllWorkflowTools: () => ({ data: [] }),
useAllMCPTools: () => ({ data: [] }),
useInvalidateAllBuiltInTools: () => vi.fn(),
}))
// Minimal NodeDefault factory for the selector tests: fixed metadata except
// the caller-provided type and title; checkValid always succeeds.
const createBlock = (type: BlockEnum, title: string): NodeDefault => {
  const metaData = {
    classification: BlockClassificationEnum.Default,
    sort: 0,
    type,
    title,
    author: 'Dify',
    description: `${title} description`,
  }
  return {
    metaData,
    defaultValue: {},
    checkValid: () => ({ isValid: true }),
  }
}
describe('NodeSelector', () => {
it('opens with the real blocks tab, filters by search, selects a block, and clears search after close', async () => {
const user = userEvent.setup()
const onSelect = vi.fn()
renderWorkflowComponent(
<NodeSelector
onSelect={onSelect}
blocks={[
createBlock(BlockEnum.LLM, 'LLM'),
createBlock(BlockEnum.End, 'End'),
]}
availableBlocksTypes={[BlockEnum.LLM, BlockEnum.End]}
trigger={open => (
<button type="button">
{open ? 'selector-open' : 'selector-closed'}
</button>
)}
/>,
)
await user.click(screen.getByRole('button', { name: 'selector-closed' }))
const searchInput = screen.getByPlaceholderText('workflow.tabs.searchBlock')
expect(screen.getByText('LLM')).toBeInTheDocument()
expect(screen.getByText('End')).toBeInTheDocument()
await user.type(searchInput, 'LLM')
expect(screen.getByText('LLM')).toBeInTheDocument()
expect(screen.queryByText('End')).not.toBeInTheDocument()
await user.click(screen.getByText('LLM'))
expect(onSelect).toHaveBeenCalledWith(BlockEnum.LLM, undefined)
await waitFor(() => {
expect(screen.queryByPlaceholderText('workflow.tabs.searchBlock')).not.toBeInTheDocument()
})
await user.click(screen.getByRole('button', { name: 'selector-closed' }))
const reopenedInput = screen.getByPlaceholderText('workflow.tabs.searchBlock') as HTMLInputElement
expect(reopenedInput.value).toBe('')
expect(screen.getByText('End')).toBeInTheDocument()
})
})

View File

@ -0,0 +1,95 @@
import { render, screen } from '@testing-library/react'
import { CollectionType } from '@/app/components/tools/types'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import Tools from '../tools'
import { ViewType } from '../view-type-select'
import { createToolProvider } from './factories'
vi.mock('@/context/i18n', () => ({
useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
useMCPToolAvailability: () => ({
allowed: true,
}),
}))
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)
describe('Tools', () => {
beforeEach(() => {
vi.clearAllMocks()
mockUseGetLanguage.mockReturnValue('en_US')
mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
})
it('shows the empty state when there are no tools and no search text', () => {
render(
<Tools
tools={[]}
onSelect={vi.fn()}
viewType={ViewType.flat}
hasSearchText={false}
/>,
)
expect(screen.getByText('No tools available')).toBeInTheDocument()
})
it('renders tree groups for built-in and custom providers', () => {
render(
<Tools
tools={[
createToolProvider({
id: 'built-in-provider',
author: 'Built In',
label: { en_US: 'Built In Provider', zh_Hans: 'Built In Provider' },
}),
createToolProvider({
id: 'custom-provider',
type: CollectionType.custom,
label: { en_US: 'Custom Provider', zh_Hans: 'Custom Provider' },
}),
]}
onSelect={vi.fn()}
viewType={ViewType.tree}
hasSearchText={false}
/>,
)
expect(screen.getByText('Built In')).toBeInTheDocument()
expect(screen.getByText('workflow.tabs.customTool')).toBeInTheDocument()
expect(screen.getByText('Built In Provider')).toBeInTheDocument()
expect(screen.getByText('Custom Provider')).toBeInTheDocument()
})
it('shows the alphabetical index in flat view when enough tools are present', () => {
const { container } = render(
<Tools
tools={Array.from({ length: 11 }, (_, index) =>
createToolProvider({
id: `provider-${index}`,
label: {
en_US: `${String.fromCharCode(65 + index)} Provider`,
zh_Hans: `${String.fromCharCode(65 + index)} Provider`,
},
}))}
onSelect={vi.fn()}
viewType={ViewType.flat}
hasSearchText={false}
/>,
)
expect(container.querySelector('.index-bar')).toBeInTheDocument()
expect(screen.getByText('A Provider')).toBeInTheDocument()
expect(screen.getByText('K Provider')).toBeInTheDocument()
})
})

View File

@ -0,0 +1,99 @@
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { trackEvent } from '@/app/components/base/amplitude'
import { CollectionType } from '@/app/components/tools/types'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import { BlockEnum } from '../../../types'
import { createTool, createToolProvider } from '../../__tests__/factories'
import { ViewType } from '../../view-type-select'
import Tool from '../tool'
vi.mock('@/context/i18n', () => ({
useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
vi.mock('@/app/components/base/amplitude', () => ({
trackEvent: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
useMCPToolAvailability: () => ({
allowed: true,
}),
}))
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)
const mockTrackEvent = vi.mocked(trackEvent)
describe('Tool', () => {
beforeEach(() => {
vi.clearAllMocks()
mockUseGetLanguage.mockReturnValue('en_US')
mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
})
it('expands a provider and selects an action item', async () => {
const user = userEvent.setup()
const onSelect = vi.fn()
render(
<Tool
payload={createToolProvider({
tools: [
createTool('tool-a', 'Tool A'),
createTool('tool-b', 'Tool B'),
],
})}
viewType={ViewType.flat}
hasSearchText={false}
onSelect={onSelect}
/>,
)
await user.click(screen.getByText('Provider One'))
await user.click(screen.getByText('Tool B'))
expect(onSelect).toHaveBeenCalledWith(BlockEnum.Tool, expect.objectContaining({
provider_id: 'provider-1',
provider_name: 'provider-one',
tool_name: 'tool-b',
title: 'Tool B',
}))
expect(mockTrackEvent).toHaveBeenCalledWith('tool_selected', {
tool_name: 'tool-b',
plugin_id: 'plugin-1',
})
})
it('selects workflow tools directly without expanding the provider', async () => {
const user = userEvent.setup()
const onSelect = vi.fn()
render(
<Tool
payload={createToolProvider({
type: CollectionType.workflow,
tools: [createTool('workflow-tool', 'Workflow Tool')],
})}
viewType={ViewType.flat}
hasSearchText={false}
onSelect={onSelect}
/>,
)
await user.click(screen.getByText('Workflow Tool'))
expect(onSelect).toHaveBeenCalledWith(BlockEnum.Tool, expect.objectContaining({
provider_type: CollectionType.workflow,
tool_name: 'workflow-tool',
tool_label: 'Workflow Tool',
}))
})
})

View File

@ -0,0 +1,66 @@
import { render, screen } from '@testing-library/react'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import { createToolProvider } from '../../../__tests__/factories'
import List from '../list'
vi.mock('@/context/i18n', () => ({
useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
useMCPToolAvailability: () => ({
allowed: true,
}),
}))
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)
describe('ToolListFlatView', () => {
beforeEach(() => {
vi.clearAllMocks()
mockUseGetLanguage.mockReturnValue('en_US')
mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
})
it('assigns the first tool of each letter to the shared refs and renders the index bar', () => {
const toolRefs = {
current: {} as Record<string, HTMLDivElement | null>,
}
render(
<List
letters={['A', 'B']}
payload={[
createToolProvider({
id: 'provider-a',
label: { en_US: 'A Provider', zh_Hans: 'A Provider' },
letter: 'A',
} as ReturnType<typeof createToolProvider>),
createToolProvider({
id: 'provider-b',
label: { en_US: 'B Provider', zh_Hans: 'B Provider' },
letter: 'B',
} as ReturnType<typeof createToolProvider>),
]}
isShowLetterIndex
indexBar={<div data-testid="index-bar" />}
hasSearchText={false}
onSelect={vi.fn()}
toolRefs={toolRefs}
/>,
)
expect(screen.getByText('A Provider')).toBeInTheDocument()
expect(screen.getByText('B Provider')).toBeInTheDocument()
expect(screen.getByTestId('index-bar')).toBeInTheDocument()
expect(toolRefs.current.A).toBeTruthy()
expect(toolRefs.current.B).toBeTruthy()
})
})

View File

@ -0,0 +1,47 @@
import { render, screen } from '@testing-library/react'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import { createToolProvider } from '../../../__tests__/factories'
import Item from '../item'
vi.mock('@/context/i18n', () => ({
useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
useMCPToolAvailability: () => ({
allowed: true,
}),
}))
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)
// Renders a single tree-view group item and checks the heading plus its
// provider entries appear.
describe('ToolListTreeView Item', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // The component resolves labels by language and icon by theme.
    mockUseGetLanguage.mockReturnValue('en_US')
    mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
  })
  it('renders the group heading and its provider list', () => {
    render(
      <Item
        groupName="My Group"
        toolList={[createToolProvider({
          label: { en_US: 'Provider Alpha', zh_Hans: 'Provider Alpha' },
        })]}
        hasSearchText={false}
        onSelect={vi.fn()}
      />,
    )
    expect(screen.getByText('My Group')).toBeInTheDocument()
    expect(screen.getByText('Provider Alpha')).toBeInTheDocument()
  })
})
})

View File

@ -0,0 +1,56 @@
import { render, screen } from '@testing-library/react'
import { useGetLanguage } from '@/context/i18n'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import { createToolProvider } from '../../../__tests__/factories'
import { CUSTOM_GROUP_NAME } from '../../../index-bar'
import List from '../list'
vi.mock('@/context/i18n', () => ({
useGetLanguage: vi.fn(),
}))
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/mcp-tool-availability', () => ({
useMCPToolAvailability: () => ({
allowed: true,
}),
}))
const mockUseGetLanguage = vi.mocked(useGetLanguage)
const mockUseTheme = vi.mocked(useTheme)
// Tree-view list tests: group names map to headings, with special groups
// (e.g. the custom group) translated via i18n keys.
describe('ToolListTreeView', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseGetLanguage.mockReturnValue('en_US')
    mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
  })
  it('translates built-in special group names and renders the nested providers', () => {
    render(
      <List
        payload={{
          // A plain group key is rendered verbatim…
          BuiltIn: [createToolProvider({
            label: { en_US: 'Built In Provider', zh_Hans: 'Built In Provider' },
          })],
          // …while the custom group key is replaced by its i18n label.
          [CUSTOM_GROUP_NAME]: [createToolProvider({
            id: 'custom-provider',
            type: 'custom',
            label: { en_US: 'Custom Provider', zh_Hans: 'Custom Provider' },
          })],
        }}
        hasSearchText={false}
        onSelect={vi.fn()}
      />,
    )
    expect(screen.getByText('BuiltIn')).toBeInTheDocument()
    expect(screen.getByText('workflow.tabs.customTool')).toBeInTheDocument()
    expect(screen.getByText('Built In Provider')).toBeInTheDocument()
    expect(screen.getByText('Custom Provider')).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,91 @@
import type { DataSet } from '@/models/datasets'
import { renderHook } from '@testing-library/react'
import { ChunkingMode, DatasetPermission, DataSourceType } from '@/models/datasets'
import { DatasetsDetailContext } from '../provider'
import { createDatasetsDetailStore, useDatasetsDetailStore } from '../store'
// Factory for a fully-populated DataSet fixture. Only id and name vary per
// test; every other field is a fixed, type-satisfying placeholder.
const createDataset = (id: string, name = `dataset-${id}`): DataSet => ({
  id,
  name,
  indexing_status: 'completed',
  icon_info: {
    icon: 'book',
    icon_type: 'emoji' as DataSet['icon_info']['icon_type'],
  },
  description: `${name} description`,
  permission: DatasetPermission.onlyMe,
  data_source_type: DataSourceType.FILE,
  indexing_technique: 'high_quality' as DataSet['indexing_technique'],
  created_by: 'user-1',
  updated_by: 'user-1',
  updated_at: 1,
  app_count: 0,
  doc_form: ChunkingMode.text,
  document_count: 0,
  total_document_count: 0,
  word_count: 0,
  provider: 'provider',
  embedding_model: 'model',
  embedding_model_provider: 'provider',
  embedding_available: true,
  // Empty objects cast to the nested types — the store tests never read them.
  retrieval_model_dict: {} as DataSet['retrieval_model_dict'],
  retrieval_model: {} as DataSet['retrieval_model'],
  tags: [],
  external_knowledge_info: {
    external_knowledge_id: '',
    external_knowledge_api_id: '',
    external_knowledge_api_name: '',
    external_knowledge_api_endpoint: '',
  },
  external_retrieval_model: {
    top_k: 1,
    score_threshold: 0,
    score_threshold_enabled: false,
  },
  built_in_field_enabled: false,
  runtime_mode: 'general',
  enable_api: false,
  is_multimodal: false,
})
// Tests for the datasets-detail zustand store and its React context hook.
describe('datasets-detail-store store', () => {
  it('merges dataset details by id', () => {
    const store = createDatasetsDetailStore()
    store.getState().updateDatasetsDetail([
      createDataset('dataset-1', 'Dataset One'),
      createDataset('dataset-2', 'Dataset Two'),
    ])
    // A second update with an existing id must overwrite that entry only.
    store.getState().updateDatasetsDetail([
      createDataset('dataset-2', 'Dataset Two Updated'),
    ])
    expect(store.getState().datasetsDetail).toMatchObject({
      'dataset-1': { name: 'Dataset One' },
      'dataset-2': { name: 'Dataset Two Updated' },
    })
  })
  it('reads state from the datasets detail context', () => {
    const store = createDatasetsDetailStore()
    store.getState().updateDatasetsDetail([createDataset('dataset-3')])
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <DatasetsDetailContext.Provider value={store}>
        {children}
      </DatasetsDetailContext.Provider>
    )
    const { result } = renderHook(
      () => useDatasetsDetailStore(state => state.datasetsDetail['dataset-3']?.name),
      { wrapper },
    )
    // createDataset defaults name to `dataset-${id}`, hence the doubled prefix.
    expect(result.current).toBe('dataset-dataset-3')
  })
  it('throws when the datasets detail provider is missing', () => {
    // The hook is rendered with no wrapper, so the context lookup must throw.
    expect(() => renderHook(() => useDatasetsDetailStore(state => state.datasetsDetail))).toThrow(
      'Missing DatasetsDetailContext.Provider in the tree',
    )
  })
})

View File

@ -0,0 +1,41 @@
import { renderHook } from '@testing-library/react'
import { HooksStoreContext } from '../provider'
import { createHooksStore, useHooksStore } from '../store'
// Tests for the workflow hooks-store: default callbacks, refreshAll, and the
// context-backed selector hook.
describe('hooks-store store', () => {
  it('creates default callbacks and refreshes selected handlers', () => {
    const store = createHooksStore({})
    const handleBackupDraft = vi.fn()
    // Defaults created by the store when no props are supplied.
    expect(store.getState().availableNodesMetaData).toEqual({ nodes: [] })
    expect(store.getState().hasNodeInspectVars('node-1')).toBe(false)
    expect(store.getState().getWorkflowRunAndTraceUrl('run-1')).toEqual({
      runUrl: '',
      traceUrl: '',
    })
    // refreshAll swaps in the handlers passed to it.
    store.getState().refreshAll({ handleBackupDraft })
    expect(store.getState().handleBackupDraft).toBe(handleBackupDraft)
  })
  it('reads state from the hooks store context', () => {
    const handleRun = vi.fn()
    const store = createHooksStore({ handleRun })
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <HooksStoreContext.Provider value={store}>
        {children}
      </HooksStoreContext.Provider>
    )
    const { result } = renderHook(() => useHooksStore(state => state.handleRun), { wrapper })
    expect(result.current).toBe(handleRun)
  })
  it('throws when the hooks store provider is missing', () => {
    expect(() => renderHook(() => useHooksStore(state => state.handleRun))).toThrow(
      'Missing HooksStoreContext.Provider in the tree',
    )
  })
})

View File

@ -0,0 +1,19 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useDSL } from '../use-DSL'
// useDSL is a thin pass-through: it must surface exactly the handlers stored
// in the hooks store, by reference.
describe('useDSL', () => {
  it('returns the DSL handlers from hooks store', () => {
    const exportCheck = vi.fn()
    const handleExportDSL = vi.fn()
    const { result } = renderWorkflowHook(() => useDSL(), {
      hooksStoreProps: {
        exportCheck,
        handleExportDSL,
      },
    })
    // Identity (toBe) asserts no wrapping/cloning happens along the way.
    expect(result.current.exportCheck).toBe(exportCheck)
    expect(result.current.handleExportDSL).toBe(handleExportDSL)
  })
})

View File

@ -0,0 +1,90 @@
import { act, waitFor } from '@testing-library/react'
import { useEdges } from 'reactflow'
import { createEdge, createNode } from '../../__tests__/fixtures'
import { renderWorkflowFlowHook } from '../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../types'
import { useEdgesInteractionsWithoutSync } from '../use-edges-interactions-without-sync'
// Runtime flags that workflow interactions stamp onto edge.data.
type EdgeRuntimeState = {
  _sourceRunningStatus?: NodeRunningStatus
  _targetRunningStatus?: NodeRunningStatus
  _waitingRun?: boolean
}
// Reads the runtime flags from an edge, yielding an empty state when the
// edge or its data slot is absent.
const getEdgeRuntimeState = (edge?: { data?: unknown }): EdgeRuntimeState => {
  if (!edge || edge.data == null)
    return {} as EdgeRuntimeState
  return edge.data as EdgeRuntimeState
}
// Three bare nodes (a, b, c) used as the common graph fixture.
const createFlowNodes = () => ['a', 'b', 'c'].map(id => createNode({ id }))
// Two edges carrying runtime status: e1 represents an in-flight run segment,
// e2 a finished one, so cancel-status handlers have both shapes to clear.
const createFlowEdges = () => [
  createEdge({
    id: 'e1',
    source: 'a',
    target: 'b',
    data: {
      _sourceRunningStatus: NodeRunningStatus.Running,
      _targetRunningStatus: NodeRunningStatus.Running,
      _waitingRun: true,
    },
  }),
  createEdge({
    id: 'e2',
    source: 'b',
    target: 'c',
    data: {
      _sourceRunningStatus: NodeRunningStatus.Succeeded,
      _targetRunningStatus: undefined,
      _waitingRun: false,
    },
  }),
]
// Renders the hook under test together with useEdges so assertions can read
// the live edge state after interactions run.
const renderEdgesInteractionsHook = () =>
  renderWorkflowFlowHook(() => ({
    ...useEdgesInteractionsWithoutSync(),
    edges: useEdges(),
  }), {
    nodes: createFlowNodes(),
    edges: createFlowEdges(),
  })
describe('useEdgesInteractionsWithoutSync', () => {
  it('clears running status and waitingRun on all edges', () => {
    const { result } = renderEdgesInteractionsHook()
    act(() => {
      result.current.handleEdgeCancelRunningStatus()
    })
    // Returning the waitFor promise lets the test runner await it.
    return waitFor(() => {
      result.current.edges.forEach((edge) => {
        const edgeState = getEdgeRuntimeState(edge)
        expect(edgeState._sourceRunningStatus).toBeUndefined()
        expect(edgeState._targetRunningStatus).toBeUndefined()
        expect(edgeState._waitingRun).toBe(false)
      })
    })
  })
  it('does not mutate the original edges array', () => {
    const edges = createFlowEdges()
    // Snapshot the first edge's runtime flags before the handler runs.
    const originalData = { ...getEdgeRuntimeState(edges[0]) }
    const { result } = renderWorkflowFlowHook(() => ({
      ...useEdgesInteractionsWithoutSync(),
      edges: useEdges(),
    }), {
      nodes: createFlowNodes(),
      edges,
    })
    act(() => {
      result.current.handleEdgeCancelRunningStatus()
    })
    // The handler must produce new edge objects, leaving the input untouched.
    expect(getEdgeRuntimeState(edges[0])._sourceRunningStatus).toBe(originalData._sourceRunningStatus)
  })
})

View File

@ -0,0 +1,114 @@
import { createEdge, createNode } from '../../__tests__/fixtures'
import { getNodesConnectedSourceOrTargetHandleIdsMap } from '../../utils'
import {
applyConnectedHandleNodeData,
buildContextMenuEdges,
clearEdgeMenuIfNeeded,
clearNodeSelectionState,
updateEdgeHoverState,
updateEdgeSelectionState,
} from '../use-edges-interactions.helpers'
vi.mock('../../utils', () => ({
getNodesConnectedSourceOrTargetHandleIdsMap: vi.fn(),
}))
const mockGetNodesConnectedSourceOrTargetHandleIdsMap = vi.mocked(getNodesConnectedSourceOrTargetHandleIdsMap)
// Pure-helper tests for the edge-interaction utilities; the graph-walking
// dependency is mocked so each helper is exercised in isolation.
describe('use-edges-interactions.helpers', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('applyConnectedHandleNodeData should merge connected handle metadata into matching nodes', () => {
    // Only node-1 appears in the mocked map; node-2 must fall back to [].
    mockGetNodesConnectedSourceOrTargetHandleIdsMap.mockReturnValue({
      'node-1': {
        _connectedSourceHandleIds: ['branch-a'],
      },
    })
    const nodes = [
      createNode({ id: 'node-1', data: { title: 'Source' } }),
      createNode({ id: 'node-2', data: { title: 'Target' } }),
    ]
    const edgeChanges = [{
      type: 'add',
      edge: createEdge({ id: 'edge-1', source: 'node-1', target: 'node-2' }),
    }]
    const result = applyConnectedHandleNodeData(nodes, edgeChanges)
    expect(result[0].data._connectedSourceHandleIds).toEqual(['branch-a'])
    expect(result[1].data._connectedSourceHandleIds).toEqual([])
    expect(mockGetNodesConnectedSourceOrTargetHandleIdsMap).toHaveBeenCalledWith(edgeChanges, nodes)
  })
  it('clearEdgeMenuIfNeeded should return true only when the open menu belongs to a removed edge', () => {
    // Menu edge still present → clear.
    expect(clearEdgeMenuIfNeeded({
      edgeMenu: { edgeId: 'edge-1' },
      edgeIds: ['edge-1', 'edge-2'],
    })).toBe(true)
    // Menu edge not in the removed set → keep.
    expect(clearEdgeMenuIfNeeded({
      edgeMenu: { edgeId: 'edge-3' },
      edgeIds: ['edge-1', 'edge-2'],
    })).toBe(false)
    // No menu open → nothing to clear.
    expect(clearEdgeMenuIfNeeded({
      edgeIds: ['edge-1'],
    })).toBe(false)
  })
  it('updateEdgeHoverState should toggle only the hovered edge flag', () => {
    const edges = [
      createEdge({ id: 'edge-1', data: { _hovering: false } }),
      createEdge({ id: 'edge-2', data: { _hovering: false } }),
    ]
    const result = updateEdgeHoverState(edges, 'edge-2', true)
    expect(result.find(edge => edge.id === 'edge-1')?.data._hovering).toBe(false)
    expect(result.find(edge => edge.id === 'edge-2')?.data._hovering).toBe(true)
  })
  it('updateEdgeSelectionState should update selected flags for select changes only', () => {
    const edges = [
      createEdge({ id: 'edge-1', selected: false }),
      createEdge({ id: 'edge-2', selected: true }),
    ]
    const result = updateEdgeSelectionState(edges, [
      { type: 'select', id: 'edge-1', selected: true },
      { type: 'remove', id: 'edge-2' },
    ])
    expect(result.find(edge => edge.id === 'edge-1')?.selected).toBe(true)
    // The 'remove' change must leave edge-2's selection untouched.
    expect(result.find(edge => edge.id === 'edge-2')?.selected).toBe(true)
  })
  it('buildContextMenuEdges should select the target edge and clear bundled markers', () => {
    const edges = [
      createEdge({ id: 'edge-1', selected: true, data: { _isBundled: true } }),
      createEdge({ id: 'edge-2', selected: false, data: { _isBundled: true } }),
    ]
    const result = buildContextMenuEdges(edges, 'edge-2')
    // Selection moves exclusively to the context-menu target.
    expect(result.find(edge => edge.id === 'edge-1')?.selected).toBe(false)
    expect(result.find(edge => edge.id === 'edge-2')?.selected).toBe(true)
    expect(result.every(edge => edge.data._isBundled === false)).toBe(true)
  })
  it('clearNodeSelectionState should clear selected state and bundled markers on every node', () => {
    const nodes = [
      createNode({ id: 'node-1', selected: true, data: { selected: true, _isBundled: true } }),
      createNode({ id: 'node-2', selected: false, data: { selected: true, _isBundled: true } }),
    ]
    const result = clearNodeSelectionState(nodes)
    // Both the reactflow-level and data-level selection flags are reset.
    expect(result.every(node => node.selected === false)).toBe(true)
    expect(result.every(node => node.data.selected === false)).toBe(true)
    expect(result.every(node => node.data._isBundled === false)).toBe(true)
  })
})

View File

@ -0,0 +1,187 @@
import type { SchemaTypeDefinition } from '@/service/use-common'
import type { VarInInspect } from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { FlowType } from '@/types/common'
import { createNode } from '../../__tests__/fixtures'
import { resetReactFlowMockState, rfState } from '../../__tests__/reactflow-mock-state'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum, VarType } from '../../types'
import { useSetWorkflowVarsWithValue } from '../use-fetch-workflow-inspect-vars'
const mockFetchAllInspectVars = vi.hoisted(() => vi.fn())
const mockInvalidateConversationVarValues = vi.hoisted(() => vi.fn())
const mockInvalidateSysVarValues = vi.hoisted(() => vi.fn())
const mockHandleCancelAllNodeSuccessStatus = vi.hoisted(() => vi.fn())
const mockToNodeOutputVars = vi.hoisted(() => vi.fn())
const schemaTypeDefinitions: SchemaTypeDefinition[] = [{
name: 'simple',
schema: {
properties: {},
},
}]
vi.mock('reactflow', async () =>
(await import('../../__tests__/reactflow-mock-state')).createReactFlowModuleMock())
vi.mock('@/service/use-tools', async () =>
(await import('../../__tests__/service-mock-factory')).createToolServiceMock())
vi.mock('@/service/use-workflow', () => ({
useInvalidateConversationVarValues: () => mockInvalidateConversationVarValues,
useInvalidateSysVarValues: () => mockInvalidateSysVarValues,
}))
vi.mock('@/service/workflow', () => ({
fetchAllInspectVars: (...args: unknown[]) => mockFetchAllInspectVars(...args),
}))
vi.mock('../use-nodes-interactions-without-sync', () => ({
useNodesInteractionsWithoutSync: () => ({
handleCancelAllNodeSuccessStatus: mockHandleCancelAllNodeSuccessStatus,
}),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/variable/use-match-schema-type', () => ({
default: () => ({
schemaTypeDefinitions,
}),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/variable/utils', () => ({
toNodeOutputVars: (...args: unknown[]) => mockToNodeOutputVars(...args),
}))
// Factory for a VarInInspect fixture: a string node-output variable on
// node-1 by default, with per-test overrides spread last.
const createInspectVar = (overrides: Partial<VarInInspect> = {}): VarInInspect => ({
  id: 'var-1',
  type: 'node',
  name: 'answer',
  description: 'Answer',
  selector: ['node-1', 'answer'],
  value_type: VarType.string,
  value: 'hello',
  edited: false,
  visible: true,
  is_truncated: false,
  full_content: {
    size_bytes: 5,
    download_url: 'https://example.com/answer.txt',
  },
  ...overrides,
})
describe('use-fetch-workflow-inspect-vars', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    resetReactFlowMockState()
    // Canvas contains a single Code node; the variable-utils mock reports its
    // "answer" output with schemaType "simple" so enrichment can be asserted.
    rfState.nodes = [
      createNode({
        id: 'node-1',
        data: {
          type: BlockEnum.Code,
          title: 'Code',
          desc: '',
        },
      }),
    ]
    mockToNodeOutputVars.mockReturnValue([{
      nodeId: 'node-1',
      vars: [{
        variable: 'answer',
        schemaType: 'simple',
      }],
    }])
  })
  it('fetches inspect vars, invalidates cached values, and stores schema-enriched node vars', async () => {
    // The second var points at a node that is not on the canvas; it should be
    // dropped from the resulting store state.
    mockFetchAllInspectVars.mockResolvedValue([
      createInspectVar(),
      createInspectVar({
        id: 'missing-node-var',
        selector: ['missing-node', 'answer'],
      }),
    ])
    const { result, store } = renderWorkflowHook(
      () => useSetWorkflowVarsWithValue({
        flowType: FlowType.appFlow,
        flowId: 'flow-1',
      }),
      {
        initialStoreState: {
          dataSourceList: [],
        },
      },
    )
    await act(async () => {
      await result.current.fetchInspectVars({})
    })
    // Fetch path must invalidate cached conversation/system values, hit the
    // API with the flow identity, and clear node success highlighting.
    expect(mockInvalidateConversationVarValues).toHaveBeenCalledTimes(1)
    expect(mockInvalidateSysVarValues).toHaveBeenCalledTimes(1)
    expect(mockFetchAllInspectVars).toHaveBeenCalledWith(FlowType.appFlow, 'flow-1')
    expect(mockHandleCancelAllNodeSuccessStatus).toHaveBeenCalledTimes(1)
    // Only the var belonging to the existing node survives, enriched with the
    // schemaType coming from toNodeOutputVars.
    expect(store.getState().nodesWithInspectVars).toEqual([
      expect.objectContaining({
        nodeId: 'node-1',
        nodeType: BlockEnum.Code,
        title: 'Code',
        vars: [
          expect.objectContaining({
            id: 'var-1',
            selector: ['node-1', 'answer'],
            schemaType: 'simple',
            value: 'hello',
          }),
        ],
      }),
    ])
  })
  it('accepts passed-in vars and plugin metadata without refetching from the API', async () => {
    const passedInVars = [
      createInspectVar({
        id: 'var-2',
        value: 'passed-in',
      }),
    ]
    const passedInPluginInfo = {
      buildInTools: [],
      customTools: [],
      workflowTools: [],
      mcpTools: [],
      dataSourceList: [],
    }
    const { result, store } = renderWorkflowHook(
      () => useSetWorkflowVarsWithValue({
        flowType: FlowType.appFlow,
        flowId: 'flow-2',
      }),
      {
        initialStoreState: {
          dataSourceList: [],
        },
      },
    )
    await act(async () => {
      // passInVars short-circuits the API call; vars and plugin/schema info
      // are supplied directly by the caller.
      await result.current.fetchInspectVars({
        passInVars: true,
        vars: passedInVars,
        passedInAllPluginInfoList: passedInPluginInfo,
        passedInSchemaTypeDefinitions: schemaTypeDefinitions,
      })
    })
    await waitFor(() => {
      expect(mockFetchAllInspectVars).not.toHaveBeenCalled()
      expect(store.getState().nodesWithInspectVars[0]?.vars[0]).toMatchObject({
        id: 'var-2',
        value: 'passed-in',
        schemaType: 'simple',
      })
    })
  })
})

View File

@ -0,0 +1,210 @@
import type { SchemaTypeDefinition } from '@/service/use-common'
import type { VarInInspect } from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { FlowType } from '@/types/common'
import { createNode } from '../../__tests__/fixtures'
import { resetReactFlowMockState, rfState } from '../../__tests__/reactflow-mock-state'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum, VarType } from '../../types'
import { useInspectVarsCrudCommon } from '../use-inspect-vars-crud-common'
// Hoisted spies: vi.mock factories are hoisted above imports, so any mock fn
// they reference must be created via vi.hoisted.
const mockFetchNodeInspectVars = vi.hoisted(() => vi.fn())
const mockDoDeleteAllInspectorVars = vi.hoisted(() => vi.fn())
const mockInvalidateConversationVarValues = vi.hoisted(() => vi.fn())
const mockInvalidateSysVarValues = vi.hoisted(() => vi.fn())
const mockHandleCancelNodeSuccessStatus = vi.hoisted(() => vi.fn())
const mockHandleEdgeCancelRunningStatus = vi.hoisted(() => vi.fn())
const mockToNodeOutputVars = vi.hoisted(() => vi.fn())
// Minimal schema-type catalogue used to enrich fetched vars in the tests.
const schemaTypeDefinitions: SchemaTypeDefinition[] = [{
  name: 'simple',
  schema: {
    properties: {},
  },
}]
vi.mock('reactflow', async () =>
  (await import('../../__tests__/reactflow-mock-state')).createReactFlowModuleMock())
// NOTE(review): this mocks '@/service/use-flow' as a default-exported factory
// returning the hooks — confirm that matches the real module shape (the hooks
// may be named exports there).
vi.mock('@/service/use-flow', () => ({
  default: () => ({
    useInvalidateConversationVarValues: () => mockInvalidateConversationVarValues,
    useInvalidateSysVarValues: () => mockInvalidateSysVarValues,
    useResetConversationVar: () => ({ mutateAsync: vi.fn() }),
    useResetToLastRunValue: () => ({ mutateAsync: vi.fn() }),
    useDeleteAllInspectorVars: () => ({ mutateAsync: mockDoDeleteAllInspectorVars }),
    useDeleteNodeInspectorVars: () => ({ mutate: vi.fn() }),
    useDeleteInspectVar: () => ({ mutate: vi.fn() }),
    useEditInspectorVar: () => ({ mutateAsync: vi.fn() }),
  }),
}))
vi.mock('@/service/use-tools', async () =>
  (await import('../../__tests__/service-mock-factory')).createToolServiceMock())
vi.mock('@/service/workflow', () => ({
  fetchNodeInspectVars: (...args: unknown[]) => mockFetchNodeInspectVars(...args),
}))
vi.mock('../use-nodes-interactions-without-sync', () => ({
  useNodesInteractionsWithoutSync: () => ({
    handleCancelNodeSuccessStatus: mockHandleCancelNodeSuccessStatus,
  }),
}))
vi.mock('../use-edges-interactions-without-sync', () => ({
  useEdgesInteractionsWithoutSync: () => ({
    handleEdgeCancelRunningStatus: mockHandleEdgeCancelRunningStatus,
  }),
}))
// Keep the original module but intercept toNodeOutputVars only.
vi.mock('@/app/components/workflow/nodes/_base/components/variable/utils', async importOriginal => ({
  ...(await importOriginal<typeof import('@/app/components/workflow/nodes/_base/components/variable/utils')>()),
  toNodeOutputVars: (...args: unknown[]) => mockToNodeOutputVars(...args),
}))
// Fixture factory: a complete VarInInspect whose fields a test can override.
const createInspectVar = (overrides: Partial<VarInInspect> = {}): VarInInspect => {
  const base: VarInInspect = {
    id: 'var-1',
    type: 'node',
    name: 'answer',
    description: 'Answer',
    selector: ['node-1', 'answer'],
    value_type: VarType.string,
    value: 'hello',
    edited: false,
    visible: true,
    is_truncated: false,
    full_content: {
      size_bytes: 5,
      download_url: 'https://example.com/answer.txt',
    },
  }
  return Object.assign({}, base, overrides)
}
describe('useInspectVarsCrudCommon', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    resetReactFlowMockState()
    // Single Code node on the canvas; its "answer" var carries schemaType
    // "simple" via the mocked toNodeOutputVars.
    rfState.nodes = [
      createNode({
        id: 'node-1',
        data: {
          type: BlockEnum.Code,
          title: 'Code',
          desc: '',
        },
      }),
    ]
    mockToNodeOutputVars.mockReturnValue([{
      nodeId: 'node-1',
      vars: [{
        variable: 'answer',
        schemaType: 'simple',
      }],
    }])
  })
  it('invalidates cached system vars without refetching node values for system selectors', async () => {
    const { result } = renderWorkflowHook(
      () => useInspectVarsCrudCommon({
        flowId: 'flow-1',
        flowType: FlowType.appFlow,
      }),
      {
        initialStoreState: {
          dataSourceList: [],
        },
      },
    )
    await act(async () => {
      // 'sys' selectors are served from the cached system-var query, not a
      // per-node fetch.
      await result.current.fetchInspectVarValue(['sys', 'query'], schemaTypeDefinitions)
    })
    expect(mockInvalidateSysVarValues).toHaveBeenCalledTimes(1)
    expect(mockFetchNodeInspectVars).not.toHaveBeenCalled()
  })
  it('fetches node inspect vars, adds schema types, and marks the node as fetched', async () => {
    mockFetchNodeInspectVars.mockResolvedValue([
      createInspectVar(),
    ])
    const { result, store } = renderWorkflowHook(
      () => useInspectVarsCrudCommon({
        flowId: 'flow-1',
        flowType: FlowType.appFlow,
      }),
      {
        // Seed the store with the node entry whose vars are to be filled in.
        initialStoreState: {
          dataSourceList: [],
          nodesWithInspectVars: [{
            nodeId: 'node-1',
            nodePayload: {
              type: BlockEnum.Code,
              title: 'Code',
              desc: '',
            } as never,
            nodeType: BlockEnum.Code,
            title: 'Code',
            vars: [],
          }],
        },
      },
    )
    await act(async () => {
      await result.current.fetchInspectVarValue(['node-1', 'answer'], schemaTypeDefinitions)
    })
    await waitFor(() => {
      expect(mockFetchNodeInspectVars).toHaveBeenCalledWith(FlowType.appFlow, 'flow-1', 'node-1')
      // Store entry should now be flagged fetched and carry the enriched var.
      expect(store.getState().nodesWithInspectVars[0]).toMatchObject({
        nodeId: 'node-1',
        isValueFetched: true,
        vars: [
          expect.objectContaining({
            id: 'var-1',
            schemaType: 'simple',
          }),
        ],
      })
    })
  })
  it('deletes all inspect vars, invalidates cached values, and clears edge running state', async () => {
    mockDoDeleteAllInspectorVars.mockResolvedValue(undefined)
    const { result, store } = renderWorkflowHook(
      () => useInspectVarsCrudCommon({
        flowId: 'flow-1',
        flowType: FlowType.appFlow,
      }),
      {
        initialStoreState: {
          nodesWithInspectVars: [{
            nodeId: 'node-1',
            nodePayload: {
              type: BlockEnum.Code,
              title: 'Code',
              desc: '',
            } as never,
            nodeType: BlockEnum.Code,
            title: 'Code',
            vars: [createInspectVar()],
          }],
        },
      },
    )
    await act(async () => {
      await result.current.deleteAllInspectorVars()
    })
    // Wholesale delete must cascade: API call, cache invalidation, edge
    // running-status reset, and an emptied store.
    expect(mockDoDeleteAllInspectorVars).toHaveBeenCalledTimes(1)
    expect(mockInvalidateConversationVarValues).toHaveBeenCalledTimes(1)
    expect(mockInvalidateSysVarValues).toHaveBeenCalledTimes(1)
    expect(mockHandleEdgeCancelRunningStatus).toHaveBeenCalledTimes(1)
    expect(store.getState().nodesWithInspectVars).toEqual([])
  })
})

View File

@ -0,0 +1,135 @@
import type { VarInInspect } from '@/types/workflow'
import { FlowType } from '@/types/common'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum, VarType } from '../../types'
import useInspectVarsCrud from '../use-inspect-vars-crud'
// Hoisted spies for the two value queries; created with vi.hoisted because
// the vi.mock factory below is hoisted above this file's imports.
const mockUseConversationVarValues = vi.hoisted(() => vi.fn())
const mockUseSysVarValues = vi.hoisted(() => vi.fn())
vi.mock('@/service/use-workflow', () => ({
  useConversationVarValues: (...args: unknown[]) => mockUseConversationVarValues(...args),
  useSysVarValues: (...args: unknown[]) => mockUseSysVarValues(...args),
}))
// Fixture factory: a complete VarInInspect whose fields a test can override.
const createInspectVar = (overrides: Partial<VarInInspect> = {}): VarInInspect => {
  const base: VarInInspect = {
    id: 'var-1',
    type: 'node',
    name: 'answer',
    description: 'Answer',
    selector: ['node-1', 'answer'],
    value_type: VarType.string,
    value: 'hello',
    edited: false,
    visible: true,
    is_truncated: false,
    full_content: {
      size_bytes: 5,
      download_url: 'https://example.com/answer.txt',
    },
  }
  return Object.assign({}, base, overrides)
}
describe('useInspectVarsCrud', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Conversation query yields one var; system query yields query/files/time.
    mockUseConversationVarValues.mockReturnValue({
      data: [createInspectVar({
        id: 'conversation-var',
        name: 'history',
        selector: ['conversation', 'history'],
      })],
    })
    mockUseSysVarValues.mockReturnValue({
      data: [
        createInspectVar({
          id: 'query-var',
          name: 'query',
          selector: ['sys', 'query'],
        }),
        createInspectVar({
          id: 'files-var',
          name: 'files',
          selector: ['sys', 'files'],
        }),
        createInspectVar({
          id: 'time-var',
          name: 'time',
          selector: ['sys', 'time'],
        }),
      ],
    })
  })
  it('appends query/files system vars to start-node inspect vars and filters them from the system list', () => {
    const hasNodeInspectVars = vi.fn(() => true)
    const deleteAllInspectorVars = vi.fn()
    const fetchInspectVarValue = vi.fn()
    const { result } = renderWorkflowHook(() => useInspectVarsCrud(), {
      initialStoreState: {
        nodesWithInspectVars: [{
          nodeId: 'start-node',
          nodePayload: {
            type: BlockEnum.Start,
            title: 'Start',
            desc: '',
          } as never,
          nodeType: BlockEnum.Start,
          title: 'Start',
          vars: [createInspectVar({
            id: 'start-answer',
            selector: ['start-node', 'answer'],
          })],
        }],
      },
      hooksStoreProps: {
        configsMap: {
          flowId: 'flow-1',
          flowType: FlowType.appFlow,
          fileSettings: {} as never,
        },
        hasNodeInspectVars,
        fetchInspectVarValue,
        editInspectVarValue: vi.fn(),
        renameInspectVarName: vi.fn(),
        appendNodeInspectVars: vi.fn(),
        deleteInspectVar: vi.fn(),
        deleteNodeInspectorVars: vi.fn(),
        deleteAllInspectorVars,
        isInspectVarEdited: vi.fn(() => false),
        resetToLastRunVar: vi.fn(),
        invalidateSysVarValues: vi.fn(),
        resetConversationVar: vi.fn(),
        invalidateConversationVarValues: vi.fn(),
        hasSetInspectVar: vi.fn(() => false),
      },
    })
    // query/files move onto the Start node; only 'time' stays in systemVars.
    expect(result.current.conversationVars).toHaveLength(1)
    expect(result.current.systemVars.map(item => item.name)).toEqual(['time'])
    expect(result.current.nodesWithInspectVars[0]?.vars.map(item => item.name)).toEqual([
      'answer',
      'query',
      'files',
    ])
    // Callbacks injected via the hooks store are passed through by identity.
    expect(result.current.hasNodeInspectVars).toBe(hasNodeInspectVars)
    expect(result.current.fetchInspectVarValue).toBe(fetchInspectVarValue)
    expect(result.current.deleteAllInspectorVars).toBe(deleteAllInspectorVars)
  })
  it('uses an empty flow id for rag pipeline conversation and system value queries', () => {
    renderWorkflowHook(() => useInspectVarsCrud(), {
      hooksStoreProps: {
        configsMap: {
          flowId: 'rag-flow',
          flowType: FlowType.ragPipeline,
          fileSettings: {} as never,
        },
      },
    })
    // RAG pipelines must not query by app flow id; an empty id is passed so
    // the value queries stay disabled.
    expect(mockUseConversationVarValues).toHaveBeenCalledWith(FlowType.ragPipeline, '')
    expect(mockUseSysVarValues).toHaveBeenCalledWith(FlowType.ragPipeline, '')
  })
})

View File

@ -0,0 +1,110 @@
import type { Node, NodeOutPutVar, Var } from '../../types'
import { renderHook } from '@testing-library/react'
import { BlockEnum, VarType } from '../../types'
import useNodesAvailableVarList, { useGetNodesAvailableVarList } from '../use-nodes-available-var-list'
// Hoisted spies for the workflow traversal/variable helpers consumed by the
// hook under test.
const mockGetTreeLeafNodes = vi.hoisted(() => vi.fn())
const mockGetBeforeNodesInSameBranchIncludeParent = vi.hoisted(() => vi.fn())
const mockGetNodeAvailableVars = vi.hoisted(() => vi.fn())
vi.mock('@/app/components/workflow/hooks', () => ({
  // Tests run as if in chat mode.
  useIsChatMode: () => true,
  useWorkflow: () => ({
    getTreeLeafNodes: mockGetTreeLeafNodes,
    getBeforeNodesInSameBranchIncludeParent: mockGetBeforeNodesInSameBranchIncludeParent,
  }),
  useWorkflowVariables: () => ({
    getNodeAvailableVars: mockGetNodeAvailableVars,
  }),
}))
// Minimal Node factory; cast via `as Node` because only the fields used by
// the hook are populated.
const createNode = (overrides: Partial<Node> = {}): Node => ({
  id: 'node-1',
  type: 'custom',
  position: { x: 0, y: 0 },
  data: {
    type: BlockEnum.LLM,
    title: 'Node',
    desc: '',
  },
  ...overrides,
} as Node)
// Canned result returned by the mocked getNodeAvailableVars.
const outputVars: NodeOutPutVar[] = [{
  nodeId: 'vars-node',
  title: 'Vars',
  vars: [{
    variable: 'name',
    type: VarType.string,
  }] satisfies Var[],
}]
describe('useNodesAvailableVarList', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Each traversal helper returns a single synthetic node derived from the
    // queried node id, so call wiring can be asserted per node.
    mockGetBeforeNodesInSameBranchIncludeParent.mockImplementation((nodeId: string) => [createNode({ id: `before-${nodeId}` })])
    mockGetTreeLeafNodes.mockImplementation((nodeId: string) => [createNode({ id: `leaf-${nodeId}` })])
    mockGetNodeAvailableVars.mockReturnValue(outputVars)
  })
  it('builds availability per node, carrying loop nodes and parent iteration context', () => {
    const loopNode = createNode({
      id: 'loop-1',
      data: {
        type: BlockEnum.Loop,
        title: 'Loop',
        desc: '',
      },
    })
    // child-1 lives inside the loop via parentId, so the loop node becomes
    // its parent context.
    const childNode = createNode({
      id: 'child-1',
      parentId: 'loop-1',
      data: {
        type: BlockEnum.LLM,
        title: 'Writer',
        desc: '',
      },
    })
    const filterVar = vi.fn(() => true)
    const { result } = renderHook(() => useNodesAvailableVarList([loopNode, childNode], {
      filterVar,
      hideEnv: true,
      hideChatVar: true,
    }))
    expect(mockGetBeforeNodesInSameBranchIncludeParent).toHaveBeenCalledWith('loop-1')
    expect(mockGetBeforeNodesInSameBranchIncludeParent).toHaveBeenCalledWith('child-1')
    // A loop node is also available to itself (appended after its ancestors).
    expect(result.current['loop-1']?.availableNodes.map(node => node.id)).toEqual(['before-loop-1', 'loop-1'])
    expect(result.current['child-1']?.availableVars).toBe(outputVars)
    // Second call corresponds to child-1 and must carry the parent loop node
    // plus the pass-through options.
    expect(mockGetNodeAvailableVars).toHaveBeenNthCalledWith(2, expect.objectContaining({
      parentNode: loopNode,
      isChatMode: true,
      filterVar,
      hideEnv: true,
      hideChatVar: true,
    }))
  })
  it('returns a callback version that can use leaf nodes or caller-provided nodes', () => {
    const firstNode = createNode({ id: 'node-a' })
    const secondNode = createNode({ id: 'node-b' })
    const filterVar = vi.fn(() => true)
    const passedInAvailableNodes = [createNode({ id: 'manual-node' })]
    const { result } = renderHook(() => useGetNodesAvailableVarList())
    // onlyLeafNodeVar switches the source of available nodes to tree leaves.
    const leafMap = result.current.getNodesAvailableVarList([firstNode], {
      onlyLeafNodeVar: true,
      filterVar,
    })
    // Explicitly provided nodes are used verbatim (same array identity).
    const manualMap = result.current.getNodesAvailableVarList([secondNode], {
      filterVar,
      passedInAvailableNodes,
    })
    expect(mockGetTreeLeafNodes).toHaveBeenCalledWith('node-a')
    expect(leafMap['node-a']?.availableNodes.map(node => node.id)).toEqual(['leaf-node-a'])
    expect(manualMap['node-b']?.availableNodes).toBe(passedInAvailableNodes)
  })
})

View File

@ -0,0 +1,119 @@
import { act, waitFor } from '@testing-library/react'
import { useNodes } from 'reactflow'
import { createNode } from '../../__tests__/fixtures'
import { renderWorkflowFlowHook } from '../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../types'
import { useNodesInteractionsWithoutSync } from '../use-nodes-interactions-without-sync'
// Narrow view of a node's private runtime flags set during execution.
type NodeRuntimeState = {
  _runningStatus?: NodeRunningStatus
  _waitingRun?: boolean
}
// Safely read the runtime flags off a (possibly missing) node.
const getNodeRuntimeState = (node?: { data?: unknown }): NodeRuntimeState =>
  (node?.data ?? {}) as NodeRuntimeState
// Three nodes covering the Running / Succeeded / Failed status cases.
const createFlowNodes = () => [
  createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running, _waitingRun: true } }),
  createNode({ id: 'n2', position: { x: 100, y: 0 }, data: { _runningStatus: NodeRunningStatus.Succeeded, _waitingRun: false } }),
  createNode({ id: 'n3', position: { x: 200, y: 0 }, data: { _runningStatus: NodeRunningStatus.Failed, _waitingRun: true } }),
]
// Renders the hook together with live reactflow node state so assertions can
// observe the mutations it performs.
const renderNodesInteractionsHook = () =>
  renderWorkflowFlowHook(() => ({
    ...useNodesInteractionsWithoutSync(),
    nodes: useNodes(),
  }), {
    nodes: createFlowNodes(),
    edges: [],
  })
describe('useNodesInteractionsWithoutSync', () => {
  it('clears _runningStatus and _waitingRun on all nodes', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      result.current.handleNodeCancelRunningStatus()
    })
    // Every node, regardless of prior status, ends with no running status and
    // _waitingRun reset to false.
    await waitFor(() => {
      result.current.nodes.forEach((node) => {
        const nodeState = getNodeRuntimeState(node)
        expect(nodeState._runningStatus).toBeUndefined()
        expect(nodeState._waitingRun).toBe(false)
      })
    })
  })
  it('clears _runningStatus only for Succeeded nodes', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      result.current.handleCancelAllNodeSuccessStatus()
    })
    // Only n2 (Succeeded) is cleared; Running and Failed are untouched.
    await waitFor(() => {
      const n1 = result.current.nodes.find(node => node.id === 'n1')
      const n2 = result.current.nodes.find(node => node.id === 'n2')
      const n3 = result.current.nodes.find(node => node.id === 'n3')
      expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(n2)._runningStatus).toBeUndefined()
      expect(getNodeRuntimeState(n3)._runningStatus).toBe(NodeRunningStatus.Failed)
    })
  })
  it('does not modify _waitingRun when clearing all success status', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      result.current.handleCancelAllNodeSuccessStatus()
    })
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n1'))._waitingRun).toBe(true)
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n3'))._waitingRun).toBe(true)
    })
  })
  it('clears _runningStatus and _waitingRun for the specified succeeded node', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      result.current.handleCancelNodeSuccessStatus('n2')
    })
    await waitFor(() => {
      const n2 = result.current.nodes.find(node => node.id === 'n2')
      expect(getNodeRuntimeState(n2)._runningStatus).toBeUndefined()
      expect(getNodeRuntimeState(n2)._waitingRun).toBe(false)
    })
  })
  it('does not modify nodes that are not succeeded', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      // n1 is Running, so the targeted cancel must be a no-op.
      result.current.handleCancelNodeSuccessStatus('n1')
    })
    await waitFor(() => {
      const n1 = result.current.nodes.find(node => node.id === 'n1')
      expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(n1)._waitingRun).toBe(true)
    })
  })
  it('does not modify other nodes', async () => {
    const { result } = renderNodesInteractionsHook()
    act(() => {
      result.current.handleCancelNodeSuccessStatus('n2')
    })
    await waitFor(() => {
      const n1 = result.current.nodes.find(node => node.id === 'n1')
      expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
    })
  })
})

View File

@ -0,0 +1,205 @@
import type { Edge, Node } from '../../types'
import { act } from '@testing-library/react'
import { createEdge, createNode } from '../../__tests__/fixtures'
import { resetReactFlowMockState, rfState } from '../../__tests__/reactflow-mock-state'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum } from '../../types'
import { useNodesInteractions } from '../use-nodes-interactions'
// Hoisted spies for draft syncing and history navigation.
const mockHandleSyncWorkflowDraft = vi.hoisted(() => vi.fn())
const mockSaveStateToHistory = vi.hoisted(() => vi.fn())
const mockUndo = vi.hoisted(() => vi.fn())
const mockRedo = vi.hoisted(() => vi.fn())
// Mutable read-only flags toggled per test (read lazily by the mocks below).
const runtimeState = vi.hoisted(() => ({
  nodesReadOnly: false,
  workflowReadOnly: false,
}))
let currentNodes: Node[] = []
let currentEdges: Edge[] = []
vi.mock('reactflow', async () =>
  (await import('../../__tests__/reactflow-mock-state')).createReactFlowModuleMock())
vi.mock('../use-workflow', () => ({
  useWorkflow: () => ({
    getAfterNodesInSameBranch: () => [],
  }),
  useNodesReadOnly: () => ({
    getNodesReadOnly: () => runtimeState.nodesReadOnly,
  }),
  useWorkflowReadOnly: () => ({
    getWorkflowReadOnly: () => runtimeState.workflowReadOnly,
  }),
}))
vi.mock('../use-helpline', () => ({
  useHelpline: () => ({
    handleSetHelpline: () => ({
      showHorizontalHelpLineNodes: [],
      showVerticalHelpLineNodes: [],
    }),
  }),
}))
vi.mock('../use-nodes-meta-data', () => ({
  useNodesMetaData: () => ({
    nodesMap: {},
  }),
}))
vi.mock('../use-nodes-sync-draft', () => ({
  useNodesSyncDraft: () => ({
    handleSyncWorkflowDraft: mockHandleSyncWorkflowDraft,
  }),
}))
vi.mock('../use-auto-generate-webhook-url', () => ({
  useAutoGenerateWebhookUrl: () => vi.fn(),
}))
vi.mock('../use-inspect-vars-crud', () => ({
  default: () => ({
    deleteNodeInspectorVars: vi.fn(),
  }),
}))
vi.mock('../../nodes/iteration/use-interactions', () => ({
  useNodeIterationInteractions: () => ({
    handleNodeIterationChildDrag: () => ({ restrictPosition: {} }),
    handleNodeIterationChildrenCopy: vi.fn(),
  }),
}))
vi.mock('../../nodes/loop/use-interactions', () => ({
  useNodeLoopInteractions: () => ({
    handleNodeLoopChildDrag: () => ({ restrictPosition: {} }),
    handleNodeLoopChildrenCopy: vi.fn(),
  }),
}))
// Keep the real module but replace only useWorkflowHistory with spies.
vi.mock('../use-workflow-history', async importOriginal => ({
  ...(await importOriginal<typeof import('../use-workflow-history')>()),
  useWorkflowHistory: () => ({
    saveStateToHistory: mockSaveStateToHistory,
    undo: mockUndo,
    redo: mockRedo,
  }),
}))
describe('useNodesInteractions', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    resetReactFlowMockState()
    runtimeState.nodesReadOnly = false
    runtimeState.workflowReadOnly = false
    // Baseline graph: one Code node and a dangling edge out of it.
    currentNodes = [
      createNode({
        id: 'node-1',
        position: { x: 10, y: 20 },
        data: {
          type: BlockEnum.Code,
          title: 'Code',
          desc: '',
        },
      }),
    ]
    currentEdges = [
      createEdge({
        id: 'edge-1',
        source: 'node-1',
        target: 'node-2',
      }),
    ]
    rfState.nodes = currentNodes as unknown as typeof rfState.nodes
    rfState.edges = currentEdges as unknown as typeof rfState.edges
  })
  it('persists node drags only when the node position actually changes', () => {
    const node = currentNodes[0]
    const movedNode = {
      ...node,
      position: { x: 120, y: 80 },
    }
    const { result, store } = renderWorkflowHook(() => useNodesInteractions(), {
      historyStore: {
        nodes: currentNodes,
        edges: currentEdges,
      },
    })
    act(() => {
      result.current.handleNodeDragStart({} as never, node, currentNodes)
      result.current.handleNodeDragStop({} as never, movedNode, currentNodes)
    })
    // Drag-stop on a moved node syncs the draft once and records a history
    // snapshot tagged with the dragged node id.
    expect(store.getState().nodeAnimation).toBe(false)
    expect(mockHandleSyncWorkflowDraft).toHaveBeenCalledTimes(1)
    expect(mockSaveStateToHistory).toHaveBeenCalledWith('NodeDragStop', {
      nodeId: 'node-1',
    })
  })
  it('restores history snapshots on undo and clears the edge menu', () => {
    const historyNodes = [
      createNode({
        id: 'history-node',
        data: {
          type: BlockEnum.End,
          title: 'End',
          desc: '',
        },
      }),
    ]
    const historyEdges = [
      createEdge({
        id: 'history-edge',
        source: 'history-node',
        target: 'node-1',
      }),
    ]
    const { result, store } = renderWorkflowHook(() => useNodesInteractions(), {
      initialStoreState: {
        edgeMenu: {
          id: 'edge-1',
        } as never,
      },
      historyStore: {
        nodes: historyNodes,
        edges: historyEdges,
      },
    })
    act(() => {
      result.current.handleHistoryBack()
    })
    // Undo applies the snapshot from the history store and dismisses any open
    // edge context menu.
    expect(mockUndo).toHaveBeenCalledTimes(1)
    expect(rfState.setNodes).toHaveBeenCalledWith(historyNodes)
    expect(rfState.setEdges).toHaveBeenCalledWith(historyEdges)
    expect(store.getState().edgeMenu).toBeUndefined()
  })
  it('skips undo and redo when the workflow is read-only', () => {
    runtimeState.workflowReadOnly = true
    const { result } = renderWorkflowHook(() => useNodesInteractions(), {
      historyStore: {
        nodes: currentNodes,
        edges: currentEdges,
      },
    })
    act(() => {
      result.current.handleHistoryBack()
      result.current.handleHistoryForward()
    })
    // Read-only mode blocks history navigation entirely.
    expect(mockUndo).not.toHaveBeenCalled()
    expect(mockRedo).not.toHaveBeenCalled()
    expect(rfState.setNodes).not.toHaveBeenCalled()
    expect(rfState.setEdges).not.toHaveBeenCalled()
  })
})

View File

@ -0,0 +1,153 @@
import type { Node } from '../../types'
import { CollectionType } from '@/app/components/tools/types'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum } from '../../types'
import { useNodeMetaData, useNodesMetaData } from '../use-nodes-meta-data'
// Mutable provider lists; tests push entries in, beforeEach empties them.
const buildInToolsState = vi.hoisted(() => [] as Array<{ id: string, author: string, description: Record<string, string> }>)
const customToolsState = vi.hoisted(() => [] as Array<{ id: string, author: string, description: Record<string, string> }>)
const workflowToolsState = vi.hoisted(() => [] as Array<{ id: string, author: string, description: Record<string, string> }>)
// Pin the UI language so description lookups resolve the 'en-US' key.
vi.mock('@/context/i18n', () => ({
  useGetLanguage: () => 'en-US',
}))
vi.mock('@/service/use-tools', () => ({
  useAllBuiltInTools: () => ({ data: buildInToolsState }),
  useAllCustomTools: () => ({ data: customToolsState }),
  useAllWorkflowTools: () => ({ data: workflowToolsState }),
}))
// Minimal Node factory; cast via `as Node` since only used fields are set.
const createNode = (overrides: Partial<Node> = {}): Node => ({
  id: 'node-1',
  type: 'custom',
  position: { x: 0, y: 0 },
  data: {
    type: BlockEnum.LLM,
    title: 'Node',
    desc: '',
  },
  ...overrides,
} as Node)
describe('useNodesMetaData', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Empty the shared provider arrays in place (they are module-level state).
    buildInToolsState.length = 0
    customToolsState.length = 0
    workflowToolsState.length = 0
  })
  it('returns empty metadata collections when the hooks store has no node map', () => {
    const { result } = renderWorkflowHook(() => useNodesMetaData(), {
      hooksStoreProps: {},
    })
    expect(result.current).toEqual({
      nodes: [],
      nodesMap: {},
    })
  })
  it('resolves built-in tool metadata from tool providers', () => {
    buildInToolsState.push({
      id: 'provider-1',
      author: 'Provider Author',
      description: {
        'en-US': 'Built-in provider description',
      },
    })
    // Tool node referencing the provider above by provider_id.
    const toolNode = createNode({
      data: {
        type: BlockEnum.Tool,
        title: 'Tool Node',
        desc: '',
        provider_type: CollectionType.builtIn,
        provider_id: 'provider-1',
      },
    })
    const { result } = renderWorkflowHook(() => useNodeMetaData(toolNode), {
      hooksStoreProps: {
        availableNodesMetaData: {
          nodes: [],
        },
      },
    })
    expect(result.current).toEqual(expect.objectContaining({
      author: 'Provider Author',
      description: 'Built-in provider description',
    }))
  })
  it('prefers workflow store data for datasource nodes and keeps generic metadata for normal blocks', () => {
    const datasourceNode = createNode({
      data: {
        type: BlockEnum.DataSource,
        title: 'Dataset',
        desc: '',
        plugin_id: 'datasource-1',
      },
    })
    const normalNode = createNode({
      data: {
        type: BlockEnum.LLM,
        title: 'Writer',
        desc: '',
      },
    })
    // Store-backed datasource entry matched via plugin_id.
    const datasource = {
      plugin_id: 'datasource-1',
      author: 'Datasource Author',
      description: {
        'en-US': 'Datasource description',
      },
    }
    // Generic metadata used for non-datasource blocks.
    const metadataMap = {
      [BlockEnum.LLM]: {
        metaData: {
          type: BlockEnum.LLM,
          title: 'LLM',
          author: 'Dify',
          description: 'Node description',
        },
      },
    }
    const datasourceResult = renderWorkflowHook(() => useNodeMetaData(datasourceNode), {
      initialStoreState: {
        dataSourceList: [datasource as never],
      },
      hooksStoreProps: {
        availableNodesMetaData: {
          nodes: [],
          nodesMap: metadataMap as never,
        },
      },
    })
    const normalResult = renderWorkflowHook(() => useNodeMetaData(normalNode), {
      hooksStoreProps: {
        availableNodesMetaData: {
          nodes: [],
          nodesMap: metadataMap as never,
        },
      },
    })
    expect(datasourceResult.result.current).toEqual(expect.objectContaining({
      author: 'Datasource Author',
      description: 'Datasource description',
    }))
    expect(normalResult.result.current).toEqual(expect.objectContaining({
      author: 'Dify',
      description: 'Node description',
      title: 'LLM',
    }))
  })
})

View File

@ -0,0 +1,14 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useSetWorkflowVarsWithValue } from '../use-set-workflow-vars-with-value'
describe('useSetWorkflowVarsWithValue', () => {
  it('returns fetchInspectVars from hooks store', () => {
    // The hook must expose the exact function instance injected through the
    // hooks store, not a wrapped copy.
    const injectedFetch = vi.fn()
    const rendered = renderWorkflowHook(
      () => useSetWorkflowVarsWithValue(),
      { hooksStoreProps: { fetchInspectVars: injectedFetch } },
    )
    expect(rendered.result.current.fetchInspectVars).toBe(injectedFetch)
  })
})

View File

@ -0,0 +1,168 @@
import { act } from '@testing-library/react'
import { ZEN_TOGGLE_EVENT } from '@/app/components/goto-anything/actions/commands/zen'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useShortcuts } from '../use-shortcuts'
// Shape of one useKeyPress registration captured by the ahooks mock.
type KeyPressRegistration = {
  keyFilter: unknown
  handler: (event: KeyboardEvent) => void
  options?: {
    events?: string[]
  }
}
// Registrations are recorded here so tests can look up and fire handlers.
const keyPressRegistrations = vi.hoisted<KeyPressRegistration[]>(() => [])
const mockZoomTo = vi.hoisted(() => vi.fn())
const mockGetZoom = vi.hoisted(() => vi.fn(() => 1))
const mockFitView = vi.hoisted(() => vi.fn())
const mockHandleNodesDelete = vi.hoisted(() => vi.fn())
const mockHandleEdgeDelete = vi.hoisted(() => vi.fn())
const mockHandleNodesCopy = vi.hoisted(() => vi.fn())
const mockHandleNodesPaste = vi.hoisted(() => vi.fn())
const mockHandleNodesDuplicate = vi.hoisted(() => vi.fn())
const mockHandleHistoryBack = vi.hoisted(() => vi.fn())
const mockHandleHistoryForward = vi.hoisted(() => vi.fn())
const mockDimOtherNodes = vi.hoisted(() => vi.fn())
const mockUndimAllNodes = vi.hoisted(() => vi.fn())
const mockHandleSyncWorkflowDraft = vi.hoisted(() => vi.fn())
const mockHandleModeHand = vi.hoisted(() => vi.fn())
const mockHandleModePointer = vi.hoisted(() => vi.fn())
const mockHandleLayout = vi.hoisted(() => vi.fn())
const mockHandleToggleMaximizeCanvas = vi.hoisted(() => vi.fn())
// Instead of binding real key handlers, record every registration.
vi.mock('ahooks', () => ({
  useKeyPress: (keyFilter: unknown, handler: (event: KeyboardEvent) => void, options?: { events?: string[] }) => {
    keyPressRegistrations.push({ keyFilter, handler, options })
  },
}))
vi.mock('reactflow', () => ({
  useReactFlow: () => ({
    zoomTo: mockZoomTo,
    getZoom: mockGetZoom,
    fitView: mockFitView,
  }),
}))
// Mock the hooks barrel ('..') that useShortcuts pulls its actions from.
vi.mock('..', () => ({
  useNodesInteractions: () => ({
    handleNodesCopy: mockHandleNodesCopy,
    handleNodesPaste: mockHandleNodesPaste,
    handleNodesDuplicate: mockHandleNodesDuplicate,
    handleNodesDelete: mockHandleNodesDelete,
    handleHistoryBack: mockHandleHistoryBack,
    handleHistoryForward: mockHandleHistoryForward,
    dimOtherNodes: mockDimOtherNodes,
    undimAllNodes: mockUndimAllNodes,
  }),
  useEdgesInteractions: () => ({
    handleEdgeDelete: mockHandleEdgeDelete,
  }),
  useNodesSyncDraft: () => ({
    handleSyncWorkflowDraft: mockHandleSyncWorkflowDraft,
  }),
  useWorkflowCanvasMaximize: () => ({
    handleToggleMaximizeCanvas: mockHandleToggleMaximizeCanvas,
  }),
  useWorkflowMoveMode: () => ({
    handleModeHand: mockHandleModeHand,
    handleModePointer: mockHandleModePointer,
  }),
  useWorkflowOrganize: () => ({
    handleLayout: mockHandleLayout,
  }),
}))
vi.mock('../../workflow-history-store', () => ({
  useWorkflowHistoryStore: () => ({
    shortcutsEnabled: true,
  }),
}))
// Fabricate a minimal KeyboardEvent-like object with a spyable preventDefault.
const createKeyboardEvent = (target: HTMLElement = document.body) => ({
  preventDefault: vi.fn(),
  target,
}) as unknown as KeyboardEvent
// Locate a captured registration, failing the test if none matches.
const findRegistration = (matcher: (registration: KeyPressRegistration) => boolean) => {
  const registration = keyPressRegistrations.find(matcher)
  expect(registration).toBeDefined()
  return registration as KeyPressRegistration
}
describe('useShortcuts', () => {
  beforeEach(() => {
    // Drop registrations captured by previous tests (shared hoisted array).
    keyPressRegistrations.length = 0
    vi.clearAllMocks()
  })
  it('deletes selected nodes and edges only outside editable inputs', () => {
    renderWorkflowHook(() => useShortcuts())
    const deleteShortcut = findRegistration(registration =>
      Array.isArray(registration.keyFilter)
      && registration.keyFilter.includes('delete'),
    )
    // Firing from document.body deletes; counts stay at 1 afterwards.
    const bodyEvent = createKeyboardEvent()
    deleteShortcut.handler(bodyEvent)
    expect(bodyEvent.preventDefault).toHaveBeenCalled()
    expect(mockHandleNodesDelete).toHaveBeenCalledTimes(1)
    expect(mockHandleEdgeDelete).toHaveBeenCalledTimes(1)
    // Firing from an <input> must be ignored so typing is not destructive.
    const inputEvent = createKeyboardEvent(document.createElement('input'))
    deleteShortcut.handler(inputEvent)
    expect(mockHandleNodesDelete).toHaveBeenCalledTimes(1)
    expect(mockHandleEdgeDelete).toHaveBeenCalledTimes(1)
  })
  it('runs layout and zoom shortcuts through the workflow actions', () => {
    renderWorkflowHook(() => useShortcuts())
    const layoutShortcut = findRegistration(registration => registration.keyFilter === 'ctrl.o' || registration.keyFilter === 'meta.o')
    const fitViewShortcut = findRegistration(registration => registration.keyFilter === 'ctrl.1' || registration.keyFilter === 'meta.1')
    const halfZoomShortcut = findRegistration(registration => registration.keyFilter === 'shift.5')
    const zoomOutShortcut = findRegistration(registration => registration.keyFilter === 'ctrl.dash' || registration.keyFilter === 'meta.dash')
    const zoomInShortcut = findRegistration(registration => registration.keyFilter === 'ctrl.equalsign' || registration.keyFilter === 'meta.equalsign')
    layoutShortcut.handler(createKeyboardEvent())
    fitViewShortcut.handler(createKeyboardEvent())
    halfZoomShortcut.handler(createKeyboardEvent())
    zoomOutShortcut.handler(createKeyboardEvent())
    zoomInShortcut.handler(createKeyboardEvent())
    expect(mockHandleLayout).toHaveBeenCalledTimes(1)
    expect(mockFitView).toHaveBeenCalledTimes(1)
    // getZoom is mocked to 1, so -/+ step to 0.9 and 1.1; shift.5 jumps to 0.5.
    expect(mockZoomTo).toHaveBeenNthCalledWith(1, 0.5)
    expect(mockZoomTo).toHaveBeenNthCalledWith(2, 0.9)
    expect(mockZoomTo).toHaveBeenNthCalledWith(3, 1.1)
    // Each view-changing shortcut (fit + 3 zooms) syncs the draft once.
    expect(mockHandleSyncWorkflowDraft).toHaveBeenCalledTimes(4)
  })
  it('dims on shift down, undims on shift up, and responds to zen toggle events', () => {
    const { unmount } = renderWorkflowHook(() => useShortcuts())
    const shiftDownShortcut = findRegistration(registration => registration.keyFilter === 'shift' && registration.options?.events?.[0] === 'keydown')
    const shiftUpShortcut = findRegistration(registration => typeof registration.keyFilter === 'function' && registration.options?.events?.[0] === 'keyup')
    shiftDownShortcut.handler(createKeyboardEvent())
    shiftUpShortcut.handler({ ...createKeyboardEvent(), key: 'Shift' } as KeyboardEvent)
    expect(mockDimOtherNodes).toHaveBeenCalledTimes(1)
    expect(mockUndimAllNodes).toHaveBeenCalledTimes(1)
    act(() => {
      window.dispatchEvent(new Event(ZEN_TOGGLE_EVENT))
    })
    expect(mockHandleToggleMaximizeCanvas).toHaveBeenCalledTimes(1)
    // After unmount the window listener must be removed: dispatching again
    // leaves the call count unchanged.
    unmount()
    act(() => {
      window.dispatchEvent(new Event(ZEN_TOGGLE_EVENT))
    })
    expect(mockHandleToggleMaximizeCanvas).toHaveBeenCalledTimes(1)
  })
})

View File

@@ -1,209 +0,0 @@
import { act, waitFor } from '@testing-library/react'
import { useEdges, useNodes } from 'reactflow'
import { createEdge, createNode } from '../../__tests__/fixtures'
import { renderWorkflowFlowHook } from '../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../types'
import { useEdgesInteractionsWithoutSync } from '../use-edges-interactions-without-sync'
import { useNodesInteractionsWithoutSync } from '../use-nodes-interactions-without-sync'
// Runtime flags the workflow engine stores on an edge's data payload.
type EdgeRuntimeState = {
_sourceRunningStatus?: NodeRunningStatus
_targetRunningStatus?: NodeRunningStatus
_waitingRun?: boolean
}
// Runtime flags the workflow engine stores on a node's data payload.
type NodeRuntimeState = {
_runningStatus?: NodeRunningStatus
_waitingRun?: boolean
}
// Read an edge's runtime flags, tolerating a missing edge or missing data.
function getEdgeRuntimeState(edge?: { data?: unknown }): EdgeRuntimeState {
return (edge?.data ?? {}) as EdgeRuntimeState
}
// Read a node's runtime flags, tolerating a missing node or missing data.
function getNodeRuntimeState(node?: { data?: unknown }): NodeRuntimeState {
return (node?.data ?? {}) as NodeRuntimeState
}
describe('useEdgesInteractionsWithoutSync', () => {
// Three nodes chained a -> b -> c.
const createFlowNodes = () => [
createNode({ id: 'a' }),
createNode({ id: 'b' }),
createNode({ id: 'c' }),
]
// e1 carries active running state; e2 is already settled.
const createFlowEdges = () => [
createEdge({
id: 'e1',
source: 'a',
target: 'b',
data: {
_sourceRunningStatus: NodeRunningStatus.Running,
_targetRunningStatus: NodeRunningStatus.Running,
_waitingRun: true,
},
}),
createEdge({
id: 'e2',
source: 'b',
target: 'c',
data: {
_sourceRunningStatus: NodeRunningStatus.Succeeded,
_targetRunningStatus: undefined,
_waitingRun: false,
},
}),
]
// Renders the hook under test alongside a live useEdges() view of the flow.
const renderEdgesInteractionsHook = () =>
renderWorkflowFlowHook(() => ({
...useEdgesInteractionsWithoutSync(),
edges: useEdges(),
}), {
nodes: createFlowNodes(),
edges: createFlowEdges(),
})
it('should clear running status and waitingRun on all edges', () => {
const { result } = renderEdgesInteractionsHook()
act(() => {
result.current.handleEdgeCancelRunningStatus()
})
// Returning the waitFor promise lets vitest await the async assertion.
return waitFor(() => {
result.current.edges.forEach((edge) => {
const edgeState = getEdgeRuntimeState(edge)
expect(edgeState._sourceRunningStatus).toBeUndefined()
expect(edgeState._targetRunningStatus).toBeUndefined()
expect(edgeState._waitingRun).toBe(false)
})
})
})
it('should not mutate original edges', () => {
const edges = createFlowEdges()
// Snapshot the first edge's data before the handler runs.
const originalData = { ...getEdgeRuntimeState(edges[0]) }
const { result } = renderWorkflowFlowHook(() => ({
...useEdgesInteractionsWithoutSync(),
edges: useEdges(),
}), {
nodes: createFlowNodes(),
edges,
})
act(() => {
result.current.handleEdgeCancelRunningStatus()
})
// The handler must produce new edge objects rather than mutate the inputs.
expect(getEdgeRuntimeState(edges[0])._sourceRunningStatus).toBe(originalData._sourceRunningStatus)
})
})
describe('useNodesInteractionsWithoutSync', () => {
// One node in each terminal-ish state: Running, Succeeded, Failed.
const createFlowNodes = () => [
createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running, _waitingRun: true } }),
createNode({ id: 'n2', position: { x: 100, y: 0 }, data: { _runningStatus: NodeRunningStatus.Succeeded, _waitingRun: false } }),
createNode({ id: 'n3', position: { x: 200, y: 0 }, data: { _runningStatus: NodeRunningStatus.Failed, _waitingRun: true } }),
]
// Renders the hook under test alongside a live useNodes() view of the flow.
const renderNodesInteractionsHook = () =>
renderWorkflowFlowHook(() => ({
...useNodesInteractionsWithoutSync(),
nodes: useNodes(),
}), {
nodes: createFlowNodes(),
edges: [],
})
describe('handleNodeCancelRunningStatus', () => {
it('should clear _runningStatus and _waitingRun on all nodes', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
result.current.handleNodeCancelRunningStatus()
})
await waitFor(() => {
result.current.nodes.forEach((node) => {
const nodeState = getNodeRuntimeState(node)
expect(nodeState._runningStatus).toBeUndefined()
expect(nodeState._waitingRun).toBe(false)
})
})
})
})
describe('handleCancelAllNodeSuccessStatus', () => {
it('should clear _runningStatus only for Succeeded nodes', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
result.current.handleCancelAllNodeSuccessStatus()
})
await waitFor(() => {
const n1 = result.current.nodes.find(node => node.id === 'n1')
const n2 = result.current.nodes.find(node => node.id === 'n2')
const n3 = result.current.nodes.find(node => node.id === 'n3')
// Only n2 (Succeeded) is cleared; Running/Failed nodes keep their status.
expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
expect(getNodeRuntimeState(n2)._runningStatus).toBeUndefined()
expect(getNodeRuntimeState(n3)._runningStatus).toBe(NodeRunningStatus.Failed)
})
})
it('should not modify _waitingRun', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
result.current.handleCancelAllNodeSuccessStatus()
})
await waitFor(() => {
expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n1'))._waitingRun).toBe(true)
expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n3'))._waitingRun).toBe(true)
})
})
})
describe('handleCancelNodeSuccessStatus', () => {
it('should clear _runningStatus and _waitingRun for the specified Succeeded node', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
result.current.handleCancelNodeSuccessStatus('n2')
})
await waitFor(() => {
const n2 = result.current.nodes.find(node => node.id === 'n2')
expect(getNodeRuntimeState(n2)._runningStatus).toBeUndefined()
expect(getNodeRuntimeState(n2)._waitingRun).toBe(false)
})
})
it('should not modify nodes that are not Succeeded', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
// n1 is Running, so targeting it should be a no-op.
result.current.handleCancelNodeSuccessStatus('n1')
})
await waitFor(() => {
const n1 = result.current.nodes.find(node => node.id === 'n1')
expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
expect(getNodeRuntimeState(n1)._waitingRun).toBe(true)
})
})
it('should not modify other nodes', async () => {
const { result } = renderNodesInteractionsHook()
act(() => {
result.current.handleCancelNodeSuccessStatus('n2')
})
await waitFor(() => {
const n1 = result.current.nodes.find(node => node.id === 'n1')
expect(getNodeRuntimeState(n1)._runningStatus).toBe(NodeRunningStatus.Running)
})
})
})
})

View File

@@ -0,0 +1,59 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { WorkflowRunningStatus } from '../../types'
import { useWorkflowCanvasMaximize } from '../use-workflow-canvas-maximize'
// Capture event-emitter emissions so the canvas-maximize broadcast can be asserted.
const mockEmit = vi.hoisted(() => vi.fn())
vi.mock('@/context/event-emitter', () => ({
useEventEmitterContextContext: () => ({
eventEmitter: {
emit: mockEmit,
},
}),
}))
describe('useWorkflowCanvasMaximize', () => {
beforeEach(() => {
vi.clearAllMocks()
// The hook persists the maximize flag to localStorage; start clean each test.
localStorage.clear()
})
it('toggles maximize state, persists it, and emits the canvas event', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowCanvasMaximize(), {
initialStoreState: {
maximizeCanvas: false,
},
})
// NOTE(review): this state update is not wrapped in act() — confirm
// renderWorkflowHook handles React act warnings internally.
result.current.handleToggleMaximizeCanvas()
expect(store.getState().maximizeCanvas).toBe(true)
expect(localStorage.getItem('workflow-canvas-maximize')).toBe('true')
expect(mockEmit).toHaveBeenCalledWith({
type: 'workflow-canvas-maximize',
payload: true,
})
})
it('does nothing while workflow nodes are read-only', () => {
// A Running workflow makes the canvas read-only, which should block toggling.
const { result, store } = renderWorkflowHook(() => useWorkflowCanvasMaximize(), {
initialStoreState: {
maximizeCanvas: false,
workflowRunningData: {
result: {
status: WorkflowRunningStatus.Running,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
},
},
})
result.current.handleToggleMaximizeCanvas()
expect(store.getState().maximizeCanvas).toBe(false)
expect(localStorage.getItem('workflow-canvas-maximize')).toBeNull()
expect(mockEmit).not.toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,141 @@
import type { Edge, Node } from '../../types'
import { act } from '@testing-library/react'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { BlockEnum } from '../../types'
import { useWorkflowHistory, WorkflowHistoryEvent } from '../use-workflow-history'
// Mutable graph state the mocked reactflow store reads from; tests assign to it.
const reactFlowState = vi.hoisted(() => ({
edges: [] as Edge[],
nodes: [] as Node[],
}))
// Make debounce synchronous so saveStateToHistory takes effect immediately.
vi.mock('es-toolkit/compat', () => ({
debounce: <T extends (...args: unknown[]) => unknown>(fn: T) => fn,
}))
vi.mock('reactflow', async () => {
const actual = await vi.importActual<typeof import('reactflow')>('reactflow')
return {
...actual,
useStoreApi: () => ({
getState: () => ({
getNodes: () => reactFlowState.nodes,
edges: reactFlowState.edges,
}),
}),
}
})
// i18n returns raw keys so label assertions are stable.
vi.mock('react-i18next', async () => {
const actual = await vi.importActual<typeof import('react-i18next')>('react-i18next')
return {
...actual,
useTranslation: () => ({
t: (key: string) => key,
}),
}
})
// Minimal one-node / one-edge graph used as the history snapshot fixture.
const nodes: Node[] = [{
id: 'node-1',
type: 'custom',
position: { x: 0, y: 0 },
data: {
type: BlockEnum.Start,
title: 'Start',
desc: '',
},
}]
const edges: Edge[] = [{
id: 'edge-1',
source: 'node-1',
target: 'node-2',
type: 'custom',
data: {
sourceType: BlockEnum.Start,
targetType: BlockEnum.End,
},
}]
describe('useWorkflowHistory', () => {
beforeEach(() => {
// Feed the fixture graph to the mocked reactflow store before each test.
reactFlowState.nodes = nodes
reactFlowState.edges = edges
})
it('stores the latest workflow graph snapshot for supported events', () => {
const { result } = renderWorkflowHook(() => useWorkflowHistory(), {
historyStore: {
nodes,
edges,
},
})
act(() => {
result.current.saveStateToHistory(WorkflowHistoryEvent.NodeAdd, { nodeId: 'node-1' })
})
expect(result.current.store.getState().workflowHistoryEvent).toBe(WorkflowHistoryEvent.NodeAdd)
expect(result.current.store.getState().workflowHistoryEventMeta).toEqual({ nodeId: 'node-1' })
// Snapshots are stored with selection cleared on both nodes and edges.
expect(result.current.store.getState().nodes).toEqual([
expect.objectContaining({
id: 'node-1',
data: expect.objectContaining({
selected: false,
title: 'Start',
}),
}),
])
expect(result.current.store.getState().edges).toEqual([
expect.objectContaining({
id: 'edge-1',
selected: false,
source: 'node-1',
target: 'node-2',
}),
])
})
it('returns translated labels and falls back for unsupported events', () => {
const { result } = renderWorkflowHook(() => useWorkflowHistory(), {
historyStore: {
nodes,
edges,
},
})
// i18n mock returns raw keys; unknown events fall back to 'Unknown Event'.
expect(result.current.getHistoryLabel(WorkflowHistoryEvent.NodeDelete)).toBe('changeHistory.nodeDelete')
expect(result.current.getHistoryLabel('Unknown' as keyof typeof WorkflowHistoryEvent)).toBe('Unknown Event')
})
it('runs registered undo and redo callbacks', () => {
const onUndo = vi.fn()
const onRedo = vi.fn()
const { result } = renderWorkflowHook(() => useWorkflowHistory(), {
historyStore: {
nodes,
edges,
},
})
act(() => {
result.current.onUndo(onUndo)
result.current.onRedo(onRedo)
})
// Spy on the temporal (zundo) store to confirm delegation.
const undoSpy = vi.spyOn(result.current.store.temporal.getState(), 'undo')
const redoSpy = vi.spyOn(result.current.store.temporal.getState(), 'redo')
act(() => {
result.current.undo()
result.current.redo()
})
expect(undoSpy).toHaveBeenCalled()
expect(redoSpy).toHaveBeenCalled()
expect(onUndo).toHaveBeenCalled()
expect(onRedo).toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,152 @@
import { act } from '@testing-library/react'
import { createLoopNode, createNode } from '../../__tests__/fixtures'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useWorkflowOrganize } from '../use-workflow-organize'
// Hoisted spies for every collaborator the organize hook touches.
const mockSetViewport = vi.hoisted(() => vi.fn())
const mockSetNodes = vi.hoisted(() => vi.fn())
const mockHandleSyncWorkflowDraft = vi.hoisted(() => vi.fn())
const mockSaveStateToHistory = vi.hoisted(() => vi.fn())
const mockGetLayoutForChildNodes = vi.hoisted(() => vi.fn())
const mockGetLayoutByELK = vi.hoisted(() => vi.fn())
// Mutable graph + read-only flag the mocked reactflow store reads from.
const runtimeState = vi.hoisted(() => ({
nodes: [] as ReturnType<typeof createNode>[],
edges: [] as { id: string, source: string, target: string }[],
nodesReadOnly: false,
}))
vi.mock('reactflow', () => ({
Position: {
Left: 'left',
Right: 'right',
Top: 'top',
Bottom: 'bottom',
},
useStoreApi: () => ({
getState: () => ({
getNodes: () => runtimeState.nodes,
edges: runtimeState.edges,
setNodes: mockSetNodes,
}),
setState: vi.fn(),
}),
useReactFlow: () => ({
setViewport: mockSetViewport,
}),
}))
vi.mock('../use-workflow', () => ({
useNodesReadOnly: () => ({
getNodesReadOnly: () => runtimeState.nodesReadOnly,
nodesReadOnly: runtimeState.nodesReadOnly,
}),
}))
vi.mock('../use-nodes-sync-draft', () => ({
useNodesSyncDraft: () => ({
handleSyncWorkflowDraft: (...args: unknown[]) => mockHandleSyncWorkflowDraft(...args),
}),
}))
vi.mock('../use-workflow-history', () => ({
useWorkflowHistory: () => ({
saveStateToHistory: (...args: unknown[]) => mockSaveStateToHistory(...args),
}),
WorkflowHistoryEvent: {
LayoutOrganize: 'LayoutOrganize',
},
}))
// Keep the real elk-layout module but intercept the two layout entry points.
vi.mock('../../utils/elk-layout', async importOriginal => ({
...(await importOriginal<typeof import('../../utils/elk-layout')>()),
getLayoutForChildNodes: (...args: unknown[]) => mockGetLayoutForChildNodes(...args),
getLayoutByELK: (...args: unknown[]) => mockGetLayoutByELK(...args),
}))
describe('useWorkflowOrganize', () => {
beforeEach(() => {
vi.clearAllMocks()
// handleLayout defers some work; fake timers let tests flush it deterministically.
vi.useFakeTimers()
runtimeState.nodesReadOnly = false
runtimeState.nodes = []
runtimeState.edges = []
})
afterEach(() => {
vi.useRealTimers()
})
it('resizes containers, lays out nodes, and syncs draft when editable', async () => {
// A loop container with one child, plus an independent top-level node.
runtimeState.nodes = [
createLoopNode({
id: 'loop-node',
width: 200,
height: 160,
}),
createNode({
id: 'loop-child',
parentId: 'loop-node',
position: { x: 20, y: 20 },
width: 100,
height: 60,
}),
createNode({
id: 'top-node',
position: { x: 400, y: 0 },
}),
]
runtimeState.edges = []
// Canned ELK results: child layout within the loop, then top-level layout.
mockGetLayoutForChildNodes.mockResolvedValue({
bounds: { minX: 0, minY: 0, maxX: 320, maxY: 220 },
nodes: new Map([
['loop-child', { x: 40, y: 60, width: 100, height: 60 }],
]),
})
mockGetLayoutByELK.mockResolvedValue({
nodes: new Map([
['loop-node', { x: 10, y: 20, width: 360, height: 260, layer: 0 }],
['top-node', { x: 500, y: 30, width: 240, height: 100, layer: 0 }],
]),
})
const { result } = renderWorkflowHook(() => useWorkflowOrganize())
await act(async () => {
await result.current.handleLayout()
})
act(() => {
vi.runAllTimers()
})
expect(mockSetNodes).toHaveBeenCalledTimes(1)
const nextNodes = mockSetNodes.mock.calls[0][0]
// Container takes the ELK position; its size is recomputed from child bounds.
expect(nextNodes.find((node: { id: string }) => node.id === 'loop-node')).toEqual(expect.objectContaining({
width: expect.any(Number),
height: expect.any(Number),
position: { x: 10, y: 20 },
}))
expect(nextNodes.find((node: { id: string }) => node.id === 'loop-child')).toEqual(expect.objectContaining({
position: { x: 100, y: 120 },
}))
expect(mockSetViewport).toHaveBeenCalledWith({ x: 0, y: 0, zoom: 0.7 })
expect(mockSaveStateToHistory).toHaveBeenCalledWith('LayoutOrganize')
expect(mockHandleSyncWorkflowDraft).toHaveBeenCalled()
})
it('skips layout when nodes are read-only', async () => {
runtimeState.nodesReadOnly = true
runtimeState.nodes = [createNode({ id: 'n1' })]
const { result } = renderWorkflowHook(() => useWorkflowOrganize())
await act(async () => {
await result.current.handleLayout()
})
// Read-only short-circuits before any layout or viewport work happens.
expect(mockGetLayoutForChildNodes).not.toHaveBeenCalled()
expect(mockGetLayoutByELK).not.toHaveBeenCalled()
expect(mockSetNodes).not.toHaveBeenCalled()
expect(mockSetViewport).not.toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,110 @@
import { act } from '@testing-library/react'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { ControlMode } from '../../types'
import {
useWorkflowInteractions,
useWorkflowMoveMode,
} from '../use-workflow-panel-interactions'
// Spies for the interaction hooks the panel hooks delegate to.
const mockHandleSelectionCancel = vi.hoisted(() => vi.fn())
const mockHandleNodeCancelRunningStatus = vi.hoisted(() => vi.fn())
const mockHandleEdgeCancelRunningStatus = vi.hoisted(() => vi.fn())
// Toggled per test to simulate an editable vs read-only canvas.
const runtimeState = vi.hoisted(() => ({
nodesReadOnly: false,
}))
vi.mock('../use-workflow', () => ({
useNodesReadOnly: () => ({
getNodesReadOnly: () => runtimeState.nodesReadOnly,
nodesReadOnly: runtimeState.nodesReadOnly,
}),
}))
vi.mock('../use-selection-interactions', () => ({
useSelectionInteractions: () => ({
handleSelectionCancel: (...args: unknown[]) => mockHandleSelectionCancel(...args),
}),
}))
vi.mock('../use-nodes-interactions-without-sync', () => ({
useNodesInteractionsWithoutSync: () => ({
handleNodeCancelRunningStatus: (...args: unknown[]) => mockHandleNodeCancelRunningStatus(...args),
}),
}))
vi.mock('../use-edges-interactions-without-sync', () => ({
useEdgesInteractionsWithoutSync: () => ({
handleEdgeCancelRunningStatus: (...args: unknown[]) => mockHandleEdgeCancelRunningStatus(...args),
}),
}))
// Closing the debug/preview panel must also tear down any in-flight run state.
describe('useWorkflowInteractions', () => {
beforeEach(() => {
vi.clearAllMocks()
runtimeState.nodesReadOnly = false
})
it('closes the debug panel and clears running state', () => {
const rendered = renderWorkflowHook(() => useWorkflowInteractions(), {
initialStoreState: {
showDebugAndPreviewPanel: true,
workflowRunningData: { task_id: 'task-1' } as never,
},
})
act(() => {
rendered.result.current.handleCancelDebugAndPreviewPanel()
})
const state = rendered.store.getState()
expect(state.showDebugAndPreviewPanel).toBe(false)
expect(state.workflowRunningData).toBeUndefined()
// Both node and edge running-status resets are delegated exactly once.
expect(mockHandleNodeCancelRunningStatus).toHaveBeenCalledTimes(1)
expect(mockHandleEdgeCancelRunningStatus).toHaveBeenCalledTimes(1)
})
})
describe('useWorkflowMoveMode', () => {
beforeEach(() => {
vi.clearAllMocks()
runtimeState.nodesReadOnly = false
})
it('switches between hand and pointer modes when editable', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowMoveMode(), {
initialStoreState: {
controlMode: ControlMode.Pointer,
},
})
act(() => {
result.current.handleModeHand()
})
expect(store.getState().controlMode).toBe(ControlMode.Hand)
// Entering hand mode also cancels any active selection.
expect(mockHandleSelectionCancel).toHaveBeenCalledTimes(1)
act(() => {
result.current.handleModePointer()
})
expect(store.getState().controlMode).toBe(ControlMode.Pointer)
})
it('does not switch modes when nodes are read-only', () => {
runtimeState.nodesReadOnly = true
const { result, store } = renderWorkflowHook(() => useWorkflowMoveMode(), {
initialStoreState: {
controlMode: ControlMode.Pointer,
},
})
act(() => {
result.current.handleModeHand()
result.current.handleModePointer()
})
// Mode stays Pointer and no side effects fire while read-only.
expect(store.getState().controlMode).toBe(ControlMode.Pointer)
expect(mockHandleSelectionCancel).not.toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,14 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useWorkflowRefreshDraft } from '../use-workflow-refresh-draft'
// The hook should surface the exact callback registered on the hooks store,
// without wrapping or replacing it.
describe('useWorkflowRefreshDraft', () => {
it('returns handleRefreshWorkflowDraft from hooks store', () => {
const refreshSpy = vi.fn()
const rendered = renderWorkflowHook(() => useWorkflowRefreshDraft(), {
hooksStoreProps: { handleRefreshWorkflowDraft: refreshSpy },
})
// Identity check: the very same function instance is returned.
expect(rendered.result.current.handleRefreshWorkflowDraft).toBe(refreshSpy)
})
})

View File

@@ -1,242 +0,0 @@
import type {
AgentLogResponse,
HumanInputFormFilledResponse,
HumanInputFormTimeoutResponse,
TextChunkResponse,
TextReplaceResponse,
WorkflowFinishedResponse,
} from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { WorkflowRunningStatus } from '../../types'
import { useWorkflowAgentLog } from '../use-workflow-run-event/use-workflow-agent-log'
import { useWorkflowFailed } from '../use-workflow-run-event/use-workflow-failed'
import { useWorkflowFinished } from '../use-workflow-run-event/use-workflow-finished'
import { useWorkflowNodeHumanInputFormFilled } from '../use-workflow-run-event/use-workflow-node-human-input-form-filled'
import { useWorkflowNodeHumanInputFormTimeout } from '../use-workflow-run-event/use-workflow-node-human-input-form-timeout'
import { useWorkflowPaused } from '../use-workflow-run-event/use-workflow-paused'
import { useWorkflowTextChunk } from '../use-workflow-run-event/use-workflow-text-chunk'
import { useWorkflowTextReplace } from '../use-workflow-run-event/use-workflow-text-replace'
// Stub out file extraction from logs; these tests do not exercise file handling.
vi.mock('@/app/components/base/file-uploader/utils', () => ({
getFilesInLogs: vi.fn(() => []),
}))
// Each of these small suites checks one run-event handler's effect on
// workflowRunningData in the store.
describe('useWorkflowFailed', () => {
it('should set status to Failed', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFailed(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFailed()
expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Failed)
})
})
describe('useWorkflowPaused', () => {
it('should set status to Paused', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowPaused(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowPaused()
expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Paused)
})
})
describe('useWorkflowTextChunk', () => {
it('should append text and activate result tab', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowTextChunk(), {
initialStoreState: {
workflowRunningData: baseRunningData({ resultText: 'Hello' }),
},
})
// Chunks are concatenated onto the existing resultText.
result.current.handleWorkflowTextChunk({ data: { text: ' World' } } as TextChunkResponse)
const state = store.getState().workflowRunningData!
expect(state.resultText).toBe('Hello World')
expect(state.resultTabActive).toBe(true)
})
})
describe('useWorkflowTextReplace', () => {
it('should replace resultText', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowTextReplace(), {
initialStoreState: {
workflowRunningData: baseRunningData({ resultText: 'old text' }),
},
})
// Replace overwrites rather than appends.
result.current.handleWorkflowTextReplace({ data: { text: 'new text' } } as TextReplaceResponse)
expect(store.getState().workflowRunningData!.resultText).toBe('new text')
})
})
describe('useWorkflowFinished', () => {
it('should merge data into result and activate result tab for single string output', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFinished(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFinished({
data: { status: 'succeeded', outputs: { answer: 'hello' } },
} as WorkflowFinishedResponse)
const state = store.getState().workflowRunningData!
expect(state.result.status).toBe('succeeded')
// A single string output is promoted to resultText and the tab is focused.
expect(state.resultTabActive).toBe(true)
expect(state.resultText).toBe('hello')
})
it('should not activate result tab for multi-key outputs', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFinished(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFinished({
data: { status: 'succeeded', outputs: { a: 'hello', b: 'world' } },
} as WorkflowFinishedResponse)
expect(store.getState().workflowRunningData!.resultTabActive).toBeFalsy()
})
})
describe('useWorkflowAgentLog', () => {
it('should create agent_log array when execution_metadata has no agent_log', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{ node_id: 'n1', execution_metadata: {} }],
}),
},
})
result.current.handleWorkflowAgentLog({
data: { node_id: 'n1', message_id: 'm1' },
} as AgentLogResponse)
const trace = store.getState().workflowRunningData!.tracing![0]
expect(trace.execution_metadata!.agent_log).toHaveLength(1)
expect(trace.execution_metadata!.agent_log![0].message_id).toBe('m1')
})
it('should append to existing agent_log', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{
node_id: 'n1',
execution_metadata: { agent_log: [{ message_id: 'm1', text: 'log1' }] },
}],
}),
},
})
// A new message_id appends a second entry.
result.current.handleWorkflowAgentLog({
data: { node_id: 'n1', message_id: 'm2' },
} as AgentLogResponse)
expect(store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log).toHaveLength(2)
})
it('should update existing log entry by message_id', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{
node_id: 'n1',
execution_metadata: { agent_log: [{ message_id: 'm1', text: 'old' }] },
}],
}),
},
})
// The same message_id replaces the entry in place instead of appending.
result.current.handleWorkflowAgentLog({
data: { node_id: 'n1', message_id: 'm1', text: 'new' },
} as unknown as AgentLogResponse)
const log = store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log!
expect(log).toHaveLength(1)
expect((log[0] as unknown as { text: string }).text).toBe('new')
})
it('should create execution_metadata when it does not exist', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{ node_id: 'n1' }],
}),
},
})
result.current.handleWorkflowAgentLog({
data: { node_id: 'n1', message_id: 'm1' },
} as AgentLogResponse)
expect(store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log).toHaveLength(1)
})
})
describe('useWorkflowNodeHumanInputFormFilled', () => {
it('should remove form from humanInputFormDataList and add to humanInputFilledFormDataList', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormFilled(), {
initialStoreState: {
workflowRunningData: baseRunningData({
humanInputFormDataList: [
{ node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '' },
],
}),
},
})
result.current.handleWorkflowNodeHumanInputFormFilled({
data: { node_id: 'n1', node_title: 'Node 1', rendered_content: 'done' },
} as HumanInputFormFilledResponse)
const state = store.getState().workflowRunningData!
// Filled form moves from the pending list to the filled list.
expect(state.humanInputFormDataList).toHaveLength(0)
expect(state.humanInputFilledFormDataList).toHaveLength(1)
expect(state.humanInputFilledFormDataList![0].node_id).toBe('n1')
})
it('should create humanInputFilledFormDataList when it does not exist', () => {
// Initial state deliberately has no humanInputFilledFormDataList,
// so the handler must create the array itself.
const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormFilled(), {
initialStoreState: {
workflowRunningData: baseRunningData({
humanInputFormDataList: [
{ node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '' },
],
}),
},
})
result.current.handleWorkflowNodeHumanInputFormFilled({
data: { node_id: 'n1', node_title: 'Node 1', rendered_content: 'done' },
} as HumanInputFormFilledResponse)
expect(store.getState().workflowRunningData!.humanInputFilledFormDataList).toBeDefined()
})
})
describe('useWorkflowNodeHumanInputFormTimeout', () => {
it('should set expiration_time on the matching form', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormTimeout(), {
initialStoreState: {
workflowRunningData: baseRunningData({
humanInputFormDataList: [
{ node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '', expiration_time: 0 },
],
}),
},
})
result.current.handleWorkflowNodeHumanInputFormTimeout({
data: { node_id: 'n1', node_title: 'Node 1', expiration_time: 1000 },
} as HumanInputFormTimeoutResponse)
expect(store.getState().workflowRunningData!.humanInputFormDataList![0].expiration_time).toBe(1000)
})
})

View File

@@ -1,336 +0,0 @@
import type { WorkflowRunningData } from '../../types'
import type {
IterationFinishedResponse,
IterationNextResponse,
LoopFinishedResponse,
LoopNextResponse,
NodeFinishedResponse,
WorkflowStartedResponse,
} from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { useEdges, useNodes } from 'reactflow'
import { createEdge, createNode } from '../../__tests__/fixtures'
import { baseRunningData, renderWorkflowFlowHook } from '../../__tests__/workflow-test-env'
import { DEFAULT_ITER_TIMES } from '../../constants'
import { NodeRunningStatus, WorkflowRunningStatus } from '../../types'
import { useWorkflowNodeFinished } from '../use-workflow-run-event/use-workflow-node-finished'
import { useWorkflowNodeIterationFinished } from '../use-workflow-run-event/use-workflow-node-iteration-finished'
import { useWorkflowNodeIterationNext } from '../use-workflow-run-event/use-workflow-node-iteration-next'
import { useWorkflowNodeLoopFinished } from '../use-workflow-run-event/use-workflow-node-loop-finished'
import { useWorkflowNodeLoopNext } from '../use-workflow-run-event/use-workflow-node-loop-next'
import { useWorkflowNodeRetry } from '../use-workflow-run-event/use-workflow-node-retry'
import { useWorkflowStarted } from '../use-workflow-run-event/use-workflow-started'
// Runtime flags the run-event handlers write onto a node's data payload.
type NodeRuntimeState = {
_waitingRun?: boolean
_runningStatus?: NodeRunningStatus
_retryIndex?: number
_iterationIndex?: number
_loopIndex?: number
_runningBranchId?: string
}
// Runtime flags the run-event handlers write onto an edge's data payload.
type EdgeRuntimeState = {
_sourceRunningStatus?: NodeRunningStatus
_targetRunningStatus?: NodeRunningStatus
_waitingRun?: boolean
}
// Read a node's runtime flags, tolerating a missing node or missing data.
function getNodeRuntimeState(node?: { data?: unknown }): NodeRuntimeState {
return (node?.data ?? {}) as NodeRuntimeState
}
// Read an edge's runtime flags, tolerating a missing edge or missing data.
function getEdgeRuntimeState(edge?: { data?: unknown }): EdgeRuntimeState {
return (edge?.data ?? {}) as EdgeRuntimeState
}
// Single node n1 used as the default graph for run-event tests.
function createRunNodes() {
return [
createNode({
id: 'n1',
width: 200,
height: 80,
data: { _waitingRun: false },
}),
]
}
// Single edge into n1 from an off-graph source n0.
function createRunEdges() {
return [
createEdge({
id: 'e1',
source: 'n0',
target: 'n1',
data: {},
}),
]
}
// Renders a run-event hook together with live useNodes()/useEdges() views so
// tests can assert the graph state the handlers produce.
function renderRunEventHook<T extends Record<string, unknown>>(
useHook: () => T,
options?: {
nodes?: ReturnType<typeof createRunNodes>
edges?: ReturnType<typeof createRunEdges>
initialStoreState?: Record<string, unknown>
},
) {
const { nodes = createRunNodes(), edges = createRunEdges(), initialStoreState } = options ?? {}
return renderWorkflowFlowHook(() => ({
...useHook(),
nodes: useNodes(),
edges: useEdges(),
}), {
nodes,
edges,
// fitView disabled: these tests never assert viewport behaviour.
reactFlowProps: { fitView: false },
initialStoreState,
})
}
describe('useWorkflowStarted', () => {
it('should initialize workflow running data and reset nodes/edges', async () => {
const { result, store } = renderRunEventHook(() => useWorkflowStarted(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
act(() => {
result.current.handleWorkflowStarted({
task_id: 'task-2',
data: { id: 'run-1', workflow_id: 'wf-1', created_at: 1000 },
} as WorkflowStartedResponse)
})
const state = store.getState().workflowRunningData!
expect(state.task_id).toBe('task-2')
expect(state.result.status).toBe(WorkflowRunningStatus.Running)
expect(state.resultText).toBe('')
// A fresh start flags everything as waiting and clears prior running status.
await waitFor(() => {
expect(getNodeRuntimeState(result.current.nodes[0])._waitingRun).toBe(true)
expect(getNodeRuntimeState(result.current.nodes[0])._runningBranchId).toBeUndefined()
expect(getEdgeRuntimeState(result.current.edges[0])._sourceRunningStatus).toBeUndefined()
expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBeUndefined()
expect(getEdgeRuntimeState(result.current.edges[0])._waitingRun).toBe(true)
})
})
it('should resume from Paused without resetting nodes/edges', () => {
const { result, store } = renderRunEventHook(() => useWorkflowStarted(), {
initialStoreState: {
workflowRunningData: baseRunningData({
result: { status: WorkflowRunningStatus.Paused } as WorkflowRunningData['result'],
}),
},
})
act(() => {
result.current.handleWorkflowStarted({
task_id: 'task-2',
data: { id: 'run-2', workflow_id: 'wf-1', created_at: 2000 },
} as WorkflowStartedResponse)
})
expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Running)
// Resume path: node/edge fixtures keep their original flags untouched.
expect(getNodeRuntimeState(result.current.nodes[0])._waitingRun).toBe(false)
expect(getEdgeRuntimeState(result.current.edges[0])._waitingRun).toBeUndefined()
})
})
describe('useWorkflowNodeFinished', () => {
it('should update tracing and node running status', async () => {
const { result, store } = renderRunEventHook(() => useWorkflowNodeFinished(), {
nodes: [
createNode({
id: 'n1',
data: { _runningStatus: NodeRunningStatus.Running },
}),
],
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{ id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Running }],
}),
},
})
act(() => {
result.current.handleWorkflowNodeFinished({
data: { id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
} as NodeFinishedResponse)
})
const trace = store.getState().workflowRunningData!.tracing![0]
expect(trace.status).toBe(NodeRunningStatus.Succeeded)
// Both the node and its incoming edge reflect the finished status.
await waitFor(() => {
expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
})
})
it('should set _runningBranchId for IfElse node', async () => {
const { result } = renderRunEventHook(() => useWorkflowNodeFinished(), {
nodes: [
createNode({
id: 'n1',
data: { _runningStatus: NodeRunningStatus.Running },
}),
],
initialStoreState: {
workflowRunningData: baseRunningData({
tracing: [{ id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Running }],
}),
},
})
act(() => {
// if-else nodes report the taken branch via outputs.selected_case_id.
result.current.handleWorkflowNodeFinished({
data: {
id: 'trace-1',
node_id: 'n1',
node_type: 'if-else',
status: NodeRunningStatus.Succeeded,
outputs: { selected_case_id: 'branch-a' },
},
} as unknown as NodeFinishedResponse)
})
await waitFor(() => {
expect(getNodeRuntimeState(result.current.nodes[0])._runningBranchId).toBe('branch-a')
})
})
})
// Covers useWorkflowNodeRetry: a retry event appends a tracing entry and
// records the retry counter on the node as _retryIndex.
describe('useWorkflowNodeRetry', () => {
  it('should push retry data to tracing and update _retryIndex', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeRetry(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeRetry({
        data: { node_id: 'n1', retry_index: 2 },
      } as NodeFinishedResponse)
    })
    expect(store.getState().workflowRunningData!.tracing).toHaveLength(1)
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._retryIndex).toBe(2)
    })
  })
})
// Covers useWorkflowNodeIterationNext: the node shows the current iteration
// index (read from the store's iterTimes), which is then incremented.
describe('useWorkflowNodeIterationNext', () => {
  it('should set _iterationIndex and increment iterTimes', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeIterationNext(), {
      initialStoreState: {
        workflowRunningData: baseRunningData(),
        iterTimes: 3,
      },
    })
    act(() => {
      result.current.handleWorkflowNodeIterationNext({
        data: { node_id: 'n1' },
      } as IterationNextResponse)
    })
    // _iterationIndex picks up the pre-increment iterTimes value (3)...
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._iterationIndex).toBe(3)
    })
    // ...and the store counter advances to 4.
    expect(store.getState().iterTimes).toBe(4)
  })
})
// Covers useWorkflowNodeIterationFinished: finishing an iteration updates the
// tracing entry, resets iterTimes to its default, and marks node/edge done.
describe('useWorkflowNodeIterationFinished', () => {
  it('should update tracing, reset iterTimes, update node status and edges', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeIterationFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'iter-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
        iterTimes: 10,
      },
    })
    act(() => {
      result.current.handleWorkflowNodeIterationFinished({
        data: { id: 'iter-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
      } as IterationFinishedResponse)
    })
    // The counter is reset synchronously; node/edge updates need a re-render.
    expect(store.getState().iterTimes).toBe(DEFAULT_ITER_TIMES)
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
    })
  })
})
// Covers useWorkflowNodeLoopNext: a loop-next event stores the loop index on
// the loop node and resets its child nodes back to the waiting state.
describe('useWorkflowNodeLoopNext', () => {
  it('should set _loopIndex and reset child nodes to waiting', async () => {
    const { result } = renderRunEventHook(() => useWorkflowNodeLoopNext(), {
      nodes: [
        createNode({ id: 'n1', data: {} }),
        // n2 is nested inside the loop node n1 via parentId.
        createNode({
          id: 'n2',
          position: { x: 300, y: 0 },
          parentId: 'n1',
          data: { _waitingRun: false },
        }),
      ],
      edges: [],
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopNext({
        data: { node_id: 'n1', index: 5 },
      } as LoopNextResponse)
    })
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n1'))._loopIndex).toBe(5)
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n2'))._waitingRun).toBe(true)
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n2'))._runningStatus).toBe(NodeRunningStatus.Waiting)
    })
  })
})
// Covers useWorkflowNodeLoopFinished: mirrors the iteration-finished flow —
// the tracing entry, node status and incoming edge are all marked Succeeded.
describe('useWorkflowNodeLoopFinished', () => {
  it('should update tracing, node status and edges', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeLoopFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'loop-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopFinished({
        data: { id: 'loop-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
      } as LoopFinishedResponse)
    })
    const trace = store.getState().workflowRunningData!.tracing![0]
    expect(trace.status).toBe(NodeRunningStatus.Succeeded)
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
    })
  })
})

View File

@ -1,331 +0,0 @@
import type {
HumanInputRequiredResponse,
IterationStartedResponse,
LoopStartedResponse,
NodeStartedResponse,
} from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { useEdges, useNodes, useStoreApi } from 'reactflow'
import { createEdge, createNode } from '../../__tests__/fixtures'
import { baseRunningData, renderWorkflowFlowHook } from '../../__tests__/workflow-test-env'
import { DEFAULT_ITER_TIMES } from '../../constants'
import { NodeRunningStatus } from '../../types'
import { useWorkflowNodeHumanInputRequired } from '../use-workflow-run-event/use-workflow-node-human-input-required'
import { useWorkflowNodeIterationStarted } from '../use-workflow-run-event/use-workflow-node-iteration-started'
import { useWorkflowNodeLoopStarted } from '../use-workflow-run-event/use-workflow-node-loop-started'
import { useWorkflowNodeStarted } from '../use-workflow-run-event/use-workflow-node-started'
// Runtime flags the run-event handlers write onto node data during a run.
type NodeRuntimeState = {
  _waitingRun?: boolean
  _runningStatus?: NodeRunningStatus
  _iterationLength?: number
  _loopLength?: number
}
// Runtime flags written onto edge data while a run is in progress.
type EdgeRuntimeState = {
  _sourceRunningStatus?: NodeRunningStatus
  _targetRunningStatus?: NodeRunningStatus
  _waitingRun?: boolean
}
// Narrowing helpers: node/edge `data` is typed loosely, so cast to read flags.
const getNodeRuntimeState = (node?: { data?: unknown }): NodeRuntimeState =>
  (node?.data ?? {}) as NodeRuntimeState
const getEdgeRuntimeState = (edge?: { data?: unknown }): EdgeRuntimeState =>
  (edge?.data ?? {}) as EdgeRuntimeState
// Fake canvas dimensions passed to viewport-adjusting handlers.
const containerParams = { clientWidth: 1200, clientHeight: 800 }
// Three-node fixture for the viewport tests: a finished root (n0), a waiting
// root (n1), and a waiting child nested inside n1 (n2). All nodes share the
// same 200x80 footprint so viewport math stays predictable.
function createViewportNodes() {
  const footprint = { width: 200, height: 80 }
  const finishedRoot = createNode({
    id: 'n0',
    ...footprint,
    data: { _runningStatus: NodeRunningStatus.Succeeded },
  })
  const waitingRoot = createNode({
    id: 'n1',
    position: { x: 100, y: 50 },
    ...footprint,
    data: { _waitingRun: true },
  })
  const waitingChild = createNode({
    id: 'n2',
    position: { x: 400, y: 50 },
    ...footprint,
    parentId: 'n1',
    data: { _waitingRun: true },
  })
  return [finishedRoot, waitingRoot, waitingChild]
}
// Single-edge fixture: n0 -> n1 with a 'source' handle and empty runtime data.
function createViewportEdges() {
  const rootToWaiting = createEdge({
    id: 'e1',
    source: 'n0',
    target: 'n1',
    sourceHandle: 'source',
    data: {},
  })
  return [rootToWaiting]
}
// Renders a run-event hook inside the shared workflow test environment while
// also exposing the live nodes/edges and the React Flow store so tests can
// assert on viewport transforms.
function renderViewportHook<T extends Record<string, unknown>>(
  useHook: () => T,
  options?: {
    nodes?: ReturnType<typeof createViewportNodes>
    edges?: ReturnType<typeof createViewportEdges>
    initialStoreState?: Record<string, unknown>
  },
) {
  const {
    nodes = createViewportNodes(),
    edges = createViewportEdges(),
    initialStoreState,
  } = options ?? {}
  return renderWorkflowFlowHook(() => ({
    ...useHook(),
    nodes: useNodes(),
    edges: useEdges(),
    reactFlowStore: useStoreApi(),
  }), {
    nodes,
    edges,
    // fitView disabled so viewport assertions see only the handler's changes.
    reactFlowProps: { fitView: false },
    initialStoreState,
  })
}
// Covers useWorkflowNodeStarted: pushes/updates tracing, marks the node as
// running, and pans the viewport to the node — but only for root nodes.
describe('useWorkflowNodeStarted', () => {
  it('should push to tracing, set node running, and adjust viewport for root node', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeStarted(
        { data: { node_id: 'n1' } } as NodeStartedResponse,
        containerParams,
      )
    })
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing).toHaveLength(1)
    expect(tracing[0].status).toBe(NodeRunningStatus.Running)
    await waitFor(() => {
      // transform is [x, y, zoom]; the expected pan centers n1 in the
      // 1200x800 container defined by containerParams.
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
  it('should not adjust viewport for child node (has parentId)', async () => {
    const { result } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeStarted(
        { data: { node_id: 'n2' } } as NodeStartedResponse,
        containerParams,
      )
    })
    await waitFor(() => {
      // The viewport stays at the identity transform for nested nodes.
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(0)
      expect(transform[1]).toBe(0)
      expect(transform[2]).toBe(1)
      expect(getNodeRuntimeState(result.current.nodes.find(item => item.id === 'n2'))._runningStatus).toBe(NodeRunningStatus.Running)
    })
  })
  it('should update existing tracing entry if node_id exists at non-zero index', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [
            { node_id: 'n0', status: NodeRunningStatus.Succeeded },
            { node_id: 'n1', status: NodeRunningStatus.Succeeded },
          ],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeStarted(
        { data: { node_id: 'n1' } } as NodeStartedResponse,
        containerParams,
      )
    })
    // The entry for n1 is updated in place rather than appended.
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing).toHaveLength(2)
    expect(tracing[1].status).toBe(NodeRunningStatus.Running)
  })
})
// Covers useWorkflowNodeIterationStarted: pushes tracing, resets iterTimes,
// pans the viewport, and records the iteration length on the node.
describe('useWorkflowNodeIterationStarted', () => {
  it('should push to tracing, reset iterTimes, set viewport, and update node with _iterationLength', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeIterationStarted(), {
      // Only the two root nodes; the nested child is irrelevant here.
      nodes: createViewportNodes().slice(0, 2),
      initialStoreState: {
        workflowRunningData: baseRunningData(),
        iterTimes: 99,
      },
    })
    act(() => {
      result.current.handleWorkflowNodeIterationStarted(
        { data: { node_id: 'n1', metadata: { iterator_length: 10 } } } as IterationStartedResponse,
        containerParams,
      )
    })
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing[0].status).toBe(NodeRunningStatus.Running)
    expect(store.getState().iterTimes).toBe(DEFAULT_ITER_TIMES)
    await waitFor(() => {
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      // iterator_length from the event metadata is surfaced as _iterationLength.
      expect(getNodeRuntimeState(node)._iterationLength).toBe(10)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
})
// Covers useWorkflowNodeLoopStarted: same shape as the iteration-started test
// but surfaces loop_length as _loopLength (no iterTimes reset asserted).
describe('useWorkflowNodeLoopStarted', () => {
  it('should push to tracing, set viewport, and update node with _loopLength', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeLoopStarted(), {
      nodes: createViewportNodes().slice(0, 2),
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopStarted(
        { data: { node_id: 'n1', metadata: { loop_length: 5 } } } as LoopStartedResponse,
        containerParams,
      )
    })
    expect(store.getState().workflowRunningData!.tracing![0].status).toBe(NodeRunningStatus.Running)
    await waitFor(() => {
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(node)._loopLength).toBe(5)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
})
// Covers useWorkflowNodeHumanInputRequired: pauses the node/tracing entry and
// maintains humanInputFormDataList keyed by node_id (update vs. append).
describe('useWorkflowNodeHumanInputRequired', () => {
  it('should create humanInputFormDataList and set tracing/node to Paused', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: 'content' },
      } as HumanInputRequiredResponse)
    })
    const state = store.getState().workflowRunningData!
    expect(state.humanInputFormDataList).toHaveLength(1)
    expect(state.humanInputFormDataList![0].form_id).toBe('f1')
    expect(state.tracing![0].status).toBe(NodeRunningStatus.Paused)
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes.find(item => item.id === 'n1'))._runningStatus).toBe(NodeRunningStatus.Paused)
    })
  })
  it('should update existing form entry for same node_id', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1', status: NodeRunningStatus.Running }],
          humanInputFormDataList: [
            { node_id: 'n1', form_id: 'old', node_title: 'Node 1', form_content: 'old' },
          ],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n1', form_id: 'new', node_title: 'Node 1', form_content: 'new' },
      } as HumanInputRequiredResponse)
    })
    // Same node_id replaces the existing entry instead of appending.
    const formList = store.getState().workflowRunningData!.humanInputFormDataList!
    expect(formList).toHaveLength(1)
    expect(formList[0].form_id).toBe('new')
  })
  it('should append new form entry for different node_id', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n2', status: NodeRunningStatus.Running }],
          humanInputFormDataList: [
            { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '' },
          ],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n2', form_id: 'f2', node_title: 'Node 2', form_content: 'content2' },
      } as HumanInputRequiredResponse)
    })
    expect(store.getState().workflowRunningData!.humanInputFormDataList).toHaveLength(2)
  })
})

View File

@ -0,0 +1,24 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useWorkflowRun } from '../use-workflow-run'
// useWorkflowRun must expose exactly the handler functions injected through
// the hooks store, by reference.
describe('useWorkflowRun', () => {
  it('returns workflow run handlers from hooks store', () => {
    const handlers = {
      handleBackupDraft: vi.fn(),
      handleLoadBackupDraft: vi.fn(),
      handleRestoreFromPublishedWorkflow: vi.fn(),
      handleRun: vi.fn(),
      handleStopRun: vi.fn(),
    }
    const { result } = renderWorkflowHook(() => useWorkflowRun(), {
      hooksStoreProps: handlers,
    })
    // Identity check for every injected handler — no wrapping allowed.
    for (const name of Object.keys(handlers) as Array<keyof typeof handlers>)
      expect(result.current[name]).toBe(handlers[name])
  })
})

View File

@ -0,0 +1,119 @@
import type { CommonNodeType, Node, ToolWithProvider } from '../../types'
import { act, renderHook } from '@testing-library/react'
import { workflowNodesAction } from '@/app/components/goto-anything/actions/workflow-nodes'
import { CollectionType } from '@/app/components/tools/types'
import { BlockEnum } from '../../types'
import { useWorkflowSearch } from '../use-workflow-search'
// vi.hoisted is required: vi.mock factories are hoisted above the imports and
// must not close over un-hoisted module-level variables.
const mockHandleNodeSelect = vi.hoisted(() => vi.fn())
const runtimeNodes = vi.hoisted(() => [] as Node[])
vi.mock('reactflow', () => ({
  useNodes: () => runtimeNodes,
}))
vi.mock('../use-nodes-interactions', () => ({
  useNodesInteractions: () => ({
    handleNodeSelect: mockHandleNodeSelect,
  }),
}))
// Only the built-in tool source is populated (provider-1 carries the icon);
// all other tool sources are empty.
vi.mock('@/service/use-tools', () => ({
  useAllBuiltInTools: () => ({
    data: [{
      id: 'provider-1',
      icon: 'tool-icon',
      tools: [],
    }] satisfies Partial<ToolWithProvider>[],
  }),
  useAllCustomTools: () => ({ data: [] }),
  useAllWorkflowTools: () => ({ data: [] }),
  useAllMCPTools: () => ({ data: [] }),
}))
// Local node factory with LLM defaults; individual tests override `data`.
const createNode = (overrides: Partial<Node> = {}): Node => ({
  id: 'node-1',
  type: 'custom',
  position: { x: 0, y: 0 },
  data: {
    type: BlockEnum.LLM,
    title: 'Writer',
    desc: 'Draft content',
  } as CommonNodeType,
  ...overrides,
})
// Covers useWorkflowSearch: registers a search function on the shared
// goto-anything action and listens for node-selection DOM events.
describe('useWorkflowSearch', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Reset the shared mutable fixtures between tests.
    runtimeNodes.length = 0
    workflowNodesAction.searchFn = undefined
  })
  it('registers workflow node search results with tool icons and llm metadata scoring', async () => {
    runtimeNodes.push(
      createNode({
        id: 'llm-1',
        data: {
          type: BlockEnum.LLM,
          title: 'Writer',
          desc: 'Draft content',
          model: {
            provider: 'openai',
            name: 'gpt-4o',
            mode: 'chat',
          },
        } as CommonNodeType,
      }),
      createNode({
        id: 'tool-1',
        data: {
          type: BlockEnum.Tool,
          title: 'Google Search',
          desc: 'Search the web',
          provider_type: CollectionType.builtIn,
          provider_id: 'provider-1',
        } as CommonNodeType,
      }),
      // Internal helper nodes (e.g. IterationStart) must be excluded from search.
      createNode({
        id: 'internal-start',
        data: {
          type: BlockEnum.IterationStart,
          title: 'Internal Start',
          desc: '',
        } as CommonNodeType,
      }),
    )
    const { unmount } = renderHook(() => useWorkflowSearch())
    // 'gpt' matches the LLM node via its model metadata, not its title.
    const llmResults = await workflowNodesAction.search('', 'gpt')
    expect(llmResults.map(item => item.id)).toEqual(['llm-1'])
    expect(llmResults[0]?.title).toBe('Writer')
    const toolResults = await workflowNodesAction.search('', 'search')
    expect(toolResults.map(item => item.id)).toEqual(['tool-1'])
    expect(toolResults[0]?.description).toBe('Search the web')
    // Unmount must deregister the search function.
    unmount()
    expect(workflowNodesAction.searchFn).toBeUndefined()
  })
  it('binds the node selection listener to handleNodeSelect', () => {
    const { unmount } = renderHook(() => useWorkflowSearch())
    act(() => {
      document.dispatchEvent(new CustomEvent('workflow:select-node', {
        detail: {
          nodeId: 'node-42',
          focus: false,
        },
      }))
    })
    expect(mockHandleNodeSelect).toHaveBeenCalledWith('node-42')
    unmount()
  })
})

View File

@ -0,0 +1,28 @@
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useWorkflowStartRun } from '../use-workflow-start-run'
// useWorkflowStartRun must expose exactly the start-run handlers injected
// through the hooks store, by reference.
describe('useWorkflowStartRun', () => {
  it('returns start-run handlers from hooks store', () => {
    const handlers = {
      handleStartWorkflowRun: vi.fn(),
      handleWorkflowStartRunInWorkflow: vi.fn(),
      handleWorkflowStartRunInChatflow: vi.fn(),
      handleWorkflowTriggerScheduleRunInWorkflow: vi.fn(),
      handleWorkflowTriggerWebhookRunInWorkflow: vi.fn(),
      handleWorkflowTriggerPluginRunInWorkflow: vi.fn(),
      handleWorkflowRunAllTriggersInWorkflow: vi.fn(),
    }
    const { result } = renderWorkflowHook(() => useWorkflowStartRun(), {
      hooksStoreProps: handlers,
    })
    // Identity check for every injected handler — no wrapping allowed.
    for (const name of Object.keys(handlers) as Array<keyof typeof handlers>)
      expect(result.current[name]).toBe(handlers[name])
  })
})

View File

@ -0,0 +1,66 @@
import { act } from '@testing-library/react'
import { createNode } from '../../__tests__/fixtures'
import { renderWorkflowHook } from '../../__tests__/workflow-test-env'
import { useWorkflowUpdate } from '../use-workflow-update'
// Hoisted spies backing the reactflow / event-emitter / utils mocks below;
// vi.mock factories are hoisted above imports, so the spies must be too.
const mockSetViewport = vi.hoisted(() => vi.fn())
const mockEventEmit = vi.hoisted(() => vi.fn())
// The initializer spies pass their first argument straight through so the
// hook receives usable node/edge arrays.
const mockInitialNodes = vi.hoisted(() => vi.fn((nodes: unknown[], _edges: unknown[]) => nodes))
const mockInitialEdges = vi.hoisted(() => vi.fn((edges: unknown[], _nodes: unknown[]) => edges))
vi.mock('reactflow', () => ({
  Position: {
    Left: 'left',
    Right: 'right',
    Top: 'top',
    Bottom: 'bottom',
  },
  useReactFlow: () => ({
    setViewport: mockSetViewport,
  }),
}))
vi.mock('@/context/event-emitter', () => ({
  useEventEmitterContextContext: () => ({
    eventEmitter: {
      emit: (...args: unknown[]) => mockEventEmit(...args),
    },
  }),
}))
// Keep the real utils module but intercept the node/edge initializers.
vi.mock('../../utils', async importOriginal => ({
  ...(await importOriginal<typeof import('../../utils')>()),
  initialNodes: (nodes: unknown[], edges: unknown[]) => mockInitialNodes(nodes, edges),
  initialEdges: (edges: unknown[], nodes: unknown[]) => mockInitialEdges(edges, nodes),
}))
// Covers useWorkflowUpdate.handleUpdateWorkflowCanvas: data is initialized and
// broadcast via the event emitter, and setViewport is only called for a
// well-formed viewport object.
describe('useWorkflowUpdate', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('emits initialized data and only sets a valid viewport', () => {
    const { result } = renderWorkflowHook(() => useWorkflowUpdate())
    act(() => {
      result.current.handleUpdateWorkflowCanvas({
        nodes: [createNode({ id: 'n1' })],
        edges: [],
        viewport: { x: 10, y: 20, zoom: 0.5 },
      } as never)
      // Second call carries a malformed viewport; it must be ignored.
      result.current.handleUpdateWorkflowCanvas({
        nodes: [],
        edges: [],
        viewport: { x: 'bad' } as never,
      })
    })
    expect(mockInitialNodes).toHaveBeenCalled()
    expect(mockInitialEdges).toHaveBeenCalled()
    expect(mockEventEmit).toHaveBeenCalledWith(expect.objectContaining({
      type: 'WORKFLOW_DATA_UPDATE',
    }))
    // Only the first (valid) viewport reaches setViewport.
    expect(mockSetViewport).toHaveBeenCalledTimes(1)
    expect(mockSetViewport).toHaveBeenCalledWith({ x: 10, y: 20, zoom: 0.5 })
  })
})

View File

@ -0,0 +1,86 @@
import { act, renderHook } from '@testing-library/react'
import { useWorkflowZoom } from '../use-workflow-zoom'
// Hoisted spies plus a mutable runtimeState toggle so each test can flip the
// read-only flag; everything must be hoisted for the vi.mock factories below.
const {
  mockFitView,
  mockZoomIn,
  mockZoomOut,
  mockZoomTo,
  mockHandleSyncWorkflowDraft,
  runtimeState,
} = vi.hoisted(() => ({
  mockFitView: vi.fn(),
  mockZoomIn: vi.fn(),
  mockZoomOut: vi.fn(),
  mockZoomTo: vi.fn(),
  mockHandleSyncWorkflowDraft: vi.fn(),
  runtimeState: {
    workflowReadOnly: false,
  },
}))
vi.mock('reactflow', () => ({
  useReactFlow: () => ({
    fitView: mockFitView,
    zoomIn: mockZoomIn,
    zoomOut: mockZoomOut,
    zoomTo: mockZoomTo,
  }),
}))
vi.mock('../use-nodes-sync-draft', () => ({
  useNodesSyncDraft: () => ({
    handleSyncWorkflowDraft: (...args: unknown[]) => mockHandleSyncWorkflowDraft(...args),
  }),
}))
// getWorkflowReadOnly reads runtimeState lazily, so tests can flip the flag
// after the mock module is created.
vi.mock('../use-workflow', () => ({
  useWorkflowReadOnly: () => ({
    getWorkflowReadOnly: () => runtimeState.workflowReadOnly,
  }),
}))
// Covers useWorkflowZoom: every zoom action delegates to React Flow and syncs
// the draft, and all of them are no-ops while the workflow is read-only.
describe('useWorkflowZoom', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    runtimeState.workflowReadOnly = false
  })
  it('runs zoom actions and syncs the workflow draft when editable', () => {
    const { result } = renderHook(() => useWorkflowZoom())
    act(() => {
      result.current.handleFitView()
      result.current.handleBackToOriginalSize()
      result.current.handleSizeToHalf()
      result.current.handleZoomOut()
      result.current.handleZoomIn()
    })
    expect(mockFitView).toHaveBeenCalledTimes(1)
    // zoomTo(1) for original size, zoomTo(0.5) for half size.
    expect(mockZoomTo).toHaveBeenCalledWith(1)
    expect(mockZoomTo).toHaveBeenCalledWith(0.5)
    expect(mockZoomOut).toHaveBeenCalledTimes(1)
    expect(mockZoomIn).toHaveBeenCalledTimes(1)
    // One draft sync per action: 5 actions -> 5 syncs.
    expect(mockHandleSyncWorkflowDraft).toHaveBeenCalledTimes(5)
  })
  it('blocks zoom actions when the workflow is read-only', () => {
    runtimeState.workflowReadOnly = true
    const { result } = renderHook(() => useWorkflowZoom())
    act(() => {
      result.current.handleFitView()
      result.current.handleBackToOriginalSize()
      result.current.handleSizeToHalf()
      result.current.handleZoomOut()
      result.current.handleZoomIn()
    })
    expect(mockFitView).not.toHaveBeenCalled()
    expect(mockZoomTo).not.toHaveBeenCalled()
    expect(mockZoomOut).not.toHaveBeenCalled()
    expect(mockZoomIn).not.toHaveBeenCalled()
    expect(mockHandleSyncWorkflowDraft).not.toHaveBeenCalled()
  })
})

View File

@ -0,0 +1,186 @@
import type { WorkflowRunningData } from '../../../types'
import type {
IterationFinishedResponse,
IterationNextResponse,
LoopFinishedResponse,
LoopNextResponse,
NodeFinishedResponse,
NodeStartedResponse,
WorkflowStartedResponse,
} from '@/types/workflow'
import { useEdges, useNodes, useStoreApi } from 'reactflow'
import { createEdge, createNode } from '../../../__tests__/fixtures'
import { renderWorkflowFlowHook } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus, WorkflowRunningStatus } from '../../../types'
// Runtime flags the run-event handlers write onto node data during a run.
type NodeRuntimeState = {
  _waitingRun?: boolean
  _runningStatus?: NodeRunningStatus
  _retryIndex?: number
  _iterationIndex?: number
  _iterationLength?: number
  _loopIndex?: number
  _loopLength?: number
  _runningBranchId?: string
}
// Runtime flags written onto edge data while a run is in progress.
type EdgeRuntimeState = {
  _sourceRunningStatus?: NodeRunningStatus
  _targetRunningStatus?: NodeRunningStatus
  _waitingRun?: boolean
}
// Narrowing helpers: node/edge `data` is typed loosely, so cast to read flags.
export const getNodeRuntimeState = (node?: { data?: unknown }): NodeRuntimeState =>
  (node?.data ?? {}) as NodeRuntimeState
export const getEdgeRuntimeState = (edge?: { data?: unknown }): EdgeRuntimeState =>
  (edge?.data ?? {}) as EdgeRuntimeState
// Single-node fixture for the plain run-event tests: one 200x80 node (n1)
// that has already left the waiting state.
function createRunNodes() {
  const activeNode = createNode({
    id: 'n1',
    width: 200,
    height: 80,
    data: { _waitingRun: false },
  })
  return [activeNode]
}
// One edge feeding n1 from an upstream node n0 (which is not in the fixture).
function createRunEdges() {
  const incomingEdge = createEdge({
    id: 'e1',
    source: 'n0',
    target: 'n1',
    data: {},
  })
  return [incomingEdge]
}
// Three-node fixture for viewport tests: a finished root (n0), a waiting root
// (n1), and a waiting child nested inside n1 (n2). All share a 200x80 footprint.
export function createViewportNodes() {
  return [
    createNode({
      id: 'n0',
      width: 200,
      height: 80,
      data: { _runningStatus: NodeRunningStatus.Succeeded },
    }),
    createNode({
      id: 'n1',
      position: { x: 100, y: 50 },
      width: 200,
      height: 80,
      data: { _waitingRun: true },
    }),
    createNode({
      id: 'n2',
      position: { x: 400, y: 50 },
      width: 200,
      height: 80,
      parentId: 'n1',
      data: { _waitingRun: true },
    }),
  ]
}
// Single-edge fixture: n0 -> n1 with a 'source' handle and empty runtime data.
function createViewportEdges() {
  return [
    createEdge({
      id: 'e1',
      source: 'n0',
      target: 'n1',
      sourceHandle: 'source',
      data: {},
    }),
  ]
}
// Fake canvas dimensions passed to viewport-adjusting handlers.
export const containerParams = { clientWidth: 1200, clientHeight: 800 }
// Renders a run-event hook inside the shared workflow test environment and
// exposes the live nodes/edges alongside the hook's own return value.
export function renderRunEventHook<T extends Record<string, unknown>>(
  useHook: () => T,
  options?: {
    nodes?: ReturnType<typeof createRunNodes>
    edges?: ReturnType<typeof createRunEdges>
    initialStoreState?: Record<string, unknown>
  },
) {
  const { nodes = createRunNodes(), edges = createRunEdges(), initialStoreState } = options ?? {}
  return renderWorkflowFlowHook(() => ({
    ...useHook(),
    nodes: useNodes(),
    edges: useEdges(),
  }), {
    nodes,
    edges,
    // fitView disabled so tests observe only handler-driven changes.
    reactFlowProps: { fitView: false },
    initialStoreState,
  })
}
// Like renderRunEventHook, but also exposes the React Flow store so tests can
// assert on viewport transforms after a handler pans the canvas.
export function renderViewportHook<T extends Record<string, unknown>>(
  useHook: () => T,
  options?: {
    nodes?: ReturnType<typeof createViewportNodes>
    edges?: ReturnType<typeof createViewportEdges>
    initialStoreState?: Record<string, unknown>
  },
) {
  const {
    nodes = createViewportNodes(),
    edges = createViewportEdges(),
    initialStoreState,
  } = options ?? {}
  return renderWorkflowFlowHook(() => ({
    ...useHook(),
    nodes: useNodes(),
    edges: useEdges(),
    reactFlowStore: useStoreApi(),
  }), {
    nodes,
    edges,
    // fitView disabled so viewport assertions see only the handler's changes.
    reactFlowProps: { fitView: false },
    initialStoreState,
  })
}
// Response factories for run-event tests. NOTE: `overrides` is spread at the
// top level, so overriding `data` replaces the whole default data object
// rather than merging into it.
export const createStartedResponse = (overrides: Partial<WorkflowStartedResponse> = {}): WorkflowStartedResponse => ({
  task_id: 'task-2',
  data: { id: 'run-1', workflow_id: 'wf-1', created_at: 1000 },
  ...overrides,
} as WorkflowStartedResponse)
export const createNodeFinishedResponse = (overrides: Partial<NodeFinishedResponse> = {}): NodeFinishedResponse => ({
  data: { id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
  ...overrides,
} as NodeFinishedResponse)
export const createIterationNextResponse = (overrides: Partial<IterationNextResponse> = {}): IterationNextResponse => ({
  data: { node_id: 'n1' },
  ...overrides,
} as IterationNextResponse)
export const createIterationFinishedResponse = (overrides: Partial<IterationFinishedResponse> = {}): IterationFinishedResponse => ({
  data: { id: 'iter-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
  ...overrides,
} as IterationFinishedResponse)
export const createLoopNextResponse = (overrides: Partial<LoopNextResponse> = {}): LoopNextResponse => ({
  data: { node_id: 'n1', index: 5 },
  ...overrides,
} as LoopNextResponse)
export const createLoopFinishedResponse = (overrides: Partial<LoopFinishedResponse> = {}): LoopFinishedResponse => ({
  data: { id: 'loop-1', node_id: 'n1', status: NodeRunningStatus.Succeeded },
  ...overrides,
} as LoopFinishedResponse)
export const createNodeStartedResponse = (overrides: Partial<NodeStartedResponse> = {}): NodeStartedResponse => ({
  data: { node_id: 'n1' },
  ...overrides,
} as NodeStartedResponse)
// Convenience result stub for resume-from-paused scenarios.
export const pausedRunningData = (): WorkflowRunningData['result'] => ({ status: WorkflowRunningStatus.Paused } as WorkflowRunningData['result'])

View File

@ -0,0 +1,83 @@
import type { AgentLogResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowAgentLog } from '../use-workflow-agent-log'
vi.mock('@/app/components/base/file-uploader/utils', () => ({
getFilesInLogs: vi.fn(() => []),
}))
// Covers useWorkflowAgentLog: agent log entries are stored on the matching
// tracing entry's execution_metadata, keyed by message_id (create / append /
// update-in-place), creating the metadata container on demand.
describe('useWorkflowAgentLog', () => {
  it('creates agent_log when execution_metadata has none', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1', execution_metadata: {} }],
        }),
      },
    })
    result.current.handleWorkflowAgentLog({
      data: { node_id: 'n1', message_id: 'm1' },
    } as AgentLogResponse)
    const trace = store.getState().workflowRunningData!.tracing![0]
    expect(trace.execution_metadata!.agent_log).toHaveLength(1)
    expect(trace.execution_metadata!.agent_log![0].message_id).toBe('m1')
  })
  it('appends to existing agent_log', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{
            node_id: 'n1',
            execution_metadata: { agent_log: [{ message_id: 'm1', text: 'log1' }] },
          }],
        }),
      },
    })
    // A new message_id is appended after the existing entry.
    result.current.handleWorkflowAgentLog({
      data: { node_id: 'n1', message_id: 'm2' },
    } as AgentLogResponse)
    expect(store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log).toHaveLength(2)
  })
  it('updates an existing log entry by message_id', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{
            node_id: 'n1',
            execution_metadata: { agent_log: [{ message_id: 'm1', text: 'old' }] },
          }],
        }),
      },
    })
    // Same message_id replaces the entry's payload instead of appending.
    result.current.handleWorkflowAgentLog({
      data: { node_id: 'n1', message_id: 'm1', text: 'new' },
    } as unknown as AgentLogResponse)
    const log = store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log!
    expect(log).toHaveLength(1)
    expect((log[0] as unknown as { text: string }).text).toBe('new')
  })
  it('creates execution_metadata when it does not exist', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowAgentLog(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1' }],
        }),
      },
    })
    result.current.handleWorkflowAgentLog({
      data: { node_id: 'n1', message_id: 'm1' },
    } as AgentLogResponse)
    expect(store.getState().workflowRunningData!.tracing![0].execution_metadata!.agent_log).toHaveLength(1)
  })
})

View File

@ -0,0 +1,15 @@
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { WorkflowRunningStatus } from '../../../types'
import { useWorkflowFailed } from '../use-workflow-failed'
describe('useWorkflowFailed', () => {
it('sets status to Failed', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFailed(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFailed()
expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Failed)
})
})

View File

@ -0,0 +1,32 @@
import type { WorkflowFinishedResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowFinished } from '../use-workflow-finished'
describe('useWorkflowFinished', () => {
it('merges data into result and activates result tab for single string output', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFinished(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFinished({
data: { status: 'succeeded', outputs: { answer: 'hello' } },
} as WorkflowFinishedResponse)
const state = store.getState().workflowRunningData!
expect(state.result.status).toBe('succeeded')
expect(state.resultTabActive).toBe(true)
expect(state.resultText).toBe('hello')
})
it('does not activate the result tab for multi-key outputs', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowFinished(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowFinished({
data: { status: 'succeeded', outputs: { a: 'hello', b: 'world' } },
} as WorkflowFinishedResponse)
expect(store.getState().workflowRunningData!.resultTabActive).toBeFalsy()
})
})

View File

@ -0,0 +1,73 @@
import { act, waitFor } from '@testing-library/react'
import { createNode } from '../../../__tests__/fixtures'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { BlockEnum, NodeRunningStatus } from '../../../types'
import { useWorkflowNodeFinished } from '../use-workflow-node-finished'
import {
createNodeFinishedResponse,
getEdgeRuntimeState,
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Node-finished run event: must sync both the tracing entry in the store and the
// runtime state (_runningStatus / _runningBranchId) on the rendered graph.
describe('useWorkflowNodeFinished', () => {
  it('updates tracing and node running status', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeFinished(createNodeFinishedResponse())
    })
    // The tracing entry is updated synchronously…
    const trace = store.getState().workflowRunningData!.tracing![0]
    expect(trace.status).toBe(NodeRunningStatus.Succeeded)
    // …while node/edge runtime state lands asynchronously via React state updates.
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
    })
  })
  it('sets _runningBranchId for IfElse nodes', async () => {
    const { result } = renderRunEventHook(() => useWorkflowNodeFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'trace-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      // IfElse nodes expose the taken branch via outputs.selected_case_id.
      result.current.handleWorkflowNodeFinished(createNodeFinishedResponse({
        data: {
          id: 'trace-1',
          node_id: 'n1',
          node_type: BlockEnum.IfElse,
          status: NodeRunningStatus.Succeeded,
          outputs: { selected_case_id: 'branch-a' },
        } as never,
      }))
    })
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningBranchId).toBe('branch-a')
    })
  })
})

View File

@ -0,0 +1,44 @@
import type { HumanInputFormFilledResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowNodeHumanInputFormFilled } from '../use-workflow-node-human-input-form-filled'
// Form-filled event: the pending form for the node moves from
// humanInputFormDataList into humanInputFilledFormDataList.
describe('useWorkflowNodeHumanInputFormFilled', () => {
  // One pending form for node n1, built fresh per test to avoid shared mutation.
  const buildInitialState = () => ({
    workflowRunningData: baseRunningData({
      humanInputFormDataList: [
        { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '' },
      ],
    }),
  })
  const filledEvent = () => ({
    data: { node_id: 'n1', node_title: 'Node 1', rendered_content: 'done' },
  } as HumanInputFormFilledResponse)
  it('removes the form from pending and adds it to filled', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormFilled(), {
      initialStoreState: buildInitialState(),
    })
    result.current.handleWorkflowNodeHumanInputFormFilled(filledEvent())
    const state = store.getState().workflowRunningData!
    expect(state.humanInputFormDataList).toHaveLength(0)
    expect(state.humanInputFilledFormDataList).toHaveLength(1)
    expect(state.humanInputFilledFormDataList![0].node_id).toBe('n1')
  })
  it('creates humanInputFilledFormDataList when it does not exist', () => {
    const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormFilled(), {
      initialStoreState: buildInitialState(),
    })
    result.current.handleWorkflowNodeHumanInputFormFilled(filledEvent())
    expect(store.getState().workflowRunningData!.humanInputFilledFormDataList).toBeDefined()
  })
})

View File

@ -0,0 +1,23 @@
import type { HumanInputFormTimeoutResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowNodeHumanInputFormTimeout } from '../use-workflow-node-human-input-form-timeout'
// Timeout event: the matching pending form gets its expiration_time overwritten.
describe('useWorkflowNodeHumanInputFormTimeout', () => {
  it('sets expiration_time on the matching form', () => {
    const pendingForm = { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '', expiration_time: 0 }
    const { result, store } = renderWorkflowHook(() => useWorkflowNodeHumanInputFormTimeout(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          humanInputFormDataList: [pendingForm],
        }),
      },
    })
    const timeoutEvent = {
      data: { node_id: 'n1', node_title: 'Node 1', expiration_time: 1000 },
    } as HumanInputFormTimeoutResponse
    result.current.handleWorkflowNodeHumanInputFormTimeout(timeoutEvent)
    const [updatedForm] = store.getState().workflowRunningData!.humanInputFormDataList!
    expect(updatedForm.expiration_time).toBe(1000)
  })
})

View File

@ -0,0 +1,96 @@
import type { HumanInputRequiredResponse } from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { createNode } from '../../../__tests__/fixtures'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeHumanInputRequired } from '../use-workflow-node-human-input-required'
import {
getNodeRuntimeState,
renderViewportHook,
} from './test-helpers'
// Human-input-required event: records the form in humanInputFormDataList and
// pauses both the tracing entry and the node's runtime state.
describe('useWorkflowNodeHumanInputRequired', () => {
  it('creates humanInputFormDataList and sets tracing and node to Paused', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: 'content' },
      } as HumanInputRequiredResponse)
    })
    // Store updates are synchronous…
    const state = store.getState().workflowRunningData!
    expect(state.humanInputFormDataList).toHaveLength(1)
    expect(state.humanInputFormDataList![0].form_id).toBe('f1')
    expect(state.tracing![0].status).toBe(NodeRunningStatus.Paused)
    // …node runtime state updates are asynchronous.
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes.find(item => item.id === 'n1'))._runningStatus).toBe(NodeRunningStatus.Paused)
    })
  })
  it('updates existing form entry for the same node_id', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n1', status: NodeRunningStatus.Running }],
          // A form for n1 already exists; a second event must replace, not append.
          humanInputFormDataList: [
            { node_id: 'n1', form_id: 'old', node_title: 'Node 1', form_content: 'old' },
          ],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n1', form_id: 'new', node_title: 'Node 1', form_content: 'new' },
      } as HumanInputRequiredResponse)
    })
    const formList = store.getState().workflowRunningData!.humanInputFormDataList!
    expect(formList).toHaveLength(1)
    expect(formList[0].form_id).toBe('new')
  })
  it('appends a new form entry for a different node_id', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeHumanInputRequired(), {
      nodes: [
        createNode({ id: 'n1', data: { _runningStatus: NodeRunningStatus.Running } }),
        createNode({ id: 'n2', position: { x: 300, y: 0 }, data: { _runningStatus: NodeRunningStatus.Running } }),
      ],
      edges: [],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ node_id: 'n2', status: NodeRunningStatus.Running }],
          humanInputFormDataList: [
            { node_id: 'n1', form_id: 'f1', node_title: 'Node 1', form_content: '' },
          ],
        }),
      },
    })
    act(() => {
      // Event targets n2, so the n1 form stays and a second entry is added.
      result.current.handleWorkflowNodeHumanInputRequired({
        data: { node_id: 'n2', form_id: 'f2', node_title: 'Node 2', form_content: 'content2' },
      } as HumanInputRequiredResponse)
    })
    expect(store.getState().workflowRunningData!.humanInputFormDataList).toHaveLength(2)
  })
})

View File

@ -0,0 +1,42 @@
import { act, waitFor } from '@testing-library/react'
import { createNode } from '../../../__tests__/fixtures'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { DEFAULT_ITER_TIMES } from '../../../constants'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeIterationFinished } from '../use-workflow-node-iteration-finished'
import {
createIterationFinishedResponse,
getEdgeRuntimeState,
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Iteration-finished event: marks the tracing entry succeeded, resets the
// iteration counter to its default, and propagates status to nodes/edges.
describe('useWorkflowNodeIterationFinished', () => {
  it('updates tracing, resets iterTimes, updates node status and edges', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeIterationFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'iter-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
        // Deliberately non-default so the reset is observable.
        iterTimes: 10,
      },
    })
    act(() => {
      result.current.handleWorkflowNodeIterationFinished(createIterationFinishedResponse())
    })
    expect(store.getState().iterTimes).toBe(DEFAULT_ITER_TIMES)
    // Node/edge runtime state updates land asynchronously.
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
    })
  })
})

View File

@ -0,0 +1,28 @@
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { useWorkflowNodeIterationNext } from '../use-workflow-node-iteration-next'
import {
createIterationNextResponse,
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Iteration-next event: the node records the current iteration index and the
// store counter advances by one.
describe('useWorkflowNodeIterationNext', () => {
  it('sets _iterationIndex and increments iterTimes', async () => {
    const rendered = renderRunEventHook(() => useWorkflowNodeIterationNext(), {
      initialStoreState: {
        workflowRunningData: baseRunningData(),
        iterTimes: 3,
      },
    })
    act(() => {
      rendered.result.current.handleWorkflowNodeIterationNext(createIterationNextResponse())
    })
    // The node picks up the pre-increment counter value…
    await waitFor(() => {
      const firstNode = rendered.result.current.nodes[0]
      expect(getNodeRuntimeState(firstNode)._iterationIndex).toBe(3)
    })
    // …and the store counter moves to the next iteration.
    expect(rendered.store.getState().iterTimes).toBe(4)
  })
})

View File

@ -0,0 +1,49 @@
import type { IterationStartedResponse } from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { DEFAULT_ITER_TIMES } from '../../../constants'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeIterationStarted } from '../use-workflow-node-iteration-started'
import {
containerParams,
createViewportNodes,
getEdgeRuntimeState,
getNodeRuntimeState,
renderViewportHook,
} from './test-helpers'
// Iteration-started event: appends a Running tracing entry, resets iterTimes,
// recenters the viewport on the node, and seeds the node with _iterationLength.
describe('useWorkflowNodeIterationStarted', () => {
  it('pushes to tracing, resets iterTimes, sets viewport, and updates node with _iterationLength', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeIterationStarted(), {
      nodes: createViewportNodes().slice(0, 2),
      initialStoreState: {
        workflowRunningData: baseRunningData(),
        // Deliberately non-default so the reset is observable.
        iterTimes: 99,
      },
    })
    act(() => {
      result.current.handleWorkflowNodeIterationStarted(
        { data: { node_id: 'n1', metadata: { iterator_length: 10 } } } as IterationStartedResponse,
        containerParams,
      )
    })
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing[0].status).toBe(NodeRunningStatus.Running)
    expect(store.getState().iterTimes).toBe(DEFAULT_ITER_TIMES)
    await waitFor(() => {
      // Expected transform follows from containerParams and the n1 fixture position.
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(node)._iterationLength).toBe(10)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
})

View File

@ -0,0 +1,40 @@
import { act, waitFor } from '@testing-library/react'
import { createNode } from '../../../__tests__/fixtures'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeLoopFinished } from '../use-workflow-node-loop-finished'
import {
createLoopFinishedResponse,
getEdgeRuntimeState,
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Loop-finished event: marks the tracing entry succeeded and propagates the
// status to the node and its incoming edge.
describe('useWorkflowNodeLoopFinished', () => {
  it('updates tracing, node status and edges', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowNodeLoopFinished(), {
      nodes: [
        createNode({
          id: 'n1',
          data: { _runningStatus: NodeRunningStatus.Running },
        }),
      ],
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [{ id: 'loop-1', node_id: 'n1', status: NodeRunningStatus.Running }],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopFinished(createLoopFinishedResponse())
    })
    // Tracing is updated synchronously; node/edge runtime state asynchronously.
    expect(store.getState().workflowRunningData!.tracing![0].status).toBe(NodeRunningStatus.Succeeded)
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes[0])._runningStatus).toBe(NodeRunningStatus.Succeeded)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Succeeded)
    })
  })
})

View File

@ -0,0 +1,38 @@
import { act, waitFor } from '@testing-library/react'
import { createNode } from '../../../__tests__/fixtures'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeLoopNext } from '../use-workflow-node-loop-next'
import {
createLoopNextResponse,
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Loop-next event: the loop node records the new _loopIndex while its child
// nodes are reset to the Waiting state for the next pass.
describe('useWorkflowNodeLoopNext', () => {
  it('sets _loopIndex and resets child nodes to waiting', async () => {
    const { result } = renderRunEventHook(() => useWorkflowNodeLoopNext(), {
      nodes: [
        createNode({ id: 'n1', data: {} }),
        // n2 is a child of the loop node via parentId and starts as "already run".
        createNode({
          id: 'n2',
          position: { x: 300, y: 0 },
          parentId: 'n1',
          data: { _waitingRun: false },
        }),
      ],
      edges: [],
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopNext(createLoopNextResponse())
    })
    await waitFor(() => {
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n1'))._loopIndex).toBe(5)
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n2'))._waitingRun).toBe(true)
      expect(getNodeRuntimeState(result.current.nodes.find(node => node.id === 'n2'))._runningStatus).toBe(NodeRunningStatus.Waiting)
    })
  })
})

View File

@ -0,0 +1,43 @@
import type { LoopStartedResponse } from '@/types/workflow'
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeLoopStarted } from '../use-workflow-node-loop-started'
import {
containerParams,
createViewportNodes,
getEdgeRuntimeState,
getNodeRuntimeState,
renderViewportHook,
} from './test-helpers'
// Loop-started event: appends a Running tracing entry, recenters the viewport
// on the node, and seeds the node with _loopLength from the event metadata.
describe('useWorkflowNodeLoopStarted', () => {
  it('pushes to tracing, sets viewport, and updates node with _loopLength', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeLoopStarted(), {
      nodes: createViewportNodes().slice(0, 2),
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeLoopStarted(
        { data: { node_id: 'n1', metadata: { loop_length: 5 } } } as LoopStartedResponse,
        containerParams,
      )
    })
    expect(store.getState().workflowRunningData!.tracing![0].status).toBe(NodeRunningStatus.Running)
    await waitFor(() => {
      // Expected transform follows from containerParams and the n1 fixture position.
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(node)._loopLength).toBe(5)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
})

View File

@ -0,0 +1,27 @@
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { useWorkflowNodeRetry } from '../use-workflow-node-retry'
import {
getNodeRuntimeState,
renderRunEventHook,
} from './test-helpers'
// Retry event: appends a tracing entry and mirrors the retry counter onto the
// node's runtime state.
describe('useWorkflowNodeRetry', () => {
  it('pushes retry data to tracing and updates _retryIndex', async () => {
    const rendered = renderRunEventHook(() => useWorkflowNodeRetry(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    const retryEvent = {
      data: { node_id: 'n1', retry_index: 2 },
    } as never
    act(() => {
      rendered.result.current.handleWorkflowNodeRetry(retryEvent)
    })
    expect(rendered.store.getState().workflowRunningData!.tracing).toHaveLength(1)
    await waitFor(() => {
      const firstNode = rendered.result.current.nodes[0]
      expect(getNodeRuntimeState(firstNode)._retryIndex).toBe(2)
    })
  })
})

View File

@ -0,0 +1,80 @@
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { NodeRunningStatus } from '../../../types'
import { useWorkflowNodeStarted } from '../use-workflow-node-started'
import {
containerParams,
createNodeStartedResponse,
getEdgeRuntimeState,
getNodeRuntimeState,
renderViewportHook,
} from './test-helpers'
// Node-started event: appends (or updates) a Running tracing entry, marks the
// node running, and recenters the viewport — but only for root-level nodes.
describe('useWorkflowNodeStarted', () => {
  it('pushes to tracing, sets node running, and adjusts viewport for root node', async () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowNodeStarted(createNodeStartedResponse(), containerParams)
    })
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing).toHaveLength(1)
    expect(tracing[0].status).toBe(NodeRunningStatus.Running)
    await waitFor(() => {
      // Expected transform follows from containerParams and the n1 fixture position.
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(200)
      expect(transform[1]).toBe(310)
      expect(transform[2]).toBe(1)
      const node = result.current.nodes.find(item => item.id === 'n1')
      expect(getNodeRuntimeState(node)._runningStatus).toBe(NodeRunningStatus.Running)
      expect(getNodeRuntimeState(node)._waitingRun).toBe(false)
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBe(NodeRunningStatus.Running)
    })
  })
  it('does not adjust viewport for child nodes', async () => {
    const { result } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      // n2 is a child node in the fixture set, so the viewport must stay put.
      result.current.handleWorkflowNodeStarted(createNodeStartedResponse({
        data: { node_id: 'n2' } as never,
      }), containerParams)
    })
    await waitFor(() => {
      const transform = result.current.reactFlowStore.getState().transform
      expect(transform[0]).toBe(0)
      expect(transform[1]).toBe(0)
      expect(transform[2]).toBe(1)
      expect(getNodeRuntimeState(result.current.nodes.find(item => item.id === 'n2'))._runningStatus).toBe(NodeRunningStatus.Running)
    })
  })
  it('updates existing tracing entry when node_id already exists', () => {
    const { result, store } = renderViewportHook(() => useWorkflowNodeStarted(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          tracing: [
            { node_id: 'n0', status: NodeRunningStatus.Succeeded } as never,
            { node_id: 'n1', status: NodeRunningStatus.Succeeded } as never,
          ],
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowNodeStarted(createNodeStartedResponse(), containerParams)
    })
    // The list length is unchanged: the n1 entry is updated in place.
    const tracing = store.getState().workflowRunningData!.tracing!
    expect(tracing).toHaveLength(2)
    expect(tracing[1].status).toBe(NodeRunningStatus.Running)
  })
})

View File

@ -0,0 +1,15 @@
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { WorkflowRunningStatus } from '../../../types'
import { useWorkflowPaused } from '../use-workflow-paused'
describe('useWorkflowPaused', () => {
it('sets status to Paused', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowPaused(), {
initialStoreState: { workflowRunningData: baseRunningData() },
})
result.current.handleWorkflowPaused()
expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Paused)
})
})

View File

@ -0,0 +1,54 @@
import { renderHook } from '@testing-library/react'
import { useWorkflowRunEvent } from '../use-workflow-run-event'
// Spy handlers shared by the mock below. vi.hoisted runs this factory before the
// hoisted vi.mock call so the spies exist when the mock module is evaluated.
const handlers = vi.hoisted(() => ({
  handleWorkflowStarted: vi.fn(),
  handleWorkflowFinished: vi.fn(),
  handleWorkflowFailed: vi.fn(),
  handleWorkflowNodeStarted: vi.fn(),
  handleWorkflowNodeFinished: vi.fn(),
  handleWorkflowNodeIterationStarted: vi.fn(),
  handleWorkflowNodeIterationNext: vi.fn(),
  handleWorkflowNodeIterationFinished: vi.fn(),
  handleWorkflowNodeLoopStarted: vi.fn(),
  handleWorkflowNodeLoopNext: vi.fn(),
  handleWorkflowNodeLoopFinished: vi.fn(),
  handleWorkflowNodeRetry: vi.fn(),
  handleWorkflowTextChunk: vi.fn(),
  handleWorkflowTextReplace: vi.fn(),
  handleWorkflowAgentLog: vi.fn(),
  handleWorkflowPaused: vi.fn(),
  handleWorkflowNodeHumanInputRequired: vi.fn(),
  handleWorkflowNodeHumanInputFormFilled: vi.fn(),
  handleWorkflowNodeHumanInputFormTimeout: vi.fn(),
}))
// Replace every individual event hook with a stub returning its spy handler,
// so the composition hook can be tested in isolation.
vi.mock('..', () => ({
  useWorkflowStarted: () => ({ handleWorkflowStarted: handlers.handleWorkflowStarted }),
  useWorkflowFinished: () => ({ handleWorkflowFinished: handlers.handleWorkflowFinished }),
  useWorkflowFailed: () => ({ handleWorkflowFailed: handlers.handleWorkflowFailed }),
  useWorkflowNodeStarted: () => ({ handleWorkflowNodeStarted: handlers.handleWorkflowNodeStarted }),
  useWorkflowNodeFinished: () => ({ handleWorkflowNodeFinished: handlers.handleWorkflowNodeFinished }),
  useWorkflowNodeIterationStarted: () => ({ handleWorkflowNodeIterationStarted: handlers.handleWorkflowNodeIterationStarted }),
  useWorkflowNodeIterationNext: () => ({ handleWorkflowNodeIterationNext: handlers.handleWorkflowNodeIterationNext }),
  useWorkflowNodeIterationFinished: () => ({ handleWorkflowNodeIterationFinished: handlers.handleWorkflowNodeIterationFinished }),
  useWorkflowNodeLoopStarted: () => ({ handleWorkflowNodeLoopStarted: handlers.handleWorkflowNodeLoopStarted }),
  useWorkflowNodeLoopNext: () => ({ handleWorkflowNodeLoopNext: handlers.handleWorkflowNodeLoopNext }),
  useWorkflowNodeLoopFinished: () => ({ handleWorkflowNodeLoopFinished: handlers.handleWorkflowNodeLoopFinished }),
  useWorkflowNodeRetry: () => ({ handleWorkflowNodeRetry: handlers.handleWorkflowNodeRetry }),
  useWorkflowTextChunk: () => ({ handleWorkflowTextChunk: handlers.handleWorkflowTextChunk }),
  useWorkflowTextReplace: () => ({ handleWorkflowTextReplace: handlers.handleWorkflowTextReplace }),
  useWorkflowAgentLog: () => ({ handleWorkflowAgentLog: handlers.handleWorkflowAgentLog }),
  useWorkflowPaused: () => ({ handleWorkflowPaused: handlers.handleWorkflowPaused }),
  useWorkflowNodeHumanInputRequired: () => ({ handleWorkflowNodeHumanInputRequired: handlers.handleWorkflowNodeHumanInputRequired }),
  useWorkflowNodeHumanInputFormFilled: () => ({ handleWorkflowNodeHumanInputFormFilled: handlers.handleWorkflowNodeHumanInputFormFilled }),
  useWorkflowNodeHumanInputFormTimeout: () => ({ handleWorkflowNodeHumanInputFormTimeout: handlers.handleWorkflowNodeHumanInputFormTimeout }),
}))
describe('useWorkflowRunEvent', () => {
  it('returns the composed handlers from all workflow event hooks', () => {
    // The composition hook should expose exactly the union of all sub-hook handlers.
    const { result } = renderHook(() => useWorkflowRunEvent())
    expect(result.current).toEqual(handlers)
  })
})

View File

@ -0,0 +1,56 @@
import { act, waitFor } from '@testing-library/react'
import { baseRunningData } from '../../../__tests__/workflow-test-env'
import { WorkflowRunningStatus } from '../../../types'
import { useWorkflowStarted } from '../use-workflow-started'
import {
createStartedResponse,
getEdgeRuntimeState,
getNodeRuntimeState,
pausedRunningData,
renderRunEventHook,
} from './test-helpers'
// Workflow-started event: a fresh start resets node/edge runtime state to
// waiting, while a resume from Paused keeps the existing runtime state intact.
describe('useWorkflowStarted', () => {
  it('initializes workflow running data and resets nodes and edges', async () => {
    const { result, store } = renderRunEventHook(() => useWorkflowStarted(), {
      initialStoreState: { workflowRunningData: baseRunningData() },
    })
    act(() => {
      result.current.handleWorkflowStarted(createStartedResponse())
    })
    const state = store.getState().workflowRunningData!
    expect(state.task_id).toBe('task-2')
    expect(state.result.status).toBe(WorkflowRunningStatus.Running)
    expect(state.resultText).toBe('')
    await waitFor(() => {
      // All prior run markers are cleared and everything returns to waiting.
      expect(getNodeRuntimeState(result.current.nodes[0])._waitingRun).toBe(true)
      expect(getNodeRuntimeState(result.current.nodes[0])._runningBranchId).toBeUndefined()
      expect(getEdgeRuntimeState(result.current.edges[0])._sourceRunningStatus).toBeUndefined()
      expect(getEdgeRuntimeState(result.current.edges[0])._targetRunningStatus).toBeUndefined()
      expect(getEdgeRuntimeState(result.current.edges[0])._waitingRun).toBe(true)
    })
  })
  it('resumes from Paused without resetting nodes or edges', () => {
    const { result, store } = renderRunEventHook(() => useWorkflowStarted(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({
          // Previous run was paused, so this start is a resume.
          result: pausedRunningData(),
        }),
      },
    })
    act(() => {
      result.current.handleWorkflowStarted(createStartedResponse({
        data: { id: 'run-2', workflow_id: 'wf-1', created_at: 2000 },
      }))
    })
    expect(store.getState().workflowRunningData!.result.status).toBe(WorkflowRunningStatus.Running)
    // Runtime state is untouched: nodes keep their non-waiting flags.
    expect(getNodeRuntimeState(result.current.nodes[0])._waitingRun).toBe(false)
    expect(getEdgeRuntimeState(result.current.edges[0])._waitingRun).toBeUndefined()
  })
})

View File

@ -0,0 +1,19 @@
import type { TextChunkResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowTextChunk } from '../use-workflow-text-chunk'
describe('useWorkflowTextChunk', () => {
it('appends text and activates the result tab', () => {
const { result, store } = renderWorkflowHook(() => useWorkflowTextChunk(), {
initialStoreState: {
workflowRunningData: baseRunningData({ resultText: 'Hello' }),
},
})
result.current.handleWorkflowTextChunk({ data: { text: ' World' } } as TextChunkResponse)
const state = store.getState().workflowRunningData!
expect(state.resultText).toBe('Hello World')
expect(state.resultTabActive).toBe(true)
})
})

View File

@ -0,0 +1,17 @@
import type { TextReplaceResponse } from '@/types/workflow'
import { baseRunningData, renderWorkflowHook } from '../../../__tests__/workflow-test-env'
import { useWorkflowTextReplace } from '../use-workflow-text-replace'
// Text-replace event: resultText is overwritten wholesale, not appended to.
describe('useWorkflowTextReplace', () => {
  it('replaces resultText', () => {
    const rendered = renderWorkflowHook(() => useWorkflowTextReplace(), {
      initialStoreState: {
        workflowRunningData: baseRunningData({ resultText: 'old text' }),
      },
    })
    const replaceEvent = { data: { text: 'new text' } } as TextReplaceResponse
    rendered.result.current.handleWorkflowTextReplace(replaceEvent)
    expect(rendered.store.getState().workflowRunningData!.resultText).toBe('new text')
  })
})

View File

@ -222,6 +222,33 @@ describe('panel-operator details', () => {
expect(screen.getByRole('link', { name: 'workflow.panel.helpLink' })).toHaveAttribute('href', 'https://docs.example.com/node')
})
it('should hide change action when node is undeletable', () => {
mockUseNodeMetaData.mockReturnValueOnce({
isTypeFixed: false,
isSingleton: true,
isUndeletable: true,
description: 'Undeletable node',
author: 'Dify',
} as ReturnType<typeof useNodeMetaData>)
renderWorkflowFlowComponent(
<PanelOperatorPopup
id="node-4"
data={{ type: BlockEnum.Code, title: 'Undeletable node', desc: '' } as any}
onClosePopup={vi.fn()}
showHelpLink={false}
/>,
{
nodes: [],
edges: [],
},
)
expect(screen.getByText('workflow.panel.runThisStep')).toBeInTheDocument()
expect(screen.queryByText('workflow.panel.change')).not.toBeInTheDocument()
expect(screen.queryByText('common.operation.delete')).not.toBeInTheDocument()
})
it('should render workflow-tool and readonly popup variants', () => {
mockUseAllWorkflowTools.mockReturnValueOnce({
data: [{ id: 'workflow-tool', workflow_app_id: 'app-123' }],

View File

@ -47,7 +47,7 @@ const PanelOperatorPopup = ({
const { nodesReadOnly } = useNodesReadOnly()
const edge = edges.find(edge => edge.target === id)
const nodeMetaData = useNodeMetaData({ id, data } as Node)
const showChangeBlock = !nodeMetaData.isTypeFixed && !nodesReadOnly
const showChangeBlock = !nodeMetaData.isTypeFixed && !nodeMetaData.isUndeletable && !nodesReadOnly
const isChildNode = !!(data.isInIteration || data.isInLoop)
const { data: workflowTools } = useAllWorkflowTools()

View File

@ -0,0 +1,209 @@
// Unit tests for the code node's OutputVarList component.
// Focus: rename/remove behavior when several rows share the same variable
// name (duplicate-name handling), plus basic rendering/readonly states.
import type { OutputVar } from '../../../../code/types'
import { cleanup, fireEvent, render, screen } from '@testing-library/react'
import OutputVarList from '../output-var-list'
// Replace the type picker with a plain <select> so tests can drive it directly.
vi.mock('../var-type-picker', () => ({
  default: (props: { value: string, onChange: (v: string) => void, readonly: boolean }) => (
    <select
      data-testid="var-type-picker"
      value={props.value ?? ''}
      onChange={e => props.onChange(e.target.value)}
      disabled={props.readonly}
    >
      <option value="string">string</option>
      <option value="number">number</option>
    </select>
  ),
}))
// Stub the toast so error notifications do not render real UI.
vi.mock('@/app/components/base/ui/toast', () => ({
  toast: { error: vi.fn() },
}))
describe('OutputVarList', () => {
  // Build an OutputVar map from a name -> type record (children always null).
  const createOutputs = (entries: Record<string, string> = {}): OutputVar => {
    const result: OutputVar = {}
    for (const [key, type] of Object.entries(entries))
      result[key] = { type: type as OutputVar[string]['type'], children: null }
    return result
  }
  // Render the component and trigger a rename at the given index.
  // Returns the newOutputs passed to onChange.
  const collectRenameResult = (
    outputs: OutputVar,
    outputKeyOrders: string[],
    renameIndex: number,
    newName: string,
  ): OutputVar => {
    let captured: OutputVar | undefined
    render(
      <OutputVarList
        readonly={false}
        outputs={outputs}
        outputKeyOrders={outputKeyOrders}
        onChange={(newOutputs) => { captured = newOutputs }}
        onRemove={vi.fn()}
      />,
    )
    const inputs = screen.getAllByRole('textbox')
    fireEvent.change(inputs[renameIndex], { target: { value: newName } })
    return captured!
  }
  beforeEach(() => {
    vi.clearAllMocks()
  })
  describe('duplicate name handling', () => {
    it('should preserve outputs entry when renaming one of two duplicate-name variables', () => {
      const outputs = createOutputs({ var_1: 'string' })
      const outputKeyOrders = ['var_1', 'var_1']
      const newOutputs = collectRenameResult(outputs, outputKeyOrders, 1, '')
      // Renamed entry gets a new key ''
      expect(newOutputs['']).toEqual({ type: 'string', children: null })
      // Original key 'var_1' must survive because index 0 still uses it
      expect(newOutputs.var_1).toEqual({ type: 'string', children: null })
    })
    it('should delete old key when renamed entry is the only one using it', () => {
      const outputs = createOutputs({ var_1: 'string', var_2: 'number' })
      const outputKeyOrders = ['var_1', 'var_2']
      const newOutputs = collectRenameResult(outputs, outputKeyOrders, 1, 'renamed')
      expect(newOutputs.renamed).toEqual({ type: 'number', children: null })
      expect(newOutputs.var_2).toBeUndefined()
      expect(newOutputs.var_1).toEqual({ type: 'string', children: null })
    })
    it('should keep outputs key alive when duplicate is renamed back to unique name', () => {
      // Step 1: rename var_2 -> var_1 (creates duplicate)
      const outputs = createOutputs({ var_1: 'string', var_2: 'number' })
      const afterFirst = collectRenameResult(outputs, ['var_1', 'var_2'], 1, 'var_1')
      expect(afterFirst.var_2).toBeUndefined()
      expect(afterFirst.var_1).toBeDefined()
      // Clean up first render before the second to avoid DOM collision
      cleanup()
      // Step 2: rename second var_1 -> var_2 (restores unique names)
      const afterSecond = collectRenameResult(afterFirst, ['var_1', 'var_1'], 1, 'var_2')
      // var_1 must survive because index 0 still uses it
      expect(afterSecond.var_1).toBeDefined()
      expect(afterSecond.var_2).toBeDefined()
    })
  })
  describe('removal with duplicate names', () => {
    it('should call onRemove with correct index when removing a duplicate', () => {
      const outputs = createOutputs({ var_1: 'string' })
      const onRemove = vi.fn()
      render(
        <OutputVarList
          readonly={false}
          outputs={outputs}
          outputKeyOrders={['var_1', 'var_1']}
          onChange={vi.fn()}
          onRemove={onRemove}
        />,
      )
      // The second remove button (index 1 in the row)
      const buttons = screen.getAllByRole('button')
      fireEvent.click(buttons[1])
      expect(onRemove).toHaveBeenCalledWith(1)
    })
  })
  describe('normal operation', () => {
    it('should render one row per outputKeyOrders entry', () => {
      const outputs = createOutputs({ a: 'string', b: 'number' })
      const onChange = vi.fn()
      render(
        <OutputVarList
          readonly={false}
          outputs={outputs}
          outputKeyOrders={['a', 'b']}
          onChange={onChange}
          onRemove={vi.fn()}
        />,
      )
      const inputs = screen.getAllByRole('textbox')
      expect(inputs).toHaveLength(2)
      expect(inputs[0]).toHaveValue('a')
      expect(inputs[1]).toHaveValue('b')
    })
    it('should call onChange with updated outputs when renaming', () => {
      const outputs = createOutputs({ var_1: 'string' })
      const onChange = vi.fn()
      render(
        <OutputVarList
          readonly={false}
          outputs={outputs}
          outputKeyOrders={['var_1']}
          onChange={onChange}
          onRemove={vi.fn()}
        />,
      )
      fireEvent.change(screen.getByRole('textbox'), { target: { value: 'new_name' } })
      expect(onChange).toHaveBeenCalledWith(
        expect.objectContaining({
          new_name: { type: 'string', children: null },
        }),
        0,
        'new_name',
      )
    })
    it('should call onRemove when remove button is clicked', () => {
      const outputs = createOutputs({ var_1: 'string' })
      const onRemove = vi.fn()
      render(
        <OutputVarList
          readonly={false}
          outputs={outputs}
          outputKeyOrders={['var_1']}
          onChange={vi.fn()}
          onRemove={onRemove}
        />,
      )
      fireEvent.click(screen.getByRole('button'))
      expect(onRemove).toHaveBeenCalledWith(0)
    })
    it('should render inputs as readonly when readonly is true', () => {
      const outputs = createOutputs({ var_1: 'string' })
      render(
        <OutputVarList
          readonly={true}
          outputs={outputs}
          outputKeyOrders={['var_1']}
          onChange={vi.fn()}
          onRemove={vi.fn()}
        />,
      )
      expect(screen.getByRole('textbox')).toHaveAttribute('readonly')
    })
  })
})

View File

@@ -59,7 +59,9 @@ const OutputVarList: FC<Props> = ({
const newOutputs = produce(outputs, (draft) => {
draft[newKey] = draft[oldKey]
delete draft[oldKey]
// Only delete old key if no other entry shares this name
if (!list.some((item, i) => i !== index && item.variable === oldKey))
delete draft[oldKey]
})
onChange(newOutputs, index, newKey)
}

View File

@@ -134,19 +134,24 @@ function useOutputVarList<T>({
return
}
const newOutputKeyOrders = outputKeyOrders.filter((_, i) => i !== index)
const newInputs = produce(inputs, (draft: any) => {
delete draft[varKey][key]
// Only delete from outputs when no remaining entry shares this name
if (!newOutputKeyOrders.includes(key))
delete draft[varKey][key]
if ((inputs as CodeNodeType).type === BlockEnum.Code && (inputs as CodeNodeType).error_strategy === ErrorHandleTypeEnum.defaultValue && varKey === 'outputs')
draft.default_value = getDefaultValue(draft as any)
})
setInputs(newInputs)
onOutputKeyOrdersChange(outputKeyOrders.filter((_, i) => i !== index))
const varId = nodesWithInspectVars.find(node => node.nodeId === id)?.vars.find((varItem) => {
return varItem.name === key
})?.id
if (varId)
deleteInspectVar(id, varId)
onOutputKeyOrdersChange(newOutputKeyOrders)
if (!newOutputKeyOrders.includes(key)) {
const varId = nodesWithInspectVars.find(node => node.nodeId === id)?.vars.find((varItem) => {
return varItem.name === key
})?.id
if (varId)
deleteInspectVar(id, varId)
}
}, [outputKeyOrders, isVarUsedInNodes, id, inputs, setInputs, onOutputKeyOrdersChange, nodesWithInspectVars, deleteInspectVar, showRemoveVarConfirm, varKey])
return {

View File

@@ -0,0 +1,249 @@
// Unit tests for the agent workflow node's canvas card (Node).
// use-config is fully mocked; ModelBar / ToolIcon / Group / SettingItem are
// replaced with stubs that serialize their props into text for assertions.
import type { ReactNode } from 'react'
import type { AgentNodeType } from '../types'
import type useConfig from '../use-config'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import { render, screen } from '@testing-library/react'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import { VarType } from '../../tool/types'
import Node from '../node'
// vi.hoisted so the spies exist before the hoisted vi.mock factories run.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockModelBar = vi.hoisted(() => vi.fn())
const mockToolIcon = vi.hoisted(() => vi.fn())
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
vi.mock('@/hooks/use-i18n', () => ({
  useRenderI18nObject: () => (value: string | { en_US?: string }) => typeof value === 'string' ? value : value.en_US || '',
}))
// ModelBar stub renders "param:provider/model" or "param:empty-model".
vi.mock('../components/model-bar', () => ({
  ModelBar: (props: { provider?: string, model?: string, param: string }) => {
    mockModelBar(props)
    return <div>{props.provider ? `${props.param}:${props.provider}/${props.model}` : `${props.param}:empty-model`}</div>
  },
}))
vi.mock('../components/tool-icon', () => ({
  ToolIcon: (props: { providerName: string }) => {
    mockToolIcon(props)
    return <div>{`tool:${props.providerName}`}</div>
  },
}))
vi.mock('../../_base/components/group', () => ({
  Group: ({ label, children }: { label: ReactNode, children: ReactNode }) => (
    <div>
      <div>{label}</div>
      {children}
    </div>
  ),
  GroupLabel: ({ className, children }: { className?: string, children: ReactNode }) => <div className={className}>{children}</div>,
}))
// SettingItem stub renders "label:status:tooltip" so status can be asserted via text.
vi.mock('../../_base/components/setting-item', () => ({
  SettingItem: ({
    label,
    status,
    tooltip,
    children,
  }: {
    label: ReactNode
    status?: string
    tooltip?: string
    children?: ReactNode
  }) => (
    <div>
      {`${label}:${status || 'normal'}:${tooltip || ''}`}
      {children}
    </div>
  ),
}))
// Factory for a strategy parameter definition; defaults to a required model selector.
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => ({
  name: 'requiredModel',
  type: FormTypeEnum.modelSelector,
  required: true,
  label: { en_US: 'Required Model' } as StrategyParamItem['label'],
  help: { en_US: 'Required model help' } as StrategyParamItem['help'],
  placeholder: { en_US: 'Required model placeholder' } as StrategyParamItem['placeholder'],
  scope: 'global',
  default: null,
  options: [],
  template: { enabled: false },
  auto_generate: { type: 'none' },
  ...overrides,
})
// Factory for node data with one optional model and single/multi tool params set.
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => ({
  title: 'Agent',
  desc: '',
  type: BlockEnum.Agent,
  output_schema: {},
  agent_strategy_provider_name: 'provider/agent',
  agent_strategy_name: 'react',
  agent_strategy_label: 'React Agent',
  plugin_unique_identifier: 'provider/agent:1.0.0',
  agent_parameters: {
    optionalModel: {
      type: VarType.constant,
      value: { provider: 'openai', model: 'gpt-4o' },
    },
    toolParam: {
      type: VarType.constant,
      value: { provider_name: 'author/tool-a' },
    },
    multiToolParam: {
      type: VarType.constant,
      value: [
        { provider_name: 'author/tool-b' },
        { provider_name: 'author/tool-c' },
      ],
    },
  },
  ...overrides,
})
// Factory for the mocked use-config return value; overrides patch individual fields.
const createConfigResult = (overrides: Partial<ReturnType<typeof useConfig>> = {}): ReturnType<typeof useConfig> => ({
  readOnly: false,
  inputs: createData(),
  setInputs: vi.fn(),
  handleVarListChange: vi.fn(),
  handleAddVariable: vi.fn(),
  currentStrategy: {
    identity: {
      author: 'provider',
      name: 'react',
      icon: 'icon',
      label: { en_US: 'React Agent' } as StrategyParamItem['label'],
      provider: 'provider/agent',
    },
    parameters: [
      createStrategyParam(),
      createStrategyParam({
        name: 'optionalModel',
        required: false,
      }),
      createStrategyParam({
        name: 'toolParam',
        type: FormTypeEnum.toolSelector,
        required: false,
      }),
      createStrategyParam({
        name: 'multiToolParam',
        type: FormTypeEnum.multiToolSelector,
        required: false,
      }),
    ],
    description: { en_US: 'agent description' } as StrategyParamItem['label'],
    output_schema: {},
    features: [],
  },
  formData: {},
  onFormChange: vi.fn(),
  currentStrategyStatus: {
    plugin: { source: 'marketplace', installed: true },
    isExistInPlugin: false,
  },
  strategyProvider: undefined,
  pluginDetail: ({
    declaration: {
      label: { en_US: 'Plugin Marketplace' } as never,
    },
  } as never),
  availableVars: [],
  availableNodesWithParent: [],
  outputSchema: [],
  handleMemoryChange: vi.fn(),
  isChatMode: true,
  ...overrides,
})
describe('agent/node', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseConfig.mockReturnValue(createConfigResult())
  })
  it('renders the not-set state when no strategy is configured', () => {
    mockUseConfig.mockReturnValue(createConfigResult({
      inputs: createData({
        agent_strategy_name: undefined,
        agent_strategy_label: undefined,
        agent_parameters: {},
      }),
      currentStrategy: undefined,
    }))
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    expect(screen.getByText('workflow.nodes.agent.strategyNotSet:normal:')).toBeInTheDocument()
    expect(mockModelBar).not.toHaveBeenCalled()
    expect(mockToolIcon).not.toHaveBeenCalled()
  })
  it('renders strategy status, required and selected model bars, and tool icons', () => {
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    // Default config has isExistInPlugin: false, so the strategy row shows an error status.
    expect(screen.getByText(/workflow.nodes.agent.strategy.shortLabel:error:/)).toHaveTextContent('React Agent')
    expect(screen.getByText(/workflow.nodes.agent.strategy.shortLabel:error:/)).toHaveTextContent('Plugin Marketplace')
    expect(screen.getByText('requiredModel:empty-model')).toBeInTheDocument()
    expect(screen.getByText('optionalModel:openai/gpt-4o')).toBeInTheDocument()
    expect(screen.getByText('tool:author/tool-a')).toBeInTheDocument()
    expect(screen.getByText('tool:author/tool-b')).toBeInTheDocument()
    expect(screen.getByText('tool:author/tool-c')).toBeInTheDocument()
    expect(mockModelBar).toHaveBeenCalledTimes(2)
    expect(mockToolIcon).toHaveBeenCalledTimes(3)
  })
  it('skips optional models and empty tool values when no configuration is provided', () => {
    mockUseConfig.mockReturnValue(createConfigResult({
      inputs: createData({
        agent_parameters: {},
      }),
      currentStrategy: {
        ...createConfigResult().currentStrategy!,
        parameters: [
          createStrategyParam({
            name: 'optionalModel',
            required: false,
          }),
          createStrategyParam({
            name: 'toolParam',
            type: FormTypeEnum.toolSelector,
            required: false,
          }),
        ],
      },
      currentStrategyStatus: {
        plugin: { source: 'marketplace', installed: true },
        isExistInPlugin: true,
      },
    }))
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    expect(mockModelBar).not.toHaveBeenCalled()
    expect(mockToolIcon).not.toHaveBeenCalled()
    expect(screen.queryByText('optionalModel:empty-model')).not.toBeInTheDocument()
  })
})

View File

@@ -0,0 +1,297 @@
// Unit tests for the agent workflow node's settings panel (Panel).
// use-config is fully mocked; AgentStrategy and MemoryConfig stubs expose
// buttons that fire the callbacks Panel wires up, so updates can be asserted.
import type { ReactNode } from 'react'
import type { AgentNodeType } from '../types'
import type useConfig from '../use-config'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import type { NodePanelProps } from '@/app/components/workflow/types'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import Panel from '../panel'
import { AgentFeature } from '../types'
// vi.hoisted so the spies exist before the hoisted vi.mock factories run.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockResetEditor = vi.hoisted(() => vi.fn())
const mockAgentStrategy = vi.hoisted(() => vi.fn())
const mockMemoryConfig = vi.hoisted(() => vi.fn())
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
vi.mock('../../../store', () => ({
  useStore: (selector: (state: { setControlPromptEditorRerenderKey: typeof mockResetEditor }) => unknown) => selector({
    setControlPromptEditorRerenderKey: mockResetEditor,
  }),
}))
// AgentStrategy stub records its props and offers buttons to trigger
// onStrategyChange / onFormValueChange with fixed payloads.
vi.mock('../../_base/components/agent-strategy', () => ({
  AgentStrategy: (props: {
    strategy?: {
      agent_strategy_provider_name: string
      agent_strategy_name: string
      agent_strategy_label: string
      agent_output_schema: AgentNodeType['output_schema']
      plugin_unique_identifier: string
      meta?: AgentNodeType['meta']
    }
    formSchema: Array<{ variable: string, tooltip?: StrategyParamItem['help'] }>
    formValue: Record<string, unknown>
    onStrategyChange: (strategy: {
      agent_strategy_provider_name: string
      agent_strategy_name: string
      agent_strategy_label: string
      agent_output_schema: AgentNodeType['output_schema']
      plugin_unique_identifier: string
      meta?: AgentNodeType['meta']
    }) => void
    onFormValueChange: (value: Record<string, unknown>) => void
  }) => {
    mockAgentStrategy(props)
    return (
      <div>
        <button
          type="button"
          onClick={() => props.onStrategyChange({
            agent_strategy_provider_name: 'provider/updated',
            agent_strategy_name: 'updated',
            agent_strategy_label: 'Updated Strategy',
            agent_output_schema: {
              properties: {
                structured: {
                  type: 'string',
                  description: 'structured output',
                },
              },
            },
            plugin_unique_identifier: 'provider/updated:1.0.0',
            meta: {
              version: '2.0.0',
            } as AgentNodeType['meta'],
          })}
        >
          change-strategy
        </button>
        <button type="button" onClick={() => props.onFormValueChange({ instruction: 'Use the tool' })}>
          change-form
        </button>
      </div>
    )
  },
}))
// MemoryConfig stub offers a button that fires onChange with a fixed memory payload.
vi.mock('../../_base/components/memory-config', () => ({
  __esModule: true,
  default: (props: {
    readonly?: boolean
    config: { data?: AgentNodeType['memory'] }
    onChange: (value?: AgentNodeType['memory']) => void
  }) => {
    mockMemoryConfig(props)
    return (
      <button
        type="button"
        onClick={() => props.onChange({
          window: {
            enabled: true,
            size: 8,
          },
          query_prompt_template: 'history',
        } as AgentNodeType['memory'])}
      >
        change-memory
      </button>
    )
  },
}))
// VarItem stub serializes each output var as "name:type:description" text.
vi.mock('../../_base/components/output-vars', () => ({
  __esModule: true,
  default: ({ children }: { children: ReactNode }) => <div>{children}</div>,
  VarItem: ({ name, type, description }: { name: string, type: string, description?: string }) => (
    <div>{`${name}:${type}:${description || ''}`}</div>
  ),
}))
// Factory for a strategy parameter definition; defaults to a required free-form field.
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => ({
  name: 'instruction',
  type: FormTypeEnum.any,
  required: true,
  label: { en_US: 'Instruction' } as StrategyParamItem['label'],
  help: { en_US: 'Instruction help' } as StrategyParamItem['help'],
  placeholder: { en_US: 'Instruction placeholder' } as StrategyParamItem['placeholder'],
  scope: 'global',
  default: null,
  options: [],
  template: { enabled: false },
  auto_generate: { type: 'none' },
  ...overrides,
})
// Factory for node data with a custom output schema and a disabled memory window.
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => ({
  title: 'Agent',
  desc: '',
  type: BlockEnum.Agent,
  output_schema: {
    properties: {
      summary: {
        type: 'string',
        description: 'summary output',
      },
    },
  },
  agent_strategy_provider_name: 'provider/agent',
  agent_strategy_name: 'react',
  agent_strategy_label: 'React Agent',
  plugin_unique_identifier: 'provider/agent:1.0.0',
  meta: { version: '1.0.0' } as AgentNodeType['meta'],
  memory: {
    window: {
      enabled: false,
      size: 3,
    },
    query_prompt_template: '',
  } as AgentNodeType['memory'],
  ...overrides,
})
// Factory for the mocked use-config return value; overrides patch individual fields.
const createConfigResult = (overrides: Partial<ReturnType<typeof useConfig>> = {}): ReturnType<typeof useConfig> => ({
  readOnly: false,
  inputs: createData(),
  setInputs: vi.fn(),
  handleVarListChange: vi.fn(),
  handleAddVariable: vi.fn(),
  currentStrategy: {
    identity: {
      author: 'provider',
      name: 'react',
      icon: 'icon',
      label: { en_US: 'React Agent' } as StrategyParamItem['label'],
      provider: 'provider/agent',
    },
    parameters: [
      createStrategyParam(),
      createStrategyParam({
        name: 'modelParam',
        type: FormTypeEnum.modelSelector,
        required: false,
      }),
    ],
    description: { en_US: 'agent description' } as StrategyParamItem['label'],
    output_schema: {},
    features: [AgentFeature.HISTORY_MESSAGES],
  },
  formData: {
    instruction: 'Plan and answer',
  },
  onFormChange: vi.fn(),
  currentStrategyStatus: {
    plugin: { source: 'marketplace', installed: true },
    isExistInPlugin: true,
  },
  strategyProvider: undefined,
  pluginDetail: undefined,
  availableVars: [],
  availableNodesWithParent: [],
  outputSchema: [{
    name: 'summary',
    type: 'String',
    description: 'summary output',
  }],
  handleMemoryChange: vi.fn(),
  isChatMode: true,
  ...overrides,
})
const panelProps = {} as NodePanelProps<AgentNodeType>['panelProps']
describe('agent/panel', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseConfig.mockReturnValue(createConfigResult())
  })
  it('renders strategy data, forwards strategy and form updates, and exposes output vars', async () => {
    const user = userEvent.setup()
    const setInputs = vi.fn()
    const onFormChange = vi.fn()
    const handleMemoryChange = vi.fn()
    mockUseConfig.mockReturnValue(createConfigResult({
      setInputs,
      onFormChange,
      handleMemoryChange,
    }))
    render(
      <Panel
        id="agent-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    // Built-in output vars plus the one from the custom output schema.
    expect(screen.getByText('text:String:workflow.nodes.agent.outputVars.text')).toBeInTheDocument()
    expect(screen.getByText('usage:object:workflow.nodes.agent.outputVars.usage')).toBeInTheDocument()
    expect(screen.getByText('files:Array[File]:workflow.nodes.agent.outputVars.files.title')).toBeInTheDocument()
    expect(screen.getByText('json:Array[Object]:workflow.nodes.agent.outputVars.json')).toBeInTheDocument()
    expect(screen.getByText('summary:String:summary output')).toBeInTheDocument()
    expect(mockAgentStrategy).toHaveBeenCalledWith(expect.objectContaining({
      formSchema: expect.arrayContaining([
        expect.objectContaining({
          variable: 'instruction',
          tooltip: { en_US: 'Instruction help' },
        }),
        expect.objectContaining({
          variable: 'modelParam',
        }),
      ]),
      formValue: {
        instruction: 'Plan and answer',
      },
    }))
    await user.click(screen.getByRole('button', { name: 'change-strategy' }))
    await user.click(screen.getByRole('button', { name: 'change-form' }))
    await user.click(screen.getByRole('button', { name: 'change-memory' }))
    expect(setInputs).toHaveBeenCalledWith(expect.objectContaining({
      agent_strategy_provider_name: 'provider/updated',
      agent_strategy_name: 'updated',
      agent_strategy_label: 'Updated Strategy',
      plugin_unique_identifier: 'provider/updated:1.0.0',
      output_schema: expect.objectContaining({
        properties: expect.objectContaining({
          structured: expect.any(Object),
        }),
      }),
    }))
    expect(onFormChange).toHaveBeenCalledWith({ instruction: 'Use the tool' })
    expect(handleMemoryChange).toHaveBeenCalledWith(expect.objectContaining({
      query_prompt_template: 'history',
    }))
    expect(mockResetEditor).toHaveBeenCalledTimes(1)
  })
  it('hides memory config when chat mode support is unavailable', () => {
    mockUseConfig.mockReturnValue(createConfigResult({
      isChatMode: false,
      currentStrategy: {
        ...createConfigResult().currentStrategy!,
        features: [],
      },
    }))
    render(
      <Panel
        id="agent-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(screen.queryByRole('button', { name: 'change-memory' })).not.toBeInTheDocument()
    expect(mockMemoryConfig).not.toHaveBeenCalled()
  })
})

View File

@@ -0,0 +1,422 @@
// Unit tests for the agent node's use-config / useStrategyInfo hooks.
// All workflow hooks and plugin/strategy services are mocked; tests assert
// the derived form data, strategy status resolution, output-schema mapping,
// setter helpers, and the legacy tool-selector value migration.
import type { AgentNodeType } from '../types'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import { act, renderHook, waitFor } from '@testing-library/react'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum, VarType as WorkflowVarType } from '@/app/components/workflow/types'
import { VarType } from '../../tool/types'
import useConfig, { useStrategyInfo } from '../use-config'
// vi.hoisted so the spies exist before the hoisted vi.mock factories run.
const mockUseNodesReadOnly = vi.hoisted(() => vi.fn())
const mockUseIsChatMode = vi.hoisted(() => vi.fn())
const mockUseNodeCrud = vi.hoisted(() => vi.fn())
const mockUseVarList = vi.hoisted(() => vi.fn())
const mockUseAvailableVarList = vi.hoisted(() => vi.fn())
const mockUseStrategyProviderDetail = vi.hoisted(() => vi.fn())
const mockUseFetchPluginsInMarketPlaceByIds = vi.hoisted(() => vi.fn())
const mockUseCheckInstalled = vi.hoisted(() => vi.fn())
const mockGenerateAgentToolValue = vi.hoisted(() => vi.fn())
const mockToolParametersToFormSchemas = vi.hoisted(() => vi.fn())
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: (...args: unknown[]) => mockUseNodesReadOnly(...args),
  useIsChatMode: (...args: unknown[]) => mockUseIsChatMode(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseNodeCrud(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseVarList(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-available-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseAvailableVarList(...args),
}))
vi.mock('@/service/use-strategy', () => ({
  useStrategyProviderDetail: (...args: unknown[]) => mockUseStrategyProviderDetail(...args),
}))
vi.mock('@/service/use-plugins', () => ({
  useFetchPluginsInMarketPlaceByIds: (...args: unknown[]) => mockUseFetchPluginsInMarketPlaceByIds(...args),
  useCheckInstalled: (...args: unknown[]) => mockUseCheckInstalled(...args),
}))
vi.mock('@/app/components/tools/utils/to-form-schema', () => ({
  generateAgentToolValue: (...args: unknown[]) => mockGenerateAgentToolValue(...args),
  toolParametersToFormSchemas: (...args: unknown[]) => mockToolParametersToFormSchemas(...args),
}))
// Factory for a strategy parameter definition; defaults to a required free-form field.
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => ({
  name: 'instruction',
  type: FormTypeEnum.any,
  required: true,
  label: { en_US: 'Instruction' } as StrategyParamItem['label'],
  help: { en_US: 'Instruction help' } as StrategyParamItem['help'],
  placeholder: { en_US: 'Instruction placeholder' } as StrategyParamItem['placeholder'],
  scope: 'global',
  default: null,
  options: [],
  template: { enabled: false },
  auto_generate: { type: 'none' },
  ...overrides,
})
// Legacy-format tool selector value: settings/parameters keyed by variable,
// with schemas describing which form each variable belongs to.
const createToolValue = () => ({
  settings: {
    api_key: 'secret',
  },
  parameters: {
    query: 'weather',
  },
  schemas: [
    {
      variable: 'api_key',
      form: 'form',
    },
    {
      variable: 'query',
      form: 'llm',
    },
  ],
})
// Factory for node data with a variable-backed instruction and a constant model param.
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => ({
  title: 'Agent',
  desc: '',
  type: BlockEnum.Agent,
  output_schema: {
    properties: {
      summary: {
        type: 'string',
        description: 'summary output',
      },
      items: {
        type: 'array',
        items: {
          type: 'number',
        },
        description: 'items output',
      },
    },
  },
  agent_strategy_provider_name: 'provider/agent',
  agent_strategy_name: 'react',
  agent_strategy_label: 'React Agent',
  plugin_unique_identifier: 'provider/agent:1.0.0',
  agent_parameters: {
    instruction: {
      type: VarType.variable,
      value: '#start.topic#',
    },
    modelParam: {
      type: VarType.constant,
      value: {
        provider: 'openai',
        model: 'gpt-4o',
      },
    },
  },
  meta: { version: '1.0.0' } as AgentNodeType['meta'],
  ...overrides,
})
describe('agent/use-config', () => {
  const providerRefetch = vi.fn()
  const marketplaceRefetch = vi.fn()
  const setInputs = vi.fn()
  const handleVarListChange = vi.fn()
  const handleAddVariable = vi.fn()
  let currentInputs: AgentNodeType
  beforeEach(() => {
    vi.clearAllMocks()
    currentInputs = createData({
      tool_node_version: '2',
    })
    mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false, getNodesReadOnly: () => false })
    mockUseIsChatMode.mockReturnValue(true)
    mockUseNodeCrud.mockImplementation(() => ({
      inputs: currentInputs,
      setInputs,
    }))
    mockUseVarList.mockReturnValue({
      handleVarListChange,
      handleAddVariable,
    } as never)
    mockUseAvailableVarList.mockReturnValue({
      availableVars: [{
        nodeId: 'node-1',
        title: 'Start',
        vars: [{
          variable: 'topic',
          type: WorkflowVarType.string,
        }],
      }],
      availableNodesWithParent: [{
        nodeId: 'node-1',
        title: 'Start',
      }],
    } as never)
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: false,
      data: {
        declaration: {
          strategies: [{
            identity: {
              name: 'react',
            },
            parameters: [
              createStrategyParam(),
              createStrategyParam({
                name: 'modelParam',
                type: FormTypeEnum.modelSelector,
                required: false,
              }),
            ],
          }],
        },
      },
      refetch: providerRefetch,
    } as never)
    mockUseFetchPluginsInMarketPlaceByIds.mockReturnValue({
      isLoading: false,
      data: {
        data: {
          plugins: [{ id: 'provider/agent' }],
        },
      },
      refetch: marketplaceRefetch,
    } as never)
    mockUseCheckInstalled.mockReturnValue({
      data: {
        plugins: [{
          declaration: {
            label: { en_US: 'Installed Agent Plugin' },
          },
        }],
      },
    } as never)
    // Pass schemas through unchanged; tag generated tool values with which
    // form ('llm' vs 'setting') and which variables they were built from.
    mockToolParametersToFormSchemas.mockImplementation(value => value as never)
    mockGenerateAgentToolValue.mockImplementation((_value, schemas, isLLM) => ({
      kind: isLLM ? 'llm' : 'setting',
      fields: (schemas as Array<{ variable: string }>).map(item => item.variable),
    }) as never)
  })
  it('returns an undefined strategy status while strategy data is still loading and can refetch dependencies', () => {
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: true,
      isError: false,
      data: undefined,
      refetch: providerRefetch,
    } as never)
    const { result } = renderHook(() => useStrategyInfo('provider/agent', 'react'))
    expect(result.current.strategyStatus).toBeUndefined()
    expect(result.current.strategy).toBeUndefined()
    act(() => {
      result.current.refetch()
    })
    // refetch() must refresh both the provider detail and the marketplace lookup.
    expect(providerRefetch).toHaveBeenCalledTimes(1)
    expect(marketplaceRefetch).toHaveBeenCalledTimes(1)
  })
  it('resolves strategy status for external plugins that are missing or not installed', () => {
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: true,
      data: {
        declaration: {
          strategies: [],
        },
      },
      refetch: providerRefetch,
    } as never)
    mockUseFetchPluginsInMarketPlaceByIds.mockReturnValue({
      isLoading: false,
      data: {
        data: {
          plugins: [],
        },
      },
      refetch: marketplaceRefetch,
    } as never)
    const { result } = renderHook(() => useStrategyInfo('provider/agent', 'react'))
    expect(result.current.strategyStatus).toEqual({
      plugin: {
        source: 'external',
        installed: false,
      },
      isExistInPlugin: false,
    })
  })
  it('exposes derived form data, strategy state, output schema, and setter helpers', () => {
    const { result } = renderHook(() => useConfig('agent-node', currentInputs))
    expect(result.current.readOnly).toBe(false)
    expect(result.current.isChatMode).toBe(true)
    expect(result.current.formData).toEqual({
      instruction: '#start.topic#',
      modelParam: {
        provider: 'openai',
        model: 'gpt-4o',
      },
    })
    expect(result.current.currentStrategyStatus).toEqual({
      plugin: {
        source: 'marketplace',
        installed: true,
      },
      isExistInPlugin: true,
    })
    expect(result.current.availableVars).toHaveLength(1)
    expect(result.current.availableNodesWithParent).toEqual([{
      nodeId: 'node-1',
      title: 'Start',
    }])
    // JSON-schema types are mapped to display types, including nested array items.
    expect(result.current.outputSchema).toEqual([
      { name: 'summary', type: 'String', description: 'summary output' },
      { name: 'items', type: 'Array[Number]', description: 'items output' },
    ])
    setInputs.mockClear()
    act(() => {
      result.current.onFormChange({
        instruction: '#start.updated#',
        modelParam: {
          provider: 'anthropic',
          model: 'claude-sonnet',
        },
      })
      result.current.handleMemoryChange({
        window: {
          enabled: true,
          size: 6,
        },
        query_prompt_template: 'history',
      } as AgentNodeType['memory'])
    })
    expect(setInputs).toHaveBeenNthCalledWith(1, expect.objectContaining({
      agent_parameters: {
        instruction: {
          type: VarType.variable,
          value: '#start.updated#',
        },
        modelParam: {
          type: VarType.constant,
          value: {
            provider: 'anthropic',
            model: 'claude-sonnet',
          },
        },
      },
    }))
    expect(setInputs).toHaveBeenNthCalledWith(2, expect.objectContaining({
      memory: {
        window: {
          enabled: true,
          size: 6,
        },
        query_prompt_template: 'history',
      },
    }))
    expect(result.current.handleVarListChange).toBe(handleVarListChange)
    expect(result.current.handleAddVariable).toBe(handleAddVariable)
    expect(result.current.pluginDetail).toEqual({
      declaration: {
        label: { en_US: 'Installed Agent Plugin' },
      },
    })
  })
  it('formats legacy tool selector values before exposing the node config', async () => {
    // tool_node_version undefined marks legacy data that must be migrated to '2'.
    currentInputs = createData({
      tool_node_version: undefined,
      agent_parameters: {
        toolParam: {
          type: VarType.constant,
          value: createToolValue(),
        },
        multiToolParam: {
          type: VarType.constant,
          value: [createToolValue()],
        },
      },
    })
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: false,
      data: {
        declaration: {
          strategies: [{
            identity: {
              name: 'react',
            },
            parameters: [
              createStrategyParam({
                name: 'toolParam',
                type: FormTypeEnum.toolSelector,
                required: false,
              }),
              createStrategyParam({
                name: 'multiToolParam',
                type: FormTypeEnum.multiToolSelector,
                required: false,
              }),
            ],
          }],
        },
      },
      refetch: providerRefetch,
    } as never)
    renderHook(() => useConfig('agent-node', currentInputs))
    await waitFor(() => {
      expect(setInputs).toHaveBeenCalledWith(expect.objectContaining({
        tool_node_version: '2',
        agent_parameters: expect.objectContaining({
          toolParam: expect.objectContaining({
            value: expect.objectContaining({
              settings: {
                kind: 'setting',
                fields: ['api_key'],
              },
              parameters: {
                kind: 'llm',
                fields: ['query'],
              },
            }),
          }),
          multiToolParam: expect.objectContaining({
            value: [expect.objectContaining({
              settings: {
                kind: 'setting',
                fields: ['api_key'],
              },
              parameters: {
                kind: 'llm',
                fields: ['query'],
              },
            })],
          }),
        }),
      }))
    })
  })
})

View File

@@ -0,0 +1,144 @@
import type { AgentNodeType } from '../types'
import type { InputVar } from '@/app/components/workflow/types'
import { renderHook } from '@testing-library/react'
import formatTracing from '@/app/components/workflow/run/utils/format-log'
import { BlockEnum, InputVarType } from '@/app/components/workflow/types'
import useNodeCrud from '../../_base/hooks/use-node-crud'
import { VarType } from '../../tool/types'
import { useStrategyInfo } from '../use-config'
import useSingleRunFormParams from '../use-single-run-form-params'
vi.mock('@/app/components/workflow/run/utils/format-log', () => ({
__esModule: true,
default: vi.fn(),
}))
vi.mock('../../_base/hooks/use-node-crud', () => ({
__esModule: true,
default: vi.fn(),
}))
vi.mock('../use-config', async () => {
const actual = await vi.importActual<typeof import('../use-config')>('../use-config')
return {
...actual,
useStrategyInfo: vi.fn(),
}
})
const mockFormatTracing = vi.mocked(formatTracing)
const mockUseNodeCrud = vi.mocked(useNodeCrud)
const mockUseStrategyInfo = vi.mocked(useStrategyInfo)
/**
 * Build a complete AgentNodeType test fixture.
 * Fields in `overrides` replace the corresponding defaults.
 */
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => {
  const defaults: AgentNodeType = {
    title: 'Agent',
    desc: '',
    type: BlockEnum.Agent,
    output_schema: {},
    agent_strategy_provider_name: 'provider/agent',
    agent_strategy_name: 'react',
    agent_strategy_label: 'React Agent',
    agent_parameters: {
      prompt: { type: VarType.variable, value: '#start.topic#' },
      summary: { type: VarType.variable, value: '#node-2.answer#' },
      count: { type: VarType.constant, value: 2 },
    },
  }
  return { ...defaults, ...overrides }
}
describe('agent/use-single-run-form-params', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Default CRUD state: the standard agent payload with a throwaway setter.
    mockUseNodeCrud.mockReturnValue({
      inputs: createData(),
      setInputs: vi.fn(),
    } as unknown as ReturnType<typeof useNodeCrud>)
    // Strategy metadata declares the three parameters referenced by agent_parameters.
    mockUseStrategyInfo.mockReturnValue({
      strategyProvider: undefined,
      strategy: {
        parameters: [
          { name: 'prompt', type: 'string' },
          { name: 'summary', type: 'string' },
          { name: 'count', type: 'number' },
        ],
      },
      strategyStatus: undefined,
      refetch: vi.fn(),
    } as unknown as ReturnType<typeof useStrategyInfo>)
    // The trace formatter resolves the node under test as a succeeded run.
    mockFormatTracing.mockReturnValue([{
      id: 'agent-node',
      status: 'succeeded',
    }] as unknown as ReturnType<typeof formatTracing>)
  })
  it('builds a single-run variable form, returns node info, and skips malformed dependent vars', () => {
    const setRunInputData = vi.fn()
    // Two input vars: one with a valid selector, one with a broken `variable` field.
    const getInputVars = vi.fn<() => InputVar[]>(() => [
      {
        label: 'Prompt',
        variable: '#start.topic#',
        type: InputVarType.textInput,
        required: true,
      },
      {
        label: 'Broken',
        variable: undefined as unknown as string,
        type: InputVarType.textInput,
        required: false,
      },
    ])
    const { result } = renderHook(() => useSingleRunFormParams({
      id: 'agent-node',
      payload: createData(),
      runInputData: { topic: 'finance' },
      runInputDataRef: { current: { topic: 'finance' } },
      getInputVars,
      setRunInputData,
      toVarInputs: () => [],
      runResult: { id: 'trace-1' } as never,
    }))
    // Only the variable-typed agent parameters are forwarded for resolution.
    expect(getInputVars).toHaveBeenCalledWith(['#start.topic#', '#node-2.answer#'])
    expect(result.current.forms).toHaveLength(1)
    expect(result.current.forms[0].inputs).toHaveLength(2)
    expect(result.current.forms[0].values).toEqual({ topic: 'finance' })
    // Node info comes from the formatted trace entry matching the node id.
    expect(result.current.nodeInfo).toEqual({
      id: 'agent-node',
      status: 'succeeded',
    })
    result.current.forms[0].onChange({ topic: 'updated' })
    expect(setRunInputData).toHaveBeenCalledWith({ topic: 'updated' })
    // The entry with an undefined variable is dropped from dependent vars.
    expect(result.current.getDependentVars()).toEqual([
      ['start', 'topic'],
    ])
  })
  it('returns an empty form list when no variable input is required and no run result is available', () => {
    const { result } = renderHook(() => useSingleRunFormParams({
      id: 'agent-node',
      payload: createData(),
      runInputData: {},
      runInputDataRef: { current: {} },
      getInputVars: () => [],
      setRunInputData: vi.fn(),
      toVarInputs: () => [],
      runResult: undefined as never,
    }))
    expect(result.current.forms).toEqual([])
    expect(result.current.nodeInfo).toBeUndefined()
    expect(result.current.getDependentVars()).toEqual([])
  })
})

View File

@ -0,0 +1,78 @@
import type { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { fireEvent, render, screen } from '@testing-library/react'
import { ModelBar } from '../model-bar'
// Minimal provider shape consumed by the mocked model selector.
type ModelProviderItem = {
  provider: string
  models: Array<{ model: string }>
}
// Per-model-type fixture data served by the mocked useModelList hook.
const mockModelLists = new Map<ModelTypeEnum, ModelProviderItem[]>()
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useModelList: (modelType: ModelTypeEnum) => ({
    data: mockModelLists.get(modelType) || [],
  }),
}))
// Render the selector as plain text ("provider/model:listLength") for assertions.
vi.mock('@/app/components/header/account-setting/model-provider-page/model-selector', () => ({
  default: ({
    defaultModel,
    modelList,
  }: {
    defaultModel?: { provider: string, model: string }
    modelList: ModelProviderItem[]
  }) => (
    <div>
      {defaultModel ? `${defaultModel.provider}/${defaultModel.model}` : 'no-model'}
      :
      {modelList.length}
    </div>
  ),
}))
// Render the status indicator as text so its color can be queried.
vi.mock('@/app/components/header/indicator', () => ({
  default: ({ color }: { color: string }) => <div>{`indicator:${color}`}</div>,
}))
describe('agent/model-bar', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockModelLists.clear()
    // Only the LLM list has an installed model; every other type is empty.
    mockModelLists.set('llm' as ModelTypeEnum, [{ provider: 'openai', models: [{ model: 'gpt-4o' }] }])
    mockModelLists.set('moderation' as ModelTypeEnum, [])
    mockModelLists.set('rerank' as ModelTypeEnum, [])
    mockModelLists.set('speech2text' as ModelTypeEnum, [])
    mockModelLists.set('text-embedding' as ModelTypeEnum, [])
    mockModelLists.set('tts' as ModelTypeEnum, [])
  })
  it('should render an empty readonly selector with a warning when no model is selected', () => {
    render(<ModelBar />)
    const emptySelector = screen.getByText((_, element) => element?.textContent === 'no-model:0')
    // Hover to open the tooltip before asserting on its content.
    fireEvent.mouseEnter(emptySelector)
    expect(emptySelector).toBeInTheDocument()
    expect(screen.getByText('indicator:red')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.agent.modelNotSelected')).toBeInTheDocument()
  })
  it('should render the selected model without warning when it is installed', () => {
    render(<ModelBar provider="openai" model="gpt-4o" />)
    expect(screen.getByText('openai/gpt-4o:1')).toBeInTheDocument()
    expect(screen.queryByText('indicator:red')).not.toBeInTheDocument()
  })
  it('should show a warning tooltip when the selected model is not installed', () => {
    // gpt-4.1 is absent from the mocked LLM list, so the bar should flag it.
    render(<ModelBar provider="openai" model="gpt-4.1" />)
    fireEvent.mouseEnter(screen.getByText('openai/gpt-4.1:1'))
    expect(screen.getByText('openai/gpt-4.1:1')).toBeInTheDocument()
    expect(screen.getByText('indicator:red')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.agent.modelNotInstallTooltip')).toBeInTheDocument()
  })
})

View File

@ -0,0 +1,113 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { ToolIcon } from '../tool-icon'
// Loose provider shape covering what ToolIcon reads from each tool list.
type ToolProvider = {
  id?: string
  name?: string
  icon?: string | { content: string, background: string }
  is_team_authorization?: boolean
}
// Mutable fixtures; `undefined` simulates a list that has not loaded yet.
let mockBuiltInTools: ToolProvider[] | undefined
let mockCustomTools: ToolProvider[] | undefined
let mockWorkflowTools: ToolProvider[] | undefined
let mockMcpTools: ToolProvider[] | undefined
let mockMarketplaceIcon: string | { content: string, background: string } | undefined
vi.mock('@/service/use-tools', () => ({
  useAllBuiltInTools: () => ({ data: mockBuiltInTools }),
  useAllCustomTools: () => ({ data: mockCustomTools }),
  useAllWorkflowTools: () => ({ data: mockWorkflowTools }),
  useAllMCPTools: () => ({ data: mockMcpTools }),
}))
// Object icons render through AppIcon as "app-icon:background:content".
vi.mock('@/app/components/base/app-icon', () => ({
  default: ({
    icon,
    background,
    className,
  }: {
    icon?: string
    background?: string
    className?: string
  }) => <div className={className}>{`app-icon:${background}:${icon}`}</div>,
}))
// Fallback glyph used while loading or after an image error.
vi.mock('@/app/components/base/icons/src/vender/other', () => ({
  Group: ({ className }: { className?: string }) => <div className={className}>group-icon</div>,
}))
vi.mock('@/app/components/header/indicator', () => ({
  default: ({ color }: { color: string }) => <div>{`indicator:${color}`}</div>,
}))
// Marketplace icon lookup is controlled by the mutable fixture above.
vi.mock('@/utils/get-icon', () => ({
  getIconFromMarketPlace: () => mockMarketplaceIcon,
}))
describe('agent/tool-icon', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Reset all lists to "loaded but empty" and clear the marketplace fallback.
    mockBuiltInTools = []
    mockCustomTools = []
    mockWorkflowTools = []
    mockMcpTools = []
    mockMarketplaceIcon = undefined
  })
  it('should render a string icon, recover from fetch errors, and keep installed tools warning-free', () => {
    mockBuiltInTools = [{
      name: 'author/tool-a',
      icon: 'https://example.com/tool-a.png',
      is_team_authorization: true,
    }]
    render(<ToolIcon id="tool-1" providerName="author/tool-a" />)
    const icon = screen.getByRole('img', { name: 'tool icon' })
    expect(icon).toHaveAttribute('src', 'https://example.com/tool-a.png')
    // Installed + authorized: no status indicator and no warning tooltip.
    expect(screen.queryByText(/indicator:/)).not.toBeInTheDocument()
    fireEvent.mouseEnter(icon)
    expect(screen.queryByText('workflow.nodes.agent.toolNotInstallTooltip')).not.toBeInTheDocument()
    // A broken image falls back to the generic group icon.
    fireEvent.error(icon)
    expect(screen.getByText('group-icon')).toBeInTheDocument()
  })
  it('should render authorization and installation warnings with the correct icon sources', () => {
    // Known workflow tool that is not authorized for the team → yellow warning.
    mockWorkflowTools = [{
      id: 'author/tool-b',
      icon: {
        content: 'B',
        background: '#fff',
      },
      is_team_authorization: false,
    }]
    const { rerender } = render(<ToolIcon id="tool-2" providerName="author/tool-b" />)
    fireEvent.mouseEnter(screen.getByText('app-icon:#fff:B'))
    expect(screen.getByText('indicator:yellow')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.agent.toolNotAuthorizedTooltip:{"tool":"tool-b"}')).toBeInTheDocument()
    // Unknown tool resolved via the marketplace → red "not installed" warning.
    mockWorkflowTools = []
    mockMarketplaceIcon = 'https://example.com/market-tool.png'
    rerender(<ToolIcon id="tool-3" providerName="market/tool-c" />)
    const marketplaceIcon = screen.getByRole('img', { name: 'tool icon' })
    fireEvent.mouseEnter(marketplaceIcon)
    expect(marketplaceIcon).toHaveAttribute('src', 'https://example.com/market-tool.png')
    expect(screen.getByText('indicator:red')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.agent.toolNotInstallTooltip:{"tool":"tool-c"}')).toBeInTheDocument()
  })
  it('should fall back to the group icon while tool data is still loading', () => {
    // `undefined` means the built-in list has not arrived yet.
    mockBuiltInTools = undefined
    render(<ToolIcon id="tool-4" providerName="author/tool-d" />)
    expect(screen.getByText('group-icon')).toBeInTheDocument()
    expect(screen.queryByText(/indicator:/)).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,92 @@
import type { AnswerNodeType } from '../types'
import type { PanelProps } from '@/types/workflow'
import { fireEvent, render, screen } from '@testing-library/react'
import { BlockEnum } from '@/app/components/workflow/types'
import Panel from '../panel'
// Props captured from the mocked prompt editor for assertions.
type MockEditorProps = {
  readOnly: boolean
  title: string
  value: string
  onChange: (value: string) => void
  nodesOutputVars: unknown[]
  availableNodes: unknown[]
}
// Hoisted so the vi.mock factories (which are hoisted too) can reference them.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockUseAvailableVarList = vi.hoisted(() => vi.fn())
const mockEditorRender = vi.hoisted(() => vi.fn())
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-available-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseAvailableVarList(...args),
}))
// The editor mock records its props and exposes a button that fires onChange.
vi.mock('@/app/components/workflow/nodes/_base/components/prompt/editor', () => ({
  __esModule: true,
  default: (props: MockEditorProps) => {
    mockEditorRender(props)
    return (
      <button type="button" onClick={() => props.onChange('Updated answer')}>
        {props.title}
        :
        {props.value}
      </button>
    )
  },
}))
/** Answer-node fixture; any field in `overrides` replaces the default. */
const createData = (overrides: Partial<AnswerNodeType> = {}): AnswerNodeType => {
  const base: AnswerNodeType = {
    title: 'Answer',
    desc: '',
    type: BlockEnum.Answer,
    variables: [],
    answer: 'Initial answer',
  }
  return { ...base, ...overrides }
}
describe('AnswerPanel', () => {
  const handleAnswerChange = vi.fn()
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseConfig.mockReturnValue({
      readOnly: false,
      inputs: createData(),
      handleAnswerChange,
      filterVar: vi.fn(),
    })
    mockUseAvailableVarList.mockReturnValue({
      availableVars: [{ variable: 'context', type: 'string' }],
      availableNodesWithParent: [{ value: 'node-1', label: 'Node 1' }],
    })
  })
  it('should pass editor state and available variables through to the prompt editor', () => {
    render(<Panel id="answer-node" data={createData()} panelProps={{} as PanelProps} />)
    // The editor mock renders "title:value" as the button's accessible name.
    expect(screen.getByRole('button', { name: 'workflow.nodes.answer.answer:Initial answer' })).toBeInTheDocument()
    expect(mockEditorRender).toHaveBeenCalledWith(expect.objectContaining({
      readOnly: false,
      title: 'workflow.nodes.answer.answer',
      value: 'Initial answer',
      nodesOutputVars: [{ variable: 'context', type: 'string' }],
      availableNodes: [{ value: 'node-1', label: 'Node 1' }],
      isSupportFileVar: true,
      justVar: true,
    }))
  })
  it('should delegate answer edits to use-config', () => {
    render(<Panel id="answer-node" data={createData()} panelProps={{} as PanelProps} />)
    // Clicking the mocked editor fires onChange('Updated answer').
    fireEvent.click(screen.getByRole('button', { name: 'workflow.nodes.answer.answer:Initial answer' }))
    expect(handleAnswerChange).toHaveBeenCalledWith('Updated answer')
  })
})

View File

@ -0,0 +1,81 @@
import type { AnswerNodeType } from '../types'
import { act, renderHook } from '@testing-library/react'
import { BlockEnum, VarType } from '@/app/components/workflow/types'
import useConfig from '../use-config'
// Hoisted spies so the vi.mock factories below can close over them.
const mockUseNodesReadOnly = vi.hoisted(() => vi.fn())
const mockUseNodeCrud = vi.hoisted(() => vi.fn())
const mockUseVarList = vi.hoisted(() => vi.fn())
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: () => mockUseNodesReadOnly(),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseNodeCrud(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseVarList(...args),
}))
/** Default answer-node payload for the hook under test, merged with `overrides`. */
const createPayload = (overrides: Partial<AnswerNodeType> = {}): AnswerNodeType => {
  const defaults: AnswerNodeType = {
    title: 'Answer',
    desc: '',
    type: BlockEnum.Answer,
    variables: [],
    answer: 'Initial answer',
  }
  return { ...defaults, ...overrides }
}
describe('answer/use-config', () => {
  const mockSetInputs = vi.fn()
  const mockHandleVarListChange = vi.fn()
  const mockHandleAddVariable = vi.fn()
  let currentInputs: AnswerNodeType
  beforeEach(() => {
    vi.clearAllMocks()
    currentInputs = createPayload()
    mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false })
    mockUseNodeCrud.mockReturnValue({
      inputs: currentInputs,
      setInputs: mockSetInputs,
    })
    mockUseVarList.mockReturnValue({
      handleVarListChange: mockHandleVarListChange,
      handleAddVariable: mockHandleAddVariable,
    })
  })
  it('should update the answer text and expose var-list handlers', () => {
    const { result } = renderHook(() => useConfig('answer-node', currentInputs))
    act(() => {
      result.current.handleAnswerChange('Updated answer')
    })
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      answer: 'Updated answer',
    }))
    // Var-list handlers are passed through untouched from use-var-list.
    expect(result.current.handleVarListChange).toBe(mockHandleVarListChange)
    expect(result.current.handleAddVariable).toBe(mockHandleAddVariable)
    expect(result.current.readOnly).toBe(false)
  })
  it('should filter out array-object variables from the prompt editor picker', () => {
    const { result } = renderHook(() => useConfig('answer-node', currentInputs))
    expect(result.current.filterVar({
      variable: 'items',
      type: VarType.arrayObject,
    })).toBe(false)
    expect(result.current.filterVar({
      variable: 'message',
      type: VarType.string,
    })).toBe(true)
  })
})

View File

@ -0,0 +1,150 @@
import type { AssignerNodeOperation, AssignerNodeType } from '../types'
import { render, screen } from '@testing-library/react'
import { useNodes } from 'reactflow'
import { BlockEnum } from '@/app/components/workflow/types'
import Node from '../node'
import { AssignerNodeInputType, WriteMode } from '../types'
// Keep reactflow intact but make useNodes controllable per test.
vi.mock('reactflow', async () => {
  const actual = await vi.importActual<typeof import('reactflow')>('reactflow')
  return {
    ...actual,
    useNodes: vi.fn(),
  }
})
// Flatten the variable label to "nodeTitle:nodeType:selector" text, keeping
// the right slot (the operation label) rendered next to it.
vi.mock('@/app/components/workflow/nodes/_base/components/variable/variable-label', () => ({
  VariableLabelInNode: ({
    variables,
    nodeTitle,
    nodeType,
    rightSlot,
  }: {
    variables: string[]
    nodeTitle?: string
    nodeType?: BlockEnum
    rightSlot?: React.ReactNode
  }) => (
    <div>
      <span>{`${nodeTitle}:${nodeType}:${variables.join('.')}`}</span>
      {rightSlot}
    </div>
  ),
}))
const mockUseNodes = vi.mocked(useNodes)
/** Single v2 assigner operation fixture; `overrides` wins over the defaults. */
const createOperation = (overrides: Partial<AssignerNodeOperation> = {}): AssignerNodeOperation => {
  const base: AssignerNodeOperation = {
    variable_selector: ['node-1', 'count'],
    input_type: AssignerNodeInputType.variable,
    operation: WriteMode.overwrite,
    value: ['node-2', 'result'],
  }
  return { ...base, ...overrides }
}
/** Assigner node payload (version 2) with one default operation. */
const createData = (overrides: Partial<AssignerNodeType> = {}): AssignerNodeType => {
  const base: AssignerNodeType = {
    title: 'Assigner',
    desc: '',
    type: BlockEnum.VariableAssigner,
    version: '2',
    items: [createOperation()],
  }
  return { ...base, ...overrides }
}
describe('assigner/node', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Graph contains a regular node and the start node (used to resolve sys.* vars).
    mockUseNodes.mockReturnValue([
      {
        id: 'node-1',
        data: {
          title: 'Answer',
          type: BlockEnum.Answer,
        },
      },
      {
        id: 'start-node',
        data: {
          title: 'Start',
          type: BlockEnum.Start,
        },
      },
    ] as ReturnType<typeof useNodes>)
  })
  it('renders the empty-state hint when no assignable variable is configured', () => {
    render(
      <Node
        id="assigner-node"
        data={createData({
          items: [createOperation({ variable_selector: [] })],
        })}
      />,
    )
    expect(screen.getByText('workflow.nodes.assigner.varNotSet')).toBeInTheDocument()
  })
  it('renders both version 2 and legacy previews with resolved node labels', () => {
    const { container, rerender } = render(
      <Node
        id="assigner-node"
        data={createData()}
      />,
    )
    expect(screen.getByText('Answer:answer:node-1.count')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.assigner.operations.over-write')).toBeInTheDocument()
    // Legacy (pre-v2) payloads use assigned_variable_selector/write_mode instead of items.
    rerender(
      <Node
        id="assigner-node"
        data={{
          title: 'Legacy Assigner',
          desc: '',
          type: BlockEnum.VariableAssigner,
          assigned_variable_selector: ['sys', 'query'],
          write_mode: WriteMode.append,
        } as unknown as AssignerNodeType}
      />,
    )
    expect(screen.getByText('Start:start:sys.query')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.assigner.operations.append')).toBeInTheDocument()
    // A legacy payload with no selector renders nothing at all.
    rerender(
      <Node
        id="assigner-node"
        data={{
          title: 'Legacy Assigner',
          desc: '',
          type: BlockEnum.VariableAssigner,
          assigned_variable_selector: [],
          write_mode: WriteMode.append,
        } as unknown as AssignerNodeType}
      />,
    )
    expect(container).toBeEmptyDOMElement()
  })
  it('skips empty v2 operations and resolves system variables through the start node', () => {
    render(
      <Node
        id="assigner-node"
        data={createData({
          items: [
            createOperation({ variable_selector: [] }),
            createOperation({
              variable_selector: ['sys', 'query'],
              operation: WriteMode.append,
            }),
          ],
        })}
      />,
    )
    expect(screen.getByText('Start:start:sys.query')).toBeInTheDocument()
    // An operation without a selector must not produce a garbage label.
    expect(screen.queryByText('undefined:undefined:')).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,119 @@
import type { AssignerNodeOperation, AssignerNodeType } from '../types'
import type { PanelProps } from '@/types/workflow'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '@/app/components/workflow/types'
import Panel from '../panel'
import { AssignerNodeInputType, WriteMode } from '../types'
// Props captured from the mocked var-list component.
type MockVarListProps = {
  readonly: boolean
  nodeId: string
  list: AssignerNodeOperation[]
  onChange: (list: AssignerNodeOperation[]) => void
}
// Hoisted so the vi.mock factories (hoisted to the top) can reference them.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockUseHandleAddOperationItem = vi.hoisted(() => vi.fn())
const mockVarListRender = vi.hoisted(() => vi.fn())
/** Default v2 assigner operation for the panel tests, merged with `overrides`. */
const createOperation = (overrides: Partial<AssignerNodeOperation> = {}): AssignerNodeOperation => {
  const defaults: AssignerNodeOperation = {
    variable_selector: ['node-1', 'count'],
    input_type: AssignerNodeInputType.variable,
    operation: WriteMode.overwrite,
    value: ['node-2', 'result'],
  }
  return { ...defaults, ...overrides }
}
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
vi.mock('../hooks', () => ({
  useHandleAddOperationItem: () => mockUseHandleAddOperationItem,
}))
// The var-list mock records its props, prints the selectors as text, and
// exposes a button that emits a replacement list through onChange.
vi.mock('../components/var-list', () => ({
  __esModule: true,
  default: (props: MockVarListProps) => {
    mockVarListRender(props)
    return (
      <div>
        <div>{props.list.map(item => item.variable_selector.join('.')).join(',')}</div>
        <button type="button" onClick={() => props.onChange([createOperation({ variable_selector: ['node-1', 'updated'] })])}>
          emit-list-change
        </button>
      </div>
    )
  },
}))
/** Assigner node payload (v2) with a single default operation. */
const createData = (overrides: Partial<AssignerNodeType> = {}): AssignerNodeType => {
  const base: AssignerNodeType = {
    title: 'Assigner',
    desc: '',
    type: BlockEnum.VariableAssigner,
    version: '2',
    items: [createOperation()],
  }
  return { ...base, ...overrides }
}
// Panel wrapper props are irrelevant to these tests; an empty stub suffices.
const panelProps = {} as PanelProps
describe('assigner/panel', () => {
  const handleOperationListChanges = vi.fn()
  beforeEach(() => {
    vi.clearAllMocks()
    // The add-item hook returns the original list plus one empty operation.
    mockUseHandleAddOperationItem.mockReturnValue([
      createOperation(),
      createOperation({ variable_selector: [] }),
    ])
    mockUseConfig.mockReturnValue({
      readOnly: false,
      inputs: createData(),
      handleOperationListChanges,
      getAssignedVarType: vi.fn(),
      getToAssignedVarType: vi.fn(),
      writeModeTypesNum: [],
      writeModeTypesArr: [],
      writeModeTypes: [],
      filterAssignedVar: vi.fn(),
      filterToAssignedVar: vi.fn(),
    })
  })
  it('passes the resolved config to the variable list and appends operations through the add button', async () => {
    const user = userEvent.setup()
    render(
      <Panel
        id="assigner-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(screen.getByText('workflow.nodes.assigner.variables')).toBeInTheDocument()
    expect(screen.getByText('node-1.count')).toBeInTheDocument()
    expect(mockVarListRender).toHaveBeenCalledWith(expect.objectContaining({
      readonly: false,
      nodeId: 'assigner-node',
      list: createData().items,
    }))
    // The first button in the panel is the "add operation" control.
    await user.click(screen.getAllByRole('button')[0]!)
    expect(mockUseHandleAddOperationItem).toHaveBeenCalledWith(createData().items)
    expect(handleOperationListChanges).toHaveBeenCalledWith([
      createOperation(),
      createOperation({ variable_selector: [] }),
    ])
    // Changes emitted by the var list are forwarded to use-config unchanged.
    await user.click(screen.getByRole('button', { name: 'emit-list-change' }))
    expect(handleOperationListChanges).toHaveBeenCalledWith([
      createOperation({ variable_selector: ['node-1', 'updated'] }),
    ])
  })
})

View File

@ -0,0 +1,85 @@
import type { AssignerNodeOperation, AssignerNodeType } from '../types'
import type { InputVar } from '@/app/components/workflow/types'
import { renderHook } from '@testing-library/react'
import { BlockEnum, InputVarType } from '@/app/components/workflow/types'
import useNodeCrud from '../../_base/hooks/use-node-crud'
import { AssignerNodeInputType, WriteMode } from '../types'
import useSingleRunFormParams from '../use-single-run-form-params'
// Stub the node CRUD hook; each test supplies its own inputs/setInputs pair.
vi.mock('../../_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
const mockUseNodeCrud = vi.mocked(useNodeCrud)
/** Default v2 assigner operation targeting node-1.target; `overrides` wins. */
const createOperation = (overrides: Partial<AssignerNodeOperation> = {}): AssignerNodeOperation => {
  const defaults: AssignerNodeOperation = {
    variable_selector: ['node-1', 'target'],
    input_type: AssignerNodeInputType.variable,
    operation: WriteMode.overwrite,
    value: ['node-2', 'result'],
  }
  return { ...defaults, ...overrides }
}
/**
 * V2 assigner payload covering all input kinds: variable overwrite/append,
 * clear, and constant set/increment operations.
 */
const createData = (overrides: Partial<AssignerNodeType> = {}): AssignerNodeType => {
  const base: AssignerNodeType = {
    title: 'Assigner',
    desc: '',
    type: BlockEnum.VariableAssigner,
    version: '2',
    items: [
      createOperation(),
      createOperation({ operation: WriteMode.append, value: ['node-3', 'items'] }),
      createOperation({ operation: WriteMode.clear, value: ['node-4', 'unused'] }),
      createOperation({ operation: WriteMode.set, input_type: AssignerNodeInputType.constant, value: 'fixed' }),
      createOperation({ operation: WriteMode.increment, input_type: AssignerNodeInputType.constant, value: 2 }),
    ],
  }
  return { ...base, ...overrides }
}
describe('assigner/use-single-run-form-params', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseNodeCrud.mockReturnValue({
      inputs: createData(),
      setInputs: vi.fn(),
    } as unknown as ReturnType<typeof useNodeCrud>)
  })
  it('exposes only variable-driven dependencies in the single-run form', () => {
    const setRunInputData = vi.fn()
    const varInputs: InputVar[] = [{
      label: 'Result',
      variable: 'result',
      type: InputVarType.textInput,
      required: true,
    }]
    const varSelectorsToVarInputs = vi.fn(() => varInputs)
    const { result } = renderHook(() => useSingleRunFormParams({
      id: 'assigner-node',
      payload: createData(),
      runInputData: { result: 'hello' },
      runInputDataRef: { current: {} },
      getInputVars: () => [],
      setRunInputData,
      toVarInputs: () => [],
      varSelectorsToVarInputs,
    }))
    // clear and constant operations are excluded; only variable values remain.
    expect(varSelectorsToVarInputs).toHaveBeenCalledWith([
      ['node-2', 'result'],
      ['node-3', 'items'],
    ])
    expect(result.current.forms).toHaveLength(1)
    expect(result.current.forms[0].inputs).toEqual(varInputs)
    expect(result.current.forms[0].values).toEqual({ result: 'hello' })
    result.current.forms[0].onChange({ result: 'updated' })
    expect(setRunInputData).toHaveBeenCalledWith({ result: 'updated' })
    expect(result.current.getDependentVars()).toEqual([
      ['node-2', 'result'],
      ['node-3', 'items'],
    ])
  })
})

View File

@ -0,0 +1,52 @@
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { VarType } from '@/app/components/workflow/types'
import { WriteMode } from '../../types'
import OperationSelector from '../operation-selector'
describe('assigner/operation-selector', () => {
  it('shows numeric write modes and emits the selected operation', async () => {
    const user = userEvent.setup()
    const onSelect = vi.fn()
    render(
      <OperationSelector
        value={WriteMode.overwrite}
        onSelect={onSelect}
        assignedVarType={VarType.number}
        writeModeTypes={[WriteMode.overwrite, WriteMode.clear, WriteMode.set]}
        writeModeTypesArr={[WriteMode.overwrite, WriteMode.clear]}
        writeModeTypesNum={[WriteMode.increment]}
      />,
    )
    // Clicking the trigger opens the dropdown listing all applicable modes.
    await user.click(screen.getByText('workflow.nodes.assigner.operations.over-write'))
    expect(screen.getByText('workflow.nodes.assigner.operations.title')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.assigner.operations.clear')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.assigner.operations.set')).toBeInTheDocument()
    expect(screen.getByText('workflow.nodes.assigner.operations.+=')).toBeInTheDocument()
    // Pick the numeric "+=" option (the last matching element is the menu item).
    await user.click(screen.getAllByText('workflow.nodes.assigner.operations.+=').at(-1)!)
    expect(onSelect).toHaveBeenCalledWith({ value: WriteMode.increment, name: WriteMode.increment })
  })
  it('does not open when the selector is disabled', async () => {
    const user = userEvent.setup()
    render(
      <OperationSelector
        value={WriteMode.overwrite}
        onSelect={vi.fn()}
        disabled
        assignedVarType={VarType.string}
        writeModeTypes={[WriteMode.overwrite]}
      />,
    )
    await user.click(screen.getByText('workflow.nodes.assigner.operations.over-write'))
    expect(screen.queryByText('workflow.nodes.assigner.operations.title')).not.toBeInTheDocument()
  })
})

Some files were not shown because too many files have changed in this diff Show More