Mirror of https://github.com/langgenius/dify.git (synced 2026-01-26 23:05:45 +08:00)

Compare commits

5 Commits: fix/workfl ... build/bill

| Author | SHA1 | Date |
|---|---|---|
| | dd94f81272 | |
| | 14afad0deb | |
| | 7c12e923b6 | |
| | b9f1d65d4f | |
| | b4e2af96e2 | |
@@ -470,7 +470,7 @@ class AdvancedChatDraftRunLoopNodeApi(Resource):
 Run draft workflow loop node
 """
 current_user, _ = current_account_with_tenant()
-args = LoopNodeRunPayload.model_validate(console_ns.payload or {}).model_dump(exclude_none=True)
+args = LoopNodeRunPayload.model_validate(console_ns.payload or {})

 try:
 response = AppGenerateService.generate_single_loop(

@@ -508,7 +508,7 @@ class WorkflowDraftRunLoopNodeApi(Resource):
 Run draft workflow loop node
 """
 current_user, _ = current_account_with_tenant()
-args = LoopNodeRunPayload.model_validate(console_ns.payload or {}).model_dump(exclude_none=True)
+args = LoopNodeRunPayload.model_validate(console_ns.payload or {})

 try:
 response = AppGenerateService.generate_single_loop(

@@ -999,6 +999,7 @@ class DraftWorkflowTriggerRunApi(Resource):
 if not event:
 return jsonable_encoder({"status": "waiting", "retry_in": LISTENING_RETRY_IN})
 workflow_args = dict(event.workflow_args)

+workflow_args[SKIP_PREPARE_USER_INPUTS_KEY] = True
 return helper.compact_generate_response(
 AppGenerateService.generate(

@@ -1147,6 +1148,7 @@ class DraftWorkflowTriggerRunAllApi(Resource):

 try:
 workflow_args = dict(trigger_debug_event.workflow_args)

+workflow_args[SKIP_PREPARE_USER_INPUTS_KEY] = True
 response = AppGenerateService.generate(
 app_model=app_model,
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import contextvars
 import logging
 import threading
 import uuid
 from collections.abc import Generator, Mapping
-from typing import Any, Literal, Union, overload
+from typing import TYPE_CHECKING, Any, Literal, Union, overload

 from flask import Flask, current_app
 from pydantic import ValidationError

@@ -13,6 +15,9 @@ from sqlalchemy.orm import Session, sessionmaker
 import contexts
 from configs import dify_config
 from constants import UUID_NIL
+
+if TYPE_CHECKING:
+    from controllers.console.app.workflow import LoopNodeRunPayload
 from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
 from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager
 from core.app.apps.advanced_chat.app_runner import AdvancedChatAppRunner

@@ -304,7 +309,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 workflow: Workflow,
 node_id: str,
 user: Account | EndUser,
-args: Mapping,
+args: LoopNodeRunPayload,
 streaming: bool = True,
 ) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]:
 """

@@ -320,7 +325,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 if not node_id:
 raise ValueError("node_id is required")

-if args.get("inputs") is None:
+if args.inputs is None:
 raise ValueError("inputs is required")

 # convert to app config

@@ -338,7 +343,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 stream=streaming,
 invoke_from=InvokeFrom.DEBUGGER,
 extras={"auto_generate_conversation_name": False},
-single_loop_run=AdvancedChatAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args["inputs"]),
+single_loop_run=AdvancedChatAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args.inputs),
 )
 contexts.plugin_tool_providers.set({})
 contexts.plugin_tool_providers_lock.set(threading.Lock())
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import contextvars
 import logging
 import threading
 import uuid
 from collections.abc import Generator, Mapping, Sequence
-from typing import Any, Literal, Union, overload
+from typing import TYPE_CHECKING, Any, Literal, Union, overload

 from flask import Flask, current_app
 from pydantic import ValidationError

@@ -40,6 +42,9 @@ from models import Account, App, EndUser, Workflow, WorkflowNodeExecutionTrigger
 from models.enums import WorkflowRunTriggeredFrom
 from services.workflow_draft_variable_service import DraftVarLoader, WorkflowDraftVariableService

+if TYPE_CHECKING:
+    from controllers.console.app.workflow import LoopNodeRunPayload
+
 SKIP_PREPARE_USER_INPUTS_KEY = "_skip_prepare_user_inputs"

 logger = logging.getLogger(__name__)

@@ -381,7 +386,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
 workflow: Workflow,
 node_id: str,
 user: Account | EndUser,
-args: Mapping[str, Any],
+args: LoopNodeRunPayload,
 streaming: bool = True,
 ) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], None, None]:
 """

@@ -397,7 +402,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
 if not node_id:
 raise ValueError("node_id is required")

-if args.get("inputs") is None:
+if args.inputs is None:
 raise ValueError("inputs is required")

 # convert to app config

@@ -413,7 +418,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
 stream=streaming,
 invoke_from=InvokeFrom.DEBUGGER,
 extras={"auto_generate_conversation_name": False},
-single_loop_run=WorkflowAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args["inputs"]),
+single_loop_run=WorkflowAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args.inputs or {}),
 workflow_execution_id=str(uuid.uuid4()),
 )
 contexts.plugin_tool_providers.set({})
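Across these hunks the single-loop debug entry points stop flattening the validated payload into a plain dict and instead pass the pydantic model through, so the generators read `args.inputs` rather than `args["inputs"]`. A minimal sketch of the difference follows; the `LoopNodeRunPayload` shape below is an assumption for illustration only (the real model is defined in `controllers/console/app/workflow.py` and is only imported, not shown, in this diff):

```python
from collections.abc import Mapping
from typing import Any

from pydantic import BaseModel


class LoopNodeRunPayload(BaseModel):
    """Assumed minimal shape: an optional `inputs` mapping."""

    inputs: Mapping[str, Any] | None = None


payload = {"inputs": {"item": "hello"}}

# Before: the validated model was immediately flattened back into a dict,
# so downstream code used subscript access and lost the typed interface.
old_args = LoopNodeRunPayload.model_validate(payload).model_dump(exclude_none=True)
assert old_args["inputs"] == {"item": "hello"}

# After: the model itself travels to generate_single_loop(), so the generators
# can check `args.inputs is None` and read `args.inputs` directly.
new_args = LoopNodeRunPayload.model_validate(payload)
assert new_args.inputs == {"item": "hello"}
```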
api/enums/hosted_provider.py (new file, 21 lines)

@@ -0,0 +1,21 @@
+from enum import StrEnum
+
+
+class HostedTrialProvider(StrEnum):
+    """
+    Enum representing hosted model provider names for trial access.
+    """
+
+    OPENAI = "langgenius/openai/openai"
+    ANTHROPIC = "langgenius/anthropic/anthropic"
+    GEMINI = "langgenius/gemini/google"
+    X = "langgenius/x/x"
+    DEEPSEEK = "langgenius/deepseek/deepseek"
+    TONGYI = "langgenius/tongyi/tongyi"
+
+    @property
+    def config_key(self) -> str:
+        """Return the config key used in dify_config (e.g., HOSTED_{config_key}_PAID_ENABLED)."""
+        if self == HostedTrialProvider.X:
+            return "XAI"
+        return self.name
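The `config_key` property only bridges the enum member names to the flag names already used in `dify_config`; the one irregular case is `X`, which maps to `XAI`. Below is a minimal sketch of that mapping, assuming `dify_config` exposes boolean attributes named `HOSTED_<config_key>_PAID_ENABLED` and `HOSTED_<config_key>_TRIAL_ENABLED` (as the `getattr` calls in `feature_service.py` further down suggest); the `SimpleNamespace` is a stand-in so the snippet runs outside the Dify application context:

```python
from types import SimpleNamespace

from enums.hosted_provider import HostedTrialProvider

# Hypothetical flag values; the real ones come from dify_config / environment.
config = SimpleNamespace(
    HOSTED_OPENAI_PAID_ENABLED=True,
    HOSTED_OPENAI_TRIAL_ENABLED=True,
    HOSTED_XAI_PAID_ENABLED=True,
    HOSTED_XAI_TRIAL_ENABLED=False,
)

for provider in HostedTrialProvider:
    paid_key = f"HOSTED_{provider.config_key}_PAID_ENABLED"
    trial_key = f"HOSTED_{provider.config_key}_TRIAL_ENABLED"
    # A provider is offered for trial only when both flags are enabled.
    offered = getattr(config, paid_key, False) and getattr(config, trial_key, False)
    print(f"{provider.value}: {paid_key} & {trial_key} -> offered={offered}")
```

With these assumed values only `langgenius/openai/openai` would end up in `system_features.trial_models`.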
@@ -1,6 +1,8 @@
+from __future__ import annotations
+
 import uuid
 from collections.abc import Generator, Mapping
-from typing import Any, Union
+from typing import TYPE_CHECKING, Any, Union

 from configs import dify_config
 from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator

@@ -18,6 +20,9 @@ from services.errors.app import QuotaExceededError, WorkflowIdFormatError, Workf
 from services.errors.llm import InvokeRateLimitError
 from services.workflow_service import WorkflowService

+if TYPE_CHECKING:
+    from controllers.console.app.workflow import LoopNodeRunPayload
+

 class AppGenerateService:
 @classmethod

@@ -165,7 +170,9 @@ class AppGenerateService:
 raise ValueError(f"Invalid app mode {app_model.mode}")

 @classmethod
-def generate_single_loop(cls, app_model: App, user: Account, node_id: str, args: Any, streaming: bool = True):
+def generate_single_loop(
+    cls, app_model: App, user: Account, node_id: str, args: LoopNodeRunPayload, streaming: bool = True
+):
 if app_model.mode == AppMode.ADVANCED_CHAT:
 workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
 return AdvancedChatAppGenerator.convert_to_event_stream(
@@ -143,6 +143,7 @@ class BillingService:
 raise ValueError("Invalid arguments.")
 if method == "POST" and response.status_code != httpx.codes.OK:
 raise ValueError(f"Unable to send request to {url}. Please try again later or contact support.")
+logger.info("billing_service: %s _send_request: response: %s", method, response.json())
 return response.json()

 @staticmethod

@@ -165,6 +166,7 @@
 def delete_account(cls, account_id: str):
 """Delete account."""
 params = {"account_id": account_id}
+logger.info("billing_service: delete_account: params: %s", params)
 return cls._send_request("DELETE", "/account/", params=params)

 @classmethod
@@ -4,6 +4,7 @@ from pydantic import BaseModel, ConfigDict, Field
 from configs import dify_config
 from enums.cloud_plan import CloudPlan
+from enums.hosted_provider import HostedTrialProvider
 from services.billing_service import BillingService
 from services.enterprise.enterprise_service import EnterpriseService

@@ -170,6 +171,7 @@ class SystemFeatureModel(BaseModel):
 plugin_installation_permission: PluginInstallationPermissionModel = PluginInstallationPermissionModel()
 enable_change_email: bool = True
 plugin_manager: PluginManagerModel = PluginManagerModel()
+trial_models: list[str] = []
 enable_trial_app: bool = False
 enable_explore_banner: bool = False

@@ -227,9 +229,21 @@ class FeatureService:
 system_features.is_allow_register = dify_config.ALLOW_REGISTER
 system_features.is_allow_create_workspace = dify_config.ALLOW_CREATE_WORKSPACE
 system_features.is_email_setup = dify_config.MAIL_TYPE is not None and dify_config.MAIL_TYPE != ""
+system_features.trial_models = cls._fulfill_trial_models_from_env()
 system_features.enable_trial_app = dify_config.ENABLE_TRIAL_APP
 system_features.enable_explore_banner = dify_config.ENABLE_EXPLORE_BANNER
+
+@classmethod
+def _fulfill_trial_models_from_env(cls) -> list[str]:
+    return [
+        provider.value
+        for provider in HostedTrialProvider
+        if (
+            getattr(dify_config, f"HOSTED_{provider.config_key}_PAID_ENABLED", False)
+            and getattr(dify_config, f"HOSTED_{provider.config_key}_TRIAL_ENABLED", False)
+        )
+    ]

 @classmethod
 def _fulfill_params_from_env(cls, features: FeatureModel):
 features.can_replace_logo = dify_config.CAN_REPLACE_LOGO
@@ -14,10 +14,13 @@ logger = logging.getLogger(__name__)
 @shared_task(queue="dataset")
 def delete_account_task(account_id):
 with session_factory.create_session() as session:
+logger.info("delete_account_task: account_id: %s", account_id)
 account = session.query(Account).where(Account.id == account_id).first()
 try:
 if dify_config.BILLING_ENABLED:
+logger.info("delete_account_task: before delete_account: %s", account_id)
 BillingService.delete_account(account_id)
+logger.info("delete_account_task: after delete_account: %s", account_id)
 except Exception:
 logger.exception("Failed to delete account %s from billing service.", account_id)
 raise

@@ -27,3 +30,4 @@ def delete_account_task(account_id):
 return
 # send success email
 send_deletion_success_task.delay(account.email)
+logger.info("delete_account_task: delete_account successfully: %s", account_id)
@@ -1,705 +0,0 @@
/**
 * Test Suite for useNodesSyncDraft Hook
 *
 * PURPOSE:
 * This hook handles syncing the workflow draft to the server. The key fix being tested
 * is the error handling behavior when a `draft_workflow_not_sync` error occurs.
 *
 * MULTI-TAB PROBLEM SCENARIO:
 * 1. User opens the same workflow in Tab A and Tab B (both have hash: v1)
 * 2. Tab A saves successfully, server returns new hash: v2
 * 3. Tab B tries to save with old hash: v1, server returns 400 error with code
 *    'draft_workflow_not_sync'
 * 4. BEFORE FIX: handleRefreshWorkflowDraft() was called without args, which fetched
 *    the draft AND overwrote the canvas - user lost unsaved changes in Tab B
 * 5. AFTER FIX: handleRefreshWorkflowDraft(true) is called, which fetches the draft but
 *    only updates the hash (notUpdateCanvas=true), preserving the user's canvas changes
 *
 * TESTING STRATEGY:
 * We don't simulate actual tab switching UI behavior. Instead, we mock the API to
 * return a `draft_workflow_not_sync` error and verify:
 * - The hook calls handleRefreshWorkflowDraft(true) - not handleRefreshWorkflowDraft()
 * - This ensures canvas data is preserved while the hash is updated for retry
 *
 * This is behavior-driven testing - we verify "what the code does when receiving
 * specific API errors" rather than simulating complete user interaction flows.
 * True multi-tab integration testing would require E2E frameworks like Playwright.
 */
|
||||
|
||||
import { act, renderHook, waitFor } from '@testing-library/react'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { useNodesSyncDraft } from './use-nodes-sync-draft'
|
||||
|
||||
// Mock reactflow store
|
||||
const mockGetNodes = vi.fn()
|
||||
|
||||
type MockEdge = {
|
||||
id: string
|
||||
source: string
|
||||
target: string
|
||||
data: Record<string, unknown>
|
||||
}
|
||||
|
||||
const mockStoreState: {
|
||||
getNodes: ReturnType<typeof vi.fn>
|
||||
edges: MockEdge[]
|
||||
transform: number[]
|
||||
} = {
|
||||
getNodes: mockGetNodes,
|
||||
edges: [],
|
||||
transform: [0, 0, 1],
|
||||
}
|
||||
vi.mock('reactflow', () => ({
|
||||
useStoreApi: () => ({
|
||||
getState: () => mockStoreState,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock features store
|
||||
const mockFeaturesState = {
|
||||
features: {
|
||||
opening: { enabled: false, opening_statement: '', suggested_questions: [] },
|
||||
suggested: {},
|
||||
text2speech: {},
|
||||
speech2text: {},
|
||||
citation: {},
|
||||
moderation: {},
|
||||
file: {},
|
||||
},
|
||||
}
|
||||
vi.mock('@/app/components/base/features/hooks', () => ({
|
||||
useFeaturesStore: () => ({
|
||||
getState: () => mockFeaturesState,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock workflow service
|
||||
const mockSyncWorkflowDraft = vi.fn()
|
||||
vi.mock('@/service/workflow', () => ({
|
||||
syncWorkflowDraft: (...args: unknown[]) => mockSyncWorkflowDraft(...args),
|
||||
}))
|
||||
|
||||
// Mock useNodesReadOnly
|
||||
const mockGetNodesReadOnly = vi.fn()
|
||||
vi.mock('@/app/components/workflow/hooks/use-workflow', () => ({
|
||||
useNodesReadOnly: () => ({
|
||||
getNodesReadOnly: mockGetNodesReadOnly,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock useSerialAsyncCallback - pass through the callback
|
||||
vi.mock('@/app/components/workflow/hooks/use-serial-async-callback', () => ({
|
||||
useSerialAsyncCallback: (callback: (...args: unknown[]) => unknown) => callback,
|
||||
}))
|
||||
|
||||
// Mock workflow store
|
||||
const mockSetSyncWorkflowDraftHash = vi.fn()
|
||||
const mockSetDraftUpdatedAt = vi.fn()
|
||||
|
||||
const createMockWorkflowStoreState = (overrides = {}) => ({
|
||||
appId: 'test-app-id',
|
||||
conversationVariables: [],
|
||||
environmentVariables: [],
|
||||
syncWorkflowDraftHash: 'current-hash-123',
|
||||
isWorkflowDataLoaded: true,
|
||||
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
|
||||
setDraftUpdatedAt: mockSetDraftUpdatedAt,
|
||||
...overrides,
|
||||
})
|
||||
|
||||
const mockWorkflowStoreGetState = vi.fn()
|
||||
vi.mock('@/app/components/workflow/store', () => ({
|
||||
useWorkflowStore: () => ({
|
||||
getState: mockWorkflowStoreGetState,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock useWorkflowRefreshDraft (THE KEY DEPENDENCY FOR THIS TEST)
|
||||
const mockHandleRefreshWorkflowDraft = vi.fn()
|
||||
vi.mock('.', () => ({
|
||||
useWorkflowRefreshDraft: () => ({
|
||||
handleRefreshWorkflowDraft: mockHandleRefreshWorkflowDraft,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock API_PREFIX
|
||||
vi.mock('@/config', () => ({
|
||||
API_PREFIX: '/api',
|
||||
}))
|
||||
|
||||
// Create a mock error response that mimics the actual API error
|
||||
const createMockErrorResponse = (code: string) => {
|
||||
const errorBody = { code, message: 'Draft not in sync' }
|
||||
let bodyUsed = false
|
||||
|
||||
return {
|
||||
json: vi.fn().mockImplementation(() => {
|
||||
bodyUsed = true
|
||||
return Promise.resolve(errorBody)
|
||||
}),
|
||||
get bodyUsed() {
|
||||
return bodyUsed
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
describe('useNodesSyncDraft', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
mockGetNodesReadOnly.mockReturnValue(false)
|
||||
mockGetNodes.mockReturnValue([
|
||||
{ id: 'node-1', type: 'start', data: { type: 'start' } },
|
||||
{ id: 'node-2', type: 'llm', data: { type: 'llm' } },
|
||||
])
|
||||
mockStoreState.edges = [
|
||||
{ id: 'edge-1', source: 'node-1', target: 'node-2', data: {} },
|
||||
]
|
||||
mockWorkflowStoreGetState.mockReturnValue(createMockWorkflowStoreState())
|
||||
mockSyncWorkflowDraft.mockResolvedValue({
|
||||
hash: 'new-hash-456',
|
||||
updated_at: Date.now(),
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks()
|
||||
})
|
||||
|
||||
describe('doSyncWorkflowDraft function', () => {
|
||||
it('should return doSyncWorkflowDraft function', () => {
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
expect(result.current.doSyncWorkflowDraft).toBeDefined()
|
||||
expect(typeof result.current.doSyncWorkflowDraft).toBe('function')
|
||||
})
|
||||
|
||||
it('should return syncWorkflowDraftWhenPageClose function', () => {
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
expect(result.current.syncWorkflowDraftWhenPageClose).toBeDefined()
|
||||
expect(typeof result.current.syncWorkflowDraftWhenPageClose).toBe('function')
|
||||
})
|
||||
})
|
||||
|
||||
describe('successful sync', () => {
|
||||
it('should call syncWorkflowDraft service on successful sync', async () => {
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith({
|
||||
url: '/apps/test-app-id/workflows/draft',
|
||||
params: expect.objectContaining({
|
||||
hash: 'current-hash-123',
|
||||
graph: expect.objectContaining({
|
||||
nodes: expect.any(Array),
|
||||
edges: expect.any(Array),
|
||||
viewport: expect.any(Object),
|
||||
}),
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
it('should update syncWorkflowDraftHash on success', async () => {
|
||||
mockSyncWorkflowDraft.mockResolvedValue({
|
||||
hash: 'new-hash-789',
|
||||
updated_at: 1234567890,
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-789')
|
||||
})
|
||||
|
||||
it('should update draftUpdatedAt on success', async () => {
|
||||
const updatedAt = 1234567890
|
||||
mockSyncWorkflowDraft.mockResolvedValue({
|
||||
hash: 'new-hash',
|
||||
updated_at: updatedAt,
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSetDraftUpdatedAt).toHaveBeenCalledWith(updatedAt)
|
||||
})
|
||||
|
||||
it('should call onSuccess callback on success', async () => {
|
||||
const onSuccess = vi.fn()
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSuccess })
|
||||
})
|
||||
|
||||
expect(onSuccess).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should call onSettled callback after success', async () => {
|
||||
const onSettled = vi.fn()
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSettled })
|
||||
})
|
||||
|
||||
expect(onSettled).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('sync error handling - draft_workflow_not_sync (THE KEY FIX)', () => {
|
||||
/**
 * This is THE KEY TEST for the bug fix.
 *
 * SCENARIO: Multi-tab editing
 * 1. User opens workflow in Tab A and Tab B
 * 2. Tab A saves draft successfully, gets new hash
 * 3. Tab B tries to save with old hash
 * 4. Server returns 400 with code 'draft_workflow_not_sync'
 *
 * BEFORE FIX:
 * - handleRefreshWorkflowDraft() was called without arguments
 * - This would fetch draft AND overwrite the canvas
 * - User loses their unsaved changes in Tab B
 *
 * AFTER FIX:
 * - handleRefreshWorkflowDraft(true) is called
 * - This fetches draft but DOES NOT overwrite canvas
 * - Only hash is updated for the next sync attempt
 * - User's unsaved changes are preserved
 */
|
||||
it('should call handleRefreshWorkflowDraft with notUpdateCanvas=true when draft_workflow_not_sync error occurs', async () => {
|
||||
const mockError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// THE KEY ASSERTION: handleRefreshWorkflowDraft must be called with true
|
||||
await waitFor(() => {
|
||||
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledWith(true)
|
||||
})
|
||||
})
|
||||
|
||||
it('should NOT call handleRefreshWorkflowDraft when notRefreshWhenSyncError is true', async () => {
|
||||
const mockError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
// First parameter is notRefreshWhenSyncError
|
||||
await result.current.doSyncWorkflowDraft(true)
|
||||
})
|
||||
|
||||
// Wait a bit for async operations
|
||||
await new Promise(resolve => setTimeout(resolve, 100))
|
||||
|
||||
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should call onError callback when draft_workflow_not_sync error occurs', async () => {
|
||||
const mockError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
const onError = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onError })
|
||||
})
|
||||
|
||||
expect(onError).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should call onSettled callback after error', async () => {
|
||||
const mockError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
const onSettled = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSettled })
|
||||
})
|
||||
|
||||
expect(onSettled).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('other error handling', () => {
|
||||
it('should NOT call handleRefreshWorkflowDraft for non-draft_workflow_not_sync errors', async () => {
|
||||
const mockError = createMockErrorResponse('some_other_error')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// Wait a bit for async operations
|
||||
await new Promise(resolve => setTimeout(resolve, 100))
|
||||
|
||||
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle error without json method', async () => {
|
||||
const mockError = new Error('Network error')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
const onError = vi.fn()
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onError })
|
||||
})
|
||||
|
||||
expect(onError).toHaveBeenCalled()
|
||||
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle error with bodyUsed already true', async () => {
|
||||
const mockError = {
|
||||
json: vi.fn(),
|
||||
bodyUsed: true,
|
||||
}
|
||||
mockSyncWorkflowDraft.mockRejectedValue(mockError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// Should not call json() when bodyUsed is true
|
||||
expect(mockError.json).not.toHaveBeenCalled()
|
||||
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('read-only mode', () => {
|
||||
it('should not sync when nodes are read-only', async () => {
|
||||
mockGetNodesReadOnly.mockReturnValue(true)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should not sync on page close when nodes are read-only', () => {
|
||||
mockGetNodesReadOnly.mockReturnValue(true)
|
||||
|
||||
// Mock sendBeacon
|
||||
const mockSendBeacon = vi.fn()
|
||||
Object.defineProperty(navigator, 'sendBeacon', {
|
||||
value: mockSendBeacon,
|
||||
writable: true,
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.syncWorkflowDraftWhenPageClose()
|
||||
})
|
||||
|
||||
expect(mockSendBeacon).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('workflow data not loaded', () => {
|
||||
it('should not sync when workflow data is not loaded', async () => {
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockWorkflowStoreState({ isWorkflowDataLoaded: false }),
|
||||
)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('no appId', () => {
|
||||
it('should not sync when appId is not set', async () => {
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockWorkflowStoreState({ appId: null }),
|
||||
)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('node filtering', () => {
|
||||
it('should filter out temp nodes', async () => {
|
||||
mockGetNodes.mockReturnValue([
|
||||
{ id: 'node-1', type: 'start', data: { type: 'start' } },
|
||||
{ id: 'node-temp', type: 'custom', data: { type: 'custom', _isTempNode: true } },
|
||||
{ id: 'node-2', type: 'llm', data: { type: 'llm' } },
|
||||
])
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
params: expect.objectContaining({
|
||||
graph: expect.objectContaining({
|
||||
nodes: expect.not.arrayContaining([
|
||||
expect.objectContaining({ id: 'node-temp' }),
|
||||
]),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
})
|
||||
|
||||
it('should remove internal underscore properties from nodes', async () => {
|
||||
mockGetNodes.mockReturnValue([
|
||||
{
|
||||
id: 'node-1',
|
||||
type: 'start',
|
||||
data: {
|
||||
type: 'start',
|
||||
_internalProp: 'should be removed',
|
||||
_anotherInternal: true,
|
||||
publicProp: 'should remain',
|
||||
},
|
||||
},
|
||||
])
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
|
||||
const sentNode = callArgs.params.graph.nodes[0]
|
||||
|
||||
expect(sentNode.data).not.toHaveProperty('_internalProp')
|
||||
expect(sentNode.data).not.toHaveProperty('_anotherInternal')
|
||||
expect(sentNode.data).toHaveProperty('publicProp', 'should remain')
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge filtering', () => {
|
||||
it('should filter out temp edges', async () => {
|
||||
mockStoreState.edges = [
|
||||
{ id: 'edge-1', source: 'node-1', target: 'node-2', data: {} },
|
||||
{ id: 'edge-temp', source: 'node-1', target: 'node-3', data: { _isTemp: true } },
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
|
||||
const sentEdges = callArgs.params.graph.edges
|
||||
|
||||
expect(sentEdges).toHaveLength(1)
|
||||
expect(sentEdges[0].id).toBe('edge-1')
|
||||
})
|
||||
|
||||
it('should remove internal underscore properties from edges', async () => {
|
||||
mockStoreState.edges = [
|
||||
{
|
||||
id: 'edge-1',
|
||||
source: 'node-1',
|
||||
target: 'node-2',
|
||||
data: {
|
||||
_internalEdgeProp: 'should be removed',
|
||||
publicEdgeProp: 'should remain',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
|
||||
const sentEdge = callArgs.params.graph.edges[0]
|
||||
|
||||
expect(sentEdge.data).not.toHaveProperty('_internalEdgeProp')
|
||||
expect(sentEdge.data).toHaveProperty('publicEdgeProp', 'should remain')
|
||||
})
|
||||
})
|
||||
|
||||
describe('viewport handling', () => {
|
||||
it('should send current viewport from transform', async () => {
|
||||
mockStoreState.transform = [100, 200, 1.5]
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
params: expect.objectContaining({
|
||||
graph: expect.objectContaining({
|
||||
viewport: { x: 100, y: 200, zoom: 1.5 },
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('multi-tab concurrent editing scenario (END-TO-END TEST)', () => {
|
||||
/**
 * Simulates the complete multi-tab scenario to verify the fix works correctly.
 *
 * Scenario:
 * 1. Tab A and Tab B both have the workflow open with hash 'hash-v1'
 * 2. Tab A saves successfully, server returns 'hash-v2'
 * 3. Tab B tries to save with 'hash-v1', gets 'draft_workflow_not_sync' error
 * 4. Tab B should only update hash to 'hash-v2', not overwrite canvas
 * 5. Tab B can now retry save with correct hash
 */
|
||||
it('should preserve canvas data during hash conflict resolution', async () => {
|
||||
// Initial state: both tabs have hash-v1
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockWorkflowStoreState({ syncWorkflowDraftHash: 'hash-v1' }),
|
||||
)
|
||||
|
||||
// Tab B tries to save with old hash, server returns error
|
||||
const syncError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(syncError)
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
// Tab B attempts to sync
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// Verify the sync was attempted with old hash
|
||||
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
params: expect.objectContaining({
|
||||
hash: 'hash-v1',
|
||||
}),
|
||||
}),
|
||||
)
|
||||
|
||||
// Verify handleRefreshWorkflowDraft was called with true (not overwrite canvas)
|
||||
await waitFor(() => {
|
||||
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledWith(true)
|
||||
})
|
||||
|
||||
// The key assertion: only one argument (true) was passed
|
||||
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(1)
|
||||
expect(mockHandleRefreshWorkflowDraft.mock.calls[0]).toEqual([true])
|
||||
})
|
||||
|
||||
it('should handle multiple consecutive sync failures gracefully', async () => {
|
||||
// Create fresh error for each call to avoid bodyUsed issue
|
||||
mockSyncWorkflowDraft
|
||||
.mockRejectedValueOnce(createMockErrorResponse('draft_workflow_not_sync'))
|
||||
.mockRejectedValueOnce(createMockErrorResponse('draft_workflow_not_sync'))
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
// First sync attempt
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// Wait for first refresh call
|
||||
await waitFor(() => {
|
||||
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
|
||||
// Second sync attempt
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft()
|
||||
})
|
||||
|
||||
// Both should call handleRefreshWorkflowDraft with true
|
||||
await waitFor(() => {
|
||||
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(2)
|
||||
})
|
||||
|
||||
mockHandleRefreshWorkflowDraft.mock.calls.forEach((call) => {
|
||||
expect(call).toEqual([true])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('callbacks behavior', () => {
|
||||
it('should not call onSuccess when sync fails', async () => {
|
||||
const syncError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(syncError)
|
||||
const onSuccess = vi.fn()
|
||||
const onError = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSuccess, onError })
|
||||
})
|
||||
|
||||
expect(onSuccess).not.toHaveBeenCalled()
|
||||
expect(onError).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should always call onSettled regardless of success or failure', async () => {
|
||||
const onSettled = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useNodesSyncDraft())
|
||||
|
||||
// Test success case
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSettled })
|
||||
})
|
||||
expect(onSettled).toHaveBeenCalledTimes(1)
|
||||
|
||||
// Reset
|
||||
onSettled.mockClear()
|
||||
|
||||
// Test failure case
|
||||
const syncError = createMockErrorResponse('draft_workflow_not_sync')
|
||||
mockSyncWorkflowDraft.mockRejectedValue(syncError)
|
||||
|
||||
await act(async () => {
|
||||
await result.current.doSyncWorkflowDraft(false, { onSettled })
|
||||
})
|
||||
expect(onSettled).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -115,7 +115,7 @@ export const useNodesSyncDraft = () => {
 if (error && error.json && !error.bodyUsed) {
 error.json().then((err: any) => {
 if (err.code === 'draft_workflow_not_sync' && !notRefreshWhenSyncError)
-handleRefreshWorkflowDraft(true)
+handleRefreshWorkflowDraft()
 })
 }
 callback?.onError?.()
@@ -1,556 +0,0 @@
/**
 * Test Suite for useWorkflowRefreshDraft Hook
 *
 * PURPOSE:
 * This hook is responsible for refreshing workflow draft data from the server.
 * The key fix being tested is the `notUpdateCanvas` parameter behavior.
 *
 * MULTI-TAB PROBLEM SCENARIO:
 * 1. User opens the same workflow in Tab A and Tab B (both have hash: v1)
 * 2. Tab A saves successfully, server returns new hash: v2
 * 3. Tab B tries to save with old hash: v1, server returns 400 error (draft_workflow_not_sync)
 * 4. BEFORE FIX: handleRefreshWorkflowDraft() was called without args, which fetched
 *    draft AND overwrote canvas - user lost unsaved changes in Tab B
 * 5. AFTER FIX: handleRefreshWorkflowDraft(true) is called, which fetches draft but
 *    only updates hash, preserving user's canvas changes
 *
 * TESTING STRATEGY:
 * We don't simulate actual tab switching UI behavior. Instead, we test the hook's
 * response to specific inputs:
 * - When notUpdateCanvas=true: should NOT call handleUpdateWorkflowCanvas
 * - When notUpdateCanvas=false/undefined: should call handleUpdateWorkflowCanvas
 *
 * This is behavior-driven testing - we verify "what the code does when given specific
 * inputs" rather than simulating complete user interaction flows.
 */
|
||||
|
||||
import { act, renderHook, waitFor } from '@testing-library/react'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { useWorkflowRefreshDraft } from './use-workflow-refresh-draft'
|
||||
|
||||
// Mock the workflow service
|
||||
const mockFetchWorkflowDraft = vi.fn()
|
||||
vi.mock('@/service/workflow', () => ({
|
||||
fetchWorkflowDraft: (...args: unknown[]) => mockFetchWorkflowDraft(...args),
|
||||
}))
|
||||
|
||||
// Mock the workflow update hook
|
||||
const mockHandleUpdateWorkflowCanvas = vi.fn()
|
||||
vi.mock('@/app/components/workflow/hooks', () => ({
|
||||
useWorkflowUpdate: () => ({
|
||||
handleUpdateWorkflowCanvas: mockHandleUpdateWorkflowCanvas,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Mock store state
|
||||
const mockSetSyncWorkflowDraftHash = vi.fn()
|
||||
const mockSetIsSyncingWorkflowDraft = vi.fn()
|
||||
const mockSetEnvironmentVariables = vi.fn()
|
||||
const mockSetEnvSecrets = vi.fn()
|
||||
const mockSetConversationVariables = vi.fn()
|
||||
const mockSetIsWorkflowDataLoaded = vi.fn()
|
||||
const mockCancelDebouncedSync = vi.fn()
|
||||
|
||||
const createMockStoreState = (overrides = {}) => ({
|
||||
appId: 'test-app-id',
|
||||
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
|
||||
setIsSyncingWorkflowDraft: mockSetIsSyncingWorkflowDraft,
|
||||
setEnvironmentVariables: mockSetEnvironmentVariables,
|
||||
setEnvSecrets: mockSetEnvSecrets,
|
||||
setConversationVariables: mockSetConversationVariables,
|
||||
setIsWorkflowDataLoaded: mockSetIsWorkflowDataLoaded,
|
||||
isWorkflowDataLoaded: true,
|
||||
debouncedSyncWorkflowDraft: {
|
||||
cancel: mockCancelDebouncedSync,
|
||||
},
|
||||
...overrides,
|
||||
})
|
||||
|
||||
const mockWorkflowStoreGetState = vi.fn()
|
||||
vi.mock('@/app/components/workflow/store', () => ({
|
||||
useWorkflowStore: () => ({
|
||||
getState: mockWorkflowStoreGetState,
|
||||
}),
|
||||
}))
|
||||
|
||||
// Default mock response from fetchWorkflowDraft
|
||||
const createMockDraftResponse = (overrides = {}) => ({
|
||||
hash: 'new-hash-12345',
|
||||
graph: {
|
||||
nodes: [{ id: 'node-1', type: 'start', data: {} }],
|
||||
edges: [{ id: 'edge-1', source: 'node-1', target: 'node-2' }],
|
||||
viewport: { x: 100, y: 200, zoom: 1.5 },
|
||||
},
|
||||
environment_variables: [
|
||||
{ id: 'env-1', name: 'API_KEY', value: 'secret-key', value_type: 'secret' },
|
||||
{ id: 'env-2', name: 'BASE_URL', value: 'https://api.example.com', value_type: 'string' },
|
||||
],
|
||||
conversation_variables: [
|
||||
{ id: 'conv-1', name: 'user_input', value: 'test' },
|
||||
],
|
||||
...overrides,
|
||||
})
|
||||
|
||||
describe('useWorkflowRefreshDraft', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
mockWorkflowStoreGetState.mockReturnValue(createMockStoreState())
|
||||
mockFetchWorkflowDraft.mockResolvedValue(createMockDraftResponse())
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks()
|
||||
})
|
||||
|
||||
describe('handleRefreshWorkflowDraft function', () => {
|
||||
it('should return handleRefreshWorkflowDraft function', () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
expect(result.current.handleRefreshWorkflowDraft).toBeDefined()
|
||||
expect(typeof result.current.handleRefreshWorkflowDraft).toBe('function')
|
||||
})
|
||||
})
|
||||
|
||||
describe('notUpdateCanvas parameter behavior (THE KEY FIX)', () => {
|
||||
it('should NOT call handleUpdateWorkflowCanvas when notUpdateCanvas is true', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchWorkflowDraft).toHaveBeenCalledWith('/apps/test-app-id/workflows/draft')
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
|
||||
})
|
||||
|
||||
// THE KEY ASSERTION: Canvas should NOT be updated when notUpdateCanvas is true
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should call handleUpdateWorkflowCanvas when notUpdateCanvas is false', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(false)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchWorkflowDraft).toHaveBeenCalledWith('/apps/test-app-id/workflows/draft')
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
// Canvas SHOULD be updated when notUpdateCanvas is false
|
||||
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
|
||||
nodes: [{ id: 'node-1', type: 'start', data: {} }],
|
||||
edges: [{ id: 'edge-1', source: 'node-1', target: 'node-2' }],
|
||||
viewport: { x: 100, y: 200, zoom: 1.5 },
|
||||
})
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
|
||||
})
|
||||
})
|
||||
|
||||
it('should call handleUpdateWorkflowCanvas when notUpdateCanvas is undefined (default)', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
// Canvas SHOULD be updated when notUpdateCanvas is undefined
|
||||
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
it('should still update hash even when notUpdateCanvas is true', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
|
||||
})
|
||||
|
||||
// Verify canvas was NOT updated
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should still update environment variables when notUpdateCanvas is true', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([
|
||||
{ id: 'env-1', name: 'API_KEY', value: '[__HIDDEN__]', value_type: 'secret' },
|
||||
{ id: 'env-2', name: 'BASE_URL', value: 'https://api.example.com', value_type: 'string' },
|
||||
])
|
||||
})
|
||||
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should still update env secrets when notUpdateCanvas is true', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetEnvSecrets).toHaveBeenCalledWith({
|
||||
'env-1': 'secret-key',
|
||||
})
|
||||
})
|
||||
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should still update conversation variables when notUpdateCanvas is true', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetConversationVariables).toHaveBeenCalledWith([
|
||||
{ id: 'conv-1', name: 'user_input', value: 'test' },
|
||||
])
|
||||
})
|
||||
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('syncing state management', () => {
|
||||
it('should set isSyncingWorkflowDraft to true before fetch', () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(true)
|
||||
})
|
||||
|
||||
it('should set isSyncingWorkflowDraft to false after fetch completes', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(false)
|
||||
})
|
||||
})
|
||||
|
||||
it('should set isSyncingWorkflowDraft to false even when fetch fails', async () => {
|
||||
mockFetchWorkflowDraft.mockRejectedValue(new Error('Network error'))
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('isWorkflowDataLoaded flag management', () => {
|
||||
it('should set isWorkflowDataLoaded to false before fetch when it was true', () => {
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockStoreState({ isWorkflowDataLoaded: true }),
|
||||
)
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockSetIsWorkflowDataLoaded).toHaveBeenCalledWith(false)
|
||||
})
|
||||
|
||||
it('should set isWorkflowDataLoaded to true after fetch succeeds', async () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetIsWorkflowDataLoaded).toHaveBeenCalledWith(true)
|
||||
})
|
||||
})
|
||||
|
||||
it('should restore isWorkflowDataLoaded when fetch fails and it was previously loaded', async () => {
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockStoreState({ isWorkflowDataLoaded: true }),
|
||||
)
|
||||
mockFetchWorkflowDraft.mockRejectedValue(new Error('Network error'))
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
// Should restore to true because wasLoaded was true
|
||||
expect(mockSetIsWorkflowDataLoaded).toHaveBeenLastCalledWith(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('debounced sync cancellation', () => {
|
||||
it('should cancel debounced sync before fetching draft', () => {
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
|
||||
expect(mockCancelDebouncedSync).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle case when debouncedSyncWorkflowDraft has no cancel method', () => {
|
||||
mockWorkflowStoreGetState.mockReturnValue(
|
||||
createMockStoreState({ debouncedSyncWorkflowDraft: {} }),
|
||||
)
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
// Should not throw
|
||||
expect(() => {
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft()
|
||||
})
|
||||
}).not.toThrow()
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle empty graph in response', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-empty',
|
||||
graph: null,
|
||||
environment_variables: [],
|
||||
conversation_variables: [],
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(false)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
|
||||
nodes: [],
|
||||
edges: [],
|
||||
viewport: { x: 0, y: 0, zoom: 1 },
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle missing viewport in response', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-no-viewport',
|
||||
graph: {
|
||||
nodes: [{ id: 'node-1' }],
|
||||
edges: [],
|
||||
viewport: null,
|
||||
},
|
||||
environment_variables: [],
|
||||
conversation_variables: [],
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(false)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
|
||||
nodes: [{ id: 'node-1' }],
|
||||
edges: [],
|
||||
viewport: { x: 0, y: 0, zoom: 1 },
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle missing environment_variables in response', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-no-env',
|
||||
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
|
||||
environment_variables: undefined,
|
||||
conversation_variables: [],
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([])
|
||||
expect(mockSetEnvSecrets).toHaveBeenCalledWith({})
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle missing conversation_variables in response', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-no-conv',
|
||||
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
|
||||
environment_variables: [],
|
||||
conversation_variables: undefined,
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetConversationVariables).toHaveBeenCalledWith([])
|
||||
})
|
||||
})
|
||||
|
||||
it('should filter only secret type for envSecrets', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-mixed-env',
|
||||
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
|
||||
environment_variables: [
|
||||
{ id: 'env-1', name: 'SECRET_KEY', value: 'secret-value', value_type: 'secret' },
|
||||
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
|
||||
{ id: 'env-3', name: 'ANOTHER_SECRET', value: 'another-secret', value_type: 'secret' },
|
||||
],
|
||||
conversation_variables: [],
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetEnvSecrets).toHaveBeenCalledWith({
|
||||
'env-1': 'secret-value',
|
||||
'env-3': 'another-secret',
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
it('should hide secret values in environment variables', async () => {
|
||||
mockFetchWorkflowDraft.mockResolvedValue({
|
||||
hash: 'hash-secrets',
|
||||
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
|
||||
environment_variables: [
|
||||
{ id: 'env-1', name: 'SECRET_KEY', value: 'super-secret', value_type: 'secret' },
|
||||
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
|
||||
],
|
||||
conversation_variables: [],
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([
|
||||
{ id: 'env-1', name: 'SECRET_KEY', value: '[__HIDDEN__]', value_type: 'secret' },
|
||||
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
|
||||
])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('multi-tab scenario simulation (THE BUG FIX VERIFICATION)', () => {
|
||||
/**
 * This test verifies the fix for the multi-tab scenario:
 * 1. User opens workflow in Tab A and Tab B
 * 2. Tab A saves draft successfully
 * 3. Tab B tries to save but gets 'draft_workflow_not_sync' error (hash mismatch)
 * 4. BEFORE FIX: Tab B would fetch draft and overwrite canvas with old data
 * 5. AFTER FIX: Tab B only updates hash, preserving user's canvas changes
 */
|
||||
it('should only update hash when called with notUpdateCanvas=true (simulating sync error recovery)', async () => {
|
||||
const mockResponse = createMockDraftResponse()
|
||||
mockFetchWorkflowDraft.mockResolvedValue(mockResponse)
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
// Simulate the sync error recovery scenario where notUpdateCanvas is true
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(true)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
// Hash should be updated for next sync attempt
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
|
||||
})
|
||||
|
||||
// Canvas should NOT be updated - user's changes are preserved
|
||||
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
|
||||
|
||||
// Other states should still be updated
|
||||
expect(mockSetEnvironmentVariables).toHaveBeenCalled()
|
||||
expect(mockSetConversationVariables).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should update canvas when called with notUpdateCanvas=false (normal refresh)', async () => {
|
||||
const mockResponse = createMockDraftResponse()
|
||||
mockFetchWorkflowDraft.mockResolvedValue(mockResponse)
|
||||
|
||||
const { result } = renderHook(() => useWorkflowRefreshDraft())
|
||||
|
||||
// Simulate normal refresh scenario
|
||||
act(() => {
|
||||
result.current.handleRefreshWorkflowDraft(false)
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
|
||||
})
|
||||
|
||||
// Canvas SHOULD be updated in normal refresh
|
||||
await waitFor(() => {
|
||||
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -8,7 +8,7 @@ export const useWorkflowRefreshDraft = () => {
 const workflowStore = useWorkflowStore()
 const { handleUpdateWorkflowCanvas } = useWorkflowUpdate()

-const handleRefreshWorkflowDraft = useCallback((notUpdateCanvas?: boolean) => {
+const handleRefreshWorkflowDraft = useCallback(() => {
 const {
 appId,
 setSyncWorkflowDraftHash,

@@ -31,14 +31,12 @@ export const useWorkflowRefreshDraft = () => {
 fetchWorkflowDraft(`/apps/${appId}/workflows/draft`)
 .then((response) => {
 // Ensure we have a valid workflow structure with viewport
-if (!notUpdateCanvas) {
-  const workflowData: WorkflowDataUpdater = {
-    nodes: response.graph?.nodes || [],
-    edges: response.graph?.edges || [],
-    viewport: response.graph?.viewport || { x: 0, y: 0, zoom: 1 },
-  }
-  handleUpdateWorkflowCanvas(workflowData)
-}
+const workflowData: WorkflowDataUpdater = {
+  nodes: response.graph?.nodes || [],
+  edges: response.graph?.edges || [],
+  viewport: response.graph?.viewport || { x: 0, y: 0, zoom: 1 },
+}
+handleUpdateWorkflowCanvas(workflowData)
 setSyncWorkflowDraftHash(response.hash)
 setEnvSecrets((response.environment_variables || []).filter(env => env.value_type === 'secret').reduce((acc, env) => {
 acc[env.id] = env.value
@ -67,7 +67,7 @@
    "@lexical/react": "0.38.2",
    "@lexical/selection": "0.38.2",
    "@lexical/text": "0.38.2",
    "@lexical/utils": "0.38.2",
    "@lexical/utils": "0.39.0",
    "@monaco-editor/react": "4.7.0",
    "@octokit/core": "6.1.6",
    "@octokit/request-error": "6.1.8",
80 web/pnpm-lock.yaml (generated)
@ -94,8 +94,8 @@ importers:
        specifier: 0.38.2
        version: 0.38.2
      '@lexical/utils':
        specifier: 0.38.2
        version: 0.38.2
        specifier: 0.39.0
        version: 0.39.0
      '@monaco-editor/react':
        specifier: 4.7.0
        version: 4.7.0(monaco-editor@0.55.1)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
@ -2066,6 +2066,9 @@ packages:
  '@lexical/clipboard@0.38.2':
    resolution: {integrity: sha512-dDShUplCu8/o6BB9ousr3uFZ9bltR+HtleF/Tl8FXFNPpZ4AXhbLKUoJuucRuIr+zqT7RxEv/3M6pk/HEoE6NQ==}

  '@lexical/clipboard@0.39.0':
    resolution: {integrity: sha512-ylrHy8M+I5EH4utwqivslugqQhvgLTz9VEJdrb2RjbhKQEXwMcqKCRWh6cRfkYx64onE2YQE0nRIdzHhExEpLQ==}

  '@lexical/code@0.38.2':
    resolution: {integrity: sha512-wpqgbmPsfi/+8SYP0zI2kml09fGPRhzO5litR9DIbbSGvcbawMbRNcKLO81DaTbsJRnBJiQvbBBBJAwZKRqgBw==}

@ -2081,6 +2084,9 @@ packages:
  '@lexical/extension@0.38.2':
    resolution: {integrity: sha512-qbUNxEVjAC0kxp7hEMTzktj0/51SyJoIJWK6Gm790b4yNBq82fEPkksfuLkRg9VQUteD0RT1Nkjy8pho8nNamw==}

  '@lexical/extension@0.39.0':
    resolution: {integrity: sha512-mp/WcF8E53FWPiUHgHQz382J7u7C4+cELYNkC00dKaymf8NhS6M65Y8tyDikNGNUcLXSzaluwK0HkiKjTYGhVQ==}

  '@lexical/hashtag@0.38.2':
    resolution: {integrity: sha512-jNI4Pv+plth39bjOeeQegMypkjDmoMWBMZtV0lCynBpkkPFlfMnyL9uzW/IxkZnX8LXWSw5mbWk07nqOUNTCrA==}

@ -2090,12 +2096,18 @@ packages:
  '@lexical/html@0.38.2':
    resolution: {integrity: sha512-pC5AV+07bmHistRwgG3NJzBMlIzSdxYO6rJU4eBNzyR4becdiLsI4iuv+aY7PhfSv+SCs7QJ9oc4i5caq48Pkg==}

  '@lexical/html@0.39.0':
    resolution: {integrity: sha512-7VLWP5DpzBg3kKctpNK6PbhymKAtU6NAnKieopCfCIWlMW+EqpldteiIXGqSqrMRK0JWTmF1gKgr9nnQyOOsXw==}

  '@lexical/link@0.38.2':
    resolution: {integrity: sha512-UOKTyYqrdCR9+7GmH6ZVqJTmqYefKGMUHMGljyGks+OjOGZAQs78S1QgcPEqltDy+SSdPSYK7wAo6gjxZfEq9g==}

  '@lexical/list@0.38.2':
    resolution: {integrity: sha512-OQm9TzatlMrDZGxMxbozZEHzMJhKxAbH1TOnOGyFfzpfjbnFK2y8oLeVsfQZfZRmiqQS4Qc/rpFnRP2Ax5dsbA==}

  '@lexical/list@0.39.0':
    resolution: {integrity: sha512-mxgSxUrakTCHtC+gF30BChQBJTsCMiMgfC2H5VvhcFwXMgsKE/aK9+a+C/sSvvzCmPXqzYsuAcGkJcrY3e5xlw==}

  '@lexical/mark@0.38.2':
    resolution: {integrity: sha512-U+8KGwc3cP5DxSs15HfkP2YZJDs5wMbWQAwpGqep9bKphgxUgjPViKhdi+PxIt2QEzk7WcoZWUsK1d2ty/vSmg==}

@ -2123,15 +2135,24 @@ packages:
  '@lexical/selection@0.38.2':
    resolution: {integrity: sha512-eMFiWlBH6bEX9U9sMJ6PXPxVXTrihQfFeiIlWLuTpEIDF2HRz7Uo1KFRC/yN6q0DQaj7d9NZYA6Mei5DoQuz5w==}

  '@lexical/selection@0.39.0':
    resolution: {integrity: sha512-j0cgNuTKDCdf/4MzRnAUwEqG6C/WQp18k2WKmX5KIVZJlhnGIJmlgSBrxjo8AuZ16DIHxTm2XNB4cUDCgZNuPA==}

  '@lexical/table@0.38.2':
    resolution: {integrity: sha512-uu0i7yz0nbClmHOO5ZFsinRJE6vQnFz2YPblYHAlNigiBedhqMwSv5bedrzDq8nTTHwych3mC63tcyKIrM+I1g==}

  '@lexical/table@0.39.0':
    resolution: {integrity: sha512-1eH11kV4bJ0fufCYl8DpE19kHwqUI8Ev5CZwivfAtC3ntwyNkeEpjCc0pqeYYIWN/4rTZ5jgB3IJV4FntyfCzw==}

  '@lexical/text@0.38.2':
    resolution: {integrity: sha512-+juZxUugtC4T37aE3P0l4I9tsWbogDUnTI/mgYk4Ht9g+gLJnhQkzSA8chIyfTxbj5i0A8yWrUUSw+/xA7lKUQ==}

  '@lexical/utils@0.38.2':
    resolution: {integrity: sha512-y+3rw15r4oAWIEXicUdNjfk8018dbKl7dWHqGHVEtqzAYefnEYdfD2FJ5KOTXfeoYfxi8yOW7FvzS4NZDi8Bfw==}

  '@lexical/utils@0.39.0':
    resolution: {integrity: sha512-8YChidpMJpwQc4nex29FKUeuZzC++QCS/Jt46lPuy1GS/BZQoPHFKQ5hyVvM9QVhc5CEs4WGNoaCZvZIVN8bQw==}

  '@lexical/yjs@0.38.2':
    resolution: {integrity: sha512-fg6ZHNrVQmy1AAxaTs8HrFbeNTJCaCoEDPi6pqypHQU3QVfqr4nq0L0EcHU/TRlR1CeduEPvZZIjUUxWTZ0u8g==}
    peerDependencies:
@ -2619,6 +2640,9 @@ packages:
  '@preact/signals-core@1.12.1':
    resolution: {integrity: sha512-BwbTXpj+9QutoZLQvbttRg5x3l5468qaV2kufh+51yha1c53ep5dY4kTuZR35+3pAZxpfQerGJiQqg34ZNZ6uA==}

  '@preact/signals-core@1.12.2':
    resolution: {integrity: sha512-5Yf8h1Ke3SMHr15xl630KtwPTW4sYDFkkxS0vQ8UiQLWwZQnrF9IKaVG1mN5VcJz52EcWs2acsc/Npjha/7ysA==}

  '@preact/signals@1.3.2':
    resolution: {integrity: sha512-naxcJgUJ6BTOROJ7C3QML7KvwKwCXQJYTc5L/b0eEsdYgPB6SxwoQ1vDGcS0Q7GVjAenVq/tXrybVdFShHYZWg==}
    peerDependencies:
@ -6223,6 +6247,9 @@ packages:
  lexical@0.38.2:
    resolution: {integrity: sha512-JJmfsG3c4gwBHzUGffbV7ifMNkKAWMCnYE3xJl87gty7hjyV5f3xq7eqTjP5HFYvO4XpjJvvWO2/djHp5S10tw==}

  lexical@0.39.0:
    resolution: {integrity: sha512-lpLv7MEJH5QDujEDlYqettL3ATVtNYjqyimzqgrm0RvCm3AO9WXSdsgTxuN7IAZRu88xkxCDeYubeUf4mNZVdg==}

  lib0@0.2.117:
    resolution: {integrity: sha512-DeXj9X5xDCjgKLU/7RR+/HQEVzuuEUiwldwOGsHK/sfAfELGWEyTcf0x+uOvCvK3O2zPmZePXWL85vtia6GyZw==}
    engines: {node: '>=16'}
@ -10373,6 +10400,14 @@ snapshots:
      '@lexical/utils': 0.38.2
      lexical: 0.38.2

  '@lexical/clipboard@0.39.0':
    dependencies:
      '@lexical/html': 0.39.0
      '@lexical/list': 0.39.0
      '@lexical/selection': 0.39.0
      '@lexical/utils': 0.39.0
      lexical: 0.39.0

  '@lexical/code@0.38.2':
    dependencies:
      '@lexical/utils': 0.38.2
@ -10401,6 +10436,12 @@ snapshots:
      '@preact/signals-core': 1.12.1
      lexical: 0.38.2

  '@lexical/extension@0.39.0':
    dependencies:
      '@lexical/utils': 0.39.0
      '@preact/signals-core': 1.12.2
      lexical: 0.39.0

  '@lexical/hashtag@0.38.2':
    dependencies:
      '@lexical/text': 0.38.2
@ -10419,6 +10460,12 @@ snapshots:
      '@lexical/utils': 0.38.2
      lexical: 0.38.2

  '@lexical/html@0.39.0':
    dependencies:
      '@lexical/selection': 0.39.0
      '@lexical/utils': 0.39.0
      lexical: 0.39.0

  '@lexical/link@0.38.2':
    dependencies:
      '@lexical/extension': 0.38.2
@ -10432,6 +10479,13 @@ snapshots:
      '@lexical/utils': 0.38.2
      lexical: 0.38.2

  '@lexical/list@0.39.0':
    dependencies:
      '@lexical/extension': 0.39.0
      '@lexical/selection': 0.39.0
      '@lexical/utils': 0.39.0
      lexical: 0.39.0

  '@lexical/mark@0.38.2':
    dependencies:
      '@lexical/utils': 0.38.2
@ -10501,6 +10555,10 @@ snapshots:
    dependencies:
      lexical: 0.38.2

  '@lexical/selection@0.39.0':
    dependencies:
      lexical: 0.39.0

  '@lexical/table@0.38.2':
    dependencies:
      '@lexical/clipboard': 0.38.2
@ -10508,6 +10566,13 @@ snapshots:
      '@lexical/utils': 0.38.2
      lexical: 0.38.2

  '@lexical/table@0.39.0':
    dependencies:
      '@lexical/clipboard': 0.39.0
      '@lexical/extension': 0.39.0
      '@lexical/utils': 0.39.0
      lexical: 0.39.0

  '@lexical/text@0.38.2':
    dependencies:
      lexical: 0.38.2
@ -10519,6 +10584,13 @@ snapshots:
      '@lexical/table': 0.38.2
      lexical: 0.38.2

  '@lexical/utils@0.39.0':
    dependencies:
      '@lexical/list': 0.39.0
      '@lexical/selection': 0.39.0
      '@lexical/table': 0.39.0
      lexical: 0.39.0

  '@lexical/yjs@0.38.2(yjs@13.6.27)':
    dependencies:
      '@lexical/offset': 0.38.2
@ -10973,6 +11045,8 @@ snapshots:

  '@preact/signals-core@1.12.1': {}

  '@preact/signals-core@1.12.2': {}

  '@preact/signals@1.3.2(preact@10.28.0)':
    dependencies:
      '@preact/signals-core': 1.12.1
@ -15090,6 +15164,8 @@ snapshots:

  lexical@0.38.2: {}

  lexical@0.39.0: {}

  lib0@0.2.117:
    dependencies:
      isomorphic.js: 0.2.5