Merge main HEAD (segment 5) into sandboxed-agent-rebase

Resolve 83 conflicts: 10 backend, 62 frontend, 11 config/lock files.
Preserve sandbox/agent/collaboration features while adopting main's
UI refactorings (Dialog/AlertDialog/Popover), model provider updates,
and enterprise features.

Made-with: Cursor
This commit is contained in:
Novice
2026-03-23 14:20:06 +08:00
1671 changed files with 124822 additions and 22302 deletions

View File

@ -0,0 +1,68 @@
// Unit tests for the run-log <Meta> summary component (status label, executor,
// start time, elapsed time, tokens, steps).
import { render, screen } from '@testing-library/react'
import Meta from '../meta'
// Deterministic stand-in for the use-timestamp hook's formatTime. The vi.mock
// call below is hoisted above this const, but the factory only reads
// mockFormatTime when formatTime is invoked during render, so there is no
// temporal-dead-zone problem.
const mockFormatTime = vi.fn((value: number) => `formatted:${value}`)
vi.mock('@/hooks/use-timestamp', () => ({
default: () => ({
formatTime: mockFormatTime,
}),
}))
describe('Meta', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// While running, the component shows six skeleton placeholders
// (.bg-text-quaternary) and no status label; the steps row is still present.
// NOTE(review): i18n keys render verbatim ('runLog.meta.steps') — presumably
// t() is mocked to identity in global test setup; confirm if run standalone.
it('renders loading placeholders while the run is in progress', () => {
const { container } = render(<Meta status="running" />)
expect(container.querySelectorAll('.bg-text-quaternary')).toHaveLength(6)
expect(screen.queryByText('SUCCESS')).not.toBeInTheDocument()
expect(screen.queryByText('runLog.meta.steps')).toBeInTheDocument()
})
// Each terminal status maps to a fixed, uppercase label string.
it.each([
['succeeded', 'SUCCESS'],
['partial-succeeded', 'PARTIAL SUCCESS'],
['exception', 'EXCEPTION'],
['failed', 'FAIL'],
['stopped', 'STOP'],
['paused', 'PENDING'],
] as const)('renders the %s status label', (status, label) => {
render(<Meta status={status} />)
expect(screen.getByText(label)).toBeInTheDocument()
})
// Explicit metadata is rendered: time is shown rounded to 3 decimals
// (1.2349 -> "1.235s"), start time goes through the mocked formatTime, and
// showSteps={false} hides the steps row.
it('renders explicit metadata values and hides steps when requested', () => {
render(
<Meta
status="succeeded"
executor="Alice"
startTime={1700000000000}
time={1.2349}
tokens={42}
steps={3}
showSteps={false}
/>,
)
expect(screen.getByText('Alice')).toBeInTheDocument()
expect(screen.getByText('formatted:1700000000000')).toBeInTheDocument()
expect(screen.getByText('1.235s')).toBeInTheDocument()
expect(screen.getByText('42 Tokens')).toBeInTheDocument()
expect(screen.queryByText('Run Steps')).not.toBeInTheDocument()
// formatTime receives the raw timestamp plus a format string (second arg).
expect(mockFormatTime).toHaveBeenCalledWith(1700000000000, expect.any(String))
})
// With no metadata: executor falls back to 'N/A', start time and elapsed
// time both render '-', tokens default to 0, and steps default to 1.
it('falls back to default values when metadata is missing', () => {
render(<Meta status="failed" />)
expect(screen.getByText('N/A')).toBeInTheDocument()
expect(screen.getAllByText('-')).toHaveLength(2)
expect(screen.getByText('0 Tokens')).toBeInTheDocument()
expect(screen.getByText('runLog.meta.steps').parentElement).toHaveTextContent('1')
expect(mockFormatTime).not.toHaveBeenCalled()
})
})

View File

@ -0,0 +1,137 @@
// Unit tests for the run-log <OutputPanel>: loading, error, empty, text,
// file, and structured (JSON) output rendering branches.
import type { FileEntity } from '@/app/components/base/file-uploader/types'
import type { FileResponse } from '@/types/workflow'
import { render, screen } from '@testing-library/react'
import { TransferMethod } from '@/types/app'
import OutputPanel from '../output-panel'
// A file output is a FileResponse tagged with the dify file identity marker.
type FileOutput = FileResponse & { dify_model_identity: '__dify__file__' }
// All heavy children are replaced with minimal test doubles that surface
// their props as data-* attributes / text, so assertions stay DOM-only.
vi.mock('@/app/components/base/chat/chat/loading-anim', () => ({
default: () => <div data-testid="loading-anim" />,
}))
vi.mock('@/app/components/base/file-uploader', () => ({
FileList: ({ files }: { files: FileEntity[] }) => (
<div data-testid="file-list">{files.map(file => file.name).join(', ')}</div>
),
}))
vi.mock('@/app/components/base/markdown', () => ({
Markdown: ({ content }: { content: string }) => <div data-testid="markdown">{content}</div>,
}))
vi.mock('@/app/components/workflow/run/status-container', () => ({
default: ({ status, children }: { status: string, children?: React.ReactNode }) => (
<div data-status={status} data-testid="status-container">{children}</div>
),
}))
vi.mock('@/app/components/workflow/nodes/_base/components/editor/code-editor', () => ({
default: ({
language,
value,
height,
}: {
language: string
value: string
height?: number
}) => (
<div data-height={height} data-language={language} data-testid="code-editor" data-value={value}>
{value}
</div>
),
}))
// Factory for a fully-populated file output; tests override individual
// fields per case.
const createFileOutput = (overrides: Partial<FileOutput> = {}): FileOutput => ({
dify_model_identity: '__dify__file__',
related_id: 'file-1',
extension: 'pdf',
filename: 'report.pdf',
size: 128,
mime_type: 'application/pdf',
transfer_method: TransferMethod.local_file,
type: 'document',
url: 'https://example.com/report.pdf',
upload_file_id: 'upload-1',
remote_url: '',
...overrides,
})
describe('OutputPanel', () => {
it('renders the loading animation while the workflow is running', () => {
render(<OutputPanel isRunning />)
expect(screen.getByTestId('loading-anim')).toBeInTheDocument()
})
it('renders the failed status container when there is an error', () => {
render(<OutputPanel error="Execution failed" />)
expect(screen.getByTestId('status-container')).toHaveAttribute('data-status', 'failed')
expect(screen.getByText('Execution failed')).toBeInTheDocument()
})
it('renders the no-output placeholder when there are no outputs', () => {
render(<OutputPanel />)
expect(screen.getByTestId('markdown')).toHaveTextContent('No Output')
})
it('renders a plain text output as markdown', () => {
render(<OutputPanel outputs={{ answer: 'Hello Dify' }} />)
expect(screen.getByTestId('markdown')).toHaveTextContent('Hello Dify')
})
// String-array outputs are joined into one markdown document.
it('renders array text outputs as joined markdown content', () => {
render(<OutputPanel outputs={{ answer: ['Line 1', 'Line 2'] }} />)
expect(screen.getByTestId('markdown')).toHaveTextContent(/Line 1\s+Line 2/)
})
it('renders a file list for a single file output', () => {
render(<OutputPanel outputs={{ attachment: createFileOutput() }} />)
expect(screen.getByTestId('file-list')).toHaveTextContent('report.pdf')
})
it('renders a file list for an array of file outputs', () => {
render(
<OutputPanel
outputs={{
attachments: [
createFileOutput(),
createFileOutput({
related_id: 'file-2',
filename: 'summary.md',
extension: 'md',
mime_type: 'text/markdown',
type: 'custom',
upload_file_id: 'upload-2',
url: 'https://example.com/summary.md',
}),
],
}}
/>,
)
expect(screen.getByTestId('file-list')).toHaveTextContent('report.pdf, summary.md')
})
// Multi-key outputs fall through to the JSON code editor. The expected
// editor height is 92 for a 200px panel — presumably the panel subtracts a
// fixed chrome height; confirm against the output-panel implementation.
it('renders structured outputs inside the code editor when height is available', () => {
render(<OutputPanel height={200} outputs={{ answer: 'hello', score: 1 }} />)
expect(screen.getByTestId('code-editor')).toHaveAttribute('data-language', 'json')
expect(screen.getByTestId('code-editor')).toHaveAttribute('data-height', '92')
expect(screen.getByTestId('code-editor')).toHaveAttribute('data-value', `{
"answer": "hello",
"score": 1
}`)
})
it('skips the code editor when structured outputs have no positive height', () => {
render(<OutputPanel height={0} outputs={{ answer: 'hello', score: 1 }} />)
expect(screen.queryByTestId('code-editor')).not.toBeInTheDocument()
})
})

View File

@ -0,0 +1,88 @@
// Unit tests for the run-log <ResultText>: loading, error, empty-state CTA,
// markdown text output, and grouped file outputs.
import type { FileEntity } from '@/app/components/base/file-uploader/types'
import { fireEvent, render, screen } from '@testing-library/react'
import { TransferMethod } from '@/types/app'
import ResultText from '../result-text'
// Child components are stubbed so assertions target simple test ids/text.
vi.mock('@/app/components/base/chat/chat/loading-anim', () => ({
default: () => <div data-testid="loading-anim" />,
}))
vi.mock('@/app/components/base/file-uploader', () => ({
FileList: ({ files }: { files: FileEntity[] }) => (
<div data-testid="file-list">{files.map(file => file.name).join(', ')}</div>
),
}))
vi.mock('@/app/components/base/markdown', () => ({
Markdown: ({ content }: { content: string }) => <div data-testid="markdown">{content}</div>,
}))
vi.mock('@/app/components/workflow/run/status-container', () => ({
default: ({ status, children }: { status: string, children?: React.ReactNode }) => (
<div data-status={status} data-testid="status-container">{children}</div>
),
}))
describe('ResultText', () => {
it('renders the loading animation while waiting for a text result', () => {
render(<ResultText isRunning />)
expect(screen.getByTestId('loading-anim')).toBeInTheDocument()
})
it('renders the error state when the run fails', () => {
render(<ResultText error="Run failed" />)
expect(screen.getByTestId('status-container')).toHaveAttribute('data-status', 'failed')
expect(screen.getByText('Run failed')).toBeInTheDocument()
})
// The empty state shows a title plus a clickable link that forwards onClick.
// i18n keys render verbatim — presumably t() is identity-mocked globally.
it('renders the empty-state call to action and forwards clicks', () => {
const onClick = vi.fn()
render(<ResultText onClick={onClick} />)
expect(screen.getByText('runLog.resultEmpty.title')).toBeInTheDocument()
fireEvent.click(screen.getByText('runLog.resultEmpty.link'))
expect(onClick).toHaveBeenCalledTimes(1)
})
// A paused run is not "empty": the CTA must not appear.
it('does not render the empty state for paused runs', () => {
render(<ResultText isPaused />)
expect(screen.queryByText('runLog.resultEmpty.title')).not.toBeInTheDocument()
})
it('renders markdown content when text outputs are available', () => {
render(<ResultText outputs="hello workflow" />)
expect(screen.getByTestId('markdown')).toHaveTextContent('hello workflow')
})
// File outputs are grouped by variable name; each group shows the variable
// label and its FileList.
it('renders file groups when file outputs are available', () => {
render(
<ResultText
allFiles={[
{
varName: 'attachments',
list: [
{
id: 'file-1',
name: 'report.pdf',
size: 128,
type: 'application/pdf',
progress: 100,
transferMethod: TransferMethod.local_file,
supportFileType: 'document',
} satisfies FileEntity,
],
},
]}
/>,
)
expect(screen.getByText('attachments')).toBeInTheDocument()
expect(screen.getByTestId('file-list')).toHaveTextContent('report.pdf')
})
})

View File

@ -0,0 +1,168 @@
// Unit tests for <SpecialResultPanel>, the wrapper that conditionally shows
// the retry / iteration / loop / agent sub-panels for a tracing node.
import type { AgentLogItemWithChildren, NodeTracing } from '@/types/workflow'
import { fireEvent, render, screen } from '@testing-library/react'
import { BlockEnum } from '../../types'
import SpecialResultPanel from '../special-result-panel'
// Spies are created via vi.hoisted so the hoisted vi.mock factories below can
// reference them safely (plain consts would hit the temporal dead zone).
const mocks = vi.hoisted(() => ({
retryPanel: vi.fn(),
iterationPanel: vi.fn(),
loopPanel: vi.fn(),
agentPanel: vi.fn(),
}))
// Each sub-panel is stubbed to record its list prop and render its length.
vi.mock('../retry-log', () => ({
RetryResultPanel: ({ list }: { list: NodeTracing[] }) => {
mocks.retryPanel(list)
return <div data-testid="retry-result-panel">{list.length}</div>
},
}))
vi.mock('../iteration-log', () => ({
IterationResultPanel: ({ list }: { list: NodeTracing[][] }) => {
mocks.iterationPanel(list)
return <div data-testid="iteration-result-panel">{list.length}</div>
},
}))
vi.mock('../loop-log', () => ({
LoopResultPanel: ({ list }: { list: NodeTracing[][] }) => {
mocks.loopPanel(list)
return <div data-testid="loop-result-panel">{list.length}</div>
},
}))
vi.mock('../agent-log', () => ({
AgentResultPanel: ({ agentOrToolLogItemStack }: { agentOrToolLogItemStack: AgentLogItemWithChildren[] }) => {
mocks.agentPanel(agentOrToolLogItemStack)
return <div data-testid="agent-result-panel">{agentOrToolLogItemStack.length}</div>
},
}))
// Factory for a minimal succeeded NodeTracing record; override per test.
const createNodeTracing = (overrides: Partial<NodeTracing> = {}): NodeTracing => ({
id: 'trace-1',
index: 0,
predecessor_node_id: '',
node_id: 'node-1',
node_type: BlockEnum.Code,
title: 'Code',
inputs: {},
inputs_truncated: false,
process_data: {},
process_data_truncated: false,
outputs: {},
outputs_truncated: false,
status: 'succeeded',
error: '',
elapsed_time: 0.2,
metadata: {
iterator_length: 0,
iterator_index: 0,
loop_length: 0,
loop_index: 0,
},
created_at: 1710000000,
created_by: {
id: 'user-1',
name: 'Alice',
email: 'alice@example.com',
},
finished_at: 1710000001,
execution_metadata: undefined,
...overrides,
})
// Factory for a leaf agent-log item (no children).
const createAgentLogItem = (overrides: Partial<AgentLogItemWithChildren> = {}): AgentLogItemWithChildren => ({
node_execution_id: 'exec-1',
message_id: 'message-1',
node_id: 'node-1',
label: 'Step 1',
data: {},
status: 'succeeded',
children: [],
...overrides,
})
describe('SpecialResultPanel', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// The wrapper should isolate clicks from the parent tracing card.
describe('Event Isolation', () => {
it('should stop click propagation at the wrapper level', () => {
const parentClick = vi.fn()
const { container } = render(
<div onClick={parentClick}>
<SpecialResultPanel />
</div>,
)
// First child of the wrapper div is the panel root rendered by the component.
const panelRoot = container.firstElementChild?.firstElementChild
if (!panelRoot)
throw new Error('Expected panel root element')
fireEvent.click(panelRoot)
expect(parentClick).not.toHaveBeenCalled()
})
})
// Panel branches should render only when their required props are present.
describe('Conditional Panels', () => {
it('should render retry, iteration, loop, and agent panels when their data is provided', () => {
const retryList = [createNodeTracing()]
const iterationList = [[createNodeTracing({ id: 'iter-1' })]]
const loopList = [[createNodeTracing({ id: 'loop-1' })]]
const agentStack = [createAgentLogItem()]
const agentMap = {
'message-1': [createAgentLogItem()],
}
render(
<SpecialResultPanel
showRetryDetail
setShowRetryDetailFalse={vi.fn()}
retryResultList={retryList}
showIteratingDetail
setShowIteratingDetailFalse={vi.fn()}
iterationResultList={iterationList}
showLoopingDetail
setShowLoopingDetailFalse={vi.fn()}
loopResultList={loopList}
agentOrToolLogItemStack={agentStack}
agentOrToolLogListMap={agentMap}
handleShowAgentOrToolLog={vi.fn()}
/>,
)
expect(screen.getByTestId('retry-result-panel')).toHaveTextContent('1')
expect(screen.getByTestId('iteration-result-panel')).toHaveTextContent('1')
expect(screen.getByTestId('loop-result-panel')).toHaveTextContent('1')
expect(screen.getByTestId('agent-result-panel')).toHaveTextContent('1')
// Each sub-panel must receive the exact list instance it was given.
expect(mocks.retryPanel).toHaveBeenCalledWith(retryList)
expect(mocks.iterationPanel).toHaveBeenCalledWith(iterationList)
expect(mocks.loopPanel).toHaveBeenCalledWith(loopList)
expect(mocks.agentPanel).toHaveBeenCalledWith(agentStack)
})
// With empty lists and the setter/map/handler props omitted, every branch
// stays hidden — show* flags alone are not sufficient.
it('should keep panels hidden when required guards are missing', () => {
render(
<SpecialResultPanel
showRetryDetail
retryResultList={[]}
showIteratingDetail
iterationResultList={[]}
showLoopingDetail
loopResultList={[]}
agentOrToolLogItemStack={[createAgentLogItem()]}
/>,
)
expect(screen.queryByTestId('retry-result-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('iteration-result-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('loop-result-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('agent-result-panel')).not.toBeInTheDocument()
})
})
})

View File

@ -0,0 +1,58 @@
// Unit tests for <StatusContainer>: background/text classes and the
// highlight background image must track both run status and theme.
import { render, screen } from '@testing-library/react'
import useTheme from '@/hooks/use-theme'
import { Theme } from '@/types/app'
import StatusContainer from '../status-container'
vi.mock('@/hooks/use-theme', () => ({
default: vi.fn(),
}))
const mockUseTheme = vi.mocked(useTheme)
describe('StatusContainer', () => {
beforeEach(() => {
vi.clearAllMocks()
// Default every test to the light theme; individual tests override.
mockUseTheme.mockReturnValue({ theme: Theme.light } as ReturnType<typeof useTheme>)
})
// Status styling should follow the current theme and runtime status.
describe('Status Variants', () => {
it('should render success styling for the light theme', () => {
const { container } = render(
<StatusContainer status="succeeded">
<span>Finished</span>
</StatusContainer>,
)
expect(screen.getByText('Finished')).toBeInTheDocument()
expect(container.firstElementChild).toHaveClass('bg-workflow-display-success-bg')
expect(container.firstElementChild).toHaveClass('text-text-success')
// Tailwind arbitrary-value class for the light highlight.svg background;
// every special character in the class name must be CSS-escaped.
expect(container.querySelector('.bg-\\[url\\(\\~\\@\\/app\\/components\\/workflow\\/run\\/assets\\/highlight\\.svg\\)\\]')).toBeInTheDocument()
})
it('should render failed styling for the dark theme', () => {
mockUseTheme.mockReturnValue({ theme: Theme.dark } as ReturnType<typeof useTheme>)
const { container } = render(
<StatusContainer status="failed">
<span>Failed</span>
</StatusContainer>,
)
expect(container.firstElementChild).toHaveClass('bg-workflow-display-error-bg')
// NOTE(review): failed uses text-text-warning here (not an error text
// class) — mirrors the component's current styling; confirm intended.
expect(container.firstElementChild).toHaveClass('text-text-warning')
// Dark theme swaps in highlight-dark.svg.
expect(container.querySelector('.bg-\\[url\\(\\~\\@\\/app\\/components\\/workflow\\/run\\/assets\\/highlight-dark\\.svg\\)\\]')).toBeInTheDocument()
})
it('should render warning styling for paused runs', () => {
const { container } = render(
<StatusContainer status="paused">
<span>Paused</span>
</StatusContainer>,
)
expect(container.firstElementChild).toHaveClass('bg-workflow-display-warning-bg')
expect(container.firstElementChild).toHaveClass('text-text-destructive')
})
})
})

View File

@ -0,0 +1,132 @@
// Unit tests for the run-log <Status> header: per-status labels, time/token
// metadata, error and exception tips, and paused human-input details.
import type { WorkflowPausedDetailsResponse } from '@/models/log'
import { render, screen } from '@testing-library/react'
import { createDocLinkMock, resolveDocLink } from '../../__tests__/i18n'
import Status from '../status'
// The vi.mock factories below only read these consts when the mocked hooks
// are invoked during render, so referencing them is hoisting-safe.
const mockDocLink = createDocLinkMock()
const mockUseWorkflowPausedDetails = vi.fn()
vi.mock('@/context/i18n', () => ({
useDocLink: () => mockDocLink,
}))
vi.mock('@/service/use-log', () => ({
useWorkflowPausedDetails: (params: { workflowRunId: string, enabled?: boolean }) => mockUseWorkflowPausedDetails(params),
}))
// Factory for the paused-details response; tests override paused_nodes.
const createPausedDetails = (overrides: Partial<WorkflowPausedDetailsResponse> = {}): WorkflowPausedDetailsResponse => ({
paused_at: '2026-03-18T00:00:00Z',
paused_nodes: [],
...overrides,
})
describe('Status', () => {
beforeEach(() => {
vi.clearAllMocks()
// Default: no paused details loaded.
mockUseWorkflowPausedDetails.mockReturnValue({ data: undefined })
})
// Running shows two skeleton placeholders and must NOT fetch paused
// details (enabled: false).
it('renders the running status and loading placeholders', () => {
render(<Status status="running" workflowRunId="run-1" />)
expect(screen.getByText('Running')).toBeInTheDocument()
expect(document.querySelectorAll('.bg-text-quaternary')).toHaveLength(2)
expect(mockUseWorkflowPausedDetails).toHaveBeenCalledWith({
workflowRunId: 'run-1',
enabled: false,
})
})
it('renders the listening label when the run is waiting for input', () => {
render(<Status status="running" isListening workflowRunId="run-2" />)
expect(screen.getByText('Listening')).toBeInTheDocument()
})
it('renders succeeded metadata values', () => {
render(<Status status="succeeded" time={1.234} tokens={8} />)
expect(screen.getByText('SUCCESS')).toBeInTheDocument()
expect(screen.getByText('1.234s')).toBeInTheDocument()
expect(screen.getByText('8 Tokens')).toBeInTheDocument()
})
// Missing time renders '-', missing tokens renders 0.
it('renders stopped fallbacks when time and tokens are missing', () => {
render(<Status status="stopped" />)
expect(screen.getByText('STOP')).toBeInTheDocument()
expect(screen.getByText('-')).toBeInTheDocument()
expect(screen.getByText('0 Tokens')).toBeInTheDocument()
})
// A failed run with exceptionCounts also shows the partial-success tip
// (i18n key rendered with its interpolation payload appended).
it('renders failed details and the partial-success exception tip', () => {
render(<Status status="failed" error="Something broke" exceptionCounts={2} />)
expect(screen.getByText('FAIL')).toBeInTheDocument()
expect(screen.getByText('Something broke')).toBeInTheDocument()
expect(screen.getByText('workflow.nodes.common.errorHandle.partialSucceeded.tip:{"num":2}')).toBeInTheDocument()
})
it('renders the partial-succeeded warning summary', () => {
render(<Status status="partial-succeeded" exceptionCounts={3} />)
expect(screen.getByText('PARTIAL SUCCESS')).toBeInTheDocument()
expect(screen.getByText('workflow.nodes.common.errorHandle.partialSucceeded.tip:{"num":3}')).toBeInTheDocument()
})
// Exception status links to the error-type documentation page.
it('renders the exception learn-more link', () => {
render(<Status status="exception" error="Bad request" />)
const learnMoreLink = screen.getByRole('link', { name: 'workflow.common.learnMore' })
expect(screen.getByText('EXCEPTION')).toBeInTheDocument()
expect(learnMoreLink).toHaveAttribute('href', resolveDocLink('/use-dify/debug/error-type'))
expect(mockDocLink).toHaveBeenCalledWith('/use-dify/debug/error-type')
})
// Paused runs fetch details (enabled: true) and show three placeholders
// until the data arrives.
it('renders paused placeholders when pause details have not loaded yet', () => {
render(<Status status="paused" workflowRunId="run-3" />)
expect(screen.getByText('PENDING')).toBeInTheDocument()
expect(screen.getByText('workflow.nodes.humanInput.log.reason')).toBeInTheDocument()
expect(document.querySelectorAll('.bg-text-quaternary')).toHaveLength(3)
expect(mockUseWorkflowPausedDetails).toHaveBeenCalledWith({
workflowRunId: 'run-3',
enabled: true,
})
})
// With loaded details, each human-input paused node contributes a
// backstage URL rendered as a link whose text equals its href.
it('renders paused human-input reasons and backstage URLs', () => {
mockUseWorkflowPausedDetails.mockReturnValue({
data: createPausedDetails({
paused_nodes: [
{
node_id: 'node-1',
node_title: 'Need review',
pause_type: {
type: 'human_input',
form_id: 'form-1',
backstage_input_url: 'https://example.com/a',
},
},
{
node_id: 'node-2',
node_title: 'Need review 2',
pause_type: {
type: 'human_input',
form_id: 'form-2',
backstage_input_url: 'https://example.com/b',
},
},
],
}),
})
render(<Status status="paused" workflowRunId="run-4" />)
expect(screen.getByText('workflow.nodes.humanInput.log.reasonContent')).toBeInTheDocument()
expect(screen.getByText('workflow.nodes.humanInput.log.backstageInputURL')).toBeInTheDocument()
expect(screen.getByRole('link', { name: 'https://example.com/a' })).toHaveAttribute('href', 'https://example.com/a')
expect(screen.getByRole('link', { name: 'https://example.com/b' })).toHaveAttribute('href', 'https://example.com/b')
})
})

View File

@ -0,0 +1,112 @@
// Unit tests for <AgentLogTrigger>: shows the agent strategy and opens the
// agent log stack when clicked.
import type { AgentLogItemWithChildren, NodeTracing } from '@/types/workflow'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '../../../types'
import AgentLogTrigger from '../agent-log-trigger'
// Factory for a leaf agent-log item.
const createAgentLogItem = (overrides: Partial<AgentLogItemWithChildren> = {}): AgentLogItemWithChildren => ({
node_execution_id: 'exec-1',
message_id: 'message-1',
node_id: 'node-1',
label: 'Step 1',
data: {},
status: 'succeeded',
children: [],
...overrides,
})
// Factory for an Agent-node tracing record with a strategy in tool_info and
// one attached agentLog entry; override per test.
const createNodeTracing = (overrides: Partial<NodeTracing> = {}): NodeTracing => ({
id: 'trace-1',
index: 0,
predecessor_node_id: '',
node_id: 'node-1',
node_type: BlockEnum.Agent,
title: 'Agent',
inputs: {},
inputs_truncated: false,
process_data: {},
process_data_truncated: false,
outputs: {},
outputs_truncated: false,
status: 'succeeded',
error: '',
elapsed_time: 0.2,
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
tool_info: {
agent_strategy: 'Plan and execute',
},
},
metadata: {
iterator_length: 0,
iterator_index: 0,
loop_length: 0,
loop_index: 0,
},
created_at: 1710000000,
created_by: {
id: 'user-1',
name: 'Alice',
email: 'alice@example.com',
},
finished_at: 1710000001,
agentLog: [createAgentLogItem()],
...overrides,
})
describe('AgentLogTrigger', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// Agent triggers should expose strategy text and open the log stack payload.
describe('User Interactions', () => {
it('should show the agent strategy and pass the log payload on click', async () => {
const user = userEvent.setup()
const onShowAgentOrToolLog = vi.fn()
const agentLog = [createAgentLogItem({ message_id: 'message-1' })]
render(
<AgentLogTrigger
nodeInfo={createNodeTracing({ agentLog })}
onShowAgentOrToolLog={onShowAgentOrToolLog}
/>,
)
expect(screen.getByText('workflow.nodes.agent.strategy.label')).toBeInTheDocument()
expect(screen.getByText('Plan and execute')).toBeInTheDocument()
expect(screen.getByText('runLog.detail')).toBeInTheDocument()
await user.click(screen.getByText('Plan and execute'))
// NOTE(review): the payload's message_id is the tracing entry id
// ('trace-1'), not the agent log item's message_id — this mirrors the
// component's current behavior; confirm it is intended.
expect(onShowAgentOrToolLog).toHaveBeenCalledWith({
message_id: 'trace-1',
children: agentLog,
})
})
// Without tool_info the strategy text is absent, but clicking the detail
// label must still invoke the callback.
it('should still open the detail view when no strategy label is available', async () => {
const user = userEvent.setup()
const onShowAgentOrToolLog = vi.fn()
render(
<AgentLogTrigger
nodeInfo={createNodeTracing({
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
},
})}
onShowAgentOrToolLog={onShowAgentOrToolLog}
/>,
)
await user.click(screen.getByText('runLog.detail'))
expect(onShowAgentOrToolLog).toHaveBeenCalledTimes(1)
})
})
})

View File

@ -0,0 +1,149 @@
// Unit tests for <LoopLogTrigger>: summarizes loop count/error state and
// forwards structured loop details (plus duration/variable maps) on click.
import type { LoopDurationMap, LoopVariableMap, NodeTracing } from '@/types/workflow'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '../../../types'
import LoopLogTrigger from '../loop-log-trigger'
// Factory for a Loop-node tracing record; override per test.
const createNodeTracing = (overrides: Partial<NodeTracing> = {}): NodeTracing => ({
id: 'trace-1',
index: 0,
predecessor_node_id: '',
node_id: 'loop-node',
node_type: BlockEnum.Loop,
title: 'Loop',
inputs: {},
inputs_truncated: false,
process_data: {},
process_data_truncated: false,
outputs: {},
outputs_truncated: false,
status: 'succeeded',
error: '',
elapsed_time: 0.2,
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
},
metadata: {
iterator_length: 0,
iterator_index: 0,
loop_length: 0,
loop_index: 0,
},
created_at: 1710000000,
created_by: {
id: 'user-1',
name: 'Alice',
email: 'alice@example.com',
},
finished_at: 1710000001,
...overrides,
})
describe('LoopLogTrigger', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// Loop triggers should summarize count/error status and forward structured details.
describe('Structured Detail Handling', () => {
// When nodeInfo.details is already populated, the trigger forwards it
// as-is together with the duration and variable maps from metadata.
it('should pass existing loop details, durations, and variables to the callback', async () => {
const user = userEvent.setup()
const onShowLoopResultList = vi.fn()
const detailList = [
[createNodeTracing({ id: 'loop-1-step-1', status: 'succeeded' })],
[createNodeTracing({ id: 'loop-2-step-1', status: 'failed' })],
]
const loopDurationMap: LoopDurationMap = { 0: 1.2, 1: 2.5 }
const loopVariableMap: LoopVariableMap = { 1: { item: 'alpha' } }
render(
<div onClick={vi.fn()}>
<LoopLogTrigger
nodeInfo={createNodeTracing({
details: detailList,
loopDurationMap,
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
loop_duration_map: loopDurationMap,
loop_variable_map: loopVariableMap,
},
})}
onShowLoopResultList={onShowLoopResultList}
/>
</div>,
)
// One failed group => both the loop-count and error i18n keys appear.
expect(screen.getByText(/workflow\.nodes\.loop\.loop/)).toBeInTheDocument()
expect(screen.getByText(/workflow\.nodes\.loop\.error/)).toBeInTheDocument()
await user.click(screen.getByRole('button'))
expect(onShowLoopResultList).toHaveBeenCalledWith(detailList, loopDurationMap, loopVariableMap)
})
// Without details, groups are rebuilt from allExecutions: children are
// matched by parallel_mode_run_id or by loop_id + loop_index (both keys
// appear in loopDurationMap).
it('should reconstruct loop detail groups from execution metadata when details are absent', async () => {
const user = userEvent.setup()
const onShowLoopResultList = vi.fn()
const loopDurationMap: LoopDurationMap = {
'parallel-1': 1.5,
'2': 2.2,
}
const allExecutions = [
createNodeTracing({
id: 'parallel-child',
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
parallel_mode_run_id: 'parallel-1',
},
}),
createNodeTracing({
id: 'serial-child',
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
loop_id: 'loop-node',
loop_index: 2,
},
}),
]
render(
<LoopLogTrigger
nodeInfo={createNodeTracing({
details: undefined,
execution_metadata: {
total_tokens: 0,
total_price: 0,
currency: 'USD',
loop_duration_map: loopDurationMap,
loop_variable_map: {},
},
})}
allExecutions={allExecutions}
onShowLoopResultList={onShowLoopResultList}
/>,
)
await user.click(screen.getByRole('button'))
expect(onShowLoopResultList).toHaveBeenCalledTimes(1)
const [structuredList, durations, variableMap] = onShowLoopResultList.mock.calls[0]
// Each execution lands in its own single-element group; group order is
// not asserted (arrayContaining).
expect(structuredList).toHaveLength(2)
expect(structuredList).toEqual(
expect.arrayContaining([
[allExecutions[0]],
[allExecutions[1]],
]),
)
expect(durations).toEqual(loopDurationMap)
expect(variableMap).toEqual({})
})
})
})

View File

@ -0,0 +1,90 @@
// Unit tests for <RetryLogTrigger>: clicking the retries button forwards the
// retry detail list and must not bubble to the parent tracing card.
import type { NodeTracing } from '@/types/workflow'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '../../../types'
import RetryLogTrigger from '../retry-log-trigger'
// Factory for a minimal tracing record carrying a retryDetail list.
const createNodeTracing = (overrides: Partial<NodeTracing> = {}): NodeTracing => ({
id: 'trace-1',
index: 0,
predecessor_node_id: '',
node_id: 'node-1',
node_type: BlockEnum.Code,
title: 'Code',
inputs: {},
inputs_truncated: false,
process_data: {},
process_data_truncated: false,
outputs: {},
outputs_truncated: false,
status: 'succeeded',
error: '',
elapsed_time: 0.2,
metadata: {
iterator_length: 0,
iterator_index: 0,
loop_length: 0,
loop_index: 0,
},
created_at: 1710000000,
created_by: {
id: 'user-1',
name: 'Alice',
email: 'alice@example.com',
},
finished_at: 1710000001,
outputs_full_content: undefined,
execution_metadata: undefined,
extras: undefined,
retryDetail: [],
...overrides,
})
describe('RetryLogTrigger', () => {
beforeEach(() => {
vi.clearAllMocks()
})
// Clicking the trigger should stop bubbling and expose the retry detail list.
describe('User Interactions', () => {
it('should forward retry details and stop parent clicks', async () => {
const user = userEvent.setup()
const onShowRetryResultList = vi.fn()
const parentClick = vi.fn()
const retryDetail = [
createNodeTracing({ id: 'retry-1' }),
createNodeTracing({ id: 'retry-2' }),
]
render(
<div onClick={parentClick}>
<RetryLogTrigger
nodeInfo={createNodeTracing({ retryDetail })}
onShowRetryResultList={onShowRetryResultList}
/>
</div>,
)
// Button name is the i18n key plus its interpolation payload ({"num":2}),
// i.e. the retry count is derived from retryDetail.length.
await user.click(screen.getByRole('button', { name: 'workflow.nodes.common.retry.retries:{"num":2}' }))
expect(onShowRetryResultList).toHaveBeenCalledWith(retryDetail)
expect(parentClick).not.toHaveBeenCalled()
})
// Missing retryDetail must degrade to an empty list, not undefined.
it('should fall back to an empty retry list when details are missing', async () => {
const user = userEvent.setup()
const onShowRetryResultList = vi.fn()
render(
<RetryLogTrigger
nodeInfo={createNodeTracing({ retryDetail: undefined })}
onShowRetryResultList={onShowRetryResultList}
/>,
)
await user.click(screen.getByRole('button'))
expect(onShowRetryResultList).toHaveBeenCalledWith([])
})
})
})

View File

@ -1,4 +1,4 @@
import parseDSL from './graph-to-log-struct'
import parseDSL from '../graph-to-log-struct'
describe('parseDSL', () => {
it('should parse plain nodes correctly', () => {

View File

@ -0,0 +1,13 @@
import format from '..'
import { agentNodeData, multiStepsCircle, oneStepCircle } from '../data'

// Shape shared by the fixtures: a raw log list (`in`) and the expected
// formatted tree (`expect`). Fixtures are plain data, hence the cast to the
// formatter's input type.
type AgentFixture = { in: unknown, expect: unknown }

// Runs a fixture through format() and compares against its expected output.
const expectFormatted = (fixture: AgentFixture) => {
  expect(format(fixture.in as Parameters<typeof format>[0])).toEqual(fixture.expect)
}

describe('agent', () => {
  it('list should transform to tree', () => {
    expectFormatted(agentNodeData)
  })
  it('list should remove circle log item', () => {
    expectFormatted(oneStepCircle)
    expectFormatted(multiStepsCircle)
  })
})

View File

@ -1,15 +0,0 @@
import format from '.'
import { agentNodeData, multiStepsCircle, oneStepCircle } from './data'

// Tests for the agent log formatter: a flat log list is transformed into a
// tree, and circular parent/child references are dropped.
describe('agent', () => {
  it('list should transform to tree', () => {
    // Fixtures are plain JSON, so cast through unknown to the formatter's
    // input type instead of silencing the checker with `any`.
    expect(format(agentNodeData.in as unknown as Parameters<typeof format>[0])).toEqual(agentNodeData.expect)
  })
  it('list should remove circle log item', () => {
    expect(format(oneStepCircle.in as unknown as Parameters<typeof format>[0])).toEqual(oneStepCircle.expect)
    expect(format(multiStepsCircle.in as unknown as Parameters<typeof format>[0])).toEqual(multiStepsCircle.expect)
  })
})

View File

@ -1,16 +1,16 @@
import type { NodeTracing } from '@/types/workflow'
import { noop } from 'es-toolkit/function'
import format from '.'
import graphToLogStruct from '../graph-to-log-struct'
import format from '..'
import graphToLogStruct from '../../graph-to-log-struct'
describe('iteration', () => {
const list = graphToLogStruct('start -> (iteration, iterationNode, plainNode1 -> plainNode2)')
// const [startNode, iterationNode, ...iterations] = list
const result = format(list as any, noop)
const result = format(list as NodeTracing[], noop)
it('result should have no nodes in iteration node', () => {
expect((result as any).find((item: any) => !!item.execution_metadata?.iteration_id)).toBeUndefined()
expect(result.find(item => !!item.execution_metadata?.iteration_id)).toBeUndefined()
})
// test('iteration should put nodes in details', () => {
// expect(result as any).toEqual([
// expect(result).toEqual([
// startNode,
// {
// ...iterationNode,

View File

@ -1,11 +1,12 @@
import type { NodeTracing } from '@/types/workflow'
import { noop } from 'es-toolkit/function'
import format from '.'
import graphToLogStruct from '../graph-to-log-struct'
import format from '..'
import graphToLogStruct from '../../graph-to-log-struct'
describe('loop', () => {
const list = graphToLogStruct('start -> (loop, loopNode, plainNode1 -> plainNode2)')
const [startNode, loopNode, ...loops] = list
const result = format(list as any, noop)
const result = format(list as NodeTracing[], noop)
it('result should have no nodes in loop node', () => {
expect(result.find(item => !!item.execution_metadata?.loop_id)).toBeUndefined()
})

View File

@ -1,11 +1,12 @@
import format from '.'
import graphToLogStruct from '../graph-to-log-struct'
import type { NodeTracing } from '@/types/workflow'
import format from '..'
import graphToLogStruct from '../../graph-to-log-struct'
describe('retry', () => {
// retry nodeId:1 3 times.
const steps = graphToLogStruct('start -> (retry, retryNode, 3)')
const [startNode, retryNode, ...retryDetail] = steps
const result = format(steps as any)
const result = format(steps as NodeTracing[])
it('should have no retry status nodes', () => {
expect(result.find(item => item.status === 'retry')).toBeUndefined()
})