fix: populate sequence in workflow LLM generation and render outputs_as_generation

This commit is contained in:
Novice
2026-03-25 16:47:02 +08:00
parent 10c1c96ea5
commit 6189d3f778
9 changed files with 183 additions and 42 deletions

View File

@ -2104,6 +2104,7 @@ class LLMNode(Node[LLMNodeData]):
"name": tool_call.name,
"arguments": tool_call.arguments,
"output": tool_call.output,
"result": tool_call.output,
"files": files,
"status": tool_call.status.value if hasattr(tool_call.status, "value") else tool_call.status,
"elapsed_time": tool_call.elapsed_time,
@ -2509,20 +2510,21 @@ class LLMNode(Node[LLMNodeData]):
content_position = 0
tool_call_seen_index: dict[str, int] = {}
for trace_segment in trace_state.trace_segments:
if trace_segment.type == "thought":
sequence.append({"type": "reasoning", "index": reasoning_index})
reasoning_index += 1
elif trace_segment.type == "content":
segment_text = trace_segment.text or ""
start = content_position
end = start + len(segment_text)
sequence.append({"type": "content", "start": start, "end": end})
content_position = end
elif trace_segment.type == "tool_call":
tool_id = trace_segment.tool_call.id if trace_segment.tool_call and trace_segment.tool_call.id else ""
if tool_id not in tool_call_seen_index:
tool_call_seen_index[tool_id] = len(tool_call_seen_index)
sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]})
if trace_segment.type == "model" and isinstance(trace_segment.output, ModelTraceSegment):
model_output = trace_segment.output
if model_output.reasoning:
sequence.append({"type": "reasoning", "index": reasoning_index})
reasoning_index += 1
if model_output.text:
start = content_position
end = start + len(model_output.text)
sequence.append({"type": "content", "start": start, "end": end})
content_position = end
for tc in model_output.tool_calls:
tool_id = tc.id or ""
if tool_id not in tool_call_seen_index:
tool_call_seen_index[tool_id] = len(tool_call_seen_index)
sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]})
tool_calls_for_generation: list[ToolCallResult] = []
for log in agent_context.agent_logs:

View File

@ -1,6 +1,6 @@
import type { ChatMessageRes, IChatItem } from './chat/type'
import type { ChatItem, ChatItemInTree } from './types'
import type { LLMGenerationItem } from '@/types/workflow'
import type { LLMGenerationItem, WorkflowGenerationValue } from '@/types/workflow'
import { v4 as uuidV4 } from 'uuid'
import { UUID_NIL } from './constants'
@ -298,9 +298,78 @@ const buildLLMGenerationItemsFromHistorySequence = (message: ChatMessageRes): {
return { llmGenerationItems, message: result }
}
/**
 * Builds chat generation items (text / thought / tool segments) from the
 * outputs of a workflow run whose LLM node recorded generation data.
 *
 * When a value carries a non-empty `sequence`, segments are replayed in their
 * recorded order so interleaved reasoning, content and tool calls render in
 * the positions the model produced them. Without a sequence (older payloads),
 * it falls back to: all reasoning first, then tool calls, then the content
 * as a single text item.
 *
 * @param outputs map of output variable name to its generation value
 * @returns the item list plus the concatenated text message, or null when
 *          nothing renderable was found
 */
const buildLLMGenerationItemsFromWorkflowOutputs = (outputs: Record<string, WorkflowGenerationValue>): {
  llmGenerationItems: LLMGenerationItem[]
  message: string
} | null => {
  const llmGenerationItems: LLMGenerationItem[] = []
  let message = ''
  const pushToolCall = (tc: WorkflowGenerationValue['tool_calls'][number]) => {
    llmGenerationItems.push({
      id: uuidV4(),
      type: 'tool',
      toolName: tc.name,
      toolArguments: tc.arguments,
      // prefer the newer `result` field; `output` is the legacy name
      toolOutput: tc.result ?? tc.output,
      toolDuration: tc.elapsed_time,
      toolIcon: tc.icon,
      toolIconDark: tc.icon_dark,
    })
  }
  for (const value of Object.values(outputs)) {
    // Defensive defaults: a single malformed value (e.g. a payload from an
    // older backend that lacks `sequence`) should degrade gracefully instead
    // of throwing and discarding every other output key.
    const content = value?.content ?? ''
    const reasoningContent = value?.reasoning_content ?? []
    const toolCalls = value?.tool_calls ?? []
    const sequence = value?.sequence ?? []
    if (sequence.length > 0) {
      for (const segment of sequence) {
        switch (segment.type) {
          case 'content': {
            const text = content.substring(segment.start, segment.end)
            // skip whitespace-only slices but append the text verbatim
            if (text.trim()) {
              message += text
              llmGenerationItems.push({ id: uuidV4(), type: 'text', text, textCompleted: true })
            }
            break
          }
          case 'reasoning': {
            const reasoning = reasoningContent[segment.index]
            if (reasoning)
              llmGenerationItems.push({ id: uuidV4(), type: 'thought', thoughtOutput: reasoning, thoughtCompleted: true })
            break
          }
          case 'tool_call': {
            const tc = toolCalls[segment.index]
            if (tc)
              pushToolCall(tc)
            break
          }
        }
      }
    }
    else {
      // Legacy fallback: no ordering information, so render reasoning first,
      // then tool calls, then the whole content in one block.
      for (const reasoning of reasoningContent) {
        if (reasoning)
          llmGenerationItems.push({ id: uuidV4(), type: 'thought', thoughtOutput: reasoning, thoughtCompleted: true })
      }
      for (const tc of toolCalls) {
        if (tc)
          pushToolCall(tc)
      }
      if (content.trim()) {
        message += content
        llmGenerationItems.push({ id: uuidV4(), type: 'text', text: content, textCompleted: true })
      }
    }
  }
  if (llmGenerationItems.length === 0 && !message)
    return null
  return { llmGenerationItems, message }
}
export {
buildChatItemTree,
buildLLMGenerationItemsFromHistorySequence,
buildLLMGenerationItemsFromWorkflowOutputs,
getLastAnswer,
getProcessedInputsFromUrlParams,
getProcessedSystemVariablesFromUrlParams,

View File

@ -24,7 +24,7 @@ const mockRunDetail: WorkflowRunDetailResponse = {
inputs: JSON.stringify({ question: 'How do I reset my password?' }, null, 2),
inputs_truncated: false,
status: 'succeeded',
outputs: JSON.stringify({ answer: 'Follow the reset link we just emailed you.' }, null, 2),
outputs: { answer: 'Follow the reset link we just emailed you.' },
outputs_truncated: false,
total_steps: 3,
created_by_role: 'account',

View File

@ -36,6 +36,9 @@ export const useWorkflowFinished = () => {
draft.resultTabActive = true
draft.resultText = firstOutputVal
}
else if (out && typeof out === 'object' && Object.keys(out).length > 0) {
draft.resultTabActive = true
}
}))
}, [workflowStore])

View File

@ -50,7 +50,7 @@ const createRunDetail = (overrides: Partial<WorkflowRunDetailResponse> = {}): Wo
inputs: '{}',
inputs_truncated: false,
status: 'succeeded',
outputs: '{}',
outputs: {},
outputs_truncated: false,
total_steps: 1,
created_by_role: 'account',

View File

@ -153,6 +153,7 @@ const RunPanel: FC<RunProps> = ({
{!loading && currentTab === 'RESULT' && runDetail && (
<OutputPanel
outputs={runDetail.outputs}
outputsAsGeneration={runDetail.outputs_as_generation}
error={runDetail.error}
height={height}
/>

View File

@ -1,7 +1,11 @@
'use client'
import type { FC } from 'react'
import type { JsonValue } from '@/app/components/workflow/types'
import type { FileResponse, WorkflowGenerationValue } from '@/types/workflow'
import { useMemo } from 'react'
import GenerationContent from '@/app/components/base/chat/chat/answer/generation-content'
import LoadingAnim from '@/app/components/base/chat/chat/loading-anim'
import { buildLLMGenerationItemsFromWorkflowOutputs } from '@/app/components/base/chat/utils'
import { FileList } from '@/app/components/base/file-uploader'
import { getProcessedFilesFromResponse } from '@/app/components/base/file-uploader/utils'
import { Markdown } from '@/app/components/base/markdown'
@ -9,9 +13,13 @@ import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import StatusContainer from '@/app/components/workflow/run/status-container'
// Type guard: a plain (non-array, non-null) object tagged with the Dify file
// identity marker. Anything else — primitives, null, arrays — is rejected.
const isDifyFile = (val: JsonValue): val is JsonValue & { dify_model_identity: '__dify__file__' } => {
  if (val === null || typeof val !== 'object' || Array.isArray(val))
    return false
  return (val as { dify_model_identity?: unknown }).dify_model_identity === '__dify__file__'
}
type OutputPanelProps = {
isRunning?: boolean
outputs?: any
outputs?: Record<string, JsonValue>
outputsAsGeneration?: boolean
error?: string
height?: number
}
@ -19,10 +27,24 @@ type OutputPanelProps = {
const OutputPanel: FC<OutputPanelProps> = ({
isRunning,
outputs,
outputsAsGeneration,
error,
height,
}) => {
const generationResult = useMemo(() => {
if (!outputsAsGeneration || !outputs || typeof outputs !== 'object')
return null
try {
return buildLLMGenerationItemsFromWorkflowOutputs(outputs as Record<string, WorkflowGenerationValue>)
}
catch {
return null
}
}, [outputs, outputsAsGeneration])
const isTextOutput = useMemo(() => {
if (generationResult)
return false
if (!outputs || typeof outputs !== 'object')
return false
const keys = Object.keys(outputs)
@ -31,28 +53,38 @@ const OutputPanel: FC<OutputPanelProps> = ({
typeof value === 'string'
|| (Array.isArray(value) && value.every(item => typeof item === 'string'))
)
}, [outputs])
}, [outputs, generationResult])
const fileList = useMemo(() => {
const fileList: any[] = []
if (!outputs)
return fileList
if (Object.keys(outputs).length > 1)
return fileList
if (!outputs || Object.keys(outputs).length > 1)
return []
const matched: FileResponse[] = []
for (const key in outputs) {
if (Array.isArray(outputs[key])) {
outputs[key].map((output: any) => {
if (output?.dify_model_identity === '__dify__file__')
fileList.push(output)
return null
})
const val = outputs[key]
if (Array.isArray(val)) {
for (const item of val) {
if (isDifyFile(item))
matched.push(item as unknown as FileResponse)
}
}
else if (outputs[key]?.dify_model_identity === '__dify__file__') {
fileList.push(outputs[key])
else if (isDifyFile(val)) {
matched.push(val as unknown as FileResponse)
}
}
return getProcessedFilesFromResponse(fileList)
return getProcessedFilesFromResponse(matched)
}, [outputs])
const hasGenerationToolOrThought = generationResult?.llmGenerationItems.some(
item => item.type === 'tool' || item.type === 'thought',
)
const textOutputContent = useMemo(() => {
if (!isTextOutput || !outputs)
return ''
const firstVal = outputs[Object.keys(outputs)[0]]
return Array.isArray(firstVal) ? firstVal.join('\n') : String(firstVal ?? '')
}, [isTextOutput, outputs])
return (
<div className="p-2">
{isRunning && (
@ -70,15 +102,22 @@ const OutputPanel: FC<OutputPanelProps> = ({
<Markdown content="No Output" />
</div>
)}
{generationResult && generationResult.llmGenerationItems.length > 0 && (
hasGenerationToolOrThought
? (
<div className="px-2 py-1">
<GenerationContent llmGenerationItems={generationResult.llmGenerationItems} />
</div>
)
: (
<div className="px-4 py-2">
<Markdown content={generationResult.message} />
</div>
)
)}
{isTextOutput && (
<div className="px-4 py-2">
<Markdown
content={
Array.isArray(outputs[Object.keys(outputs)[0]])
? outputs[Object.keys(outputs)[0]].join('\n')
: (outputs[Object.keys(outputs)[0]] || '')
}
/>
<Markdown content={textOutputContent} />
</div>
)}
{fileList.length > 0 && (
@ -91,7 +130,7 @@ const OutputPanel: FC<OutputPanelProps> = ({
/>
</div>
)}
{!isTextOutput && outputs && Object.keys(outputs).length > 0 && height! > 0 && (
{!isTextOutput && !generationResult && outputs && Object.keys(outputs).length > 0 && height! > 0 && (
<div className="flex flex-col gap-2">
<CodeEditor
showFileList

View File

@ -2,6 +2,7 @@ import type { Viewport } from 'reactflow'
import type { Metadata } from '@/app/components/base/chat/chat/type'
import type {
Edge,
JsonValue,
Node,
} from '@/app/components/workflow/types'
import type { VisionFile } from '@/types/app'
@ -297,7 +298,8 @@ export type WorkflowRunDetailResponse = {
inputs: string
inputs_truncated: boolean
status: 'running' | 'succeeded' | 'failed' | 'stopped'
outputs?: string
outputs?: Record<string, JsonValue>
outputs_as_generation?: boolean
outputs_truncated: boolean
outputs_full_content?: {
download_url: string

View File

@ -124,6 +124,31 @@ export type LLMLogItem = {
sequence: SequenceSegment[]
}
/**
 * One tool invocation recorded in a workflow LLM node's generation output.
 */
export type WorkflowGenerationToolCall = {
  id: string
  name: string
  arguments: string
  // `output` is the legacy field name; newer backend payloads emit `result`
  // alongside it with the same value — consumers prefer `result`.
  output?: string
  result?: string
  files?: string[]
  status?: string
  // Elapsed execution time of the tool call — presumably seconds; confirm
  // the unit against the backend serializer.
  elapsed_time?: number
  icon?: string | IconObject
  icon_dark?: string | IconObject
}

/**
 * Ordering information for interleaved generation output. A `content` segment
 * references a [start, end) slice of the full content string; `reasoning` and
 * `tool_call` segments reference an index into the reasoning-content and
 * tool-calls arrays respectively.
 */
export type WorkflowGenerationSequenceSegment
  = | { type: 'content', start: number, end: number }
  | { type: 'reasoning', index: number }
  | { type: 'tool_call', index: number }

/**
 * Generation payload of a single workflow output variable: the final text,
 * any reasoning blocks, the tool calls made, and the replay `sequence` that
 * orders all three.
 */
export type WorkflowGenerationValue = {
  content: string
  reasoning_content: string[]
  tool_calls: WorkflowGenerationToolCall[]
  sequence: WorkflowGenerationSequenceSegment[]
}
export type LLMTraceItem = {
type: 'model' | 'tool'
duration: number