test: add unit tests for agent and HTTP components, enhancing coverage for configuration and rendering behaviors

This commit is contained in:
CodingOnStar
2026-04-10 12:48:04 +08:00
parent 331158e4bf
commit 8907c6787e
30 changed files with 5315 additions and 767 deletions

View File

@ -0,0 +1,249 @@
import type { ReactNode } from 'react'
import type { AgentNodeType } from '../types'
import type useConfig from '../use-config'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import { render, screen } from '@testing-library/react'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import { VarType } from '../../tool/types'
import Node from '../node'
// Spies are created via vi.hoisted so the vi.mock factories below (which
// vitest hoists above the import statements) can reference them safely.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockModelBar = vi.hoisted(() => vi.fn())
const mockToolIcon = vi.hoisted(() => vi.fn())
// Replace the node's config hook so each test controls its return value.
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
// i18n helper stub: returns the raw string or the en_US text so assertions
// can match on literal copy.
vi.mock('@/hooks/use-i18n', () => ({
  useRenderI18nObject: () => (value: string | { en_US?: string }) => typeof value === 'string' ? value : value.en_US || '',
}))
// ModelBar stub renders "<param>:<provider>/<model>" (or "<param>:empty-model"
// when no provider is set) and records its props for call-count assertions.
vi.mock('../components/model-bar', () => ({
  ModelBar: (props: { provider?: string, model?: string, param: string }) => {
    mockModelBar(props)
    return <div>{props.provider ? `${props.param}:${props.provider}/${props.model}` : `${props.param}:empty-model`}</div>
  },
}))
// ToolIcon stub renders "tool:<providerName>" and records its props.
vi.mock('../components/tool-icon', () => ({
  ToolIcon: (props: { providerName: string }) => {
    mockToolIcon(props)
    return <div>{`tool:${props.providerName}`}</div>
  },
}))
// Structural stubs: Group/GroupLabel simply render label and children.
vi.mock('../../_base/components/group', () => ({
  Group: ({ label, children }: { label: ReactNode, children: ReactNode }) => (
    <div>
      <div>{label}</div>
      {children}
    </div>
  ),
  GroupLabel: ({ className, children }: { className?: string, children: ReactNode }) => <div className={className}>{children}</div>,
}))
// SettingItem stub flattens label/status/tooltip into one "label:status:tooltip"
// text node (status defaults to 'normal') so tests can match on it directly.
vi.mock('../../_base/components/setting-item', () => ({
  SettingItem: ({
    label,
    status,
    tooltip,
    children,
  }: {
    label: ReactNode
    status?: string
    tooltip?: string
    children?: ReactNode
  }) => (
    <div>
      {`${label}:${status || 'normal'}:${tooltip || ''}`}
      {children}
    </div>
  ),
}))
/**
 * Builds a fully-populated strategy-parameter fixture (defaults describe a
 * required model selector); individual fields can be replaced via `overrides`.
 */
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => {
  const defaults: StrategyParamItem = {
    name: 'requiredModel',
    type: FormTypeEnum.modelSelector,
    required: true,
    label: { en_US: 'Required Model' } as StrategyParamItem['label'],
    help: { en_US: 'Required model help' } as StrategyParamItem['help'],
    placeholder: { en_US: 'Required model placeholder' } as StrategyParamItem['placeholder'],
    scope: 'global',
    default: null,
    options: [],
    template: { enabled: false },
    auto_generate: { type: 'none' },
  }
  return { ...defaults, ...overrides }
}
/**
 * Agent node data fixture: one configured optional model parameter plus
 * single- and multi-tool selector values. Override any field via `overrides`.
 */
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => {
  const agentParameters: AgentNodeType['agent_parameters'] = {
    optionalModel: {
      type: VarType.constant,
      value: { provider: 'openai', model: 'gpt-4o' },
    },
    toolParam: {
      type: VarType.constant,
      value: { provider_name: 'author/tool-a' },
    },
    multiToolParam: {
      type: VarType.constant,
      value: [
        { provider_name: 'author/tool-b' },
        { provider_name: 'author/tool-c' },
      ],
    },
  }
  return {
    title: 'Agent',
    desc: '',
    type: BlockEnum.Agent,
    output_schema: {},
    agent_strategy_provider_name: 'provider/agent',
    agent_strategy_name: 'react',
    agent_strategy_label: 'React Agent',
    plugin_unique_identifier: 'provider/agent:1.0.0',
    agent_parameters: agentParameters,
    ...overrides,
  }
}
// Builds a complete stand-in for the value returned by `useConfig`, so each
// test only overrides the pieces it cares about.
// NOTE(review): the `as never` / `as StrategyParamItem[...]` casts widen
// minimal fixtures to the project types — assumed acceptable for tests.
const createConfigResult = (overrides: Partial<ReturnType<typeof useConfig>> = {}): ReturnType<typeof useConfig> => ({
  readOnly: false,
  inputs: createData(),
  setInputs: vi.fn(),
  handleVarListChange: vi.fn(),
  handleAddVariable: vi.fn(),
  // Strategy metadata rendered by the node: one required and one optional
  // model selector, plus single- and multi-tool selectors.
  currentStrategy: {
    identity: {
      author: 'provider',
      name: 'react',
      icon: 'icon',
      label: { en_US: 'React Agent' } as StrategyParamItem['label'],
      provider: 'provider/agent',
    },
    parameters: [
      createStrategyParam(),
      createStrategyParam({
        name: 'optionalModel',
        required: false,
      }),
      createStrategyParam({
        name: 'toolParam',
        type: FormTypeEnum.toolSelector,
        required: false,
      }),
      createStrategyParam({
        name: 'multiToolParam',
        type: FormTypeEnum.multiToolSelector,
        required: false,
      }),
    ],
    description: { en_US: 'agent description' } as StrategyParamItem['label'],
    output_schema: {},
    features: [],
  },
  formData: {},
  onFormChange: vi.fn(),
  // isExistInPlugin: false is what the 'error' status assertions rely on.
  currentStrategyStatus: {
    plugin: { source: 'marketplace', installed: true },
    isExistInPlugin: false,
  },
  strategyProvider: undefined,
  pluginDetail: ({
    declaration: {
      label: { en_US: 'Plugin Marketplace' } as never,
    },
  } as never),
  availableVars: [],
  availableNodesWithParent: [],
  outputSchema: [],
  handleMemoryChange: vi.fn(),
  isChatMode: true,
  ...overrides,
})
describe('agent/node', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Default: a fully configured strategy (see createConfigResult).
    mockUseConfig.mockReturnValue(createConfigResult())
  })
  it('renders the not-set state when no strategy is configured', () => {
    // With no strategy selected the node should fall back to the
    // "strategyNotSet" setting item and render no model bars or tool icons.
    mockUseConfig.mockReturnValue(createConfigResult({
      inputs: createData({
        agent_strategy_name: undefined,
        agent_strategy_label: undefined,
        agent_parameters: {},
      }),
      currentStrategy: undefined,
    }))
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    expect(screen.getByText('workflow.nodes.agent.strategyNotSet:normal:')).toBeInTheDocument()
    expect(mockModelBar).not.toHaveBeenCalled()
    expect(mockToolIcon).not.toHaveBeenCalled()
  })
  it('renders strategy status, required and selected model bars, and tool icons', () => {
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    // The default fixture has isExistInPlugin: false, so the strategy
    // setting item carries the 'error' status and shows the plugin label.
    expect(screen.getByText(/workflow.nodes.agent.strategy.shortLabel:error:/)).toHaveTextContent('React Agent')
    expect(screen.getByText(/workflow.nodes.agent.strategy.shortLabel:error:/)).toHaveTextContent('Plugin Marketplace')
    // Required model param has no value -> 'empty-model' placeholder; the
    // optional one is configured -> provider/model rendered.
    expect(screen.getByText('requiredModel:empty-model')).toBeInTheDocument()
    expect(screen.getByText('optionalModel:openai/gpt-4o')).toBeInTheDocument()
    // One icon per configured tool provider (single + two multi entries).
    expect(screen.getByText('tool:author/tool-a')).toBeInTheDocument()
    expect(screen.getByText('tool:author/tool-b')).toBeInTheDocument()
    expect(screen.getByText('tool:author/tool-c')).toBeInTheDocument()
    expect(mockModelBar).toHaveBeenCalledTimes(2)
    expect(mockToolIcon).toHaveBeenCalledTimes(3)
  })
  it('skips optional models and empty tool values when no configuration is provided', () => {
    // Only optional params remain and agent_parameters is empty, so nothing
    // model/tool-related should render at all.
    mockUseConfig.mockReturnValue(createConfigResult({
      inputs: createData({
        agent_parameters: {},
      }),
      currentStrategy: {
        ...createConfigResult().currentStrategy!,
        parameters: [
          createStrategyParam({
            name: 'optionalModel',
            required: false,
          }),
          createStrategyParam({
            name: 'toolParam',
            type: FormTypeEnum.toolSelector,
            required: false,
          }),
        ],
      },
      currentStrategyStatus: {
        plugin: { source: 'marketplace', installed: true },
        isExistInPlugin: true,
      },
    }))
    render(
      <Node
        id="agent-node"
        data={createData()}
      />,
    )
    expect(mockModelBar).not.toHaveBeenCalled()
    expect(mockToolIcon).not.toHaveBeenCalled()
    expect(screen.queryByText('optionalModel:empty-model')).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,297 @@
import type { ReactNode } from 'react'
import type { AgentNodeType } from '../types'
import type useConfig from '../use-config'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import type { NodePanelProps } from '@/app/components/workflow/types'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import Panel from '../panel'
import { AgentFeature } from '../types'
// Hoisted spies so the vi.mock factories below (hoisted above imports by
// vitest) can reference them safely.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockResetEditor = vi.hoisted(() => vi.fn())
const mockAgentStrategy = vi.hoisted(() => vi.fn())
const mockMemoryConfig = vi.hoisted(() => vi.fn())
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
// Workflow store stub: the panel only reads the prompt-editor rerender setter.
vi.mock('../../../store', () => ({
  useStore: (selector: (state: { setControlPromptEditorRerenderKey: typeof mockResetEditor }) => unknown) => selector({
    setControlPromptEditorRerenderKey: mockResetEditor,
  }),
}))
// AgentStrategy stub records its props and exposes two buttons that replay
// canned onStrategyChange / onFormValueChange payloads, letting the tests
// drive the panel's callbacks without the real component.
vi.mock('../../_base/components/agent-strategy', () => ({
  AgentStrategy: (props: {
    strategy?: {
      agent_strategy_provider_name: string
      agent_strategy_name: string
      agent_strategy_label: string
      agent_output_schema: AgentNodeType['output_schema']
      plugin_unique_identifier: string
      meta?: AgentNodeType['meta']
    }
    formSchema: Array<{ variable: string, tooltip?: StrategyParamItem['help'] }>
    formValue: Record<string, unknown>
    onStrategyChange: (strategy: {
      agent_strategy_provider_name: string
      agent_strategy_name: string
      agent_strategy_label: string
      agent_output_schema: AgentNodeType['output_schema']
      plugin_unique_identifier: string
      meta?: AgentNodeType['meta']
    }) => void
    onFormValueChange: (value: Record<string, unknown>) => void
  }) => {
    mockAgentStrategy(props)
    return (
      <div>
        <button
          type="button"
          onClick={() => props.onStrategyChange({
            agent_strategy_provider_name: 'provider/updated',
            agent_strategy_name: 'updated',
            agent_strategy_label: 'Updated Strategy',
            agent_output_schema: {
              properties: {
                structured: {
                  type: 'string',
                  description: 'structured output',
                },
              },
            },
            plugin_unique_identifier: 'provider/updated:1.0.0',
            meta: {
              version: '2.0.0',
            } as AgentNodeType['meta'],
          })}
        >
          change-strategy
        </button>
        <button type="button" onClick={() => props.onFormValueChange({ instruction: 'Use the tool' })}>
          change-form
        </button>
      </div>
    )
  },
}))
// MemoryConfig stub: a single button that emits a canned memory payload.
vi.mock('../../_base/components/memory-config', () => ({
  __esModule: true,
  default: (props: {
    readonly?: boolean
    config: { data?: AgentNodeType['memory'] }
    onChange: (value?: AgentNodeType['memory']) => void
  }) => {
    mockMemoryConfig(props)
    return (
      <button
        type="button"
        onClick={() => props.onChange({
          window: {
            enabled: true,
            size: 8,
          },
          query_prompt_template: 'history',
        } as AgentNodeType['memory'])}
      >
        change-memory
      </button>
    )
  },
}))
// Output vars render as "name:type:description" text for simple matching.
vi.mock('../../_base/components/output-vars', () => ({
  __esModule: true,
  default: ({ children }: { children: ReactNode }) => <div>{children}</div>,
  VarItem: ({ name, type, description }: { name: string, type: string, description?: string }) => (
    <div>{`${name}:${type}:${description || ''}`}</div>
  ),
}))
/**
 * Strategy-parameter fixture; defaults describe the required 'instruction'
 * field. Replace any field through `overrides`.
 */
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => {
  const defaults: StrategyParamItem = {
    name: 'instruction',
    type: FormTypeEnum.any,
    required: true,
    label: { en_US: 'Instruction' } as StrategyParamItem['label'],
    help: { en_US: 'Instruction help' } as StrategyParamItem['help'],
    placeholder: { en_US: 'Instruction placeholder' } as StrategyParamItem['placeholder'],
    scope: 'global',
    default: null,
    options: [],
    template: { enabled: false },
    auto_generate: { type: 'none' },
  }
  return { ...defaults, ...overrides }
}
/**
 * Agent node data fixture with a one-property output schema ('summary') and
 * an initial (disabled) memory configuration.
 */
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => {
  const outputSchema: AgentNodeType['output_schema'] = {
    properties: {
      summary: {
        type: 'string',
        description: 'summary output',
      },
    },
  }
  const memory = {
    window: {
      enabled: false,
      size: 3,
    },
    query_prompt_template: '',
  } as AgentNodeType['memory']
  return {
    title: 'Agent',
    desc: '',
    type: BlockEnum.Agent,
    output_schema: outputSchema,
    agent_strategy_provider_name: 'provider/agent',
    agent_strategy_name: 'react',
    agent_strategy_label: 'React Agent',
    plugin_unique_identifier: 'provider/agent:1.0.0',
    meta: { version: '1.0.0' } as AgentNodeType['meta'],
    memory,
    ...overrides,
  }
}
// Complete stand-in for useConfig's return value; tests override per case.
const createConfigResult = (overrides: Partial<ReturnType<typeof useConfig>> = {}): ReturnType<typeof useConfig> => ({
  readOnly: false,
  inputs: createData(),
  setInputs: vi.fn(),
  handleVarListChange: vi.fn(),
  handleAddVariable: vi.fn(),
  currentStrategy: {
    identity: {
      author: 'provider',
      name: 'react',
      icon: 'icon',
      label: { en_US: 'React Agent' } as StrategyParamItem['label'],
      provider: 'provider/agent',
    },
    parameters: [
      createStrategyParam(),
      createStrategyParam({
        name: 'modelParam',
        type: FormTypeEnum.modelSelector,
        required: false,
      }),
    ],
    description: { en_US: 'agent description' } as StrategyParamItem['label'],
    output_schema: {},
    // HISTORY_MESSAGES plus isChatMode: true enables the memory section.
    features: [AgentFeature.HISTORY_MESSAGES],
  },
  formData: {
    instruction: 'Plan and answer',
  },
  onFormChange: vi.fn(),
  currentStrategyStatus: {
    plugin: { source: 'marketplace', installed: true },
    isExistInPlugin: true,
  },
  strategyProvider: undefined,
  pluginDetail: undefined,
  availableVars: [],
  availableNodesWithParent: [],
  // Display form of createData().output_schema's 'summary' property.
  outputSchema: [{
    name: 'summary',
    type: 'String',
    description: 'summary output',
  }],
  handleMemoryChange: vi.fn(),
  isChatMode: true,
  ...overrides,
})
// Panel props are not exercised by these tests; an empty cast suffices.
const panelProps = {} as NodePanelProps<AgentNodeType>['panelProps']
describe('agent/panel', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseConfig.mockReturnValue(createConfigResult())
  })
  it('renders strategy data, forwards strategy and form updates, and exposes output vars', async () => {
    const user = userEvent.setup()
    const setInputs = vi.fn()
    const onFormChange = vi.fn()
    const handleMemoryChange = vi.fn()
    mockUseConfig.mockReturnValue(createConfigResult({
      setInputs,
      onFormChange,
      handleMemoryChange,
    }))
    render(
      <Panel
        id="agent-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    // Built-in agent output vars plus the custom 'summary' from outputSchema.
    expect(screen.getByText('text:String:workflow.nodes.agent.outputVars.text')).toBeInTheDocument()
    expect(screen.getByText('usage:object:workflow.nodes.agent.outputVars.usage')).toBeInTheDocument()
    expect(screen.getByText('files:Array[File]:workflow.nodes.agent.outputVars.files.title')).toBeInTheDocument()
    expect(screen.getByText('json:Array[Object]:workflow.nodes.agent.outputVars.json')).toBeInTheDocument()
    expect(screen.getByText('summary:String:summary output')).toBeInTheDocument()
    // The strategy form receives schemas derived from the strategy params
    // (help text surfaced as the tooltip) and the current form values.
    expect(mockAgentStrategy).toHaveBeenCalledWith(expect.objectContaining({
      formSchema: expect.arrayContaining([
        expect.objectContaining({
          variable: 'instruction',
          tooltip: { en_US: 'Instruction help' },
        }),
        expect.objectContaining({
          variable: 'modelParam',
        }),
      ]),
      formValue: {
        instruction: 'Plan and answer',
      },
    }))
    // Drive the three stub buttons and verify each callback path.
    await user.click(screen.getByRole('button', { name: 'change-strategy' }))
    await user.click(screen.getByRole('button', { name: 'change-form' }))
    await user.click(screen.getByRole('button', { name: 'change-memory' }))
    expect(setInputs).toHaveBeenCalledWith(expect.objectContaining({
      agent_strategy_provider_name: 'provider/updated',
      agent_strategy_name: 'updated',
      agent_strategy_label: 'Updated Strategy',
      plugin_unique_identifier: 'provider/updated:1.0.0',
      output_schema: expect.objectContaining({
        properties: expect.objectContaining({
          structured: expect.any(Object),
        }),
      }),
    }))
    expect(onFormChange).toHaveBeenCalledWith({ instruction: 'Use the tool' })
    expect(handleMemoryChange).toHaveBeenCalledWith(expect.objectContaining({
      query_prompt_template: 'history',
    }))
    // Changing the strategy should force the prompt editor to rerender once.
    expect(mockResetEditor).toHaveBeenCalledTimes(1)
  })
  it('hides memory config when chat mode support is unavailable', () => {
    // Neither chat mode nor the HISTORY_MESSAGES feature: no memory section.
    mockUseConfig.mockReturnValue(createConfigResult({
      isChatMode: false,
      currentStrategy: {
        ...createConfigResult().currentStrategy!,
        features: [],
      },
    }))
    render(
      <Panel
        id="agent-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(screen.queryByRole('button', { name: 'change-memory' })).not.toBeInTheDocument()
    expect(mockMemoryConfig).not.toHaveBeenCalled()
  })
})

View File

@ -0,0 +1,422 @@
import type { AgentNodeType } from '../types'
import type { StrategyParamItem } from '@/app/components/plugins/types'
import { act, renderHook, waitFor } from '@testing-library/react'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum, VarType as WorkflowVarType } from '@/app/components/workflow/types'
import { VarType } from '../../tool/types'
import useConfig, { useStrategyInfo } from '../use-config'
// Hoisted spies so the vi.mock factories below (hoisted above imports by
// vitest) can reference them safely.
const mockUseNodesReadOnly = vi.hoisted(() => vi.fn())
const mockUseIsChatMode = vi.hoisted(() => vi.fn())
const mockUseNodeCrud = vi.hoisted(() => vi.fn())
const mockUseVarList = vi.hoisted(() => vi.fn())
const mockUseAvailableVarList = vi.hoisted(() => vi.fn())
const mockUseStrategyProviderDetail = vi.hoisted(() => vi.fn())
const mockUseFetchPluginsInMarketPlaceByIds = vi.hoisted(() => vi.fn())
const mockUseCheckInstalled = vi.hoisted(() => vi.fn())
const mockGenerateAgentToolValue = vi.hoisted(() => vi.fn())
const mockToolParametersToFormSchemas = vi.hoisted(() => vi.fn())
// Workflow-level hooks consumed by useConfig.
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: (...args: unknown[]) => mockUseNodesReadOnly(...args),
  useIsChatMode: (...args: unknown[]) => mockUseIsChatMode(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseNodeCrud(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseVarList(...args),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-available-var-list', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseAvailableVarList(...args),
}))
// Service hooks: strategy provider detail plus marketplace/installation checks.
vi.mock('@/service/use-strategy', () => ({
  useStrategyProviderDetail: (...args: unknown[]) => mockUseStrategyProviderDetail(...args),
}))
vi.mock('@/service/use-plugins', () => ({
  useFetchPluginsInMarketPlaceByIds: (...args: unknown[]) => mockUseFetchPluginsInMarketPlaceByIds(...args),
  useCheckInstalled: (...args: unknown[]) => mockUseCheckInstalled(...args),
}))
// Form-schema helpers used when migrating legacy tool-selector values.
vi.mock('@/app/components/tools/utils/to-form-schema', () => ({
  generateAgentToolValue: (...args: unknown[]) => mockGenerateAgentToolValue(...args),
  toolParametersToFormSchemas: (...args: unknown[]) => mockToolParametersToFormSchemas(...args),
}))
/**
 * Strategy-parameter fixture; defaults describe the required 'instruction'
 * field. Replace any field through `overrides`.
 */
const createStrategyParam = (overrides: Partial<StrategyParamItem> = {}): StrategyParamItem => {
  const defaults: StrategyParamItem = {
    name: 'instruction',
    type: FormTypeEnum.any,
    required: true,
    label: { en_US: 'Instruction' } as StrategyParamItem['label'],
    help: { en_US: 'Instruction help' } as StrategyParamItem['help'],
    placeholder: { en_US: 'Instruction placeholder' } as StrategyParamItem['placeholder'],
    scope: 'global',
    default: null,
    options: [],
    template: { enabled: false },
    auto_generate: { type: 'none' },
  }
  return { ...defaults, ...overrides }
}
/**
 * Tool-selector value fixture in the legacy (pre tool_node_version '2')
 * shape: flat settings/parameters plus schema entries marking which field
 * belongs to the form ('form') and which to the LLM ('llm').
 */
const createToolValue = () => {
  const schemas = [
    { variable: 'api_key', form: 'form' },
    { variable: 'query', form: 'llm' },
  ]
  return {
    settings: { api_key: 'secret' },
    parameters: { query: 'weather' },
    schemas,
  }
}
/**
 * Agent node fixture with a two-property output schema (string 'summary' and
 * number-array 'items') plus one variable-typed and one constant parameter.
 */
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => {
  const outputSchema: AgentNodeType['output_schema'] = {
    properties: {
      summary: {
        type: 'string',
        description: 'summary output',
      },
      items: {
        type: 'array',
        items: {
          type: 'number',
        },
        description: 'items output',
      },
    },
  }
  const agentParameters: AgentNodeType['agent_parameters'] = {
    instruction: {
      type: VarType.variable,
      value: '#start.topic#',
    },
    modelParam: {
      type: VarType.constant,
      value: {
        provider: 'openai',
        model: 'gpt-4o',
      },
    },
  }
  return {
    title: 'Agent',
    desc: '',
    type: BlockEnum.Agent,
    output_schema: outputSchema,
    agent_strategy_provider_name: 'provider/agent',
    agent_strategy_name: 'react',
    agent_strategy_label: 'React Agent',
    plugin_unique_identifier: 'provider/agent:1.0.0',
    agent_parameters: agentParameters,
    meta: { version: '1.0.0' } as AgentNodeType['meta'],
    ...overrides,
  }
}
describe('agent/use-config', () => {
  // Shared spies; call history is reset in beforeEach via clearAllMocks.
  const providerRefetch = vi.fn()
  const marketplaceRefetch = vi.fn()
  const setInputs = vi.fn()
  const handleVarListChange = vi.fn()
  const handleAddVariable = vi.fn()
  // Mutable node data returned by the mocked useNodeCrud; individual tests
  // reassign it before rendering the hook.
  let currentInputs: AgentNodeType
  beforeEach(() => {
    vi.clearAllMocks()
    currentInputs = createData({
      tool_node_version: '2',
    })
    mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false, getNodesReadOnly: () => false })
    mockUseIsChatMode.mockReturnValue(true)
    mockUseNodeCrud.mockImplementation(() => ({
      inputs: currentInputs,
      setInputs,
    }))
    mockUseVarList.mockReturnValue({
      handleVarListChange,
      handleAddVariable,
    } as never)
    mockUseAvailableVarList.mockReturnValue({
      availableVars: [{
        nodeId: 'node-1',
        title: 'Start',
        vars: [{
          variable: 'topic',
          type: WorkflowVarType.string,
        }],
      }],
      availableNodesWithParent: [{
        nodeId: 'node-1',
        title: 'Start',
      }],
    } as never)
    // Provider detail: a 'react' strategy with instruction + model params.
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: false,
      data: {
        declaration: {
          strategies: [{
            identity: {
              name: 'react',
            },
            parameters: [
              createStrategyParam(),
              createStrategyParam({
                name: 'modelParam',
                type: FormTypeEnum.modelSelector,
                required: false,
              }),
            ],
          }],
        },
      },
      refetch: providerRefetch,
    } as never)
    // Marketplace lookup finds the plugin -> source resolves to 'marketplace'.
    mockUseFetchPluginsInMarketPlaceByIds.mockReturnValue({
      isLoading: false,
      data: {
        data: {
          plugins: [{ id: 'provider/agent' }],
        },
      },
      refetch: marketplaceRefetch,
    } as never)
    mockUseCheckInstalled.mockReturnValue({
      data: {
        plugins: [{
          declaration: {
            label: { en_US: 'Installed Agent Plugin' },
          },
        }],
      },
    } as never)
    // Pass tool parameters through unchanged; tag generated values with the
    // llm/setting kind so the migration output is recognizable in assertions.
    mockToolParametersToFormSchemas.mockImplementation(value => value as never)
    mockGenerateAgentToolValue.mockImplementation((_value, schemas, isLLM) => ({
      kind: isLLM ? 'llm' : 'setting',
      fields: (schemas as Array<{ variable: string }>).map(item => item.variable),
    }) as never)
  })
  it('returns an undefined strategy status while strategy data is still loading and can refetch dependencies', () => {
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: true,
      isError: false,
      data: undefined,
      refetch: providerRefetch,
    } as never)
    const { result } = renderHook(() => useStrategyInfo('provider/agent', 'react'))
    expect(result.current.strategyStatus).toBeUndefined()
    expect(result.current.strategy).toBeUndefined()
    act(() => {
      result.current.refetch()
    })
    // refetch should fan out to both the provider and marketplace queries.
    expect(providerRefetch).toHaveBeenCalledTimes(1)
    expect(marketplaceRefetch).toHaveBeenCalledTimes(1)
  })
  it('resolves strategy status for external plugins that are missing or not installed', () => {
    // Provider query errors and marketplace returns no plugin -> the hook
    // should report an external, not-installed plugin with no strategy match.
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: true,
      data: {
        declaration: {
          strategies: [],
        },
      },
      refetch: providerRefetch,
    } as never)
    mockUseFetchPluginsInMarketPlaceByIds.mockReturnValue({
      isLoading: false,
      data: {
        data: {
          plugins: [],
        },
      },
      refetch: marketplaceRefetch,
    } as never)
    const { result } = renderHook(() => useStrategyInfo('provider/agent', 'react'))
    expect(result.current.strategyStatus).toEqual({
      plugin: {
        source: 'external',
        installed: false,
      },
      isExistInPlugin: false,
    })
  })
  it('exposes derived form data, strategy state, output schema, and setter helpers', () => {
    const { result } = renderHook(() => useConfig('agent-node', currentInputs))
    expect(result.current.readOnly).toBe(false)
    expect(result.current.isChatMode).toBe(true)
    // formData flattens agent_parameters down to their raw values.
    expect(result.current.formData).toEqual({
      instruction: '#start.topic#',
      modelParam: {
        provider: 'openai',
        model: 'gpt-4o',
      },
    })
    expect(result.current.currentStrategyStatus).toEqual({
      plugin: {
        source: 'marketplace',
        installed: true,
      },
      isExistInPlugin: true,
    })
    expect(result.current.availableVars).toHaveLength(1)
    expect(result.current.availableNodesWithParent).toEqual([{
      nodeId: 'node-1',
      title: 'Start',
    }])
    // JSON-schema types are mapped to display types (String, Array[Number]).
    expect(result.current.outputSchema).toEqual([
      { name: 'summary', type: 'String', description: 'summary output' },
      { name: 'items', type: 'Array[Number]', description: 'items output' },
    ])
    // Clear any setInputs calls made during mount so the Nth-call
    // assertions below only see the two explicit updates.
    setInputs.mockClear()
    act(() => {
      result.current.onFormChange({
        instruction: '#start.updated#',
        modelParam: {
          provider: 'anthropic',
          model: 'claude-sonnet',
        },
      })
      result.current.handleMemoryChange({
        window: {
          enabled: true,
          size: 6,
        },
        query_prompt_template: 'history',
      } as AgentNodeType['memory'])
    })
    // onFormChange re-wraps raw values into typed agent_parameters entries.
    expect(setInputs).toHaveBeenNthCalledWith(1, expect.objectContaining({
      agent_parameters: {
        instruction: {
          type: VarType.variable,
          value: '#start.updated#',
        },
        modelParam: {
          type: VarType.constant,
          value: {
            provider: 'anthropic',
            model: 'claude-sonnet',
          },
        },
      },
    }))
    expect(setInputs).toHaveBeenNthCalledWith(2, expect.objectContaining({
      memory: {
        window: {
          enabled: true,
          size: 6,
        },
        query_prompt_template: 'history',
      },
    }))
    // Var-list helpers are passed straight through from useVarList.
    expect(result.current.handleVarListChange).toBe(handleVarListChange)
    expect(result.current.handleAddVariable).toBe(handleAddVariable)
    expect(result.current.pluginDetail).toEqual({
      declaration: {
        label: { en_US: 'Installed Agent Plugin' },
      },
    })
  })
  it('formats legacy tool selector values before exposing the node config', async () => {
    // tool_node_version undefined marks legacy data: the hook should migrate
    // tool values via generateAgentToolValue and bump the version to '2'.
    currentInputs = createData({
      tool_node_version: undefined,
      agent_parameters: {
        toolParam: {
          type: VarType.constant,
          value: createToolValue(),
        },
        multiToolParam: {
          type: VarType.constant,
          value: [createToolValue()],
        },
      },
    })
    mockUseStrategyProviderDetail.mockReturnValue({
      isLoading: false,
      isError: false,
      data: {
        declaration: {
          strategies: [{
            identity: {
              name: 'react',
            },
            parameters: [
              createStrategyParam({
                name: 'toolParam',
                type: FormTypeEnum.toolSelector,
                required: false,
              }),
              createStrategyParam({
                name: 'multiToolParam',
                type: FormTypeEnum.multiToolSelector,
                required: false,
              }),
            ],
          }],
        },
      },
      refetch: providerRefetch,
    } as never)
    renderHook(() => useConfig('agent-node', currentInputs))
    await waitFor(() => {
      expect(setInputs).toHaveBeenCalledWith(expect.objectContaining({
        tool_node_version: '2',
        agent_parameters: expect.objectContaining({
          toolParam: expect.objectContaining({
            value: expect.objectContaining({
              // Shapes produced by the mocked generateAgentToolValue: form
              // fields become 'setting', llm fields become 'llm'.
              settings: {
                kind: 'setting',
                fields: ['api_key'],
              },
              parameters: {
                kind: 'llm',
                fields: ['query'],
              },
            }),
          }),
          multiToolParam: expect.objectContaining({
            value: [expect.objectContaining({
              settings: {
                kind: 'setting',
                fields: ['api_key'],
              },
              parameters: {
                kind: 'llm',
                fields: ['query'],
              },
            })],
          }),
        }),
      }))
    })
  })
})

View File

@ -0,0 +1,144 @@
import type { AgentNodeType } from '../types'
import type { InputVar } from '@/app/components/workflow/types'
import { renderHook } from '@testing-library/react'
import formatTracing from '@/app/components/workflow/run/utils/format-log'
import { BlockEnum, InputVarType } from '@/app/components/workflow/types'
import useNodeCrud from '../../_base/hooks/use-node-crud'
import { VarType } from '../../tool/types'
import { useStrategyInfo } from '../use-config'
import useSingleRunFormParams from '../use-single-run-form-params'
// Fully mock the tracing formatter and node-CRUD hook.
vi.mock('@/app/components/workflow/run/utils/format-log', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('../../_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
// Partially mock '../use-config': only useStrategyInfo is replaced; the rest
// of the module keeps its real implementation.
vi.mock('../use-config', async () => {
  const actual = await vi.importActual<typeof import('../use-config')>('../use-config')
  return {
    ...actual,
    useStrategyInfo: vi.fn(),
  }
})
// Typed handles onto the auto-mocked functions above.
const mockFormatTracing = vi.mocked(formatTracing)
const mockUseNodeCrud = vi.mocked(useNodeCrud)
const mockUseStrategyInfo = vi.mocked(useStrategyInfo)
/**
 * Agent node fixture: two variable-typed parameters (prompt, summary) and one
 * constant (count). Single-run forms should only care about the variables.
 */
const createData = (overrides: Partial<AgentNodeType> = {}): AgentNodeType => {
  const agentParameters: AgentNodeType['agent_parameters'] = {
    prompt: {
      type: VarType.variable,
      value: '#start.topic#',
    },
    summary: {
      type: VarType.variable,
      value: '#node-2.answer#',
    },
    count: {
      type: VarType.constant,
      value: 2,
    },
  }
  return {
    title: 'Agent',
    desc: '',
    type: BlockEnum.Agent,
    output_schema: {},
    agent_strategy_provider_name: 'provider/agent',
    agent_strategy_name: 'react',
    agent_strategy_label: 'React Agent',
    agent_parameters: agentParameters,
    ...overrides,
  }
}
describe('agent/use-single-run-form-params', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseNodeCrud.mockReturnValue({
      inputs: createData(),
      setInputs: vi.fn(),
    } as unknown as ReturnType<typeof useNodeCrud>)
    // Strategy exposes all three params; only the variable-typed node values
    // ('prompt'/'summary') should feed the single-run form.
    mockUseStrategyInfo.mockReturnValue({
      strategyProvider: undefined,
      strategy: {
        parameters: [
          { name: 'prompt', type: 'string' },
          { name: 'summary', type: 'string' },
          { name: 'count', type: 'number' },
        ],
      },
      strategyStatus: undefined,
      refetch: vi.fn(),
    } as unknown as ReturnType<typeof useStrategyInfo>)
    mockFormatTracing.mockReturnValue([{
      id: 'agent-node',
      status: 'succeeded',
    }] as unknown as ReturnType<typeof formatTracing>)
  })
  it('builds a single-run variable form, returns node info, and skips malformed dependent vars', () => {
    const setRunInputData = vi.fn()
    // Second entry has an undefined variable on purpose: it should still be
    // listed as a form input but excluded from getDependentVars().
    const getInputVars = vi.fn<() => InputVar[]>(() => [
      {
        label: 'Prompt',
        variable: '#start.topic#',
        type: InputVarType.textInput,
        required: true,
      },
      {
        label: 'Broken',
        variable: undefined as unknown as string,
        type: InputVarType.textInput,
        required: false,
      },
    ])
    const { result } = renderHook(() => useSingleRunFormParams({
      id: 'agent-node',
      payload: createData(),
      runInputData: { topic: 'finance' },
      runInputDataRef: { current: { topic: 'finance' } },
      getInputVars,
      setRunInputData,
      toVarInputs: () => [],
      runResult: { id: 'trace-1' } as never,
    }))
    // Only the variable-typed agent params are turned into input selectors.
    expect(getInputVars).toHaveBeenCalledWith(['#start.topic#', '#node-2.answer#'])
    expect(result.current.forms).toHaveLength(1)
    expect(result.current.forms[0].inputs).toHaveLength(2)
    expect(result.current.forms[0].values).toEqual({ topic: 'finance' })
    // nodeInfo comes from the formatted tracing entry matching this node id.
    expect(result.current.nodeInfo).toEqual({
      id: 'agent-node',
      status: 'succeeded',
    })
    result.current.forms[0].onChange({ topic: 'updated' })
    expect(setRunInputData).toHaveBeenCalledWith({ topic: 'updated' })
    // The malformed 'Broken' entry is dropped from the dependent-var list.
    expect(result.current.getDependentVars()).toEqual([
      ['start', 'topic'],
    ])
  })
  it('returns an empty form list when no variable input is required and no run result is available', () => {
    const { result } = renderHook(() => useSingleRunFormParams({
      id: 'agent-node',
      payload: createData(),
      runInputData: {},
      runInputDataRef: { current: {} },
      getInputVars: () => [],
      setRunInputData: vi.fn(),
      toVarInputs: () => [],
      runResult: undefined as never,
    }))
    expect(result.current.forms).toEqual([])
    expect(result.current.nodeInfo).toBeUndefined()
    expect(result.current.getDependentVars()).toEqual([])
  })
})

View File

@ -0,0 +1,67 @@
import type { HttpNodeType } from '../types'
import { render, screen } from '@testing-library/react'
import { BlockEnum } from '@/app/components/workflow/types'
import Node from '../node'
import { AuthorizationType, BodyType, Method } from '../types'
// Hoisted spy so the vi.mock factory (hoisted above imports) can use it.
const mockReadonlyInputWithSelectVar = vi.hoisted(() => vi.fn())
// Stub the readonly URL input: renders the raw value and records its props.
vi.mock('@/app/components/workflow/nodes/_base/components/readonly-input-with-select-var', () => ({
  __esModule: true,
  default: (props: { value: string, nodeId: string, className?: string }) => {
    mockReadonlyInputWithSelectVar(props)
    return <div data-testid="readonly-input">{props.value}</div>
  },
}))
/**
 * HTTP node fixture: a plain GET request with no auth, headers, params, or
 * body. Replace any field through `overrides`.
 */
const createData = (overrides: Partial<HttpNodeType> = {}): HttpNodeType => {
  const base: HttpNodeType = {
    title: 'HTTP Request',
    desc: '',
    type: BlockEnum.HttpRequest,
    variables: [],
    method: Method.get,
    url: 'https://api.example.com',
    authorization: { type: AuthorizationType.none },
    headers: '',
    params: '',
    body: { type: BodyType.none, data: [] },
    timeout: { connect: 5, read: 10, write: 15 },
    ssl_verify: true,
  }
  return { ...base, ...overrides }
}
describe('http/node', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('renders the request method and forwards the URL to the readonly input', () => {
    render(
      <Node
        id="http-node"
        data={createData({
          method: Method.post,
          url: 'https://api.example.com/users',
        })}
      />,
    )
    // The method value renders as text; the URL goes through the
    // readonly-input stub together with the owning node id.
    expect(screen.getByText('post')).toBeInTheDocument()
    expect(screen.getByTestId('readonly-input')).toHaveTextContent('https://api.example.com/users')
    expect(mockReadonlyInputWithSelectVar).toHaveBeenCalledWith(expect.objectContaining({
      nodeId: 'http-node',
      value: 'https://api.example.com/users',
    }))
  })
  it('renders nothing when the request URL is empty', () => {
    const { container } = render(
      <Node
        id="http-node"
        data={createData({ url: '' })}
      />,
    )
    expect(container).toBeEmptyDOMElement()
  })
})

View File

@ -0,0 +1,295 @@
import type { ReactNode } from 'react'
import type { HttpNodeType } from '../types'
import type { NodePanelProps } from '@/app/components/workflow/types'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum } from '@/app/components/workflow/types'
import Panel from '../panel'
import { AuthorizationType, BodyPayloadValueType, BodyType, Method } from '../types'
// Hoisted spies for the stubbed child components mocked below.
const mockUseConfig = vi.hoisted(() => vi.fn())
const mockAuthorizationModal = vi.hoisted(() => vi.fn())
const mockCurlPanel = vi.hoisted(() => vi.fn())
const mockApiInput = vi.hoisted(() => vi.fn())
const mockKeyValue = vi.hoisted(() => vi.fn())
const mockEditBody = vi.hoisted(() => vi.fn())
const mockTimeout = vi.hoisted(() => vi.fn())
// Prop contracts for the stubbed components, kept local to this test file.
type ApiInputProps = {
  method: Method
  url: string
  onMethodChange: (method: Method) => void
  onUrlChange: (url: string) => void
}
type KeyValueProps = {
  nodeId: string
  list: Array<{ key: string, value: string }>
  onChange: (value: Array<{ key: string, value: string }>) => void
  onAdd: () => void
}
type EditBodyProps = {
  payload: HttpNodeType['body']
  onChange: (value: HttpNodeType['body']) => void
}
type TimeoutProps = {
  payload: HttpNodeType['timeout']
  onChange: (value: HttpNodeType['timeout']) => void
}
// Replace use-config with a spy so tests fully control the panel's state.
vi.mock('../use-config', () => ({
  __esModule: true,
  default: (...args: unknown[]) => mockUseConfig(...args),
}))
// Authorization modal stub: records props and echoes the node id.
vi.mock('../components/authorization', () => ({
  __esModule: true,
  default: (props: { nodeId: string, payload: HttpNodeType['authorization'], onChange: (value: HttpNodeType['authorization']) => void, onHide: () => void }) => {
    mockAuthorizationModal(props)
    return <div data-testid="authorization-modal">{props.nodeId}</div>
  },
}))
// cURL import panel stub: records props and echoes the node id.
vi.mock('../components/curl-panel', () => ({
  __esModule: true,
  default: (props: { nodeId: string, onHide: () => void, handleCurlImport: (node: HttpNodeType) => void }) => {
    mockCurlPanel(props)
    return <div data-testid="curl-panel">{props.nodeId}</div>
  },
}))
// API input stub: shows "method:url" and exposes buttons that fire the
// method/url change callbacks with fixed values.
vi.mock('../components/api-input', () => ({
  __esModule: true,
  default: (props: ApiInputProps) => {
    mockApiInput(props)
    return (
      <div>
        <div>{`${props.method}:${props.url}`}</div>
        <button type="button" onClick={() => props.onMethodChange(Method.post)}>emit-method-change</button>
        <button type="button" onClick={() => props.onUrlChange('https://changed.example.com')}>emit-url-change</button>
      </div>
    )
  },
}))
// Key/value editor stub (rendered once for headers, once for params): lists
// entries and exposes buttons that trigger onChange/onAdd.
vi.mock('../components/key-value', () => ({
  __esModule: true,
  default: (props: KeyValueProps) => {
    mockKeyValue(props)
    return (
      <div>
        <div>{props.list.map(item => `${item.key}:${item.value}`).join(',')}</div>
        <button type="button" onClick={() => props.onChange([{ key: 'x-token', value: '123' }])}>
          emit-key-value-change
        </button>
        <button type="button" onClick={props.onAdd}>emit-key-value-add</button>
      </div>
    )
  },
}))
// Body editor stub: one button that submits a fixed JSON body payload.
vi.mock('../components/edit-body', () => ({
  __esModule: true,
  default: (props: EditBodyProps) => {
    mockEditBody(props)
    return (
      <button
        type="button"
        onClick={() => props.onChange({
          type: BodyType.json,
          data: [{ type: BodyPayloadValueType.text, value: '{"hello":"world"}' }],
        })}
      >
        emit-body-change
      </button>
    )
  },
}))
// Timeout editor stub: one button that patches only the connect timeout.
vi.mock('../components/timeout', () => ({
  __esModule: true,
  default: (props: TimeoutProps) => {
    mockTimeout(props)
    return (
      <button type="button" onClick={() => props.onChange({ ...props.payload, connect: 9 })}>
        emit-timeout-change
      </button>
    )
  },
}))
// Output vars: render children directly and print each var as "name:type".
vi.mock('@/app/components/workflow/nodes/_base/components/output-vars', () => ({
  __esModule: true,
  default: ({ children }: { children: ReactNode }) => <div>{children}</div>,
  VarItem: ({ name, type }: { name: string, type: string }) => <div>{`${name}:${type}`}</div>,
}))
/**
 * Builds a complete HttpNodeType payload for rendering the panel, with
 * sensible defaults that individual tests can override per field.
 */
function createData(overrides: Partial<HttpNodeType> = {}): HttpNodeType {
  const defaults: HttpNodeType = {
    title: 'HTTP Request',
    desc: '',
    type: BlockEnum.HttpRequest,
    variables: [],
    method: Method.get,
    url: 'https://api.example.com',
    authorization: { type: AuthorizationType.none },
    headers: '',
    params: '',
    body: { type: BodyType.none, data: [] },
    timeout: { connect: 5, read: 10, write: 15 },
    ssl_verify: true,
  }
  return { ...defaults, ...overrides }
}
// Panel-level props are irrelevant to these tests; an empty stub satisfies the type.
const panelProps = {} as NodePanelProps<HttpNodeType>['panelProps']
// Behavioral tests for the HTTP node configuration panel. All child editors
// are replaced with button-emitting stubs so each handler can be triggered
// deterministically via user events.
describe('http/panel', () => {
  // Spies returned by the mocked use-config hook; the panel must wire each
  // one to the matching child component callback.
  const handleMethodChange = vi.fn()
  const handleUrlChange = vi.fn()
  const setHeaders = vi.fn()
  const addHeader = vi.fn()
  const setParams = vi.fn()
  const addParam = vi.fn()
  const setBody = vi.fn()
  const showAuthorization = vi.fn()
  const hideAuthorization = vi.fn()
  const setAuthorization = vi.fn()
  const setTimeout = vi.fn()
  const showCurlPanel = vi.fn()
  const hideCurlPanel = vi.fn()
  const handleCurlImport = vi.fn()
  const handleSSLVerifyChange = vi.fn()
  // Baseline hook return value; tests override individual flags as needed.
  const createConfigResult = (overrides: Record<string, unknown> = {}) => ({
    readOnly: false,
    isDataReady: true,
    inputs: createData({
      authorization: { type: AuthorizationType.apiKey, config: null },
    }),
    handleMethodChange,
    handleUrlChange,
    headers: [{ key: 'accept', value: 'application/json' }],
    setHeaders,
    addHeader,
    params: [{ key: 'page', value: '1' }],
    setParams,
    addParam,
    setBody,
    isShowAuthorization: false,
    showAuthorization,
    hideAuthorization,
    setAuthorization,
    setTimeout,
    isShowCurlPanel: false,
    showCurlPanel,
    hideCurlPanel,
    handleCurlImport,
    handleSSLVerifyChange,
    ...overrides,
  })
  beforeEach(() => {
    vi.clearAllMocks()
    mockUseConfig.mockReturnValue(createConfigResult())
  })
  it('renders request fields, forwards child changes, and wires header operations', async () => {
    const user = userEvent.setup()
    render(
      <Panel
        id="http-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    // The API input stub echoes "method:url"; VarItem stubs print "name:type".
    expect(screen.getByText('get:https://api.example.com')).toBeInTheDocument()
    expect(screen.getByText('body:string')).toBeInTheDocument()
    expect(screen.getByText('status_code:number')).toBeInTheDocument()
    expect(screen.getByText('headers:object')).toBeInTheDocument()
    expect(screen.getByText('files:Array[File]')).toBeInTheDocument()
    // Two KeyValue stubs render in document order: headers first (index 0),
    // params second (index 1).
    await user.click(screen.getByRole('button', { name: 'emit-method-change' }))
    await user.click(screen.getByRole('button', { name: 'emit-url-change' }))
    await user.click(screen.getAllByRole('button', { name: 'emit-key-value-change' })[0]!)
    await user.click(screen.getAllByRole('button', { name: 'emit-key-value-add' })[0]!)
    await user.click(screen.getAllByRole('button', { name: 'emit-key-value-change' })[1]!)
    await user.click(screen.getAllByRole('button', { name: 'emit-key-value-add' })[1]!)
    await user.click(screen.getByRole('button', { name: 'emit-body-change' }))
    await user.click(screen.getByRole('button', { name: 'emit-timeout-change' }))
    await user.click(screen.getByText('workflow.nodes.http.authorization.authorization'))
    await user.click(screen.getByText('workflow.nodes.http.curl.title'))
    await user.click(screen.getByRole('switch'))
    expect(handleMethodChange).toHaveBeenCalledWith(Method.post)
    expect(handleUrlChange).toHaveBeenCalledWith('https://changed.example.com')
    expect(setHeaders).toHaveBeenCalledWith([{ key: 'x-token', value: '123' }])
    expect(addHeader).toHaveBeenCalledTimes(1)
    expect(setParams).toHaveBeenCalledWith([{ key: 'x-token', value: '123' }])
    expect(addParam).toHaveBeenCalledTimes(1)
    expect(setBody).toHaveBeenCalledWith({
      type: BodyType.json,
      data: [{ type: 'text', value: '{"hello":"world"}' }],
    })
    expect(setTimeout).toHaveBeenCalledWith(expect.objectContaining({ connect: 9 }))
    expect(showAuthorization).toHaveBeenCalledTimes(1)
    expect(showCurlPanel).toHaveBeenCalledTimes(1)
    // Clicking the SSL switch while it is on requests disabling verification.
    expect(handleSSLVerifyChange).toHaveBeenCalledWith(false)
    expect(mockApiInput).toHaveBeenCalledWith(expect.objectContaining({
      method: Method.get,
      url: 'https://api.example.com',
    }))
  })
  it('returns null before the config data is ready', () => {
    mockUseConfig.mockReturnValueOnce(createConfigResult({ isDataReady: false }))
    const { container } = render(
      <Panel
        id="http-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(container).toBeEmptyDOMElement()
  })
  it('renders auth and curl panels only when writable and toggled on', () => {
    mockUseConfig.mockReturnValueOnce(createConfigResult({
      isShowAuthorization: true,
      isShowCurlPanel: true,
    }))
    const { rerender } = render(
      <Panel
        id="http-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(screen.getByTestId('authorization-modal')).toHaveTextContent('http-node')
    expect(screen.getByTestId('curl-panel')).toHaveTextContent('http-node')
    // Re-render read-only: the modals must be suppressed even though their
    // visibility flags are still true.
    mockUseConfig.mockReturnValueOnce(createConfigResult({
      readOnly: true,
      isShowAuthorization: true,
      isShowCurlPanel: true,
    }))
    rerender(
      <Panel
        id="http-node"
        data={createData()}
        panelProps={panelProps}
      />,
    )
    expect(screen.queryByTestId('authorization-modal')).not.toBeInTheDocument()
    expect(screen.queryByTestId('curl-panel')).not.toBeInTheDocument()
    expect(screen.getByRole('switch')).toHaveAttribute('aria-disabled', 'true')
  })
})

View File

@ -0,0 +1,271 @@
import type { HttpNodeType } from '../types'
import { act, renderHook, waitFor } from '@testing-library/react'
import { useNodesReadOnly } from '@/app/components/workflow/hooks'
import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
import { useStore } from '@/app/components/workflow/store'
import { BlockEnum, VarType } from '@/app/components/workflow/types'
import useVarList from '../../_base/hooks/use-var-list'
import useKeyValueList from '../hooks/use-key-value-list'
import { APIType, AuthorizationType, BodyPayloadValueType, BodyType, Method } from '../types'
import useConfig from '../use-config'
// Mock every collaborator of use-config so only the hook's own wiring is
// under test.
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-var-list', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('../hooks/use-key-value-list', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/store', () => ({
  useStore: vi.fn(),
}))
// Typed handles to the auto-mocked modules above.
const mockUseNodesReadOnly = vi.mocked(useNodesReadOnly)
const mockUseNodeCrud = vi.mocked(useNodeCrud)
const mockUseVarList = vi.mocked(useVarList)
const mockUseKeyValueList = vi.mocked(useKeyValueList)
const mockUseStore = vi.mocked(useStore)
/**
 * Builds a full HttpNodeType payload as useConfig receives it. The body is a
 * raw JSON string so tests can observe the hook normalizing it into a
 * payload array.
 */
function createPayload(overrides: Partial<HttpNodeType> = {}): HttpNodeType {
  const defaults: HttpNodeType = {
    title: 'HTTP Request',
    desc: '',
    type: BlockEnum.HttpRequest,
    variables: [],
    method: Method.get,
    url: 'https://api.example.com',
    authorization: { type: AuthorizationType.none },
    headers: 'accept:application/json',
    params: 'page:1',
    body: {
      type: BodyType.json,
      data: '{"name":"alice"}',
    },
    timeout: { connect: 5, read: 10, write: 15 },
    ssl_verify: true,
  }
  return { ...defaults, ...overrides }
}
// Tests for the HTTP node's use-config hook: default hydration, body
// normalization, and every exposed mutation helper.
describe('http/use-config', () => {
  const mockSetInputs = vi.fn()
  const mockHandleVarListChange = vi.fn()
  const mockHandleAddVariable = vi.fn()
  const headerSetList = vi.fn()
  const headerAddItem = vi.fn()
  const headerToggle = vi.fn()
  const paramSetList = vi.fn()
  const paramAddItem = vi.fn()
  const paramToggle = vi.fn()
  // Mutable fixtures reset in beforeEach; the key-value mock captures the
  // hook's internal onChange callbacks into these slots so tests can fire them.
  let currentInputs: HttpNodeType
  let headerFieldChange: ((value: string) => void) | undefined
  let paramFieldChange: ((value: string) => void) | undefined
  beforeEach(() => {
    vi.clearAllMocks()
    currentInputs = createPayload()
    headerFieldChange = undefined
    paramFieldChange = undefined
    mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false, getNodesReadOnly: () => false })
    mockUseNodeCrud.mockImplementation(() => ({
      inputs: currentInputs,
      setInputs: mockSetInputs,
    }))
    mockUseVarList.mockReturnValue({
      handleVarListChange: mockHandleVarListChange,
      handleAddVariable: mockHandleAddVariable,
    } as ReturnType<typeof useVarList>)
    // The hook calls useKeyValueList twice; tell the header call apart from
    // the param call by the string value it is handed.
    mockUseKeyValueList.mockImplementation((value, onChange) => {
      if (value === currentInputs.headers) {
        headerFieldChange = onChange
        return {
          list: [{ id: 'header-1', key: 'accept', value: 'application/json' }],
          setList: headerSetList,
          addItem: headerAddItem,
          isKeyValueEdit: true,
          toggleIsKeyValueEdit: headerToggle,
        }
      }
      paramFieldChange = onChange
      return {
        list: [{ id: 'param-1', key: 'page', value: '1' }],
        setList: paramSetList,
        addItem: paramAddItem,
        isKeyValueEdit: false,
        toggleIsKeyValueEdit: paramToggle,
      }
    })
    // The store exposes a default config for the HTTP node type; the hook's
    // hydration waits on its presence.
    mockUseStore.mockImplementation((selector) => {
      const state = {
        nodesDefaultConfigs: {
          [BlockEnum.HttpRequest]: createPayload({
            method: Method.post,
            url: 'https://default.example.com',
            headers: '',
            params: '',
            body: { type: BodyType.none, data: [] },
            timeout: { connect: 1, read: 2, write: 3 },
            ssl_verify: false,
          }),
        },
      }
      return selector(state as never)
    })
  })
  it('stays pending when the node default config is unavailable', () => {
    mockUseStore.mockImplementation((selector) => {
      return selector({ nodesDefaultConfigs: {} } as never)
    })
    const { result } = renderHook(() => useConfig('http-node', currentInputs))
    expect(result.current.isDataReady).toBe(false)
    expect(mockSetInputs).not.toHaveBeenCalled()
  })
  it('hydrates defaults, normalizes body payloads, and exposes var-list and key-value helpers', async () => {
    const { result } = renderHook(() => useConfig('http-node', currentInputs))
    await waitFor(() => {
      // Existing payload values win over the store defaults, and the raw
      // string body is normalized into a single text payload entry.
      expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
        method: Method.get,
        url: 'https://api.example.com',
        body: {
          type: BodyType.json,
          data: [{
            type: BodyPayloadValueType.text,
            value: '{"name":"alice"}',
          }],
        },
        ssl_verify: true,
      }))
    })
    expect(result.current.isDataReady).toBe(true)
    expect(result.current.readOnly).toBe(false)
    // Helpers must be passed through by reference, not wrapped.
    expect(result.current.handleVarListChange).toBe(mockHandleVarListChange)
    expect(result.current.handleAddVariable).toBe(mockHandleAddVariable)
    expect(result.current.headers).toEqual([{ id: 'header-1', key: 'accept', value: 'application/json' }])
    expect(result.current.setHeaders).toBe(headerSetList)
    expect(result.current.addHeader).toBe(headerAddItem)
    expect(result.current.isHeaderKeyValueEdit).toBe(true)
    expect(result.current.toggleIsHeaderKeyValueEdit).toBe(headerToggle)
    expect(result.current.params).toEqual([{ id: 'param-1', key: 'page', value: '1' }])
    expect(result.current.setParams).toBe(paramSetList)
    expect(result.current.addParam).toBe(paramAddItem)
    expect(result.current.isParamKeyValueEdit).toBe(false)
    expect(result.current.toggleIsParamKeyValueEdit).toBe(paramToggle)
    // URL variables accept strings, numbers and secrets, but never files.
    expect(result.current.filterVar({ type: VarType.string } as never)).toBe(true)
    expect(result.current.filterVar({ type: VarType.number } as never)).toBe(true)
    expect(result.current.filterVar({ type: VarType.secret } as never)).toBe(true)
    expect(result.current.filterVar({ type: VarType.file } as never)).toBe(false)
  })
  it('initializes empty body data arrays when the payload body is missing', async () => {
    currentInputs = createPayload({
      body: {
        type: BodyType.formData,
        data: undefined as unknown as HttpNodeType['body']['data'],
      },
    })
    renderHook(() => useConfig('http-node', currentInputs))
    await waitFor(() => {
      expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
        body: {
          type: BodyType.formData,
          data: [],
        },
      }))
    })
  })
  it('updates request fields, authorization state, curl imports, and ssl verification', async () => {
    const { result } = renderHook(() => useConfig('http-node', currentInputs))
    await waitFor(() => {
      expect(result.current.isDataReady).toBe(true)
    })
    // Only assert on mutations performed after hydration.
    mockSetInputs.mockClear()
    act(() => {
      result.current.handleMethodChange(Method.delete)
      result.current.handleUrlChange('https://changed.example.com')
      headerFieldChange?.('x-token:123')
      paramFieldChange?.('size:20')
      result.current.setBody({ type: BodyType.rawText, data: 'raw payload' })
      result.current.showAuthorization()
    })
    expect(result.current.isShowAuthorization).toBe(true)
    act(() => {
      result.current.hideAuthorization()
      result.current.setAuthorization({
        type: AuthorizationType.apiKey,
        config: {
          type: APIType.bearer,
          api_key: 'secret',
        },
      })
      result.current.setTimeout({ connect: 30, read: 40, write: 50 })
      result.current.showCurlPanel()
    })
    expect(result.current.isShowCurlPanel).toBe(true)
    act(() => {
      result.current.hideCurlPanel()
      result.current.handleCurlImport(createPayload({
        method: Method.patch,
        url: 'https://imported.example.com',
        headers: 'authorization:Bearer imported',
        params: 'debug:true',
        body: { type: BodyType.json, data: [{ type: BodyPayloadValueType.text, value: '{"ok":true}' }] },
      }))
      result.current.handleSSLVerifyChange(false)
    })
    expect(result.current.isShowAuthorization).toBe(false)
    expect(result.current.isShowCurlPanel).toBe(false)
    // Each mutation forwards a full inputs object through setInputs.
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ method: Method.delete }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ url: 'https://changed.example.com' }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ headers: 'x-token:123' }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ params: 'size:20' }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      body: { type: BodyType.rawText, data: 'raw payload' },
    }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      authorization: expect.objectContaining({
        type: AuthorizationType.apiKey,
      }),
    }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      timeout: { connect: 30, read: 40, write: 50 },
    }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      method: Method.patch,
      url: 'https://imported.example.com',
      headers: 'authorization:Bearer imported',
      params: 'debug:true',
      body: { type: BodyType.json, data: [{ type: BodyPayloadValueType.text, value: '{"ok":true}' }] },
    }))
    expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ ssl_verify: false }))
  })
})

View File

@ -0,0 +1,148 @@
import type { KnowledgeRetrievalNodeType } from '../types'
import type { DataSet } from '@/models/datasets'
import { render, screen } from '@testing-library/react'
import { ChunkingMode, DatasetPermission, DataSourceType } from '@/models/datasets'
import { RETRIEVE_METHOD, RETRIEVE_TYPE } from '@/types/app'
import { DatasetsDetailContext } from '../../../datasets-detail-store/provider'
import { createDatasetsDetailStore } from '../../../datasets-detail-store/store'
import { BlockEnum } from '../../../types'
import Node from '../node'
import { MetadataFilteringModeEnum } from '../types'
/**
 * Returns a fully-populated DataSet fixture; pass overrides to vary fields.
 */
function createDataset(overrides: Partial<DataSet> = {}): DataSet {
  // Build the retrieval-model settings fresh on each call so the two
  // retrieval fields never share object identity, matching the original
  // inline literals.
  const makeRetrievalModel = () => ({
    search_method: RETRIEVE_METHOD.semantic,
    reranking_enable: false,
    reranking_model: {
      reranking_provider_name: '',
      reranking_model_name: '',
    },
    top_k: 5,
    score_threshold_enabled: false,
    score_threshold: 0,
  })
  return {
    id: 'dataset-1',
    name: 'Dataset Name',
    indexing_status: 'completed',
    icon_info: {
      icon: '📙',
      icon_background: '#FFF4ED',
      icon_type: 'emoji',
      icon_url: '',
    },
    description: 'Dataset description',
    permission: DatasetPermission.onlyMe,
    data_source_type: DataSourceType.FILE,
    indexing_technique: 'high_quality' as DataSet['indexing_technique'],
    created_by: 'user-1',
    updated_by: 'user-1',
    updated_at: 1690000000,
    app_count: 0,
    doc_form: ChunkingMode.text,
    document_count: 1,
    total_document_count: 1,
    word_count: 1000,
    provider: 'internal',
    embedding_model: 'text-embedding-3',
    embedding_model_provider: 'openai',
    embedding_available: true,
    retrieval_model_dict: makeRetrievalModel(),
    retrieval_model: makeRetrievalModel(),
    tags: [],
    external_knowledge_info: {
      external_knowledge_id: '',
      external_knowledge_api_id: '',
      external_knowledge_api_name: '',
      external_knowledge_api_endpoint: '',
    },
    external_retrieval_model: {
      top_k: 0,
      score_threshold: 0,
      score_threshold_enabled: false,
    },
    built_in_field_enabled: false,
    runtime_mode: 'rag_pipeline',
    enable_api: false,
    is_multimodal: false,
    ...overrides,
  }
}
/**
 * Minimal KnowledgeRetrievalNodeType payload for the canvas node component.
 */
function createData(overrides: Partial<KnowledgeRetrievalNodeType> = {}): KnowledgeRetrievalNodeType {
  const base: KnowledgeRetrievalNodeType = {
    title: 'Knowledge Retrieval',
    desc: '',
    type: BlockEnum.KnowledgeRetrieval,
    query_variable_selector: ['start', 'sys.query'],
    query_attachment_selector: [],
    dataset_ids: ['dataset-1'],
    retrieval_mode: RETRIEVE_TYPE.multiWay,
    metadata_filtering_mode: MetadataFilteringModeEnum.disabled,
  }
  return { ...base, ...overrides }
}
const renderWithDatasets = (data: KnowledgeRetrievalNodeType, datasets: DataSet[] = []) => {
const store = createDatasetsDetailStore()
store.getState().updateDatasetsDetail(datasets)
return render(
<DatasetsDetailContext.Provider value={store}>
<Node id="knowledge-node" data={data} />
</DatasetsDetailContext.Provider>,
)
}
// Canvas-node rendering tests: dataset details are read from the
// DatasetsDetail zustand store provided via context.
describe('knowledge-retrieval/node', () => {
  it('renders matched dataset details and falls back to the default icon info when a dataset has no icon', async () => {
    renderWithDatasets(
      createData({
        dataset_ids: ['dataset-1', 'dataset-2'],
      }),
      [
        createDataset(),
        // Second dataset deliberately lacks icon_info to exercise the fallback.
        createDataset({
          id: 'dataset-2',
          name: 'Fallback Icon Dataset',
          icon_info: undefined as never,
        }),
      ],
    )
    expect(await screen.findByText('Dataset Name')).toBeInTheDocument()
    expect(screen.getByText('Fallback Icon Dataset')).toBeInTheDocument()
  })
  it('renders nothing when the node has no dataset ids or no matching dataset details', () => {
    const { container, rerender } = renderWithDatasets(
      createData({
        dataset_ids: ['missing-dataset'],
      }),
      [createDataset()],
    )
    expect(container).toBeEmptyDOMElement()
    // Re-render with a populated store but an empty id list: still empty.
    const store = createDatasetsDetailStore()
    store.getState().updateDatasetsDetail([createDataset()])
    rerender(
      <DatasetsDetailContext.Provider value={store}>
        <Node
          id="knowledge-node"
          data={createData({
            dataset_ids: [],
          })}
        />
      </DatasetsDetailContext.Provider>,
    )
    expect(container).toBeEmptyDOMElement()
  })
})

View File

@ -0,0 +1,375 @@
import type { ReactNode } from 'react'
import type { KnowledgeRetrievalNodeType } from '../types'
import type { NodePanelProps } from '@/app/components/workflow/types'
import type { DataSet, MetadataInDoc } from '@/models/datasets'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { BlockEnum, VarType } from '@/app/components/workflow/types'
import { ChunkingMode, DatasetPermission, DataSourceType } from '@/models/datasets'
import { RETRIEVE_METHOD, RETRIEVE_TYPE } from '@/types/app'
import Panel from '../panel'
import { ComparisonOperator, LogicalOperator, MetadataFilteringModeEnum, MetadataFilteringVariableType } from '../types'
import useConfig from '../use-config'
// vi.hoisted spies shared with the vi.mock factories below (vitest hoists
// vi.mock calls above ordinary const declarations).
const mockVarReferencePicker = vi.hoisted(() => vi.fn())
const mockRetrievalConfig = vi.hoisted(() => vi.fn())
const mockDatasetList = vi.hoisted(() => vi.fn())
const mockAddKnowledge = vi.hoisted(() => vi.fn())
const mockMetadataFilter = vi.hoisted(() => vi.fn())
/**
 * Returns a fully-populated DataSet fixture; pass overrides to vary fields.
 */
function createDataset(overrides: Partial<DataSet> = {}): DataSet {
  // Build the retrieval-model settings fresh on each call so the two
  // retrieval fields never share object identity, matching the original
  // inline literals.
  const makeRetrievalModel = () => ({
    search_method: RETRIEVE_METHOD.semantic,
    reranking_enable: false,
    reranking_model: {
      reranking_provider_name: '',
      reranking_model_name: '',
    },
    top_k: 5,
    score_threshold_enabled: false,
    score_threshold: 0,
  })
  return {
    id: 'dataset-1',
    name: 'Dataset Name',
    indexing_status: 'completed',
    icon_info: {
      icon: '📙',
      icon_background: '#FFF4ED',
      icon_type: 'emoji',
      icon_url: '',
    },
    description: 'Dataset description',
    permission: DatasetPermission.onlyMe,
    data_source_type: DataSourceType.FILE,
    indexing_technique: 'high_quality' as DataSet['indexing_technique'],
    created_by: 'user-1',
    updated_by: 'user-1',
    updated_at: 1690000000,
    app_count: 0,
    doc_form: ChunkingMode.text,
    document_count: 1,
    total_document_count: 1,
    word_count: 1000,
    provider: 'internal',
    embedding_model: 'text-embedding-3',
    embedding_model_provider: 'openai',
    embedding_available: true,
    retrieval_model_dict: makeRetrievalModel(),
    retrieval_model: makeRetrievalModel(),
    tags: [],
    external_knowledge_info: {
      external_knowledge_id: '',
      external_knowledge_api_id: '',
      external_knowledge_api_name: '',
      external_knowledge_api_endpoint: '',
    },
    external_retrieval_model: {
      top_k: 0,
      score_threshold: 0,
      score_threshold_enabled: false,
    },
    built_in_field_enabled: false,
    runtime_mode: 'rag_pipeline',
    enable_api: false,
    is_multimodal: false,
    ...overrides,
  }
}

/**
 * Returns a single document-metadata entry; override to vary id/name/type.
 */
function createMetadata(overrides: Partial<MetadataInDoc> = {}): MetadataInDoc {
  const base: MetadataInDoc = {
    id: 'meta-1',
    name: 'topic',
    type: MetadataFilteringVariableType.string,
    value: 'topic',
  }
  return { ...base, ...overrides }
}
// Variable picker stub: a single button that emits a fixed selector change.
vi.mock('@/app/components/workflow/nodes/_base/components/variable/var-reference-picker', () => ({
  __esModule: true,
  default: (props: {
    value: string[]
    onChange: (value: string[]) => void
    filterVar: (value: { type: VarType }) => boolean
  }) => {
    mockVarReferencePicker(props)
    return (
      <button type="button" onClick={() => props.onChange(['node-2', 'query'])}>
        var-reference-picker
      </button>
    )
  },
}))
// Retrieval config stub: one button per callback, each emitting fixed values.
vi.mock('../components/retrieval-config', () => ({
  __esModule: true,
  default: (props: {
    onRetrievalModeChange: (value: RETRIEVE_TYPE) => void
    onMultipleRetrievalConfigChange: (value: KnowledgeRetrievalNodeType['multiple_retrieval_config']) => void
    onSingleRetrievalModelChange: (model: { provider: string, modelId: string, mode?: string }) => void
    onSingleRetrievalModelParamsChange: (params: Record<string, unknown>) => void
  }) => {
    mockRetrievalConfig(props)
    return (
      <div>
        <button type="button" onClick={() => props.onRetrievalModeChange(RETRIEVE_TYPE.oneWay)}>change-retrieval-mode</button>
        <button type="button" onClick={() => props.onMultipleRetrievalConfigChange({ top_k: 8, score_threshold: 0.4 })}>change-multiple-config</button>
        <button type="button" onClick={() => props.onSingleRetrievalModelChange({ provider: 'openai', modelId: 'gpt-4o-mini', mode: 'chat' })}>change-model</button>
        <button type="button" onClick={() => props.onSingleRetrievalModelParamsChange({ temperature: 0.2 })}>change-model-params</button>
      </div>
    )
  },
}))
// Dataset list stub: clicking replaces the selection with dataset-2.
vi.mock('../components/dataset-list', () => ({
  __esModule: true,
  default: (props: { list: DataSet[], onChange: (datasets: DataSet[]) => void }) => {
    mockDatasetList(props)
    return (
      <button
        type="button"
        onClick={() => props.onChange([createDataset({ id: 'dataset-2', name: 'Updated Dataset' })])}
      >
        dataset-list
      </button>
    )
  },
}))
// Add-dataset stub: clicking adds dataset-3 to the selection.
vi.mock('../components/add-dataset', () => ({
  __esModule: true,
  default: (props: { selectedIds: string[], onChange: (datasets: DataSet[]) => void }) => {
    mockAddKnowledge(props)
    return (
      <button
        type="button"
        onClick={() => props.onChange([createDataset({ id: 'dataset-3', name: 'Added Dataset' })])}
      >
        add-dataset
      </button>
    )
  },
}))
// Metadata filter stub: prints the received metadata names and exposes one
// button per handler so every callback can be exercised.
vi.mock('../components/metadata/metadata-filter', () => ({
  __esModule: true,
  default: (props: {
    metadataList: MetadataInDoc[]
    handleAddCondition: ({ id, name, type }: MetadataInDoc) => void
    handleMetadataFilterModeChange: (mode: MetadataFilteringModeEnum) => void
    handleRemoveCondition: (id: string) => void
    handleToggleConditionLogicalOperator: () => void
    handleUpdateCondition: (id: string, condition: unknown) => void
    handleMetadataModelChange: (model: { provider: string, modelId: string, mode?: string }) => void
    handleMetadataCompletionParamsChange: (params: Record<string, unknown>) => void
  }) => {
    mockMetadataFilter(props)
    return (
      <div>
        <div>{props.metadataList.map(item => item.name).join(',')}</div>
        <button type="button" onClick={() => props.handleAddCondition(createMetadata())}>add-condition</button>
        <button type="button" onClick={() => props.handleMetadataFilterModeChange(MetadataFilteringModeEnum.manual)}>change-filter-mode</button>
        <button type="button" onClick={() => props.handleRemoveCondition('condition-1')}>remove-condition</button>
        <button type="button" onClick={() => props.handleToggleConditionLogicalOperator()}>toggle-logical-operator</button>
        <button
          type="button"
          onClick={() => props.handleUpdateCondition('condition-1', {
            id: 'condition-1',
            name: 'topic',
            metadata_id: 'meta-1',
            comparison_operator: ComparisonOperator.is,
            value: 'agent',
          })}
        >
          update-condition
        </button>
        <button type="button" onClick={() => props.handleMetadataModelChange({ provider: 'openai', modelId: 'gpt-4.1-mini', mode: 'chat' })}>change-metadata-model</button>
        <button type="button" onClick={() => props.handleMetadataCompletionParamsChange({ temperature: 0.3 })}>change-metadata-params</button>
      </div>
    )
  },
}))
// Output vars: passthrough container plus "name:type" var items.
vi.mock('@/app/components/workflow/nodes/_base/components/output-vars', () => ({
  __esModule: true,
  default: ({ children }: { children: ReactNode }) => <div>{children}</div>,
  VarItem: ({ name, type }: { name: string, type: string }) => <div>{`${name}:${type}`}</div>,
}))
// Mock use-config itself and keep a typed handle for per-test return values.
vi.mock('../use-config', () => ({
  __esModule: true,
  default: vi.fn(),
}))
const mockUseConfig = vi.mocked(useConfig)
/**
 * Complete KnowledgeRetrievalNodeType payload covering multi/single retrieval
 * and metadata-filtering configuration; override per test as needed.
 */
function createData(overrides: Partial<KnowledgeRetrievalNodeType> = {}): KnowledgeRetrievalNodeType {
  const defaults: KnowledgeRetrievalNodeType = {
    title: 'Knowledge Retrieval',
    desc: '',
    type: BlockEnum.KnowledgeRetrieval,
    query_variable_selector: ['start', 'sys.query'],
    query_attachment_selector: [],
    dataset_ids: ['dataset-1'],
    retrieval_mode: RETRIEVE_TYPE.multiWay,
    multiple_retrieval_config: { top_k: 5, score_threshold: 0.5 },
    single_retrieval_config: {
      model: {
        provider: 'openai',
        name: 'gpt-4o-mini',
        mode: 'chat',
        completion_params: {},
      },
    },
    metadata_filtering_mode: MetadataFilteringModeEnum.disabled,
    metadata_filtering_conditions: {
      logical_operator: LogicalOperator.and,
      conditions: [{
        id: 'condition-1',
        name: 'topic',
        metadata_id: 'meta-1',
        comparison_operator: ComparisonOperator.contains,
        value: 'agent',
      }],
    },
    metadata_model_config: {
      provider: 'openai',
      name: 'gpt-4.1-mini',
      mode: 'chat',
      completion_params: {},
    },
  }
  return { ...defaults, ...overrides }
}

// Panel-level props are irrelevant here; an empty stub satisfies the type.
const panelProps = {} as NodePanelProps<KnowledgeRetrievalNodeType>['panelProps']
describe('knowledge-retrieval/panel', () => {
const handleQueryVarChange = vi.fn()
const handleQueryAttachmentChange = vi.fn()
const handleModelChanged = vi.fn()
const handleCompletionParamsChange = vi.fn()
const handleRetrievalModeChange = vi.fn()
const handleMultipleRetrievalConfigChange = vi.fn()
const handleOnDatasetsChange = vi.fn()
const setRerankModelOpen = vi.fn()
const handleAddCondition = vi.fn()
const handleMetadataFilterModeChange = vi.fn()
const handleRemoveCondition = vi.fn()
const handleToggleConditionLogicalOperator = vi.fn()
const handleUpdateCondition = vi.fn()
const handleMetadataModelChange = vi.fn()
const handleMetadataCompletionParamsChange = vi.fn()
const createConfigResult = (overrides: Record<string, unknown> = {}) => ({
readOnly: false,
inputs: createData(),
handleQueryVarChange,
handleQueryAttachmentChange,
filterStringVar: vi.fn((value: { type: VarType }) => value.type === VarType.string),
filterFileVar: vi.fn((value: { type: VarType }) => value.type === VarType.file),
handleModelChanged,
handleCompletionParamsChange,
handleRetrievalModeChange,
handleMultipleRetrievalConfigChange,
selectedDatasets: [
createDataset({ doc_metadata: [createMetadata(), createMetadata({ id: 'meta-2', name: 'shared' })] }),
createDataset({ id: 'dataset-2', doc_metadata: [createMetadata({ id: 'meta-3', name: 'shared' }), createMetadata({ id: 'meta-4', name: 'language' })] }),
],
selectedDatasetsLoaded: true,
handleOnDatasetsChange,
rerankModelOpen: false,
setRerankModelOpen,
handleAddCondition,
handleMetadataFilterModeChange,
handleRemoveCondition,
handleToggleConditionLogicalOperator,
handleUpdateCondition,
handleMetadataModelChange,
handleMetadataCompletionParamsChange,
availableStringVars: [],
availableStringNodesWithParent: [],
availableNumberVars: [],
availableNumberNodesWithParent: [],
showImageQueryVarSelector: true,
...overrides,
})
beforeEach(() => {
vi.clearAllMocks()
mockUseConfig.mockReturnValue(createConfigResult() as ReturnType<typeof useConfig>)
})
it('wires panel actions and passes the intersected metadata list to metadata filters', async () => {
  const user = userEvent.setup()
  render(
    <Panel
      id="knowledge-node"
      data={createData()}
      panelProps={panelProps}
    />,
  )
  // Output var and the intersected metadata name render up front.
  expect(screen.getByText('result:Array[Object]')).toBeInTheDocument()
  expect(screen.getByText('shared')).toBeInTheDocument()
  // First picker is the query selector, second the attachment selector.
  const [queryPicker, attachmentPicker] = screen.getAllByRole('button', { name: 'var-reference-picker' })
  await user.click(queryPicker!)
  await user.click(attachmentPicker!)
  // Fire every remaining stubbed control once, in panel order.
  const actionButtons = [
    'change-retrieval-mode',
    'change-multiple-config',
    'change-model',
    'change-model-params',
    'dataset-list',
    'add-dataset',
    'add-condition',
    'change-filter-mode',
    'remove-condition',
    'toggle-logical-operator',
    'update-condition',
    'change-metadata-model',
    'change-metadata-params',
  ]
  for (const name of actionButtons)
    await user.click(screen.getByRole('button', { name }))
  expect(handleQueryVarChange).toHaveBeenCalledWith(['node-2', 'query'])
  expect(handleQueryAttachmentChange).toHaveBeenCalledWith(['node-2', 'query'])
  expect(handleRetrievalModeChange).toHaveBeenCalledWith(RETRIEVE_TYPE.oneWay)
  expect(handleMultipleRetrievalConfigChange).toHaveBeenCalledWith({ top_k: 8, score_threshold: 0.4 })
  expect(handleModelChanged).toHaveBeenCalledWith({ provider: 'openai', modelId: 'gpt-4o-mini', mode: 'chat' })
  expect(handleCompletionParamsChange).toHaveBeenCalledWith({ temperature: 0.2 })
  expect(handleOnDatasetsChange).toHaveBeenCalledWith([expect.objectContaining({ id: 'dataset-2' })])
  expect(handleOnDatasetsChange).toHaveBeenCalledWith([expect.objectContaining({ id: 'dataset-3' })])
  expect(handleAddCondition).toHaveBeenCalledWith(expect.objectContaining({ id: 'meta-1' }))
  expect(handleMetadataFilterModeChange).toHaveBeenCalledWith(MetadataFilteringModeEnum.manual)
  expect(handleRemoveCondition).toHaveBeenCalledWith('condition-1')
  expect(handleToggleConditionLogicalOperator).toHaveBeenCalledTimes(1)
  expect(handleUpdateCondition).toHaveBeenCalledWith('condition-1', expect.objectContaining({ comparison_operator: ComparisonOperator.is }))
  expect(handleMetadataModelChange).toHaveBeenCalledWith({ provider: 'openai', modelId: 'gpt-4.1-mini', mode: 'chat' })
  expect(handleMetadataCompletionParamsChange).toHaveBeenCalledWith({ temperature: 0.3 })
  // Only the metadata shared by every selected dataset reaches the filter.
  expect(mockMetadataFilter).toHaveBeenCalledWith(expect.objectContaining({
    metadataList: [expect.objectContaining({ name: 'shared' })],
  }))
})
it('hides readonly-only controls and the attachment selector when image queries are unavailable', () => {
  // Read-only config with the image-query selector disabled for this render.
  const readOnlyResult = createConfigResult({
    readOnly: true,
    showImageQueryVarSelector: false,
  })
  mockUseConfig.mockReturnValueOnce(readOnlyResult as ReturnType<typeof useConfig>)
  render(
    <Panel
      id="knowledge-node"
      data={createData()}
      panelProps={panelProps}
    />,
  )
  // Only the query picker remains; dataset editing is hidden when read-only.
  const pickers = screen.getAllByRole('button', { name: 'var-reference-picker' })
  expect(pickers).toHaveLength(1)
  expect(screen.queryByRole('button', { name: 'add-dataset' })).not.toBeInTheDocument()
})
})

View File

@ -0,0 +1,457 @@
import type { KnowledgeRetrievalNodeType } from '../types'
import type { DataSet, MetadataInDoc } from '@/models/datasets'
import { act, renderHook, waitFor } from '@testing-library/react'
import { isEqual } from 'es-toolkit/predicate'
import { useState } from 'react'
import { useCurrentProviderAndModel, useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { useDatasetsDetailStore } from '@/app/components/workflow/datasets-detail-store/store'
import {
useIsChatMode,
useNodesReadOnly,
useWorkflow,
} from '@/app/components/workflow/hooks'
import useAvailableVarList from '@/app/components/workflow/nodes/_base/hooks/use-available-var-list'
import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
import { BlockEnum, VarType } from '@/app/components/workflow/types'
import { DATASET_DEFAULT } from '@/config'
import { ChunkingMode, DatasetPermission, DataSourceType } from '@/models/datasets'
import { fetchDatasets } from '@/service/datasets'
import { AppModeEnum, RETRIEVE_METHOD, RETRIEVE_TYPE } from '@/types/app'
import { ComparisonOperator, LogicalOperator, MetadataFilteringModeEnum, MetadataFilteringVariableType } from '../types'
import useConfig from '../use-config'
// Deterministic uuid mock: each v4() call yields condition-1, condition-2, …
// The counter is reset in beforeEach. Referencing the outer `let` from the
// hoisted vi.mock factory is safe because the factory runs lazily.
let uuidCounter = 0
vi.mock('uuid', () => ({
  v4: vi.fn(() => {
    uuidCounter += 1
    return `condition-${uuidCounter}`
  }),
}))
// Replace every workflow-level collaborator so useConfig runs in isolation.
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: vi.fn(),
  useIsChatMode: vi.fn(),
  useWorkflow: vi.fn(),
}))
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useModelListAndDefaultModelAndCurrentProviderAndModel: vi.fn(),
  useCurrentProviderAndModel: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-available-var-list', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/datasets-detail-store/store', () => ({
  useDatasetsDetailStore: vi.fn(),
}))
vi.mock('@/service/datasets', () => ({
  fetchDatasets: vi.fn(),
}))
// Typed handles to the mocks above, used to install behavior in beforeEach.
const mockUseNodesReadOnly = vi.mocked(useNodesReadOnly)
const mockUseIsChatMode = vi.mocked(useIsChatMode)
const mockUseWorkflow = vi.mocked(useWorkflow)
const mockUseModelListAndDefaultModelAndCurrentProviderAndModel = vi.mocked(useModelListAndDefaultModelAndCurrentProviderAndModel)
const mockUseCurrentProviderAndModel = vi.mocked(useCurrentProviderAndModel)
const mockUseNodeCrud = vi.mocked(useNodeCrud)
const mockUseAvailableVarList = vi.mocked(useAvailableVarList)
const mockUseDatasetsDetailStore = vi.mocked(useDatasetsDetailStore)
const mockFetchDatasets = vi.mocked(fetchDatasets)
// Factory for a fully-populated internal DataSet fixture; override any field
// (id, doc_metadata, is_multimodal, …) through the patch argument.
const createDataset = (patch: Partial<DataSet> = {}): DataSet => {
  // Fresh retrieval settings per call so fixtures never share references.
  const buildRetrievalModel = () => ({
    search_method: RETRIEVE_METHOD.semantic,
    reranking_enable: false,
    reranking_model: {
      reranking_provider_name: '',
      reranking_model_name: '',
    },
    top_k: 5,
    score_threshold_enabled: false,
    score_threshold: 0,
  })
  return {
    id: 'dataset-1',
    name: 'Dataset Name',
    indexing_status: 'completed',
    icon_info: {
      icon: '📙',
      icon_background: '#FFF4ED',
      icon_type: 'emoji',
      icon_url: '',
    },
    description: 'Dataset description',
    permission: DatasetPermission.onlyMe,
    data_source_type: DataSourceType.FILE,
    indexing_technique: 'high_quality' as DataSet['indexing_technique'],
    created_by: 'user-1',
    updated_by: 'user-1',
    updated_at: 1690000000,
    app_count: 0,
    doc_form: ChunkingMode.text,
    document_count: 1,
    total_document_count: 1,
    word_count: 1000,
    provider: 'internal',
    embedding_model: 'text-embedding-3',
    embedding_model_provider: 'openai',
    embedding_available: true,
    retrieval_model_dict: buildRetrievalModel(),
    retrieval_model: buildRetrievalModel(),
    tags: [],
    external_knowledge_info: {
      external_knowledge_id: '',
      external_knowledge_api_id: '',
      external_knowledge_api_name: '',
      external_knowledge_api_endpoint: '',
    },
    external_retrieval_model: {
      top_k: 0,
      score_threshold: 0,
      score_threshold_enabled: false,
    },
    built_in_field_enabled: false,
    runtime_mode: 'rag_pipeline',
    enable_api: false,
    is_multimodal: false,
    ...patch,
  }
}
// Minimal metadata fixture; any field can be replaced via the patch argument.
const createMetadata = (patch: Partial<MetadataInDoc> = {}): MetadataInDoc => {
  const base: MetadataInDoc = {
    id: 'meta-1',
    name: 'topic',
    type: MetadataFilteringVariableType.string,
    value: 'topic',
  }
  return { ...base, ...patch }
}
// Default KnowledgeRetrieval node payload used across the suite.
const createData = (patch: Partial<KnowledgeRetrievalNodeType> = {}): KnowledgeRetrievalNodeType => {
  const base: KnowledgeRetrievalNodeType = {
    title: 'Knowledge Retrieval',
    desc: '',
    type: BlockEnum.KnowledgeRetrieval,
    query_variable_selector: [],
    query_attachment_selector: ['start-node', 'files'],
    dataset_ids: ['dataset-1'],
    retrieval_mode: RETRIEVE_TYPE.multiWay,
    // Explicit undefined keeps the keys present on the payload.
    multiple_retrieval_config: undefined,
    single_retrieval_config: {
      model: {
        provider: '',
        name: '',
        mode: '',
        completion_params: {},
      },
    },
    metadata_filtering_mode: MetadataFilteringModeEnum.disabled,
    metadata_filtering_conditions: undefined,
    metadata_model_config: undefined,
  }
  return { ...base, ...patch }
}
describe('knowledge-retrieval/use-config', () => {
const updateDatasetsDetail = vi.fn()
const nodeCrudSetInputs = vi.fn()
// Install the default mock behavior before every test: a chat-mode workflow
// with a Start node upstream, rerank/text model defaults, stateful node CRUD,
// canned variable lists, and a one-dataset fetch response.
beforeEach(() => {
  vi.clearAllMocks()
  uuidCounter = 0
  mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false, getNodesReadOnly: () => false })
  mockUseIsChatMode.mockReturnValue(true)
  mockUseWorkflow.mockReturnValue({
    getBeforeNodesInSameBranch: () => [{
      id: 'start-node',
      data: {
        type: BlockEnum.Start,
      },
    }],
  } as unknown as ReturnType<typeof useWorkflow>)
  // The model hook answers differently for rerank vs. text-generation lookups.
  mockUseModelListAndDefaultModelAndCurrentProviderAndModel.mockImplementation((type) => {
    if (type === 'rerank') {
      return {
        modelList: [{
          provider: 'rerank-provider',
          models: [{
            model: 'rerank-model',
          }],
        }],
        defaultModel: {
          provider: {
            provider: 'rerank-provider',
          },
          model: 'rerank-model',
        },
        currentProvider: { provider: 'rerank-provider' },
        currentModel: {
          model: 'rerank-model',
          model_properties: {
            mode: AppModeEnum.CHAT,
          },
        },
      } as unknown as ReturnType<typeof useModelListAndDefaultModelAndCurrentProviderAndModel>
    }
    return {
      modelList: [],
      defaultModel: undefined,
      currentProvider: { provider: 'openai' },
      currentModel: {
        model: 'gpt-4o-mini',
        model_properties: {
          mode: AppModeEnum.CHAT,
        },
      },
    } as unknown as ReturnType<typeof useModelListAndDefaultModelAndCurrentProviderAndModel>
  })
  mockUseCurrentProviderAndModel.mockReturnValue({
    currentProvider: { provider: 'rerank-provider' },
    currentModel: { model: 'rerank-model' },
  } as unknown as ReturnType<typeof useCurrentProviderAndModel>)
  // Back useNodeCrud with real React state so the hook under test observes its
  // own writes; deep-equal payloads keep the previous reference to suppress
  // redundant re-renders.
  mockUseNodeCrud.mockImplementation((_id, data) => {
    const [inputs, setInputs] = useState(data)
    return {
      inputs,
      setInputs: (nextInputs) => {
        nodeCrudSetInputs(nextInputs as KnowledgeRetrievalNodeType)
        setInputs(prev => isEqual(prev, nextInputs) ? prev : nextInputs)
      },
    }
  })
  // Return string vars when probed with the string filter, number vars
  // otherwise — mirrors the two useAvailableVarList calls in the hook.
  mockUseAvailableVarList.mockImplementation((_id, config) => {
    const activeConfig = config!
    const stringVars = [{
      nodeId: 'string-node',
      title: 'String Node',
      vars: [{
        variable: 'topic',
        type: VarType.string,
      }],
    }]
    const numberVars = [{
      nodeId: 'number-node',
      title: 'Number Node',
      vars: [{
        variable: 'score',
        type: VarType.number,
      }],
    }]
    if (activeConfig.filterVar({ type: VarType.string } as never, ['string-node', 'topic'])) {
      return {
        availableVars: stringVars,
        availableNodes: [],
        availableNodesWithParent: [],
      } as unknown as ReturnType<typeof useAvailableVarList>
    }
    return {
      availableVars: numberVars,
      availableNodes: [],
      availableNodesWithParent: [],
    } as unknown as ReturnType<typeof useAvailableVarList>
  })
  mockUseDatasetsDetailStore.mockImplementation((selector) => {
    return selector({ updateDatasetsDetail } as never)
  })
  mockFetchDatasets.mockResolvedValue({
    data: [createDataset({ id: 'dataset-1', name: 'Knowledge Base', is_multimodal: true })],
    page: 1,
    limit: 20,
    total: 1,
    has_more: false,
  })
})
it('initializes defaults, loads dataset details, and exposes metadata variables', async () => {
  const { result } = renderHook(() => useConfig('knowledge-node', createData()))
  // Dataset details load asynchronously after mount.
  await waitFor(() => {
    expect(result.current.selectedDatasetsLoaded).toBe(true)
    expect(result.current.selectedDatasets[0]?.name).toBe('Knowledge Base')
  })
  expect(mockFetchDatasets).toHaveBeenCalledWith({
    url: '/datasets',
    params: {
      page: 1,
      ids: ['dataset-1'],
    },
  })
  // Chat mode defaults the query selector to the Start node's sys.query.
  expect(result.current.inputs.query_variable_selector).toEqual(['start-node', 'sys.query'])
  expect(result.current.inputs.multiple_retrieval_config).toEqual(expect.objectContaining({
    top_k: DATASET_DEFAULT.top_k,
    reranking_enable: true,
  }))
  // The fetched dataset is multimodal, so the image-query selector shows.
  expect(result.current.showImageQueryVarSelector).toBe(true)
  expect(result.current.availableStringVars).toEqual([{
    nodeId: 'string-node',
    title: 'String Node',
    vars: [{
      variable: 'topic',
      type: VarType.string,
    }],
  }])
  expect(result.current.availableNumberVars).toEqual([{
    nodeId: 'number-node',
    title: 'Number Node',
    vars: [{
      variable: 'score',
      type: VarType.number,
    }],
  }])
})
it('updates query and single retrieval model state through the real hook', async () => {
  const { result } = renderHook(() => useConfig('knowledge-node', createData()))
  await waitFor(() => {
    expect(result.current.selectedDatasetsLoaded).toBe(true)
  })
  act(() => {
    result.current.handleQueryVarChange(['start-node', 'question'])
    result.current.handleQueryAttachmentChange(['start-node', 'files'])
    result.current.handleRetrievalModeChange(RETRIEVE_TYPE.oneWay)
    result.current.handleModelChanged({ provider: 'anthropic', modelId: 'claude-sonnet', mode: AppModeEnum.CHAT })
    result.current.handleCompletionParamsChange({ temperature: 0.2 })
    // Intentional duplicate call: exercises the isEqual short-circuit that
    // skips redundant completion-param writes.
    result.current.handleCompletionParamsChange({ temperature: 0.2 })
  })
  expect(nodeCrudSetInputs).toHaveBeenCalledWith(expect.objectContaining({
    query_variable_selector: ['start-node', 'question'],
  }))
  expect(nodeCrudSetInputs).toHaveBeenCalledWith(expect.objectContaining({
    query_attachment_selector: ['start-node', 'files'],
  }))
  await waitFor(() => {
    expect(result.current.inputs.retrieval_mode).toBe(RETRIEVE_TYPE.oneWay)
    expect(result.current.inputs.single_retrieval_config).toEqual(expect.objectContaining({
      model: expect.objectContaining({
        provider: 'anthropic',
        name: 'claude-sonnet',
        completion_params: { temperature: 0.2 },
      }),
    }))
  })
})
it('updates retrieval config and dataset state through the real hook', async () => {
  const { result } = renderHook(() => useConfig('knowledge-node', createData()))
  await waitFor(() => {
    expect(result.current.selectedDatasetsLoaded).toBe(true)
  })
  act(() => {
    // Round-trip the retrieval mode so the multi-way branch re-runs.
    result.current.handleRetrievalModeChange(RETRIEVE_TYPE.oneWay)
    result.current.handleRetrievalModeChange(RETRIEVE_TYPE.multiWay)
    result.current.handleMultipleRetrievalConfigChange({ top_k: 8, score_threshold: 0.4 })
    // Neither replacement dataset is multimodal, so the attachment selector
    // is expected to be cleared below.
    result.current.handleOnDatasetsChange([
      createDataset({ id: 'dataset-2', name: 'Economic', indexing_technique: 'economy' as DataSet['indexing_technique'], is_multimodal: false }),
      createDataset({ id: 'dataset-3', name: 'High Quality', indexing_technique: 'high_quality' as DataSet['indexing_technique'], is_multimodal: false }),
    ])
  })
  expect(nodeCrudSetInputs).toHaveBeenCalledWith(expect.objectContaining({
    multiple_retrieval_config: expect.objectContaining({
      top_k: 8,
      score_threshold: 0.4,
    }),
  }))
  await waitFor(() => {
    expect(result.current.inputs.retrieval_mode).toBe(RETRIEVE_TYPE.multiWay)
    expect(result.current.inputs.query_attachment_selector).toEqual([])
    expect(result.current.inputs.dataset_ids).toEqual(['dataset-2', 'dataset-3'])
    expect(result.current.inputs.multiple_retrieval_config).toEqual(expect.objectContaining({
      reranking_enable: true,
      reranking_model: {
        provider: 'rerank-provider',
        model: 'rerank-model',
      },
    }))
    // Mixed economy/high-quality selection opens the rerank settings.
    expect(result.current.rerankModelOpen).toBe(true)
  })
  expect(updateDatasetsDetail).toHaveBeenCalledWith([
    expect.objectContaining({ id: 'dataset-2' }),
    expect.objectContaining({ id: 'dataset-3' }),
  ])
})
it('manages metadata conditions, metadata model config, and variable filters', async () => {
  const { result } = renderHook(() => useConfig('knowledge-node', createData({
    dataset_ids: [],
  })))
  await waitFor(() => {
    expect(result.current.selectedDatasetsLoaded).toBe(true)
  })
  act(() => {
    result.current.handleMetadataFilterModeChange(MetadataFilteringModeEnum.manual)
    result.current.handleAddCondition(createMetadata())
    result.current.handleAddCondition(createMetadata({
      id: 'meta-2',
      name: 'score',
      type: MetadataFilteringVariableType.number,
    }))
  })
  await waitFor(() => {
    expect(result.current.inputs.metadata_filtering_mode).toBe(MetadataFilteringModeEnum.manual)
    expect(result.current.inputs.metadata_filtering_conditions?.conditions).toHaveLength(2)
  })
  const firstCondition = result.current.inputs.metadata_filtering_conditions!.conditions[0]
  act(() => {
    result.current.handleUpdateCondition(firstCondition.id, {
      ...firstCondition,
      value: 'agent',
      comparison_operator: ComparisonOperator.contains,
    })
    result.current.handleToggleConditionLogicalOperator()
    result.current.handleRemoveCondition(firstCondition.id)
    result.current.handleMetadataModelChange({ provider: 'openai', modelId: 'gpt-4.1-mini', mode: AppModeEnum.CHAT })
    result.current.handleMetadataCompletionParamsChange({ top_p: 0.3 })
  })
  await waitFor(() => {
    // Toggled once: and -> or; one of the two conditions was removed.
    expect(result.current.inputs.metadata_filtering_conditions?.logical_operator).toBe(LogicalOperator.or)
    expect(result.current.inputs.metadata_filtering_conditions?.conditions).toHaveLength(1)
    expect(result.current.inputs.metadata_model_config).toEqual({
      provider: 'openai',
      name: 'gpt-4.1-mini',
      mode: AppModeEnum.CHAT,
      completion_params: { top_p: 0.3 },
    })
  })
  // Filter predicates exposed for the variable pickers.
  expect(result.current.filterStringVar({ type: VarType.string } as never)).toBe(true)
  expect(result.current.filterStringVar({ type: VarType.number } as never)).toBe(false)
  expect(result.current.filterFileVar({ type: VarType.file } as never)).toBe(true)
  expect(result.current.filterFileVar({ type: VarType.arrayFile } as never)).toBe(true)
  expect(result.current.filterFileVar({ type: VarType.string } as never)).toBe(false)
})
})

View File

@ -0,0 +1,119 @@
import type { MutableRefObject } from 'react'
import type { KnowledgeRetrievalNodeType } from '../types'
import type { DataSet } from '@/models/datasets'
import {
produce,
} from 'immer'
import {
useCallback,
useEffect,
useMemo,
useState,
} from 'react'
import { fetchDatasets } from '@/service/datasets'
import { RETRIEVE_TYPE } from '@/types/app'
import { getMultipleRetrievalConfig, getSelectedDatasetsMode } from '../utils'
// Minimal provider/model pair used as the rerank fallback.
type ModelIdentity = {
  provider?: string
  model?: string
}
// Collaborators injected by the parent use-config hook.
type Params = {
  inputs: KnowledgeRetrievalNodeType
  inputRef: MutableRefObject<KnowledgeRetrievalNodeType>
  setInputs: (inputs: KnowledgeRetrievalNodeType) => void
  payloadRetrievalMode: RETRIEVE_TYPE
  updateDatasetsDetail: (datasets: DataSet[]) => void
  fallbackRerankModel: ModelIdentity
}
// Manages dataset selection state for the knowledge-retrieval node: hydrates
// details for persisted dataset ids, applies dataset changes to the node
// inputs, and decides when the rerank-model settings should pop open.
const useKnowledgeDatasetSelection = ({
  inputs,
  inputRef,
  setInputs,
  payloadRetrievalMode,
  updateDatasetsDetail,
  fallbackRerankModel,
}: Params) => {
  const [selectedDatasets, setSelectedDatasets] = useState<DataSet[]>([])
  const [selectedDatasetsLoaded, setSelectedDatasetsLoaded] = useState(false)
  const [rerankModelOpen, setRerankModelOpen] = useState(false)
  // On mount: fetch full details for the persisted dataset ids, then push the
  // current inputs through setInputs once so normalization runs on load.
  // NOTE(review): a rejected fetchDatasets leaves selectedDatasetsLoaded false
  // and surfaces as an unhandled rejection — consider a try/finally.
  useEffect(() => {
    void (async () => {
      const currentInputs = inputRef.current
      const datasetIds = currentInputs.dataset_ids
      if (datasetIds.length > 0) {
        const { data: dataSetsWithDetail } = await fetchDatasets({
          url: '/datasets',
          params: {
            page: 1,
            ids: datasetIds,
          },
        })
        setSelectedDatasets(dataSetsWithDetail)
      }
      // Re-assigning the same ids is a deliberate no-op write; its purpose is
      // to route the inputs through setInputs (and its normalization).
      const nextInputs = produce(currentInputs, (draft) => {
        draft.dataset_ids = datasetIds
      })
      setInputs(nextInputs)
      setSelectedDatasetsLoaded(true)
    })()
  }, [inputRef, setInputs])
  // Apply a new dataset selection: sync ids, recompute the multi-way
  // retrieval config, and clear the attachment selector when no selected
  // dataset supports multimodal (image) queries.
  const handleOnDatasetsChange = useCallback((newDatasets: DataSet[]) => {
    const {
      mixtureHighQualityAndEconomic,
      mixtureInternalAndExternal,
      inconsistentEmbeddingModel,
      allInternal,
      allExternal,
    } = getSelectedDatasetsMode(newDatasets)
    const noMultiModalDatasets = newDatasets.every(dataset => !dataset.is_multimodal)
    const nextInputs = produce(inputs, (draft) => {
      draft.dataset_ids = newDatasets.map(dataset => dataset.id)
      if (payloadRetrievalMode === RETRIEVE_TYPE.multiWay && newDatasets.length > 0) {
        draft.multiple_retrieval_config = getMultipleRetrievalConfig(
          draft.multiple_retrieval_config!,
          newDatasets,
          selectedDatasets,
          fallbackRerankModel,
        )
      }
      if (noMultiModalDatasets)
        draft.query_attachment_selector = []
    })
    updateDatasetsDetail(newDatasets)
    setInputs(nextInputs)
    setSelectedDatasets(newDatasets)
    // Selections that need an explicit rerank model open the settings UI.
    if (
      (allInternal && (mixtureHighQualityAndEconomic || inconsistentEmbeddingModel))
      || mixtureInternalAndExternal
      || allExternal
    ) {
      setRerankModelOpen(true)
    }
  }, [fallbackRerankModel, inputs, payloadRetrievalMode, selectedDatasets, setInputs, updateDatasetsDetail])
  // Show the image-query selector only when a multimodal dataset is selected.
  const showImageQueryVarSelector = useMemo(() => {
    return selectedDatasets.some(dataset => dataset.is_multimodal)
  }, [selectedDatasets])
  return {
    selectedDatasets,
    selectedDatasetsLoaded,
    rerankModelOpen,
    setRerankModelOpen,
    handleOnDatasetsChange,
    showImageQueryVarSelector,
  }
}
export default useKnowledgeDatasetSelection

View File

@ -0,0 +1,64 @@
import type { MutableRefObject } from 'react'
import type { KnowledgeRetrievalNodeType } from '../types'
import type { ValueSelector } from '@/app/components/workflow/types'
import { produce } from 'immer'
import {
useCallback,
useEffect,
useRef,
} from 'react'
import { RETRIEVE_TYPE } from '@/types/app'
// Collaborators injected by the parent use-config hook; doSetInputs is the
// raw persistence callback (useNodeCrud's setInputs).
type Params = {
  inputs: KnowledgeRetrievalNodeType
  doSetInputs: (inputs: KnowledgeRetrievalNodeType) => void
}
// The two retrieval configs are mutually exclusive: keep only the one that
// matches the current retrieval mode and drop the other from the payload.
const normalizeInputs = (nextInputs: KnowledgeRetrievalNodeType) => {
  return produce(nextInputs, (draft) => {
    const isMultiWay = draft.retrieval_mode === RETRIEVE_TYPE.multiWay
    if (isMultiWay)
      delete draft.single_retrieval_config
    else
      delete draft.multiple_retrieval_config
  })
}
// Centralizes input writes for the knowledge-retrieval node: keeps a ref in
// sync with the latest inputs and normalizes every payload before saving.
const useKnowledgeInputManager = ({
  inputs,
  doSetInputs,
}: Params) => {
  // Ref mirror so callbacks always read the freshest inputs.
  const inputRef = useRef(inputs)
  useEffect(() => {
    inputRef.current = inputs
  }, [inputs])
  // Persist normalized inputs and update the ref immediately so successive
  // writes within one render batch compose on top of each other.
  const setInputs = useCallback((nextInputs: KnowledgeRetrievalNodeType) => {
    const normalized = normalizeInputs(nextInputs)
    doSetInputs(normalized)
    inputRef.current = normalized
  }, [doSetInputs])
  const handleQueryVarChange = useCallback((newVar: ValueSelector | string) => {
    setInputs(produce(inputRef.current, (draft) => {
      draft.query_variable_selector = newVar as ValueSelector
    }))
  }, [setInputs])
  const handleQueryAttachmentChange = useCallback((newVar: ValueSelector | string) => {
    setInputs(produce(inputRef.current, (draft) => {
      draft.query_attachment_selector = newVar as ValueSelector
    }))
  }, [setInputs])
  return {
    inputRef: inputRef as MutableRefObject<KnowledgeRetrievalNodeType>,
    setInputs,
    handleQueryVarChange,
    handleQueryAttachmentChange,
  }
}
export default useKnowledgeInputManager

View File

@ -0,0 +1,164 @@
import type { MutableRefObject } from 'react'
import type {
HandleAddCondition,
HandleRemoveCondition,
HandleToggleConditionLogicalOperator,
HandleUpdateCondition,
KnowledgeRetrievalNodeType,
MetadataFilteringModeEnum,
} from '../types'
import type { Var } from '@/app/components/workflow/types'
import { produce } from 'immer'
import { useCallback } from 'react'
import { v4 as uuid4 } from 'uuid'
import useAvailableVarList from '@/app/components/workflow/nodes/_base/hooks/use-available-var-list'
import { VarType } from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import {
ComparisonOperator,
LogicalOperator,
MetadataFilteringVariableType,
} from '../types'
// Collaborators injected by the parent use-config hook; `id` is the workflow
// node id, used to look up available upstream variables.
type Params = {
  id: string
  inputRef: MutableRefObject<KnowledgeRetrievalNodeType>
  setInputs: (inputs: KnowledgeRetrievalNodeType) => void
}
// Predicates used by the metadata variable pickers.
const filterStringVar = (varPayload: Var) => varPayload.type === VarType.string

const filterNumberVar = (varPayload: Var) => varPayload.type === VarType.number

// File-typed vars may be single files or file arrays.
const filterFileVar = (varPayload: Var) =>
  varPayload.type === VarType.file || varPayload.type === VarType.arrayFile
// Metadata-filtering state for the knowledge-retrieval node: filter mode,
// CRUD over filter conditions, the metadata model config, and the variable
// lists the condition editors pick values from.
const useKnowledgeMetadataConfig = ({
  id,
  inputRef,
  setInputs,
}: Params) => {
  const handleMetadataFilterModeChange = useCallback((newMode: MetadataFilteringModeEnum) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.metadata_filtering_mode = newMode
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Add a condition for a metadata entry. The destructured `id` here is the
  // metadata entry's id and shadows the hook's node `id` parameter.
  const handleAddCondition = useCallback<HandleAddCondition>(({ id, name, type }) => {
    // Number metadata defaults to '=', every other type to 'is'.
    const comparisonOperator = type === MetadataFilteringVariableType.number
      ? ComparisonOperator.equal
      : ComparisonOperator.is
    const nextInputs = produce(inputRef.current, (draft) => {
      const newCondition = {
        id: uuid4(),
        metadata_id: id,
        name,
        comparison_operator: comparisonOperator,
      }
      if (draft.metadata_filtering_conditions) {
        draft.metadata_filtering_conditions.conditions.push(newCondition)
        return
      }
      // First condition: create the container with AND as the default.
      draft.metadata_filtering_conditions = {
        logical_operator: LogicalOperator.and,
        conditions: [newCondition],
      }
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  const handleRemoveCondition = useCallback<HandleRemoveCondition>((conditionId) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const conditions = draft.metadata_filtering_conditions?.conditions || []
      const index = conditions.findIndex(condition => condition.id === conditionId)
      if (index > -1)
        draft.metadata_filtering_conditions?.conditions.splice(index, 1)
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  const handleUpdateCondition = useCallback<HandleUpdateCondition>((conditionId, newCondition) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const conditions = draft.metadata_filtering_conditions?.conditions || []
      const index = conditions.findIndex(condition => condition.id === conditionId)
      if (index > -1)
        draft.metadata_filtering_conditions!.conditions[index] = newCondition
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // NOTE(review): the `!` assumes a conditions container already exists;
  // toggling before any condition was added would throw — confirm the UI only
  // exposes the toggle once a condition is present.
  const handleToggleConditionLogicalOperator = useCallback<HandleToggleConditionLogicalOperator>(() => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const currentLogicalOperator = draft.metadata_filtering_conditions?.logical_operator
      draft.metadata_filtering_conditions!.logical_operator = currentLogicalOperator === LogicalOperator.and
        ? LogicalOperator.or
        : LogicalOperator.and
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Select the model used for automatic metadata filtering; existing sampling
  // params are kept, brand-new configs default to temperature 0.7.
  const handleMetadataModelChange = useCallback((model: { provider: string, modelId: string, mode?: string }) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.metadata_model_config = {
        provider: model.provider,
        name: model.modelId,
        mode: model.mode || AppModeEnum.CHAT,
        completion_params: draft.metadata_model_config?.completion_params || { temperature: 0.7 },
      }
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // NOTE(review): spreading a possibly-undefined config is a silent no-op, so
  // this can produce a params-only object despite the `!` — confirm a model is
  // always chosen before params are edited.
  const handleMetadataCompletionParamsChange = useCallback((newParams: Record<string, unknown>) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.metadata_model_config = {
        ...draft.metadata_model_config!,
        completion_params: newParams,
      }
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Candidate variables for string- and number-typed condition values.
  const {
    availableVars: availableStringVars,
    availableNodesWithParent: availableStringNodesWithParent,
  } = useAvailableVarList(id, {
    onlyLeafNodeVar: false,
    filterVar: filterStringVar,
  })
  const {
    availableVars: availableNumberVars,
    availableNodesWithParent: availableNumberNodesWithParent,
  } = useAvailableVarList(id, {
    onlyLeafNodeVar: false,
    filterVar: filterNumberVar,
  })
  return {
    filterStringVar,
    filterFileVar,
    handleMetadataFilterModeChange,
    handleAddCondition,
    handleRemoveCondition,
    handleUpdateCondition,
    handleToggleConditionLogicalOperator,
    handleMetadataModelChange,
    handleMetadataCompletionParamsChange,
    availableStringVars,
    availableStringNodesWithParent,
    availableNumberVars,
    availableNumberNodesWithParent,
  }
}
export default useKnowledgeMetadataConfig

View File

@ -0,0 +1,188 @@
import type { MutableRefObject } from 'react'
import type {
KnowledgeRetrievalNodeType,
MultipleRetrievalConfig,
} from '../types'
import type { ModelConfig } from '@/app/components/workflow/types'
import type { DataSet } from '@/models/datasets'
import { isEqual } from 'es-toolkit/predicate'
import { produce } from 'immer'
import {
useCallback,
useEffect,
} from 'react'
import { DATASET_DEFAULT } from '@/config'
import {
AppModeEnum,
RETRIEVE_TYPE,
} from '@/types/app'
import { getMultipleRetrievalConfig } from '../utils'
// Minimal provider/model pair used as the rerank fallback.
type ModelIdentity = {
  provider?: string
  model?: string
}
// Shape of the currently selected text-generation provider.
type TextProvider = {
  provider: string
}
// Shape of the currently selected text-generation model.
type TextModel = {
  model: string
  model_properties?: {
    mode?: string
  }
}
// Collaborators injected by the parent use-config hook.
type Params = {
  inputs: KnowledgeRetrievalNodeType
  inputRef: MutableRefObject<KnowledgeRetrievalNodeType>
  setInputs: (inputs: KnowledgeRetrievalNodeType) => void
  selectedDatasets: DataSet[]
  currentProvider?: TextProvider
  currentModel?: TextModel
  fallbackRerankModel: ModelIdentity
  hasRerankDefaultModel: boolean
}
// Wrap a model config in the shape expected by single_retrieval_config.
const createSingleRetrievalConfig = (model: ModelConfig): KnowledgeRetrievalNodeType['single_retrieval_config'] => {
  return { model }
}
// Model selection and retrieval-mode state for the knowledge-retrieval node:
// single-mode model edits, mode switching, multi-mode config edits, and a
// bootstrap effect that seeds sensible defaults.
const useKnowledgeModelConfig = ({
  inputs,
  inputRef,
  setInputs,
  selectedDatasets,
  currentProvider,
  currentModel,
  fallbackRerankModel,
  hasRerankDefaultModel,
}: Params) => {
  // Write the chosen provider/model/mode into single_retrieval_config,
  // creating an empty shell first when none exists yet.
  const handleModelChanged = useCallback((model: { provider: string, modelId: string, mode?: string }) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      if (!draft.single_retrieval_config) {
        draft.single_retrieval_config = createSingleRetrievalConfig({
          provider: '',
          name: '',
          mode: '',
          completion_params: {},
        })
      }
      const draftModel = draft.single_retrieval_config!.model
      draftModel.provider = model.provider
      draftModel.name = model.modelId
      draftModel.mode = model.mode || AppModeEnum.CHAT
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Replace completion params; deep-equal payloads are ignored to avoid
  // pointless state churn.
  const handleCompletionParamsChange = useCallback((newParams: Record<string, unknown>) => {
    if (isEqual(newParams, inputRef.current.single_retrieval_config?.model.completion_params))
      return
    const nextInputs = produce(inputRef.current, (draft) => {
      if (!draft.single_retrieval_config) {
        draft.single_retrieval_config = createSingleRetrievalConfig({
          provider: '',
          name: '',
          mode: '',
          completion_params: {},
        })
      }
      draft.single_retrieval_config!.model.completion_params = newParams
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Bootstrap effect: when neither mode is fully configured yet, seed the
  // single-mode model from the current default and normalize the multi-mode
  // config (top_k default, rerank enablement derived from availability).
  useEffect(() => {
    const currentInputs = inputRef.current
    // Already configured for multi-way with a rerank provider and a usable
    // default rerank model — nothing to seed.
    if (
      currentInputs.retrieval_mode === RETRIEVE_TYPE.multiWay
      && currentInputs.multiple_retrieval_config?.reranking_model?.provider
      && fallbackRerankModel.model
      && hasRerankDefaultModel
    ) {
      return
    }
    // Already configured for one-way with a model — nothing to seed.
    if (currentInputs.retrieval_mode === RETRIEVE_TYPE.oneWay && currentInputs.single_retrieval_config?.model?.provider)
      return
    const nextInputs = produce(currentInputs, (draft) => {
      if (currentProvider?.provider && currentModel?.model) {
        const hasSetModel = draft.single_retrieval_config?.model?.provider
        if (!hasSetModel) {
          draft.single_retrieval_config = createSingleRetrievalConfig({
            provider: currentProvider.provider,
            name: currentModel.model,
            mode: currentModel.model_properties?.mode || AppModeEnum.CHAT,
            completion_params: {},
          })
        }
      }
      const multipleRetrievalConfig = draft.multiple_retrieval_config
      draft.multiple_retrieval_config = {
        top_k: multipleRetrievalConfig?.top_k || DATASET_DEFAULT.top_k,
        score_threshold: multipleRetrievalConfig?.score_threshold,
        reranking_model: multipleRetrievalConfig?.reranking_model,
        reranking_mode: multipleRetrievalConfig?.reranking_mode,
        weights: multipleRetrievalConfig?.weights,
        reranking_enable: multipleRetrievalConfig?.reranking_enable !== undefined
          ? multipleRetrievalConfig.reranking_enable
          : Boolean(fallbackRerankModel.model && hasRerankDefaultModel),
      }
    })
    setInputs(nextInputs)
  }, [currentModel, currentProvider?.provider, fallbackRerankModel.model, hasRerankDefaultModel, inputRef, setInputs])
  // Switch retrieval mode, materializing the config the new mode needs.
  // NOTE(review): reads render-scoped `inputs` (not inputRef), so a call in
  // the same batch as another write could observe stale inputs — confirm
  // callers never chain it with other writes.
  const handleRetrievalModeChange = useCallback((newMode: RETRIEVE_TYPE) => {
    const nextInputs = produce(inputs, (draft) => {
      draft.retrieval_mode = newMode
      if (newMode === RETRIEVE_TYPE.multiWay) {
        draft.multiple_retrieval_config = getMultipleRetrievalConfig(
          draft.multiple_retrieval_config as MultipleRetrievalConfig,
          selectedDatasets,
          selectedDatasets,
          fallbackRerankModel,
        )
        return
      }
      const hasSetModel = draft.single_retrieval_config?.model?.provider
      if (!hasSetModel) {
        draft.single_retrieval_config = createSingleRetrievalConfig({
          provider: currentProvider?.provider || '',
          name: currentModel?.model || '',
          mode: currentModel?.model_properties?.mode || AppModeEnum.CHAT,
          completion_params: {},
        })
      }
    })
    setInputs(nextInputs)
  }, [currentModel?.model, currentModel?.model_properties?.mode, currentProvider?.provider, fallbackRerankModel, inputs, selectedDatasets, setInputs])
  // Re-derive the multi-way config from an edited payload.
  const handleMultipleRetrievalConfigChange = useCallback((newConfig: MultipleRetrievalConfig) => {
    const nextInputs = produce(inputs, (draft) => {
      draft.multiple_retrieval_config = getMultipleRetrievalConfig(
        newConfig,
        selectedDatasets,
        selectedDatasets,
        fallbackRerankModel,
      )
    })
    setInputs(nextInputs)
  }, [fallbackRerankModel, inputs, selectedDatasets, setInputs])
  return {
    handleModelChanged,
    handleCompletionParamsChange,
    handleRetrievalModeChange,
    handleMultipleRetrievalConfigChange,
  }
}
export default useKnowledgeModelConfig

View File

@ -1,47 +1,24 @@
import type { ValueSelector, Var } from '../../types'
import type {
HandleAddCondition,
HandleRemoveCondition,
HandleToggleConditionLogicalOperator,
HandleUpdateCondition,
KnowledgeRetrievalNodeType,
MetadataFilteringModeEnum,
MultipleRetrievalConfig,
} from './types'
import type { DataSet } from '@/models/datasets'
import { isEqual } from 'es-toolkit/predicate'
import type { ValueSelector } from '../../types'
import type { KnowledgeRetrievalNodeType } from './types'
import { produce } from 'immer'
import {
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from 'react'
import { v4 as uuid4 } from 'uuid'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useCurrentProviderAndModel, useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import useAvailableVarList from '@/app/components/workflow/nodes/_base/hooks/use-available-var-list'
import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
import { DATASET_DEFAULT } from '@/config'
import { fetchDatasets } from '@/service/datasets'
import { AppModeEnum, RETRIEVE_TYPE } from '@/types/app'
import { useDatasetsDetailStore } from '../../datasets-detail-store/store'
import {
useIsChatMode,
useNodesReadOnly,
useWorkflow,
} from '../../hooks'
import { BlockEnum, VarType } from '../../types'
import {
ComparisonOperator,
LogicalOperator,
MetadataFilteringVariableType,
} from './types'
import {
getMultipleRetrievalConfig,
getSelectedDatasetsMode,
} from './utils'
import { BlockEnum } from '../../types'
import useKnowledgeDatasetSelection from './hooks/use-knowledge-dataset-selection'
import useKnowledgeInputManager from './hooks/use-knowledge-input-manager'
import useKnowledgeMetadataConfig from './hooks/use-knowledge-metadata-config'
import useKnowledgeModelConfig from './hooks/use-knowledge-model-config'
const useConfig = (id: string, payload: KnowledgeRetrievalNodeType) => {
const { nodesReadOnly: readOnly } = useNodesReadOnly()
@ -51,34 +28,15 @@ const useConfig = (id: string, payload: KnowledgeRetrievalNodeType) => {
const startNodeId = startNode?.id
const { inputs, setInputs: doSetInputs } = useNodeCrud<KnowledgeRetrievalNodeType>(id, payload)
const updateDatasetsDetail = useDatasetsDetailStore(s => s.updateDatasetsDetail)
const inputRef = useRef(inputs)
const setInputs = useCallback((s: KnowledgeRetrievalNodeType) => {
const newInputs = produce(s, (draft) => {
if (s.retrieval_mode === RETRIEVE_TYPE.multiWay)
delete draft.single_retrieval_config
else
delete draft.multiple_retrieval_config
})
// not work in pass to draft...
doSetInputs(newInputs)
inputRef.current = newInputs
}, [doSetInputs])
const handleQueryVarChange = useCallback((newVar: ValueSelector | string) => {
const newInputs = produce(inputs, (draft) => {
draft.query_variable_selector = newVar as ValueSelector
})
setInputs(newInputs)
}, [inputs, setInputs])
const handleQueryAttachmentChange = useCallback((newVar: ValueSelector | string) => {
const newInputs = produce(inputs, (draft) => {
draft.query_attachment_selector = newVar as ValueSelector
})
setInputs(newInputs)
}, [inputs, setInputs])
const {
inputRef,
setInputs,
handleQueryVarChange,
handleQueryAttachmentChange,
} = useKnowledgeInputManager({
inputs,
doSetInputs,
})
const {
currentProvider,
@ -103,313 +61,67 @@ const useConfig = (id: string, payload: KnowledgeRetrievalNodeType) => {
: undefined,
)
const handleModelChanged = useCallback((model: { provider: string, modelId: string, mode?: string }) => {
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.single_retrieval_config) {
draft.single_retrieval_config = {
model: {
provider: '',
name: '',
mode: '',
completion_params: {},
},
}
}
const draftModel = draft.single_retrieval_config?.model
draftModel.provider = model.provider
draftModel.name = model.modelId
draftModel.mode = model.mode!
})
setInputs(newInputs)
}, [setInputs])
const handleCompletionParamsChange = useCallback((newParams: Record<string, any>) => {
// inputRef.current.single_retrieval_config?.model is old when change the provider...
if (isEqual(newParams, inputRef.current.single_retrieval_config?.model.completion_params))
return
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.single_retrieval_config) {
draft.single_retrieval_config = {
model: {
provider: '',
name: '',
mode: '',
completion_params: {},
},
}
}
draft.single_retrieval_config.model.completion_params = newParams
})
setInputs(newInputs)
}, [setInputs])
// set defaults models
useEffect(() => {
const inputs = inputRef.current
if (inputs.retrieval_mode === RETRIEVE_TYPE.multiWay && inputs.multiple_retrieval_config?.reranking_model?.provider && currentRerankModel && rerankDefaultModel)
return
if (inputs.retrieval_mode === RETRIEVE_TYPE.oneWay && inputs.single_retrieval_config?.model?.provider)
return
const newInput = produce(inputs, (draft) => {
if (currentProvider?.provider && currentModel?.model) {
const hasSetModel = draft.single_retrieval_config?.model?.provider
if (!hasSetModel) {
draft.single_retrieval_config = {
model: {
provider: currentProvider?.provider,
name: currentModel?.model,
mode: currentModel?.model_properties?.mode as string,
completion_params: {},
},
}
}
}
const multipleRetrievalConfig = draft.multiple_retrieval_config
draft.multiple_retrieval_config = {
top_k: multipleRetrievalConfig?.top_k || DATASET_DEFAULT.top_k,
score_threshold: multipleRetrievalConfig?.score_threshold,
reranking_model: multipleRetrievalConfig?.reranking_model,
reranking_mode: multipleRetrievalConfig?.reranking_mode,
weights: multipleRetrievalConfig?.weights,
reranking_enable: multipleRetrievalConfig?.reranking_enable !== undefined
? multipleRetrievalConfig.reranking_enable
: Boolean(currentRerankModel && rerankDefaultModel),
}
})
setInputs(newInput)
}, [currentProvider?.provider, currentModel, currentRerankModel, rerankDefaultModel])
const [selectedDatasets, setSelectedDatasets] = useState<DataSet[]>([])
const [rerankModelOpen, setRerankModelOpen] = useState(false)
const handleRetrievalModeChange = useCallback((newMode: RETRIEVE_TYPE) => {
const newInputs = produce(inputs, (draft) => {
draft.retrieval_mode = newMode
if (newMode === RETRIEVE_TYPE.multiWay) {
const multipleRetrievalConfig = draft.multiple_retrieval_config
draft.multiple_retrieval_config = getMultipleRetrievalConfig(multipleRetrievalConfig!, selectedDatasets, selectedDatasets, {
provider: currentRerankProvider?.provider,
model: currentRerankModel?.model,
})
}
else {
const hasSetModel = draft.single_retrieval_config?.model?.provider
if (!hasSetModel) {
draft.single_retrieval_config = {
model: {
provider: currentProvider?.provider || '',
name: currentModel?.model || '',
mode: currentModel?.model_properties?.mode as string,
completion_params: {},
},
}
}
}
})
setInputs(newInputs)
}, [currentModel?.model, currentModel?.model_properties?.mode, currentProvider?.provider, inputs, setInputs, selectedDatasets, currentRerankModel, currentRerankProvider])
const handleMultipleRetrievalConfigChange = useCallback((newConfig: MultipleRetrievalConfig) => {
const newInputs = produce(inputs, (draft) => {
const newMultipleRetrievalConfig = getMultipleRetrievalConfig(newConfig!, selectedDatasets, selectedDatasets, {
provider: currentRerankProvider?.provider,
model: currentRerankModel?.model,
})
draft.multiple_retrieval_config = newMultipleRetrievalConfig
})
setInputs(newInputs)
}, [inputs, setInputs, selectedDatasets, currentRerankModel, currentRerankProvider])
const [selectedDatasetsLoaded, setSelectedDatasetsLoaded] = useState(false)
// datasets
useEffect(() => {
(async () => {
const inputs = inputRef.current
const datasetIds = inputs.dataset_ids
if (datasetIds?.length > 0) {
const { data: dataSetsWithDetail } = await fetchDatasets({ url: '/datasets', params: { page: 1, ids: datasetIds } as any })
setSelectedDatasets(dataSetsWithDetail)
}
const newInputs = produce(inputs, (draft) => {
draft.dataset_ids = datasetIds
})
setInputs(newInputs)
setSelectedDatasetsLoaded(true)
})()
}, [])
useEffect(() => {
const inputs = inputRef.current
let query_variable_selector: ValueSelector = inputs.query_variable_selector
if (isChatMode && inputs.query_variable_selector.length === 0 && startNodeId)
query_variable_selector = [startNodeId, 'sys.query']
setInputs(produce(inputs, (draft) => {
draft.query_variable_selector = query_variable_selector
}))
}, [])
const handleOnDatasetsChange = useCallback((newDatasets: DataSet[]) => {
const {
mixtureHighQualityAndEconomic,
mixtureInternalAndExternal,
inconsistentEmbeddingModel,
allInternal,
allExternal,
} = getSelectedDatasetsMode(newDatasets)
const noMultiModalDatasets = newDatasets.every(d => !d.is_multimodal)
const newInputs = produce(inputs, (draft) => {
draft.dataset_ids = newDatasets.map(d => d.id)
if (payload.retrieval_mode === RETRIEVE_TYPE.multiWay && newDatasets.length > 0) {
const multipleRetrievalConfig = draft.multiple_retrieval_config
const newMultipleRetrievalConfig = getMultipleRetrievalConfig(multipleRetrievalConfig!, newDatasets, selectedDatasets, {
provider: currentRerankProvider?.provider,
model: currentRerankModel?.model,
})
draft.multiple_retrieval_config = newMultipleRetrievalConfig
}
if (noMultiModalDatasets)
draft.query_attachment_selector = []
})
updateDatasetsDetail(newDatasets)
setInputs(newInputs)
setSelectedDatasets(newDatasets)
if (
(allInternal && (mixtureHighQualityAndEconomic || inconsistentEmbeddingModel))
|| mixtureInternalAndExternal
|| allExternal
) {
setRerankModelOpen(true)
}
}, [inputs, setInputs, payload.retrieval_mode, selectedDatasets, currentRerankModel, currentRerankProvider, updateDatasetsDetail])
const filterStringVar = useCallback((varPayload: Var) => {
return varPayload.type === VarType.string
}, [])
const filterNumberVar = useCallback((varPayload: Var) => {
return varPayload.type === VarType.number
}, [])
const filterFileVar = useCallback((varPayload: Var) => {
return varPayload.type === VarType.file || varPayload.type === VarType.arrayFile
}, [])
const handleMetadataFilterModeChange = useCallback((newMode: MetadataFilteringModeEnum) => {
setInputs(produce(inputRef.current, (draft) => {
draft.metadata_filtering_mode = newMode
}))
}, [setInputs])
const handleAddCondition = useCallback<HandleAddCondition>(({ id, name, type }) => {
let operator: ComparisonOperator = ComparisonOperator.is
if (type === MetadataFilteringVariableType.number)
operator = ComparisonOperator.equal
const newCondition = {
id: uuid4(),
metadata_id: id, // Save metadata.id for reliable reference
name,
comparison_operator: operator,
}
const newInputs = produce(inputRef.current, (draft) => {
if (draft.metadata_filtering_conditions) {
draft.metadata_filtering_conditions.conditions.push(newCondition)
}
else {
draft.metadata_filtering_conditions = {
logical_operator: LogicalOperator.and,
conditions: [newCondition],
}
}
})
setInputs(newInputs)
}, [setInputs])
const handleRemoveCondition = useCallback<HandleRemoveCondition>((id) => {
const conditions = inputRef.current.metadata_filtering_conditions?.conditions || []
const index = conditions.findIndex(c => c.id === id)
const newInputs = produce(inputRef.current, (draft) => {
if (index > -1)
draft.metadata_filtering_conditions?.conditions.splice(index, 1)
})
setInputs(newInputs)
}, [setInputs])
const handleUpdateCondition = useCallback<HandleUpdateCondition>((id, newCondition) => {
const conditions = inputRef.current.metadata_filtering_conditions?.conditions || []
const index = conditions.findIndex(c => c.id === id)
const newInputs = produce(inputRef.current, (draft) => {
if (index > -1)
draft.metadata_filtering_conditions!.conditions[index] = newCondition
})
setInputs(newInputs)
}, [setInputs])
const handleToggleConditionLogicalOperator = useCallback<HandleToggleConditionLogicalOperator>(() => {
const oldLogicalOperator = inputRef.current.metadata_filtering_conditions?.logical_operator
const newLogicalOperator = oldLogicalOperator === LogicalOperator.and ? LogicalOperator.or : LogicalOperator.and
const newInputs = produce(inputRef.current, (draft) => {
draft.metadata_filtering_conditions!.logical_operator = newLogicalOperator
})
setInputs(newInputs)
}, [setInputs])
const handleMetadataModelChange = useCallback((model: { provider: string, modelId: string, mode?: string }) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.metadata_model_config = {
provider: model.provider,
name: model.modelId,
mode: model.mode || AppModeEnum.CHAT,
completion_params: draft.metadata_model_config?.completion_params || { temperature: 0.7 },
}
})
setInputs(newInputs)
}, [setInputs])
const handleMetadataCompletionParamsChange = useCallback((newParams: Record<string, any>) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.metadata_model_config = {
...draft.metadata_model_config!,
completion_params: newParams,
}
})
setInputs(newInputs)
}, [setInputs])
const fallbackRerankModel = useMemo(() => ({
provider: currentRerankProvider?.provider,
model: currentRerankModel?.model,
}), [currentRerankModel?.model, currentRerankProvider?.provider])
const {
availableVars: availableStringVars,
availableNodesWithParent: availableStringNodesWithParent,
} = useAvailableVarList(id, {
onlyLeafNodeVar: false,
filterVar: filterStringVar,
selectedDatasets,
selectedDatasetsLoaded,
rerankModelOpen,
setRerankModelOpen,
handleOnDatasetsChange,
showImageQueryVarSelector,
} = useKnowledgeDatasetSelection({
inputs,
inputRef,
setInputs,
payloadRetrievalMode: payload.retrieval_mode,
updateDatasetsDetail,
fallbackRerankModel,
})
const {
availableVars: availableNumberVars,
availableNodesWithParent: availableNumberNodesWithParent,
} = useAvailableVarList(id, {
onlyLeafNodeVar: false,
filterVar: filterNumberVar,
handleModelChanged,
handleCompletionParamsChange,
handleRetrievalModeChange,
handleMultipleRetrievalConfigChange,
} = useKnowledgeModelConfig({
inputs,
inputRef,
setInputs,
selectedDatasets,
currentProvider,
currentModel,
fallbackRerankModel,
hasRerankDefaultModel: Boolean(currentRerankModel && rerankDefaultModel),
})
const showImageQueryVarSelector = useMemo(() => {
return selectedDatasets.some(d => d.is_multimodal)
}, [selectedDatasets])
useEffect(() => {
const currentInputs = inputRef.current
let nextQueryVariableSelector: ValueSelector = currentInputs.query_variable_selector
if (isChatMode && currentInputs.query_variable_selector.length === 0 && startNodeId)
nextQueryVariableSelector = [startNodeId, 'sys.query']
setInputs(produce(currentInputs, (draft) => {
draft.query_variable_selector = nextQueryVariableSelector
}))
}, [inputRef, isChatMode, setInputs, startNodeId])
const metadataConfig = useKnowledgeMetadataConfig({
id,
inputRef,
setInputs,
})
return {
readOnly,
inputs,
handleQueryVarChange,
handleQueryAttachmentChange,
filterStringVar,
filterFileVar,
filterStringVar: metadataConfig.filterStringVar,
filterFileVar: metadataConfig.filterFileVar,
handleRetrievalModeChange,
handleMultipleRetrievalConfigChange,
handleModelChanged,
@ -419,17 +131,17 @@ const useConfig = (id: string, payload: KnowledgeRetrievalNodeType) => {
handleOnDatasetsChange,
rerankModelOpen,
setRerankModelOpen,
handleMetadataFilterModeChange,
handleUpdateCondition,
handleAddCondition,
handleRemoveCondition,
handleToggleConditionLogicalOperator,
handleMetadataModelChange,
handleMetadataCompletionParamsChange,
availableStringVars,
availableStringNodesWithParent,
availableNumberVars,
availableNumberNodesWithParent,
handleMetadataFilterModeChange: metadataConfig.handleMetadataFilterModeChange,
handleUpdateCondition: metadataConfig.handleUpdateCondition,
handleAddCondition: metadataConfig.handleAddCondition,
handleRemoveCondition: metadataConfig.handleRemoveCondition,
handleToggleConditionLogicalOperator: metadataConfig.handleToggleConditionLogicalOperator,
handleMetadataModelChange: metadataConfig.handleMetadataModelChange,
handleMetadataCompletionParamsChange: metadataConfig.handleMetadataCompletionParamsChange,
availableStringVars: metadataConfig.availableStringVars,
availableStringNodesWithParent: metadataConfig.availableStringNodesWithParent,
availableNumberVars: metadataConfig.availableNumberVars,
availableNumberNodesWithParent: metadataConfig.availableNumberNodesWithParent,
showImageQueryVarSelector,
}
}

View File

@ -0,0 +1,80 @@
import type { LLMNodeType } from '../types'
import { render, screen } from '@testing-library/react'
import {
useTextGenerationCurrentProviderAndModelAndModelList,
} from '@/app/components/header/account-setting/model-provider-page/hooks'
import { BlockEnum } from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import Node from '../node'
// Stub the model hook so tests control the available model list directly.
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useTextGenerationCurrentProviderAndModelAndModelList: vi.fn(),
}))
// Replace the model selector with a plain div echoing "provider:model" so
// assertions can match on text instead of the real dropdown UI.
vi.mock('@/app/components/header/account-setting/model-provider-page/model-selector', () => ({
  __esModule: true,
  default: ({ defaultModel }: { defaultModel?: { provider: string, model: string } }) => (
    <div>{defaultModel ? `${defaultModel.provider}:${defaultModel.model}` : 'no-model'}</div>
  ),
}))
// Typed handle for configuring the mocked hook per test.
const mockUseTextGeneration = vi.mocked(useTextGenerationCurrentProviderAndModelAndModelList)
// Build a minimal chat-mode LLM node payload; tests override fields as needed.
const createData = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => {
  const base: LLMNodeType = {
    title: 'LLM',
    desc: '',
    type: BlockEnum.LLM,
    model: {
      provider: 'openai',
      name: 'gpt-4o',
      mode: AppModeEnum.CHAT,
      completion_params: {},
    } as LLMNodeType['model'],
    prompt_template: [],
    context: {
      enabled: false,
      variable_selector: [],
    },
    vision: {
      enabled: false,
    },
  }
  return { ...base, ...overrides }
}
// Tests for the LLM node preview card: it surfaces the configured model via
// the (mocked) model selector, or renders nothing when no model is set.
describe('llm/node', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // The node resolves provider metadata from this model list.
    mockUseTextGeneration.mockReturnValue({
      textGenerationModelList: [{ provider: 'openai', model: 'gpt-4o' }],
    } as unknown as ReturnType<typeof useTextGenerationCurrentProviderAndModelAndModelList>)
  })
  it('renders the readonly model selector when a model is configured', () => {
    render(
      <Node
        id="llm-node"
        data={createData()}
      />,
    )
    // The mocked selector echoes "provider:model" as plain text.
    expect(screen.getByText('openai:gpt-4o')).toBeInTheDocument()
  })
  it('renders nothing when the node has no configured model', () => {
    render(
      <Node
        id="llm-node"
        data={createData({
          model: {
            provider: '',
            name: '',
            mode: AppModeEnum.CHAT,
            completion_params: {},
          } as LLMNodeType['model'],
        })}
      />,
    )
    expect(screen.queryByText('openai:gpt-4o')).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,313 @@
import type { MutableRefObject } from 'react'
import type { LLMNodeType } from '../types'
import { act, renderHook, waitFor } from '@testing-library/react'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import {
useIsChatMode,
useNodesReadOnly,
} from '@/app/components/workflow/hooks'
import useInspectVarsCrud from '@/app/components/workflow/hooks/use-inspect-vars-crud'
import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
import { useStore } from '@/app/components/workflow/store'
import { BlockEnum } from '@/app/components/workflow/types'
import { AppModeEnum, Resolution } from '@/types/app'
import useConfigVision from '../../../hooks/use-config-vision'
import useAvailableVarList from '../../_base/hooks/use-available-var-list'
import useLLMInputManager from '../hooks/use-llm-input-manager'
import useLLMPromptConfig from '../hooks/use-llm-prompt-config'
import useLLMStructuredOutputConfig from '../hooks/use-llm-structured-output-config'
import useConfig from '../use-config'
// Mock every collaborator of useConfig so each test can script its behavior.
// Workflow-level hooks (read-only flag, chat mode).
vi.mock('@/app/components/workflow/hooks', () => ({
  useNodesReadOnly: vi.fn(),
  useIsChatMode: vi.fn(),
}))
// Node CRUD + inspector-variable helpers.
vi.mock('@/app/components/workflow/nodes/_base/hooks/use-node-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('@/app/components/workflow/hooks/use-inspect-vars-crud', () => ({
  __esModule: true,
  default: vi.fn(),
}))
// Model/provider resolution hook.
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useModelListAndDefaultModelAndCurrentProviderAndModel: vi.fn(),
}))
// Workflow store (supplies nodesDefaultConfigs in beforeEach).
vi.mock('@/app/components/workflow/store', () => ({
  useStore: vi.fn(),
}))
// Variable availability + vision config hooks.
vi.mock('../../_base/hooks/use-available-var-list', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('../../../hooks/use-config-vision', () => ({
  __esModule: true,
  default: vi.fn(),
}))
// The three LLM helper hooks that useConfig composes.
vi.mock('../hooks/use-llm-input-manager', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('../hooks/use-llm-prompt-config', () => ({
  __esModule: true,
  default: vi.fn(),
}))
vi.mock('../hooks/use-llm-structured-output-config', () => ({
  __esModule: true,
  default: vi.fn(),
}))
// Typed handles over the mocked modules for per-test configuration.
const mockUseNodesReadOnly = vi.mocked(useNodesReadOnly)
const mockUseIsChatMode = vi.mocked(useIsChatMode)
const mockUseNodeCrud = vi.mocked(useNodeCrud)
const mockUseInspectVarsCrud = vi.mocked(useInspectVarsCrud)
const mockUseModelListAndDefaultModelAndCurrentProviderAndModel = vi.mocked(useModelListAndDefaultModelAndCurrentProviderAndModel)
const mockUseStore = vi.mocked(useStore)
const mockUseAvailableVarList = vi.mocked(useAvailableVarList)
const mockUseConfigVision = vi.mocked(useConfigVision)
const mockUseLLMInputManager = vi.mocked(useLLMInputManager)
const mockUseLLMPromptConfig = vi.mocked(useLLMPromptConfig)
const mockUseLLMStructuredOutputConfig = vi.mocked(useLLMStructuredOutputConfig)
// Default LLM node payload for use-config tests; tests override fields as needed.
const createPayload = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => {
  const base: LLMNodeType = {
    type: BlockEnum.LLM,
    title: 'LLM',
    desc: '',
    model: {
      provider: 'openai',
      name: 'gpt-4o',
      mode: AppModeEnum.CHAT,
      completion_params: {
        temperature: 0.7,
      },
    },
    prompt_template: [],
    context: {
      enabled: false,
      variable_selector: [],
    },
    vision: {
      enabled: false,
    },
  }
  return { ...base, ...overrides }
}
// Tests for the composite useConfig hook: verifies it wires the helper hooks
// together, forwards callbacks, hydrates a default model, and triggers the
// vision follow-up effect after a model change.
describe('llm/use-config', () => {
  // Shared spies reused across tests; reset in beforeEach.
  const setInputs = vi.fn()
  const appendDefaultPromptConfig = vi.fn()
  const deleteNodeInspectorVars = vi.fn()
  const handleVisionConfigAfterModelChanged = vi.fn()
  const handleVisionResolutionEnabledChange = vi.fn()
  const handleVisionResolutionChange = vi.fn()
  // Mutable ref standing in for the node's current inputs; setInputs writes here.
  const inputRef = { current: createPayload() } as MutableRefObject<LLMNodeType>
  // Captured options from the mocked useConfigVision, so tests can invoke onChange.
  let latestVisionOptions: {
    onChange: (payload: LLMNodeType['vision']) => void
  } | null = null
  // Canned return value for the mocked prompt-config helper hook.
  const promptConfig = {
    hasSetBlockStatus: {
      history: false,
      query: true,
      context: true,
    },
    shouldShowContextTip: false,
    isShowVars: true,
    handleVarListChange: vi.fn(),
    handleVarNameChange: vi.fn(),
    handleAddVariable: vi.fn(),
    handleAddEmptyVariable: vi.fn(),
    handleContextVarChange: vi.fn(),
    handlePromptChange: vi.fn(),
    handleMemoryChange: vi.fn(),
    handleSyeQueryChange: vi.fn(),
    filterInputVar: vi.fn(),
    filterJinja2InputVar: vi.fn(),
    filterVar: vi.fn(),
  }
  // Canned return value for the mocked structured-output helper hook.
  const structuredOutputConfig = {
    isModelSupportStructuredOutput: true,
    handleStructureOutputChange: vi.fn(),
    structuredOutputCollapsed: false,
    setStructuredOutputCollapsed: vi.fn(),
    handleStructureOutputEnableChange: vi.fn(),
    handleReasoningFormatChange: vi.fn(),
  }
  beforeEach(() => {
    vi.clearAllMocks()
    inputRef.current = createPayload()
    latestVisionOptions = null
    // setInputs mirrors real behavior by updating the shared ref.
    setInputs.mockImplementation((nextInputs: LLMNodeType) => {
      inputRef.current = nextInputs
    })
    mockUseNodesReadOnly.mockReturnValue({ nodesReadOnly: false, getNodesReadOnly: () => false })
    mockUseIsChatMode.mockReturnValue(true)
    mockUseNodeCrud.mockImplementation(() => ({
      inputs: inputRef.current,
      setInputs: vi.fn(),
    }))
    mockUseInspectVarsCrud.mockReturnValue({
      deleteNodeInspectorVars,
    } as unknown as ReturnType<typeof useInspectVarsCrud>)
    // No provider/model resolved by default; the hydration test overrides this.
    mockUseModelListAndDefaultModelAndCurrentProviderAndModel.mockReturnValue({
      modelList: [],
      defaultModel: undefined,
      currentProvider: undefined,
      currentModel: undefined,
    } as ReturnType<typeof useModelListAndDefaultModelAndCurrentProviderAndModel>)
    // Store mock supplies the node's default prompt templates.
    mockUseStore.mockImplementation((selector) => {
      return selector({
        nodesDefaultConfigs: {
          [BlockEnum.LLM]: {
            prompt_templates: {
              chat_model: { prompts: [] },
              completion_model: {
                prompt: { text: 'default completion prompt' },
                conversation_histories_role: {
                  user_prefix: 'User',
                  assistant_prefix: 'Assistant',
                },
              },
            },
          },
        },
      } as never)
    })
    mockUseAvailableVarList.mockReturnValue({
      availableVars: [{ nodeId: 'previous-node', title: 'Previous', vars: [] }],
      availableNodes: [],
      availableNodesWithParent: [{ id: 'previous-node', data: { title: 'Previous' } }],
    } as unknown as ReturnType<typeof useAvailableVarList>)
    // Capture the options object so tests can drive the vision onChange callback.
    mockUseConfigVision.mockImplementation((_model, options) => {
      latestVisionOptions = options as typeof latestVisionOptions
      return {
        isVisionModel: false,
        handleVisionResolutionEnabledChange,
        handleVisionResolutionChange,
        handleModelChanged: handleVisionConfigAfterModelChanged,
      }
    })
    mockUseLLMInputManager.mockReturnValue({
      inputRef,
      setInputs,
      appendDefaultPromptConfig,
    } as ReturnType<typeof useLLMInputManager>)
    mockUseLLMPromptConfig.mockReturnValue(promptConfig as ReturnType<typeof useLLMPromptConfig>)
    mockUseLLMStructuredOutputConfig.mockReturnValue(structuredOutputConfig as ReturnType<typeof useLLMStructuredOutputConfig>)
  })
  it('composes the helper hooks, forwards filterVar to available vars, and updates completion params', () => {
    const { result } = renderHook(() => useConfig('llm-node', inputRef.current))
    expect(result.current.readOnly).toBe(false)
    expect(result.current.isChatMode).toBe(true)
    expect(result.current.isChatModel).toBe(true)
    expect(result.current.isCompletionModel).toBe(false)
    expect(result.current.availableVars).toEqual([{ nodeId: 'previous-node', title: 'Previous', vars: [] }])
    expect(result.current.availableNodesWithParent).toEqual([{ id: 'previous-node', data: { title: 'Previous' } }])
    // The prompt-config filter must be passed through to useAvailableVarList.
    expect(mockUseAvailableVarList).toHaveBeenCalledWith('llm-node', {
      onlyLeafNodeVar: false,
      filterVar: promptConfig.filterVar,
    })
    expect(mockUseLLMInputManager).toHaveBeenCalledWith({
      inputs: inputRef.current,
      doSetInputs: expect.any(Function),
      defaultConfig: expect.objectContaining({
        prompt_templates: expect.any(Object),
      }),
      isChatModel: true,
    })
    // Drive three consecutive updates: vision change, completion params, model.
    act(() => {
      latestVisionOptions?.onChange({
        enabled: true,
        configs: {
          detail: Resolution.high,
          variable_selector: ['sys', 'files'],
        },
      })
      result.current.handleCompletionParamsChange({ top_p: 0.5 })
      result.current.handleModelChanged({
        provider: 'openai',
        modelId: 'gpt-4.1',
        mode: AppModeEnum.CHAT,
      })
    })
    // Each update lands as its own setInputs call, in order.
    expect(setInputs).toHaveBeenNthCalledWith(1, expect.objectContaining({
      vision: {
        enabled: true,
        configs: {
          detail: Resolution.high,
          variable_selector: ['sys', 'files'],
        },
      },
    }))
    expect(setInputs).toHaveBeenNthCalledWith(2, expect.objectContaining({
      model: expect.objectContaining({
        completion_params: { top_p: 0.5 },
      }),
    }))
    expect(setInputs).toHaveBeenNthCalledWith(3, expect.objectContaining({
      model: expect.objectContaining({
        provider: 'openai',
        name: 'gpt-4.1',
        mode: AppModeEnum.CHAT,
      }),
    }))
    expect(appendDefaultPromptConfig).not.toHaveBeenCalled()
  })
  it('hydrates the model from the current provider, appends mode-specific defaults, and triggers the vision follow-up effect', async () => {
    // Start with an empty model so the hydration effect kicks in.
    inputRef.current = createPayload({
      model: {
        provider: '',
        name: '',
        mode: AppModeEnum.COMPLETION,
        completion_params: {},
      },
    })
    mockUseNodeCrud.mockImplementation(() => ({
      inputs: inputRef.current,
      setInputs: vi.fn(),
    }))
    mockUseModelListAndDefaultModelAndCurrentProviderAndModel.mockReturnValue({
      modelList: [],
      defaultModel: undefined,
      currentProvider: { provider: 'anthropic' },
      currentModel: {
        model: 'claude-sonnet',
        model_properties: {
          mode: AppModeEnum.CHAT,
        },
      },
    } as unknown as ReturnType<typeof useModelListAndDefaultModelAndCurrentProviderAndModel>)
    renderHook(() => useConfig('llm-node', inputRef.current))
    // Hydration runs in an effect, so wait for the async updates to settle.
    await waitFor(() => {
      expect(appendDefaultPromptConfig).toHaveBeenCalled()
      expect(appendDefaultPromptConfig.mock.calls[0][1]).toEqual(expect.objectContaining({
        prompt_templates: expect.any(Object),
      }))
      expect(appendDefaultPromptConfig.mock.calls[0][2]).toBe(true)
      expect(setInputs).toHaveBeenCalledWith(expect.objectContaining({
        model: expect.objectContaining({
          provider: 'anthropic',
          name: 'claude-sonnet',
          mode: AppModeEnum.CHAT,
        }),
      }))
      expect(handleVisionConfigAfterModelChanged).toHaveBeenCalled()
    })
  })
})

View File

@ -0,0 +1,130 @@
import type { LLMNodeType } from '../../types'
import type { Node, NodeOutPutVar } from '@/app/components/workflow/types'
import { render, screen } from '@testing-library/react'
import { AppModeEnum } from '@/types/app'
import PanelMemorySection from '../panel-memory-section'
// Hoisted spies so the vi.mock factories below can record render props.
const mockEditor = vi.hoisted(() => vi.fn())
const mockMemoryConfig = vi.hoisted(() => vi.fn())
// Subset of the prompt editor props that the assertions inspect.
type EditorProps = {
  value?: string
  isChatApp?: boolean
  isChatModel?: boolean
  isShowContext?: boolean
}
// Replace the prompt editor with a div echoing its value for text assertions.
vi.mock('@/app/components/workflow/nodes/_base/components/prompt/editor', () => ({
  __esModule: true,
  default: (props: EditorProps) => {
    mockEditor(props)
    return <div data-testid="editor">{props.value}</div>
  },
}))
// Replace the memory-config panel with a marker div exposing canSetRoleName.
vi.mock('@/app/components/workflow/nodes/_base/components/memory-config', () => ({
  __esModule: true,
  default: (props: { canSetRoleName: boolean, config: { data?: LLMNodeType['memory'] } }) => {
    mockMemoryConfig(props)
    return <div data-testid="memory-config">{props.canSetRoleName ? 'can-set-role' : 'cannot-set-role'}</div>
  },
}))
// Build a default LLM node with a memory block enabled for memory-section tests.
const createInputs = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => {
  const base: LLMNodeType = {
    title: 'LLM',
    desc: '',
    type: 'llm' as LLMNodeType['type'],
    model: {
      provider: 'openai',
      name: 'gpt-4o',
      mode: AppModeEnum.CHAT,
      completion_params: {},
    } as LLMNodeType['model'],
    prompt_template: [],
    context: {
      enabled: false,
      variable_selector: [],
    },
    vision: {
      enabled: false,
    },
    memory: {
      window: {
        enabled: false,
        size: 10,
      },
      query_prompt_template: '',
    },
  }
  return { ...base, ...overrides }
}
// Default props for PanelMemorySection: chat mode with a chat model, no
// memory/query placeholders set yet; individual tests spread overrides on top.
const baseProps = {
  readOnly: false,
  isChatMode: true,
  isChatModel: true,
  isCompletionModel: false,
  inputs: createInputs(),
  hasSetBlockStatus: {
    history: false,
    query: true,
    context: false,
  },
  availableVars: [] as NodeOutPutVar[],
  availableNodesWithParent: [] as Node[],
  handleSyeQueryChange: vi.fn(),
  handleMemoryChange: vi.fn(),
}
// Tests for PanelMemorySection: it renders the memory editor + config only in
// chat mode, and warns when the prompt is missing the sys.query placeholder.
describe('llm/panel-memory-section', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('renders the built-in chat memory editor and memory config in chat mode', () => {
    render(<PanelMemorySection {...baseProps} />)
    expect(screen.getByText('workflow.nodes.common.memories.title')).toBeInTheDocument()
    // An empty query_prompt_template falls back to the sys.query placeholder.
    expect(screen.getByTestId('editor')).toHaveTextContent('{{#sys.query#}}')
    expect(screen.getByTestId('memory-config')).toHaveTextContent('cannot-set-role')
    expect(mockEditor).toHaveBeenCalledWith(expect.objectContaining({
      isChatApp: true,
      isChatModel: true,
      isShowContext: false,
    }))
  })
  it('shows the sys query warning when the memory prompt omits the required placeholder', () => {
    render(
      <PanelMemorySection
        {...baseProps}
        inputs={createInputs({
          memory: {
            window: {
              enabled: false,
              size: 10,
            },
            query_prompt_template: 'custom prompt',
          },
        })}
      />,
    )
    expect(screen.getByText('workflow.nodes.llm.sysQueryInUser')).toBeInTheDocument()
    expect(screen.getByTestId('editor')).toHaveTextContent('custom prompt')
  })
  it('renders nothing outside chat mode', () => {
    render(
      <PanelMemorySection
        {...baseProps}
        isChatMode={false}
        isChatModel={false}
        isCompletionModel={true}
      />,
    )
    expect(screen.queryByText('workflow.nodes.common.memories.title')).not.toBeInTheDocument()
    expect(screen.queryByTestId('editor')).not.toBeInTheDocument()
    expect(screen.queryByTestId('memory-config')).not.toBeInTheDocument()
  })
})

View File

@ -0,0 +1,120 @@
import type { ReactNode } from 'react'
import type { LLMNodeType, StructuredOutput } from '../../types'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { AppModeEnum } from '@/types/app'
import { Type } from '../../types'
import PanelOutputSection from '../panel-output-section'
// vi.hoisted lifts these spies above the vi.mock factories below, which
// vitest itself hoists to the top of the module.
const mockOutputVars = vi.hoisted(() => vi.fn())
const mockStructureOutput = vi.hoisted(() => vi.fn())
// Stub the shared OutputVars wrapper: record the `collapsed` prop and expose
// the operations/children slots under stable test ids.
vi.mock('@/app/components/workflow/nodes/_base/components/output-vars', () => ({
  __esModule: true,
  default: ({
    children,
    operations,
    collapsed,
  }: {
    children: ReactNode
    operations?: ReactNode
    collapsed?: boolean
  }) => {
    mockOutputVars({ collapsed })
    return (
      <div>
        <div data-testid="output-vars-operations">{operations}</div>
        <div data-testid="output-vars-children">{children}</div>
      </div>
    )
  },
  VarItem: ({ name }: { name: string }) => <div>{name}</div>,
}))
// Stub the structured-output editor; capture its props for assertions.
vi.mock('../structure-output', () => ({
  __esModule: true,
  default: (props: { className?: string, value?: StructuredOutput, onChange: (value: StructuredOutput) => void }) => {
    mockStructureOutput(props)
    return <div data-testid="structure-output">structured-output</div>
  },
}))
/** Builds a minimal LLM node payload for tests; fields in `overrides` win. */
const createInputs = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => {
  const base: LLMNodeType = {
    title: 'LLM',
    desc: '',
    type: 'llm' as LLMNodeType['type'],
    model: {
      provider: 'openai',
      name: 'gpt-4o',
      mode: AppModeEnum.CHAT,
      completion_params: {},
    } as LLMNodeType['model'],
    prompt_template: [],
    context: {
      enabled: false,
      variable_selector: [],
    },
    vision: {
      enabled: false,
    },
    structured_output_enabled: false,
  }
  return { ...base, ...overrides }
}
// Tests for the LLM panel output section: default output vars, structured
// output collapse state, and the enable/disable switch wiring.
describe('llm/panel-output-section', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('renders the default output vars and keeps structured output collapsed state', () => {
    render(
      <PanelOutputSection
        readOnly={false}
        inputs={createInputs()}
        isModelSupportStructuredOutput={true}
        structuredOutputCollapsed={true}
        setStructuredOutputCollapsed={vi.fn()}
        handleStructureOutputEnableChange={vi.fn()}
        handleStructureOutputChange={vi.fn()}
      />,
    )
    // The three built-in output variables are always listed.
    expect(screen.getByText('text')).toBeInTheDocument()
    expect(screen.getByText('reasoning_content')).toBeInTheDocument()
    expect(screen.getByText('usage')).toBeInTheDocument()
    // Collapse state is forwarded to the OutputVars wrapper untouched.
    expect(mockOutputVars).toHaveBeenCalledWith({ collapsed: true })
    // Structured-output editor stays hidden while the feature is disabled.
    expect(screen.queryByTestId('structure-output')).not.toBeInTheDocument()
  })
  it('renders the structured output editor and toggles the switch when structured output is enabled', async () => {
    const user = userEvent.setup()
    const handleStructureOutputEnableChange = vi.fn()
    render(
      <PanelOutputSection
        readOnly={false}
        inputs={createInputs({
          structured_output_enabled: true,
          structured_output: {
            schema: {
              type: Type.object,
              properties: {},
              additionalProperties: false,
            },
          },
        })}
        isModelSupportStructuredOutput={false}
        structuredOutputCollapsed={false}
        setStructuredOutputCollapsed={vi.fn()}
        handleStructureOutputEnableChange={handleStructureOutputEnableChange}
        handleStructureOutputChange={vi.fn()}
      />,
    )
    expect(screen.getByTestId('structure-output')).toBeInTheDocument()
    expect(mockStructureOutput).toHaveBeenCalled()
    // Clicking the switch while enabled should request disabling (false).
    await user.click(screen.getByRole('switch'))
    expect(handleStructureOutputEnableChange).toHaveBeenCalledWith(false)
  })
})
})

View File

@ -0,0 +1,122 @@
import type { FC } from 'react'
import type { LLMNodeType } from '../types'
import type { Memory, Node, NodeOutPutVar } from '@/app/components/workflow/types'
import * as React from 'react'
import { useTranslation } from 'react-i18next'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/app/components/base/ui/tooltip'
import MemoryConfig from '@/app/components/workflow/nodes/_base/components/memory-config'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
type Props = {
  readOnly: boolean
  // App-level mode: the whole section renders only inside chat apps.
  isChatMode: boolean
  // Model-level mode: the built-in memory prompt editor needs a chat model.
  isChatModel: boolean
  // Completion models may set custom role names in MemoryConfig.
  isCompletionModel: boolean
  inputs: LLMNodeType
  // Which placeholder blocks already appear in the prompt template.
  hasSetBlockStatus: {
    history: boolean
    query: boolean
    context: boolean
  }
  availableVars: NodeOutPutVar[]
  availableNodesWithParent: Node[]
  // NOTE(review): "Sye" looks like a typo for "Sys"; kept as-is because the
  // sibling hooks and callers use the same name.
  handleSyeQueryChange: (query: string) => void
  handleMemoryChange: (memory?: Memory) => void
}
const i18nPrefix = 'nodes.llm'
/**
 * Memory section of the LLM node panel.
 *
 * For chat models it shows the built-in conversation-memory editor (editing
 * `memory.query_prompt_template`, defaulting to `{{#sys.query#}}`) and warns
 * when the template omits that placeholder. In all chat-mode cases it renders
 * the shared MemoryConfig controls. Outside chat mode it renders nothing.
 */
const PanelMemorySection: FC<Props> = ({
  readOnly,
  isChatMode,
  isChatModel,
  isCompletionModel,
  inputs,
  hasSetBlockStatus,
  availableVars,
  availableNodesWithParent,
  handleSyeQueryChange,
  handleMemoryChange,
}) => {
  const { t } = useTranslation()
  // Memory only applies to chat apps; bail out entirely otherwise.
  if (!isChatMode)
    return null
  return (
    <>
      {isChatModel && !!inputs.memory && (
        <div className="mt-4">
          <div className="flex h-8 items-center justify-between rounded-lg bg-components-input-bg-normal pr-2 pl-3">
            <div className="flex items-center space-x-1">
              <div className="text-xs font-semibold text-text-secondary uppercase">{t('nodes.common.memories.title', { ns: 'workflow' })}</div>
              <Tooltip>
                <TooltipTrigger
                  delay={0}
                  render={(
                    <span className="ml-1 flex h-4 w-4 shrink-0 items-center justify-center">
                      <span aria-hidden className="i-ri-question-line h-3.5 w-3.5 text-text-quaternary hover:text-text-tertiary" />
                    </span>
                  )}
                />
                <TooltipContent>
                  {t('nodes.common.memories.tip', { ns: 'workflow' })}
                </TooltipContent>
              </Tooltip>
            </div>
            <div className="flex h-[18px] items-center rounded-[5px] border border-divider-deep bg-components-badge-bg-dimm px-1 text-xs font-semibold text-text-tertiary uppercase">
              {t('nodes.common.memories.builtIn', { ns: 'workflow' })}
            </div>
          </div>
          <div className="mt-4">
            <Editor
              title={(
                <div className="flex items-center space-x-1">
                  <div className="text-xs font-semibold text-text-secondary uppercase">user</div>
                  <Tooltip>
                    <TooltipTrigger
                      delay={0}
                      render={(
                        <span className="ml-1 flex h-4 w-4 shrink-0 items-center justify-center">
                          <span aria-hidden className="i-ri-question-line h-3.5 w-3.5 text-text-quaternary hover:text-text-tertiary" />
                        </span>
                      )}
                    />
                    <TooltipContent>
                      <div className="max-w-[180px]">{t('nodes.llm.roleDescription.user', { ns: 'workflow' })}</div>
                    </TooltipContent>
                  </Tooltip>
                </div>
              )}
              value={inputs.memory.query_prompt_template || '{{#sys.query#}}'}
              onChange={handleSyeQueryChange}
              readOnly={readOnly}
              isShowContext={false}
              isChatApp
              isChatModel
              hasSetBlockStatus={hasSetBlockStatus}
              nodesOutputVars={availableVars}
              availableNodes={availableNodesWithParent}
              isSupportFileVar
            />
            {inputs.memory.query_prompt_template && !inputs.memory.query_prompt_template.includes('{{#sys.query#}}') && (
              <div className="text-xs leading-[18px] font-normal text-[#DC6803]">
                {t(`${i18nPrefix}.sysQueryInUser`, { ns: 'workflow' })}
              </div>
            )}
          </div>
        </div>
      )}
      <MemoryConfig
        readonly={readOnly}
        config={{ data: inputs.memory }}
        onChange={handleMemoryChange}
        canSetRoleName={isCompletionModel}
      />
    </>
  )
}
export default React.memo(PanelMemorySection)

View File

@ -0,0 +1,117 @@
import type { FC } from 'react'
import type { LLMNodeType, StructuredOutput } from '../types'
import { RiAlertFill, RiQuestionLine } from '@remixicon/react'
import * as React from 'react'
import { useTranslation } from 'react-i18next'
import Switch from '@/app/components/base/switch'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/app/components/base/ui/tooltip'
import OutputVars, { VarItem } from '@/app/components/workflow/nodes/_base/components/output-vars'
import Split from '@/app/components/workflow/nodes/_base/components/split'
import StructureOutput from './structure-output'
type Props = {
  readOnly: boolean
  inputs: LLMNodeType
  // undefined while the model list is still loading or the model is unknown.
  isModelSupportStructuredOutput: boolean | undefined
  structuredOutputCollapsed: boolean
  setStructuredOutputCollapsed: (collapsed: boolean) => void
  handleStructureOutputEnableChange: (enabled: boolean) => void
  handleStructureOutputChange: (newOutput: StructuredOutput) => void
}
const i18nPrefix = 'nodes.llm'
/**
 * Output section of the LLM node panel.
 *
 * Lists the built-in output variables (text / reasoning_content / usage) and,
 * when structured output is enabled, the StructureOutput schema editor. The
 * header operations hold the structured-output toggle plus a warning tooltip
 * when the feature is on but the selected model does not support it.
 */
const PanelOutputSection: FC<Props> = ({
  readOnly,
  inputs,
  isModelSupportStructuredOutput,
  structuredOutputCollapsed,
  setStructuredOutputCollapsed,
  handleStructureOutputEnableChange,
  handleStructureOutputChange,
}) => {
  const { t } = useTranslation()
  return (
    <>
      <Split />
      <OutputVars
        collapsed={structuredOutputCollapsed}
        onCollapse={setStructuredOutputCollapsed}
        operations={(
          <div className="mr-4 flex shrink-0 items-center">
            {(!isModelSupportStructuredOutput && !!inputs.structured_output_enabled) && (
              <Tooltip>
                <TooltipTrigger
                  delay={0}
                  render={(
                    <div>
                      <RiAlertFill className="mr-1 size-4 text-text-warning-secondary" />
                    </div>
                  )}
                />
                <TooltipContent>
                  <div className="w-[232px] rounded-xl border-[0.5px] border-components-panel-border bg-components-tooltip-bg px-4 py-3.5 shadow-lg backdrop-blur-[5px]">
                    <div className="title-xs-semi-bold text-text-primary">{t('structOutput.modelNotSupported', { ns: 'app' })}</div>
                    <div className="mt-1 body-xs-regular text-text-secondary">{t('structOutput.modelNotSupportedTip', { ns: 'app' })}</div>
                  </div>
                </TooltipContent>
              </Tooltip>
            )}
            <div className="mr-0.5 system-xs-medium-uppercase text-text-tertiary">{t('structOutput.structured', { ns: 'app' })}</div>
            <Tooltip>
              <TooltipTrigger
                delay={0}
                render={(
                  <div>
                    <RiQuestionLine className="size-3.5 text-text-quaternary" />
                  </div>
                )}
              />
              <TooltipContent>
                <div className="max-w-[150px]">{t('structOutput.structuredTip', { ns: 'app' })}</div>
              </TooltipContent>
            </Tooltip>
            <Switch
              className="ml-2"
              value={!!inputs.structured_output_enabled}
              onChange={handleStructureOutputEnableChange}
              size="md"
              disabled={readOnly}
            />
          </div>
        )}
      >
        <>
          <VarItem
            name="text"
            type="string"
            description={t(`${i18nPrefix}.outputVars.output`, { ns: 'workflow' })}
          />
          <VarItem
            name="reasoning_content"
            type="string"
            description={t(`${i18nPrefix}.outputVars.reasoning_content`, { ns: 'workflow' })}
          />
          <VarItem
            name="usage"
            type="object"
            description={t(`${i18nPrefix}.outputVars.usage`, { ns: 'workflow' })}
          />
          {inputs.structured_output_enabled && (
            <>
              <Split className="mt-3" />
              <StructureOutput
                className="mt-4"
                value={inputs.structured_output}
                onChange={handleStructureOutputChange}
              />
            </>
          )}
        </>
      </OutputVars>
    </>
  )
}
export default React.memo(PanelOutputSection)

View File

@ -0,0 +1,171 @@
import type { LLMNodeType } from '../../types'
import type { LLMDefaultConfig } from '../use-llm-input-manager'
import { act, renderHook, waitFor } from '@testing-library/react'
import {
BlockEnum,
EditionType,
PromptRole,
} from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import useLLMInputManager from '../use-llm-input-manager'
/** Builds a minimal LLM node payload for these tests; overrides win. */
const createPayload = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => {
  const base: LLMNodeType = {
    type: BlockEnum.LLM,
    title: 'LLM',
    desc: '',
    model: {
      provider: 'openai',
      name: 'gpt-4o',
      mode: AppModeEnum.CHAT,
      completion_params: {},
    },
    prompt_template: [{
      role: PromptRole.system,
      text: 'You are helpful.',
      edition_type: EditionType.basic,
    }],
    context: {
      enabled: false,
      variable_selector: [],
    },
    vision: {
      enabled: false,
    },
  }
  return { ...base, ...overrides }
}
// Default templates as the backend would supply them: one chat prompt and a
// completion prompt with role prefixes for serialized conversation history.
const defaultConfig: LLMDefaultConfig = {
  prompt_templates: {
    chat_model: {
      prompts: [{
        role: PromptRole.system,
        text: 'Default chat prompt',
        edition_type: EditionType.basic,
      }],
    },
    completion_model: {
      prompt: {
        role: PromptRole.user,
        text: 'Default completion prompt',
        edition_type: EditionType.basic,
      },
      conversation_histories_role: {
        user_prefix: 'USER',
        assistant_prefix: 'ASSISTANT',
      },
    },
  },
}
describe('use-llm-input-manager', () => {
  it('hydrates the default chat prompt when the payload has no prompt template', async () => {
    const handleSetInputs = vi.fn()
    renderHook(() => useLLMInputManager({
      inputs: createPayload({
        prompt_template: undefined as unknown as LLMNodeType['prompt_template'],
      }),
      doSetInputs: handleSetInputs,
      defaultConfig,
      isChatModel: true,
    }))
    // The hydration effect runs after mount, hence waitFor.
    await waitFor(() => {
      expect(handleSetInputs).toHaveBeenCalledWith(expect.objectContaining({
        prompt_template: defaultConfig.prompt_templates!.chat_model.prompts,
      }))
    })
  })
  it('applies completion defaults and injects the default role prefix when memory has none', async () => {
    const handleSetInputs = vi.fn()
    const { result } = renderHook(() => useLLMInputManager({
      inputs: createPayload({
        model: {
          provider: 'openai',
          name: 'gpt-4o-mini',
          mode: AppModeEnum.COMPLETION,
          completion_params: {},
        },
      }),
      doSetInputs: handleSetInputs,
      defaultConfig,
      isChatModel: false,
    }))
    const draftPayload = createPayload({
      model: {
        provider: 'openai',
        name: 'gpt-4o-mini',
        mode: AppModeEnum.COMPLETION,
        completion_params: {},
      },
    })
    // `false` forces the completion branch, which records the role prefixes.
    act(() => {
      result.current.appendDefaultPromptConfig(draftPayload, defaultConfig, false)
    })
    // A memory without role_prefix should then get the recorded defaults.
    act(() => {
      result.current.setInputs(createPayload({
        model: {
          provider: 'openai',
          name: 'gpt-4o-mini',
          mode: AppModeEnum.COMPLETION,
          completion_params: {},
        },
        prompt_template: draftPayload.prompt_template,
        memory: {
          window: {
            enabled: true,
            size: 8,
          },
          query_prompt_template: '{{#sys.query#}}',
        },
      }))
    })
    expect(handleSetInputs).toHaveBeenLastCalledWith(expect.objectContaining({
      prompt_template: defaultConfig.prompt_templates!.completion_model.prompt,
      memory: expect.objectContaining({
        role_prefix: {
          user: 'USER',
          assistant: 'ASSISTANT',
        },
      }),
    }))
  })
  it('passes inputs through unchanged when memory already has a role prefix or is absent', () => {
    const handleSetInputs = vi.fn()
    const { result } = renderHook(() => useLLMInputManager({
      inputs: createPayload(),
      doSetInputs: handleSetInputs,
      defaultConfig,
      isChatModel: true,
    }))
    const payloadWithoutMemory = createPayload()
    const payloadWithRolePrefix = createPayload({
      memory: {
        role_prefix: {
          user: 'U',
          assistant: 'A',
        },
        window: {
          enabled: false,
          size: 10,
        },
        query_prompt_template: '{{#sys.query#}}',
      },
    })
    act(() => {
      result.current.setInputs(payloadWithoutMemory)
      result.current.setInputs(payloadWithRolePrefix)
    })
    // Neither call should have been rewritten by the setter wrapper.
    expect(handleSetInputs).toHaveBeenNthCalledWith(1, payloadWithoutMemory)
    expect(handleSetInputs).toHaveBeenNthCalledWith(2, payloadWithRolePrefix)
  })
})

View File

@ -0,0 +1,286 @@
import type { MutableRefObject } from 'react'
import type { LLMNodeType } from '../../types'
import { act, renderHook } from '@testing-library/react'
import {
CONTEXT_PLACEHOLDER_TEXT,
HISTORY_PLACEHOLDER_TEXT,
QUERY_PLACEHOLDER_TEXT,
} from '@/app/components/base/prompt-editor/constants'
import {
BlockEnum,
EditionType,
PromptRole,
VarType,
} from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import useLLMPromptConfig from '../use-llm-prompt-config'
// Minimal chat-mode LLM payload; overrides are spread over the defaults.
const createPayload = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => ({
  type: BlockEnum.LLM,
  title: 'LLM',
  desc: '',
  model: {
    provider: 'openai',
    name: 'gpt-4o',
    mode: AppModeEnum.CHAT,
    completion_params: {},
  },
  prompt_template: [{
    role: PromptRole.system,
    text: 'Base prompt',
    edition_type: EditionType.basic,
  }],
  prompt_config: {
    jinja2_variables: [],
  },
  context: {
    enabled: false,
    variable_selector: [],
  },
  vision: {
    enabled: false,
  },
  ...overrides,
})
// Spy setter that also mirrors the hook's ref-update contract, so chained
// mutator calls in one act() see each other's results.
const createSetInputs = (inputRef: MutableRefObject<LLMNodeType>) => {
  return vi.fn((nextInputs: LLMNodeType) => {
    inputRef.current = nextInputs
  })
}
// Tests for useLLMPromptConfig: block-status derivation, variable-type
// filters, and the immer-backed mutators for variables/context/prompt/memory.
describe('use-llm-prompt-config', () => {
  it('derives chat prompt status and filters the supported variable types', () => {
    const inputRef = {
      current: createPayload({
        prompt_template: [
          {
            role: PromptRole.system,
            text: `Question: ${QUERY_PLACEHOLDER_TEXT}`,
            edition_type: EditionType.basic,
          },
          {
            role: PromptRole.user,
            text: 'Template',
            edition_type: EditionType.jinja2,
            jinja2_text: '{{ old_name }}',
          },
        ],
        context: {
          enabled: true,
          variable_selector: [],
        },
      }),
    } as MutableRefObject<LLMNodeType>
    const { result } = renderHook(() => useLLMPromptConfig({
      inputs: inputRef.current,
      inputRef,
      isChatMode: true,
      isChatModel: true,
      setInputs: createSetInputs(inputRef),
    }))
    // Query placeholder present; no history/context blocks in the template.
    expect(result.current.hasSetBlockStatus).toEqual({
      history: false,
      query: true,
      context: false,
    })
    // Context enabled but unused in the prompt -> tip should show.
    expect(result.current.shouldShowContextTip).toBe(true)
    expect(result.current.isShowVars).toBe(true)
    expect(result.current.filterInputVar({ type: VarType.string } as never)).toBe(true)
    expect(result.current.filterInputVar({ type: VarType.boolean } as never)).toBe(false)
    expect(result.current.filterJinja2InputVar({ type: VarType.object } as never)).toBe(true)
    expect(result.current.filterJinja2InputVar({ type: VarType.file } as never)).toBe(false)
    expect(result.current.filterVar({ type: VarType.arrayObject } as never)).toBe(true)
    expect(result.current.filterVar({ type: VarType.boolean } as never)).toBe(false)
  })
  it('updates variables, context, prompt, and memory for chat prompts', () => {
    const inputRef = {
      current: createPayload({
        prompt_template: [{
          role: PromptRole.user,
          text: 'Template',
          edition_type: EditionType.jinja2,
          jinja2_text: '{{ old_name }}',
        }],
        prompt_config: {
          jinja2_variables: [{
            variable: 'old_name',
            value_selector: ['start', 'old_name'],
          }],
        },
      }),
    } as MutableRefObject<LLMNodeType>
    const handleSetInputs = createSetInputs(inputRef)
    const { result } = renderHook(() => useLLMPromptConfig({
      inputs: inputRef.current,
      inputRef,
      isChatMode: true,
      isChatModel: true,
      setInputs: handleSetInputs,
    }))
    // Exercise every mutator in sequence; the nth-call assertions below
    // depend on this exact order.
    act(() => {
      result.current.handleAddEmptyVariable()
      result.current.handleAddVariable({
        variable: 'budget',
        value_selector: ['start', 'budget'],
      })
      result.current.handleVarListChange([{
        variable: 'city',
        value_selector: ['start', 'city'],
      }])
      result.current.handleVarNameChange('old_name', 'new_name')
      result.current.handleContextVarChange(['start', 'sys.query'])
      result.current.handleContextVarChange([])
      result.current.handlePromptChange([{
        role: PromptRole.system,
        text: 'Updated prompt',
        edition_type: EditionType.basic,
      }])
      result.current.handleSyeQueryChange('{{#sys.query#}}')
      result.current.handleSyeQueryChange('custom query')
      result.current.handleMemoryChange({
        window: {
          enabled: true,
          size: 6,
        },
        query_prompt_template: 'saved memory',
      })
    })
    expect(handleSetInputs).toHaveBeenCalled()
    // Call 1: empty variable appended to the existing one.
    expect(handleSetInputs.mock.calls[0][0].prompt_config?.jinja2_variables).toHaveLength(2)
    // Call 2: explicit variable appended after the empty one.
    expect(handleSetInputs.mock.calls[1][0].prompt_config?.jinja2_variables).toEqual([
      {
        variable: 'old_name',
        value_selector: ['start', 'old_name'],
      },
      {
        variable: '',
        value_selector: [],
      },
      {
        variable: 'budget',
        value_selector: ['start', 'budget'],
      },
    ])
    // Call 3: whole list replaced.
    expect(handleSetInputs.mock.calls[2][0].prompt_config?.jinja2_variables).toEqual([{
      variable: 'city',
      value_selector: ['start', 'city'],
    }])
    // Call 4: rename rewrites {{ old_name }} inside the jinja2 text.
    expect((handleSetInputs.mock.calls[3][0].prompt_template as Array<{ jinja2_text?: string }>)[0].jinja2_text).toBe('{{ new_name }}')
    // Calls 5-6: selecting a context var enables context; clearing disables.
    expect(handleSetInputs.mock.calls[4][0].context).toEqual({
      enabled: true,
      variable_selector: ['start', 'sys.query'],
    })
    expect(handleSetInputs.mock.calls[5][0].context).toEqual({
      enabled: false,
      variable_selector: [],
    })
    expect(handleSetInputs.mock.calls[6][0].prompt_template).toEqual([{
      role: PromptRole.system,
      text: 'Updated prompt',
      edition_type: EditionType.basic,
    }])
    // Call 8: first query change creates a default memory object.
    expect(handleSetInputs.mock.calls[7][0].memory).toEqual({
      window: {
        enabled: false,
        size: 10,
      },
      query_prompt_template: '{{#sys.query#}}',
    })
    expect(handleSetInputs.mock.calls[8][0].memory?.query_prompt_template).toBe('custom query')
    expect(handleSetInputs.mock.calls[9][0].memory).toEqual({
      window: {
        enabled: true,
        size: 6,
      },
      query_prompt_template: 'saved memory',
    })
  })
  it('handles completion prompt branches, including the non-jinja early return and jinja rename flow', () => {
    const basicInputRef = {
      current: createPayload({
        model: {
          provider: 'openai',
          name: 'gpt-4o-mini',
          mode: AppModeEnum.COMPLETION,
          completion_params: {},
        },
        prompt_template: {
          role: PromptRole.user,
          text: `${CONTEXT_PLACEHOLDER_TEXT} ${HISTORY_PLACEHOLDER_TEXT} ${QUERY_PLACEHOLDER_TEXT}`,
          edition_type: EditionType.basic,
        },
      }),
    } as MutableRefObject<LLMNodeType>
    const handleSetInputs = createSetInputs(basicInputRef)
    const { result } = renderHook(() => useLLMPromptConfig({
      inputs: basicInputRef.current,
      inputRef: basicInputRef,
      isChatMode: true,
      isChatModel: false,
      setInputs: handleSetInputs,
    }))
    // All three placeholder blocks present in the single completion prompt.
    expect(result.current.hasSetBlockStatus).toEqual({
      history: true,
      query: true,
      context: true,
    })
    expect(result.current.shouldShowContextTip).toBe(false)
    expect(result.current.isShowVars).toBe(false)
    // Rename on a non-jinja template is a no-op on the text.
    act(() => {
      result.current.handleVarNameChange('old_name', 'new_name')
    })
    expect(handleSetInputs).toHaveBeenCalledWith(expect.objectContaining({
      prompt_template: expect.objectContaining({
        text: `${CONTEXT_PLACEHOLDER_TEXT} ${HISTORY_PLACEHOLDER_TEXT} ${QUERY_PLACEHOLDER_TEXT}`,
      }),
    }))
    const jinjaInputRef = {
      current: createPayload({
        model: {
          provider: 'openai',
          name: 'gpt-4o-mini',
          mode: AppModeEnum.COMPLETION,
          completion_params: {},
        },
        prompt_template: {
          role: PromptRole.user,
          text: 'Template',
          edition_type: EditionType.jinja2,
          jinja2_text: '{{ old_name }}',
        },
      }),
    } as MutableRefObject<LLMNodeType>
    const handleJinjaInputs = createSetInputs(jinjaInputRef)
    const { result: jinjaResult } = renderHook(() => useLLMPromptConfig({
      inputs: jinjaInputRef.current,
      inputRef: jinjaInputRef,
      isChatMode: false,
      isChatModel: false,
      setInputs: handleJinjaInputs,
    }))
    // Rename on a jinja completion template rewrites the jinja2 text.
    act(() => {
      jinjaResult.current.handleVarNameChange('old_name', 'budget')
    })
    expect(handleJinjaInputs).toHaveBeenCalledWith(expect.objectContaining({
      prompt_template: expect.objectContaining({
        jinja2_text: '{{ budget }}',
      }),
    }))
  })
})

View File

@ -0,0 +1,143 @@
import type { MutableRefObject } from 'react'
import type { LLMNodeType } from '../../types'
import { act, renderHook } from '@testing-library/react'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelList } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { BlockEnum } from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import { Type } from '../../types'
import useLLMStructuredOutputConfig from '../use-llm-structured-output-config'
// Replace the model-list hook so the tests control which models advertise
// the structured-output feature.
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useModelList: vi.fn(),
}))
const mockUseModelList = vi.mocked(useModelList)
// Minimal LLM node payload; overrides are spread over the defaults.
const createPayload = (overrides: Partial<LLMNodeType> = {}): LLMNodeType => ({
  type: BlockEnum.LLM,
  title: 'LLM',
  desc: '',
  model: {
    provider: 'openai',
    name: 'gpt-4o',
    mode: AppModeEnum.CHAT,
    completion_params: {},
  },
  prompt_template: [],
  context: {
    enabled: false,
    variable_selector: [],
  },
  vision: {
    enabled: false,
  },
  ...overrides,
})
describe('use-llm-structured-output-config', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('detects supported models and updates structured output state', () => {
    // Model list reports the node's model as structured-output capable.
    mockUseModelList.mockReturnValue({
      data: [{
        provider: 'openai',
        models: [{
          model: 'gpt-4o',
          features: [ModelFeatureEnum.StructuredOutput],
        }],
      }],
    } as ReturnType<typeof useModelList>)
    const inputRef = {
      current: createPayload(),
    } as MutableRefObject<LLMNodeType>
    const handleSetInputs = vi.fn((nextInputs: LLMNodeType) => {
      inputRef.current = nextInputs
    })
    const deleteNodeInspectorVars = vi.fn()
    const { result } = renderHook(() => useLLMStructuredOutputConfig({
      id: 'llm-node',
      model: inputRef.current.model,
      inputRef,
      setInputs: handleSetInputs,
      deleteNodeInspectorVars,
    }))
    expect(result.current.isModelSupportStructuredOutput).toBe(true)
    // Collapsed by default until the feature is enabled.
    expect(result.current.structuredOutputCollapsed).toBe(true)
    act(() => {
      result.current.handleStructureOutputEnableChange(true)
    })
    expect(handleSetInputs).toHaveBeenNthCalledWith(1, expect.objectContaining({
      structured_output_enabled: true,
    }))
    // Enabling auto-expands the section and clears inspector vars.
    expect(result.current.structuredOutputCollapsed).toBe(false)
    expect(deleteNodeInspectorVars).toHaveBeenCalledWith('llm-node')
    act(() => {
      result.current.handleStructureOutputChange({
        schema: {
          type: Type.object,
          properties: {
            answer: {
              type: Type.string,
            },
          },
          additionalProperties: false,
        },
      })
      result.current.handleReasoningFormatChange('separated')
      result.current.handleStructureOutputEnableChange(false)
    })
    expect(handleSetInputs).toHaveBeenNthCalledWith(2, expect.objectContaining({
      structured_output: {
        schema: {
          type: Type.object,
          properties: {
            answer: {
              type: Type.string,
            },
          },
          additionalProperties: false,
        },
      },
    }))
    expect(handleSetInputs).toHaveBeenNthCalledWith(3, expect.objectContaining({
      reasoning_format: 'separated',
    }))
    expect(handleSetInputs).toHaveBeenNthCalledWith(4, expect.objectContaining({
      structured_output_enabled: false,
    }))
  })
  it('returns undefined support when the model is missing from the list', () => {
    // Provider list does not contain the node's provider/model at all.
    mockUseModelList.mockReturnValue({
      data: [{
        provider: 'anthropic',
        models: [{
          model: 'claude',
          features: [],
        }],
      }],
      mutate: vi.fn(),
      isLoading: false,
    } as unknown as ReturnType<typeof useModelList>)
    const { result } = renderHook(() => useLLMStructuredOutputConfig({
      id: 'llm-node',
      model: createPayload().model,
      inputRef: { current: createPayload() } as MutableRefObject<LLMNodeType>,
      setInputs: vi.fn(),
      deleteNodeInspectorVars: vi.fn(),
    }))
    expect(result.current.isModelSupportStructuredOutput).toBeUndefined()
  })
})

View File

@ -0,0 +1,100 @@
import type { LLMNodeType } from '../types'
import type {
PromptItem,
RolePrefix,
} from '@/app/components/workflow/types'
import { produce } from 'immer'
import {
useCallback,
useEffect,
useRef,
useState,
} from 'react'
// Shape of the completion-model default template: one prompt plus the role
// prefixes used when serializing conversation history.
type CompletionPromptTemplate = {
  prompt: PromptItem
  conversation_histories_role: {
    user_prefix: string
    assistant_prefix: string
  }
}
/** Default prompt templates for chat and completion models (backend-provided). */
export type LLMDefaultConfig = {
  prompt_templates?: {
    chat_model: {
      prompts: PromptItem[]
    }
    completion_model: CompletionPromptTemplate
  }
}
type Params = {
  inputs: LLMNodeType
  // Raw setter; `setInputs` below wraps it to inject default role prefixes.
  doSetInputs: (inputs: LLMNodeType) => void
  defaultConfig?: LLMDefaultConfig
  isChatModel: boolean
}
/**
 * Owns the LLM node's input-state plumbing:
 * - keeps `inputRef` in sync with the latest `inputs`
 * - wraps the setter so a memory without `role_prefix` gets the default one
 * - hydrates the default prompt template once `defaultConfig` arrives and the
 *   payload has no prompt template yet
 */
const useLLMInputManager = ({
  inputs,
  doSetInputs,
  defaultConfig,
  isChatModel,
}: Params) => {
  // Role prefixes recorded when completion defaults are applied; injected
  // into any later memory payload that lacks its own role_prefix.
  const [defaultRolePrefix, setDefaultRolePrefix] = useState<RolePrefix>({ user: '', assistant: '' })
  const inputRef = useRef(inputs)
  useEffect(() => {
    inputRef.current = inputs
  }, [inputs])
  const setInputs = useCallback((newInputs: LLMNodeType) => {
    // Memory present but role prefix missing -> fill in the default one.
    if (newInputs.memory && !newInputs.memory.role_prefix) {
      const payloadWithRolePrefix = produce(newInputs, (draft) => {
        draft.memory!.role_prefix = defaultRolePrefix
      })
      doSetInputs(payloadWithRolePrefix)
      inputRef.current = payloadWithRolePrefix
      return
    }
    doSetInputs(newInputs)
    inputRef.current = newInputs
  }, [defaultRolePrefix, doSetInputs])
  // Mutates `draft` in place (meant to run inside an immer produce): applies
  // the default template for the effective mode and, for completion models,
  // records the default history role prefixes.
  const appendDefaultPromptConfig = useCallback((draft: LLMNodeType, nextDefaultConfig: LLMDefaultConfig, passInIsChatMode?: boolean) => {
    const promptTemplates = nextDefaultConfig.prompt_templates
    if (!promptTemplates)
      return
    // Falls back to the hook-level isChatModel when no explicit mode given.
    if (passInIsChatMode === undefined ? isChatModel : passInIsChatMode) {
      draft.prompt_template = promptTemplates.chat_model.prompts
      return
    }
    draft.prompt_template = promptTemplates.completion_model.prompt
    setDefaultRolePrefix({
      user: promptTemplates.completion_model.conversation_histories_role.user_prefix,
      assistant: promptTemplates.completion_model.conversation_histories_role.assistant_prefix,
    })
  }, [isChatModel])
  useEffect(() => {
    // Hydrate defaults only after the config loads and only while the
    // payload has no prompt template; once set, this effect no-ops.
    const isReady = defaultConfig && Object.keys(defaultConfig).length > 0
    if (!isReady || inputs.prompt_template)
      return
    const nextInputs = produce(inputs, (draft) => {
      appendDefaultPromptConfig(draft, defaultConfig)
    })
    setInputs(nextInputs)
  }, [appendDefaultPromptConfig, defaultConfig, inputs, setInputs])
  return {
    inputRef,
    setInputs,
    appendDefaultPromptConfig,
  }
}
export default useLLMInputManager

View File

@ -0,0 +1,240 @@
import type { Draft } from 'immer'
import type { MutableRefObject } from 'react'
import type { LLMNodeType } from '../types'
import type {
Memory,
PromptItem,
ValueSelector,
Var,
Variable,
} from '@/app/components/workflow/types'
import { produce } from 'immer'
import {
useCallback,
useMemo,
} from 'react'
import {
checkHasContextBlock,
checkHasHistoryBlock,
checkHasQueryBlock,
} from '@/app/components/base/prompt-editor/constants'
import {
EditionType,
VarType,
} from '@/app/components/workflow/types'
type Params = {
  inputs: LLMNodeType
  // Ref mirror of `inputs`; all mutators read it so that rapid successive
  // updates within one render compose instead of clobbering each other.
  inputRef: MutableRefObject<LLMNodeType>
  isChatMode: boolean
  isChatModel: boolean
  setInputs: (inputs: LLMNodeType) => void
}
/** Fresh prompt_config carrying an empty jinja2 variable list. */
const createPromptConfig = () => {
  const jinja2_variables: Variable[] = []
  return { jinja2_variables }
}
/**
 * Guarantees that `draft.prompt_config` and its `jinja2_variables` array
 * exist (creating them if needed) and returns the narrowed config.
 */
const ensurePromptConfig = (draft: Draft<LLMNodeType>): { jinja2_variables: Variable[] } => {
  if (!draft.prompt_config)
    draft.prompt_config = createPromptConfig()
  draft.prompt_config.jinja2_variables ??= []
  return draft.prompt_config as { jinja2_variables: Variable[] }
}
/** Accepts the variable types usable in the basic (non-jinja) prompt editor. */
const filterInputVar = (varPayload: Var) => {
  const supported = new Set<VarType>([
    VarType.number,
    VarType.string,
    VarType.secret,
    VarType.arrayString,
    VarType.arrayNumber,
    VarType.file,
    VarType.arrayFile,
  ])
  return supported.has(varPayload.type)
}
/** Accepts the variable types usable as jinja2 template inputs (no files). */
const filterJinja2InputVar = (varPayload: Var) => {
  const supported = new Set<VarType>([
    VarType.number,
    VarType.string,
    VarType.secret,
    VarType.arrayString,
    VarType.arrayNumber,
    VarType.arrayBoolean,
    VarType.arrayObject,
    VarType.object,
    VarType.array,
    VarType.boolean,
  ])
  return supported.has(varPayload.type)
}
/** Accepts the variable types usable in the memory query prompt editor. */
const filterMemoryPromptVar = (varPayload: Var) => {
  const supported = new Set<VarType>([
    VarType.arrayObject,
    VarType.array,
    VarType.number,
    VarType.string,
    VarType.secret,
    VarType.arrayString,
    VarType.arrayNumber,
    VarType.file,
    VarType.arrayFile,
  ])
  return supported.has(varPayload.type)
}
/**
 * Prompt-related state and callbacks for the LLM node panel.
 *
 * Derives which placeholder blocks (history/query/context) the current
 * prompt template already contains, whether the jinja2 variable panel should
 * show, and exposes immer-based mutators for variables, the context
 * selector, the prompt template, memory, and the memory query template.
 *
 * All mutators read `inputRef.current` (not `inputs`) so rapid successive
 * calls within one render compose instead of clobbering each other.
 */
const useLLMPromptConfig = ({
  inputs,
  inputRef,
  isChatMode,
  isChatModel,
  setInputs,
}: Params) => {
  // Presence of the special placeholder blocks. Chat models store an array
  // of prompt items; completion models store a single prompt item.
  const hasSetBlockStatus = useMemo(() => {
    const promptTemplate = inputs.prompt_template
    const hasSetContext = isChatModel
      ? (promptTemplate as PromptItem[]).some(item => checkHasContextBlock(item.text))
      : checkHasContextBlock((promptTemplate as PromptItem).text)
    if (!isChatMode) {
      return {
        history: false,
        query: false,
        context: hasSetContext,
      }
    }
    if (isChatModel) {
      return {
        history: false,
        query: (promptTemplate as PromptItem[]).some(item => checkHasQueryBlock(item.text)),
        context: hasSetContext,
      }
    }
    return {
      history: checkHasHistoryBlock((promptTemplate as PromptItem).text),
      query: checkHasQueryBlock((promptTemplate as PromptItem).text),
      context: hasSetContext,
    }
  }, [inputs.prompt_template, isChatMode, isChatModel])
  // Warn when context is enabled but the template never references it.
  const shouldShowContextTip = !hasSetBlockStatus.context && inputs.context.enabled
  // Show the jinja2 variable panel when any prompt uses jinja2 edition.
  const isShowVars = useMemo(() => {
    if (isChatModel)
      return (inputs.prompt_template as PromptItem[]).some(item => item.edition_type === EditionType.jinja2)
    return (inputs.prompt_template as PromptItem).edition_type === EditionType.jinja2
  }, [inputs.prompt_template, isChatModel])
  // Appends a blank jinja2 variable row for the user to fill in.
  const handleAddEmptyVariable = useCallback(() => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const promptConfig = ensurePromptConfig(draft)
      promptConfig.jinja2_variables.push({
        variable: '',
        value_selector: [],
      })
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  const handleAddVariable = useCallback((payload: Variable) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const promptConfig = ensurePromptConfig(draft)
      promptConfig.jinja2_variables.push(payload)
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Replaces the whole jinja2 variable list.
  const handleVarListChange = useCallback((newList: Variable[]) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      const promptConfig = ensurePromptConfig(draft)
      promptConfig.jinja2_variables = newList
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Renames a jinja2 variable inside the template text. Only the exact
  // `{{ name }}` spelling (single spaces) is rewritten.
  const handleVarNameChange = useCallback((oldName: string, newName: string) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      if (isChatModel) {
        const promptTemplate = draft.prompt_template as PromptItem[]
        promptTemplate
          .filter(item => item.edition_type === EditionType.jinja2)
          .forEach((item) => {
            item.jinja2_text = (item.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`)
          })
        return
      }
      const promptTemplate = draft.prompt_template as PromptItem
      // Non-jinja completion templates are left untouched.
      if (promptTemplate.edition_type !== EditionType.jinja2)
        return
      promptTemplate.jinja2_text = (promptTemplate.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`)
    })
    setInputs(nextInputs)
  }, [inputRef, isChatModel, setInputs])
  // Selecting a context variable enables context; clearing it disables.
  const handleContextVarChange = useCallback((newVar: ValueSelector | string) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.context.variable_selector = (newVar as ValueSelector) || []
      draft.context.enabled = !!(newVar && newVar.length > 0)
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.prompt_template = newPrompt
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  const handleMemoryChange = useCallback((newMemory?: Memory) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      draft.memory = newMemory
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  // Updates the memory query template, creating a default memory object
  // (window disabled, size 10) on first use.
  const handleSyeQueryChange = useCallback((newQuery: string) => {
    const nextInputs = produce(inputRef.current, (draft) => {
      if (!draft.memory) {
        draft.memory = {
          window: {
            enabled: false,
            size: 10,
          },
          query_prompt_template: newQuery,
        }
        return
      }
      draft.memory.query_prompt_template = newQuery
    })
    setInputs(nextInputs)
  }, [inputRef, setInputs])
  return {
    hasSetBlockStatus,
    shouldShowContextTip,
    isShowVars,
    handleAddEmptyVariable,
    handleAddVariable,
    handleVarListChange,
    handleVarNameChange,
    handleContextVarChange,
    handlePromptChange,
    handleMemoryChange,
    handleSyeQueryChange,
    filterInputVar,
    filterJinja2InputVar,
    filterVar: filterMemoryPromptVar,
  }
}
export default useLLMPromptConfig

View File

@ -0,0 +1,74 @@
import type { MutableRefObject } from 'react'
import type { LLMNodeType, StructuredOutput } from '../types'
import { produce } from 'immer'
import {
useCallback,
useState,
} from 'react'
import {
ModelFeatureEnum,
ModelTypeEnum,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelList } from '@/app/components/header/account-setting/model-provider-page/hooks'
type Params = {
  id: string
  model: LLMNodeType['model']
  inputRef: MutableRefObject<LLMNodeType>
  setInputs: (inputs: LLMNodeType) => void
  deleteNodeInspectorVars: (nodeId: string) => void
}

/**
 * Structured-output state for the LLM node: whether the currently selected
 * model advertises structured-output support, the collapsed state of the
 * output-vars panel, and change handlers that write back into the node data.
 */
const useLLMStructuredOutputConfig = ({
  id,
  model,
  inputRef,
  setInputs,
  deleteNodeInspectorVars,
}: Params) => {
  const { data: modelList } = useModelList(ModelTypeEnum.textGeneration)

  // Resolve the selected provider/model pair and inspect its feature flags.
  const selectedProvider = modelList?.find(item => item.provider === model?.provider)
  const selectedModel = selectedProvider?.models.find(item => item.model === model?.name)
  const isModelSupportStructuredOutput = selectedModel?.features?.includes(ModelFeatureEnum.StructuredOutput)

  // The output-vars panel starts collapsed.
  const [structuredOutputCollapsed, setStructuredOutputCollapsed] = useState(true)

  // Apply an immer recipe to the freshest inputs (via the ref, to avoid stale
  // closures) and commit the produced state.
  const commit = useCallback((recipe: (draft: LLMNodeType) => void) => {
    setInputs(produce(inputRef.current, recipe))
  }, [inputRef, setInputs])

  const handleStructureOutputEnableChange = useCallback((enabled: boolean) => {
    commit((draft) => {
      draft.structured_output_enabled = enabled
    })
    // Auto-expand the panel when the feature is switched on.
    if (enabled)
      setStructuredOutputCollapsed(false)
    // Inspector vars are stale once the output contract changes.
    deleteNodeInspectorVars(id)
  }, [commit, deleteNodeInspectorVars, id])

  const handleStructureOutputChange = useCallback((newOutput: StructuredOutput) => {
    commit((draft) => {
      draft.structured_output = newOutput
    })
    deleteNodeInspectorVars(id)
  }, [commit, deleteNodeInspectorVars, id])

  const handleReasoningFormatChange = useCallback((reasoningFormat: 'tagged' | 'separated') => {
    commit((draft) => {
      draft.reasoning_format = reasoningFormat
    })
  }, [commit])

  return {
    isModelSupportStructuredOutput,
    structuredOutputCollapsed,
    setStructuredOutputCollapsed,
    handleStructureOutputEnableChange,
    handleStructureOutputChange,
    handleReasoningFormatChange,
  }
}
export default useLLMStructuredOutputConfig

View File

@ -1,29 +1,24 @@
import type { FC } from 'react'
import type { LLMNodeType } from './types'
import type { NodePanelProps } from '@/app/components/workflow/types'
import { RiAlertFill, RiQuestionLine } from '@remixicon/react'
import * as React from 'react'
import { useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import AddButton2 from '@/app/components/base/button/add-button'
import Switch from '@/app/components/base/switch'
import Tooltip from '@/app/components/base/tooltip'
import { toast } from '@/app/components/base/ui/toast'
import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal'
import Field from '@/app/components/workflow/nodes/_base/components/field'
import OutputVars, { VarItem } from '@/app/components/workflow/nodes/_base/components/output-vars'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
import Split from '@/app/components/workflow/nodes/_base/components/split'
import VarList from '@/app/components/workflow/nodes/_base/components/variable/var-list'
import { useProviderContextSelector } from '@/context/provider-context'
import { fetchAndMergeValidCompletionParams } from '@/utils/completion-params'
import { extractPluginId } from '../../utils/plugin'
import ConfigVision from '../_base/components/config-vision'
import MemoryConfig from '../_base/components/memory-config'
import VarReferencePicker from '../_base/components/variable/var-reference-picker'
import ConfigPrompt from './components/config-prompt'
import PanelMemorySection from './components/panel-memory-section'
import PanelOutputSection from './components/panel-output-section'
import ReasoningFormatConfig from './components/reasoning-format-config'
import StructureOutput from './components/structure-output'
import useConfig from './use-config'
import { getLLMModelIssue, LLMModelIssueCode } from './utils'
@ -193,61 +188,20 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
</Field>
)}
{/* Memory put place examples. */}
{isChatMode && isChatModel && !!inputs.memory && (
<div className="mt-4">
<div className="flex h-8 items-center justify-between rounded-lg bg-components-input-bg-normal pl-3 pr-2">
<div className="flex items-center space-x-1">
<div className="text-xs font-semibold uppercase text-text-secondary">{t('nodes.common.memories.title', { ns: 'workflow' })}</div>
<Tooltip
popupContent={t('nodes.common.memories.tip', { ns: 'workflow' })}
triggerClassName="w-4 h-4"
/>
</div>
<div className="flex h-[18px] items-center rounded-[5px] border border-divider-deep bg-components-badge-bg-dimm px-1 text-xs font-semibold uppercase text-text-tertiary">{t('nodes.common.memories.builtIn', { ns: 'workflow' })}</div>
</div>
{/* Readonly User Query */}
<div className="mt-4">
<Editor
title={(
<div className="flex items-center space-x-1">
<div className="text-xs font-semibold uppercase text-text-secondary">user</div>
<Tooltip
popupContent={
<div className="max-w-[180px]">{t('nodes.llm.roleDescription.user', { ns: 'workflow' })}</div>
}
triggerClassName="w-4 h-4"
/>
</div>
)}
value={inputs.memory.query_prompt_template || '{{#sys.query#}}'}
onChange={handleSyeQueryChange}
readOnly={readOnly}
isShowContext={false}
isChatApp
isChatModel
hasSetBlockStatus={hasSetBlockStatus}
nodesOutputVars={availableVars}
availableNodes={availableNodesWithParent}
isSupportFileVar
/>
{inputs.memory.query_prompt_template && !inputs.memory.query_prompt_template.includes('{{#sys.query#}}') && (
<div className="text-xs font-normal leading-[18px] text-[#DC6803]">{t(`${i18nPrefix}.sysQueryInUser`, { ns: 'workflow' })}</div>
)}
</div>
</div>
)}
{/* Memory */}
{isChatMode && (
<>
<Split />
<MemoryConfig
readonly={readOnly}
config={{ data: inputs.memory }}
onChange={handleMemoryChange}
canSetRoleName={isCompletionModel}
<PanelMemorySection
readOnly={readOnly}
isChatMode={isChatMode}
isChatModel={isChatModel}
isCompletionModel={isCompletionModel}
inputs={inputs}
hasSetBlockStatus={hasSetBlockStatus}
availableVars={availableVars}
availableNodesWithParent={availableNodesWithParent}
handleSyeQueryChange={handleSyeQueryChange}
handleMemoryChange={handleMemoryChange}
/>
</>
)}
@ -271,74 +225,15 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
readonly={readOnly}
/>
</div>
<Split />
<OutputVars
collapsed={structuredOutputCollapsed}
onCollapse={setStructuredOutputCollapsed}
operations={(
<div className="mr-4 flex shrink-0 items-center">
{(!isModelSupportStructuredOutput && !!inputs.structured_output_enabled) && (
<Tooltip
noDecoration
popupContent={(
<div className="w-[232px] rounded-xl border-[0.5px] border-components-panel-border bg-components-tooltip-bg px-4 py-3.5 shadow-lg backdrop-blur-[5px]">
<div className="text-text-primary title-xs-semi-bold">{t('structOutput.modelNotSupported', { ns: 'app' })}</div>
<div className="mt-1 text-text-secondary body-xs-regular">{t('structOutput.modelNotSupportedTip', { ns: 'app' })}</div>
</div>
)}
>
<div>
<RiAlertFill className="mr-1 size-4 text-text-warning-secondary" />
</div>
</Tooltip>
)}
<div className="mr-0.5 text-text-tertiary system-xs-medium-uppercase">{t('structOutput.structured', { ns: 'app' })}</div>
<Tooltip popupContent={
<div className="max-w-[150px]">{t('structOutput.structuredTip', { ns: 'app' })}</div>
}
>
<div>
<RiQuestionLine className="size-3.5 text-text-quaternary" />
</div>
</Tooltip>
<Switch
className="ml-2"
value={!!inputs.structured_output_enabled}
onChange={handleStructureOutputEnableChange}
size="md"
disabled={readOnly}
/>
</div>
)}
>
<>
<VarItem
name="text"
type="string"
description={t(`${i18nPrefix}.outputVars.output`, { ns: 'workflow' })}
/>
<VarItem
name="reasoning_content"
type="string"
description={t(`${i18nPrefix}.outputVars.reasoning_content`, { ns: 'workflow' })}
/>
<VarItem
name="usage"
type="object"
description={t(`${i18nPrefix}.outputVars.usage`, { ns: 'workflow' })}
/>
{inputs.structured_output_enabled && (
<>
<Split className="mt-3" />
<StructureOutput
className="mt-4"
value={inputs.structured_output}
onChange={handleStructureOutputChange}
/>
</>
)}
</>
</OutputVars>
<PanelOutputSection
readOnly={readOnly}
inputs={inputs}
isModelSupportStructuredOutput={isModelSupportStructuredOutput}
structuredOutputCollapsed={structuredOutputCollapsed}
setStructuredOutputCollapsed={setStructuredOutputCollapsed}
handleStructureOutputEnableChange={handleStructureOutputEnableChange}
handleStructureOutputChange={handleStructureOutputChange}
/>
</div>
)
}

View File

@ -1,13 +1,15 @@
import type { Memory, PromptItem, ValueSelector, Var, Variable } from '../../types'
import type { LLMNodeType, StructuredOutput } from './types'
import type { LLMDefaultConfig } from './hooks/use-llm-input-manager'
import type { LLMNodeType } from './types'
import { produce } from 'immer'
import { useCallback, useEffect, useRef, useState } from 'react'
import { checkHasContextBlock, checkHasHistoryBlock, checkHasQueryBlock } from '@/app/components/base/prompt-editor/constants'
import {
ModelFeatureEnum,
useCallback,
useEffect,
useState,
} from 'react'
import {
ModelTypeEnum,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelList, useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import useInspectVarsCrud from '@/app/components/workflow/hooks/use-inspect-vars-crud'
import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
import { AppModeEnum } from '@/types/app'
@ -17,95 +19,34 @@ import {
} from '../../hooks'
import useConfigVision from '../../hooks/use-config-vision'
import { useStore } from '../../store'
import { EditionType, VarType } from '../../types'
import useAvailableVarList from '../_base/hooks/use-available-var-list'
import useLLMInputManager from './hooks/use-llm-input-manager'
import useLLMPromptConfig from './hooks/use-llm-prompt-config'
import useLLMStructuredOutputConfig from './hooks/use-llm-structured-output-config'
const useConfig = (id: string, payload: LLMNodeType) => {
const { nodesReadOnly: readOnly } = useNodesReadOnly()
const isChatMode = useIsChatMode()
const defaultConfig = useStore(s => s.nodesDefaultConfigs)?.[payload.type]
const [defaultRolePrefix, setDefaultRolePrefix] = useState<{ user: string, assistant: string }>({ user: '', assistant: '' })
const defaultConfig = useStore(s => s.nodesDefaultConfigs)?.[payload.type] as LLMDefaultConfig | undefined
const { inputs, setInputs: doSetInputs } = useNodeCrud<LLMNodeType>(id, payload)
const inputRef = useRef(inputs)
useEffect(() => {
inputRef.current = inputs
}, [inputs])
const { deleteNodeInspectorVars } = useInspectVarsCrud()
const setInputs = useCallback((newInputs: LLMNodeType) => {
if (newInputs.memory && !newInputs.memory.role_prefix) {
const newPayload = produce(newInputs, (draft) => {
draft.memory!.role_prefix = defaultRolePrefix
})
doSetInputs(newPayload)
inputRef.current = newPayload
return
}
doSetInputs(newInputs)
inputRef.current = newInputs
}, [doSetInputs, defaultRolePrefix])
// model
const model = inputs.model
const modelMode = inputs.model?.mode
const isChatModel = modelMode === AppModeEnum.CHAT
const isCompletionModel = !isChatModel
const hasSetBlockStatus = (() => {
const promptTemplate = inputs.prompt_template
const hasSetContext = isChatModel ? (promptTemplate as PromptItem[]).some(item => checkHasContextBlock(item.text)) : checkHasContextBlock((promptTemplate as PromptItem).text)
if (!isChatMode) {
return {
history: false,
query: false,
context: hasSetContext,
}
}
if (isChatModel) {
return {
history: false,
query: (promptTemplate as PromptItem[]).some(item => checkHasQueryBlock(item.text)),
context: hasSetContext,
}
}
else {
return {
history: checkHasHistoryBlock((promptTemplate as PromptItem).text),
query: checkHasQueryBlock((promptTemplate as PromptItem).text),
context: hasSetContext,
}
}
})()
const {
inputRef,
setInputs,
appendDefaultPromptConfig,
} = useLLMInputManager({
inputs,
doSetInputs,
defaultConfig,
isChatModel,
})
const shouldShowContextTip = !hasSetBlockStatus.context && inputs.context.enabled
const appendDefaultPromptConfig = useCallback((draft: LLMNodeType, defaultConfig: any, passInIsChatMode?: boolean) => {
const promptTemplates = defaultConfig.prompt_templates
if (passInIsChatMode === undefined ? isChatModel : passInIsChatMode) {
draft.prompt_template = promptTemplates.chat_model.prompts
}
else {
draft.prompt_template = promptTemplates.completion_model.prompt
setDefaultRolePrefix({
user: promptTemplates.completion_model.conversation_histories_role.user_prefix,
assistant: promptTemplates.completion_model.conversation_histories_role.assistant_prefix,
})
}
}, [isChatModel])
useEffect(() => {
const isReady = defaultConfig && Object.keys(defaultConfig).length > 0
if (isReady && !inputs.prompt_template) {
const newInputs = produce(inputs, (draft) => {
appendDefaultPromptConfig(draft, defaultConfig)
})
setInputs(newInputs)
}
}, [defaultConfig, isChatModel])
const { deleteNodeInspectorVars } = useInspectVarsCrud()
const [modelChanged, setModelChanged] = useState(false)
const {
@ -129,7 +70,7 @@ const useConfig = (id: string, payload: LLMNodeType) => {
})
const handleModelChanged = useCallback((model: { provider: string, modelId: string, mode?: string }) => {
const newInputs = produce(inputRef.current, (draft) => {
const nextInputs = produce(inputRef.current, (draft) => {
draft.model.provider = model.provider
draft.model.name = model.modelId
draft.model.mode = model.mode!
@ -137,7 +78,7 @@ const useConfig = (id: string, payload: LLMNodeType) => {
if (isModeChange && defaultConfig && Object.keys(defaultConfig).length > 0)
appendDefaultPromptConfig(draft, defaultConfig, model.mode === AppModeEnum.CHAT)
})
setInputs(newInputs)
setInputs(nextInputs)
setModelChanged(true)
}, [setInputs, defaultConfig, appendDefaultPromptConfig])
@ -165,176 +106,28 @@ const useConfig = (id: string, payload: LLMNodeType) => {
setModelChanged(false)
handleVisionConfigAfterModelChanged()
}, [isVisionModel, modelChanged])
const promptConfig = useLLMPromptConfig({
inputs,
inputRef,
isChatMode,
isChatModel,
setInputs,
})
// variables
const isShowVars = (() => {
if (isChatModel)
return (inputs.prompt_template as PromptItem[]).some(item => item.edition_type === EditionType.jinja2)
return (inputs.prompt_template as PromptItem).edition_type === EditionType.jinja2
})()
const handleAddEmptyVariable = useCallback(() => {
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.prompt_config) {
draft.prompt_config = {
jinja2_variables: [],
}
}
if (!draft.prompt_config.jinja2_variables)
draft.prompt_config.jinja2_variables = []
draft.prompt_config.jinja2_variables.push({
variable: '',
value_selector: [],
})
})
setInputs(newInputs)
}, [setInputs])
const handleAddVariable = useCallback((payload: Variable) => {
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.prompt_config) {
draft.prompt_config = {
jinja2_variables: [],
}
}
if (!draft.prompt_config.jinja2_variables)
draft.prompt_config.jinja2_variables = []
draft.prompt_config.jinja2_variables.push(payload)
})
setInputs(newInputs)
}, [setInputs])
const handleVarListChange = useCallback((newList: Variable[]) => {
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.prompt_config) {
draft.prompt_config = {
jinja2_variables: [],
}
}
if (!draft.prompt_config.jinja2_variables)
draft.prompt_config.jinja2_variables = []
draft.prompt_config.jinja2_variables = newList
})
setInputs(newInputs)
}, [setInputs])
const handleVarNameChange = useCallback((oldName: string, newName: string) => {
const newInputs = produce(inputRef.current, (draft) => {
if (isChatModel) {
const promptTemplate = draft.prompt_template as PromptItem[]
promptTemplate.filter(item => item.edition_type === EditionType.jinja2).forEach((item) => {
item.jinja2_text = (item.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`)
})
}
else {
if ((draft.prompt_template as PromptItem).edition_type !== EditionType.jinja2)
return
const promptTemplate = draft.prompt_template as PromptItem
promptTemplate.jinja2_text = (promptTemplate.jinja2_text || '').replaceAll(`{{ ${oldName} }}`, `{{ ${newName} }}`)
}
})
setInputs(newInputs)
}, [isChatModel, setInputs])
// context
const handleContextVarChange = useCallback((newVar: ValueSelector | string) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.context.variable_selector = newVar as ValueSelector || []
draft.context.enabled = !!(newVar && newVar.length > 0)
})
setInputs(newInputs)
}, [setInputs])
const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.prompt_template = newPrompt
})
setInputs(newInputs)
}, [setInputs])
const handleMemoryChange = useCallback((newMemory?: Memory) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.memory = newMemory
})
setInputs(newInputs)
}, [setInputs])
const handleSyeQueryChange = useCallback((newQuery: string) => {
const newInputs = produce(inputRef.current, (draft) => {
if (!draft.memory) {
draft.memory = {
window: {
enabled: false,
size: 10,
},
query_prompt_template: newQuery,
}
}
else {
draft.memory.query_prompt_template = newQuery
}
})
setInputs(newInputs)
}, [setInputs])
// structure output
const { data: modelList } = useModelList(ModelTypeEnum.textGeneration)
const isModelSupportStructuredOutput = modelList
?.find(provideItem => provideItem.provider === model?.provider)
?.models
.find(modelItem => modelItem.model === model?.name)
?.features
?.includes(ModelFeatureEnum.StructuredOutput)
const [structuredOutputCollapsed, setStructuredOutputCollapsed] = useState(true)
const handleStructureOutputEnableChange = useCallback((enabled: boolean) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.structured_output_enabled = enabled
})
setInputs(newInputs)
if (enabled)
setStructuredOutputCollapsed(false)
deleteNodeInspectorVars(id)
}, [setInputs, deleteNodeInspectorVars, id])
const handleStructureOutputChange = useCallback((newOutput: StructuredOutput) => {
const newInputs = produce(inputRef.current, (draft) => {
draft.structured_output = newOutput
})
setInputs(newInputs)
deleteNodeInspectorVars(id)
}, [setInputs, deleteNodeInspectorVars, id])
const filterInputVar = useCallback((varPayload: Var) => {
return [VarType.number, VarType.string, VarType.secret, VarType.arrayString, VarType.arrayNumber, VarType.file, VarType.arrayFile].includes(varPayload.type)
}, [])
const filterJinja2InputVar = useCallback((varPayload: Var) => {
return [VarType.number, VarType.string, VarType.secret, VarType.arrayString, VarType.arrayNumber, VarType.arrayBoolean, VarType.arrayObject, VarType.object, VarType.array, VarType.boolean].includes(varPayload.type)
}, [])
const filterMemoryPromptVar = useCallback((varPayload: Var) => {
return [VarType.arrayObject, VarType.array, VarType.number, VarType.string, VarType.secret, VarType.arrayString, VarType.arrayNumber, VarType.file, VarType.arrayFile].includes(varPayload.type)
}, [])
// reasoning format
const handleReasoningFormatChange = useCallback((reasoningFormat: 'tagged' | 'separated') => {
const newInputs = produce(inputRef.current, (draft) => {
draft.reasoning_format = reasoningFormat
})
setInputs(newInputs)
}, [setInputs])
const structuredOutputConfig = useLLMStructuredOutputConfig({
id,
model,
inputRef,
setInputs,
deleteNodeInspectorVars,
})
const {
availableVars,
availableNodesWithParent,
} = useAvailableVarList(id, {
onlyLeafNodeVar: false,
filterVar: filterMemoryPromptVar,
filterVar: promptConfig.filterVar,
})
return {
@ -343,33 +136,33 @@ const useConfig = (id: string, payload: LLMNodeType) => {
inputs,
isChatModel,
isCompletionModel,
hasSetBlockStatus,
shouldShowContextTip,
hasSetBlockStatus: promptConfig.hasSetBlockStatus,
shouldShowContextTip: promptConfig.shouldShowContextTip,
isVisionModel,
handleModelChanged,
handleCompletionParamsChange,
isShowVars,
handleVarListChange,
handleVarNameChange,
handleAddVariable,
handleAddEmptyVariable,
handleContextVarChange,
filterInputVar,
filterVar: filterMemoryPromptVar,
isShowVars: promptConfig.isShowVars,
handleVarListChange: promptConfig.handleVarListChange,
handleVarNameChange: promptConfig.handleVarNameChange,
handleAddVariable: promptConfig.handleAddVariable,
handleAddEmptyVariable: promptConfig.handleAddEmptyVariable,
handleContextVarChange: promptConfig.handleContextVarChange,
filterInputVar: promptConfig.filterInputVar,
filterVar: promptConfig.filterVar,
availableVars,
availableNodesWithParent,
handlePromptChange,
handleMemoryChange,
handleSyeQueryChange,
handlePromptChange: promptConfig.handlePromptChange,
handleMemoryChange: promptConfig.handleMemoryChange,
handleSyeQueryChange: promptConfig.handleSyeQueryChange,
handleVisionResolutionEnabledChange,
handleVisionResolutionChange,
isModelSupportStructuredOutput,
handleStructureOutputChange,
structuredOutputCollapsed,
setStructuredOutputCollapsed,
handleStructureOutputEnableChange,
filterJinja2InputVar,
handleReasoningFormatChange,
isModelSupportStructuredOutput: structuredOutputConfig.isModelSupportStructuredOutput,
handleStructureOutputChange: structuredOutputConfig.handleStructureOutputChange,
structuredOutputCollapsed: structuredOutputConfig.structuredOutputCollapsed,
setStructuredOutputCollapsed: structuredOutputConfig.setStructuredOutputCollapsed,
handleStructureOutputEnableChange: structuredOutputConfig.handleStructureOutputEnableChange,
filterJinja2InputVar: promptConfig.filterJinja2InputVar,
handleReasoningFormatChange: structuredOutputConfig.handleReasoningFormatChange,
}
}

View File

@ -9870,11 +9870,6 @@
"count": 1
}
},
"app/components/workflow/nodes/knowledge-retrieval/use-config.ts": {
"ts/no-explicit-any": {
"count": 3
}
},
"app/components/workflow/nodes/knowledge-retrieval/use-single-run-form-params.ts": {
"ts/no-explicit-any": {
"count": 5
@ -10091,11 +10086,8 @@
}
},
"app/components/workflow/nodes/llm/panel.tsx": {
"no-restricted-imports": {
"count": 1
},
"tailwindcss/enforce-consistent-class-order": {
"count": 9
"count": 1
}
},
"app/components/workflow/nodes/llm/types.ts": {
@ -10108,7 +10100,7 @@
"count": 2
},
"ts/no-explicit-any": {
"count": 2
"count": 1
}
},
"app/components/workflow/nodes/llm/use-single-run-form-params.ts": {