mirror of https://github.com/langgenius/dify.git (synced 2026-05-06 02:18:08 +08:00)

fix: memory support switch

The diff below makes the LLM node's memory field optional, drives the panel's memory section off the app's chat mode instead of the selected model's type, and injects default role prefixes only when memory is actually switched on.
@@ -16,13 +16,6 @@ const nodeDefault: NodeDefault<LLMNodeType> = {
       },
     },
     variables: [],
-    memory: {
-      role_prefix: undefined,
-      window: {
-        enabled: false,
-        size: 50,
-      },
-    },
     prompt_template: [{
       role: PromptRole.system,
       text: '',
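With the memory block dropped from the node defaults, a freshly added LLM node carries no memory config at all, so the memory switch starts out off. A minimal sketch of that idea in TypeScript, using simplified stand-in types rather than Dify's real ones:

// Simplified stand-ins for illustration; not Dify's actual types.
type MemorySketch = {
  role_prefix?: { user: string; assistant: string }
  window: { enabled: boolean; size?: number }
}
type LLMNodeDefaults = {
  variables: unknown[]
  memory?: MemorySketch // optional: absence means the memory switch is off
}

const defaults: LLMNodeDefaults = {
  variables: [],
  // no `memory` entry -- toggling the switch on creates one later
}

console.log('memory on by default?', defaults.memory !== undefined) // false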
@@ -180,11 +180,11 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
             <div className='text-xs text-gray-300'>Memory examples(Designing)</div>
           )} */}
           {/* Memory */}
-          {isChatModel && (
+          {isChatMode && (
            <>
              <MemoryConfig
                readonly={readOnly}
-                payload={inputs.memory}
+                config={{ data: inputs.memory }}
                onChange={handleMemoryChange}
                canSetRoleName={isCompletionModel}
              />
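The panel now gates the memory section on isChatMode (is the workflow a conversation?) rather than isChatModel (is the selected model a chat model?), and hands the value to MemoryConfig through a config object. A sketch of the distinction the rename relies on, with hypothetical flag values:

// Hypothetical flag values for illustration.
const isChatMode = true // the workflow itself is conversational
const isChatModel = false // the selected model is a completion model
const isCompletionModel = !isChatModel

// After this change the memory UI follows the conversation, not the model,
// so a completion model used in chat mode still exposes memory settings.
const showMemory = isChatMode
const canSetRoleName = isCompletionModel // only completion models name the roles
console.log({ showMemory, canSetRoleName })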
@@ -5,7 +5,7 @@ export type LLMNodeType = CommonNodeType & {
   model: ModelConfig
   variables: Variable[]
   prompt_template: PromptItem[] | PromptItem
-  memory: Memory
+  memory?: Memory
   context: {
     enabled: boolean
     variable_selector: ValueSelector
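With memory now optional on LLMNodeType, readers of the field need a guard. A small sketch, again with simplified stand-in types:

type MemorySketch = { window: { enabled: boolean; size?: number } }
type LLMNodeSketch = { title: string; memory?: MemorySketch } // stand-in, not the real type

const node: LLMNodeSketch = { title: 'LLM' } // memory switch off

// Optional chaining keeps reads safe whether or not memory is configured.
const windowEnabled = node.memory?.window.enabled ?? false
console.log(windowEnabled) // false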
@@ -1,4 +1,4 @@
-import { useCallback, useEffect, useRef } from 'react'
+import { useCallback, useEffect, useRef, useState } from 'react'
 import produce from 'immer'
 import useVarList from '../_base/hooks/use-var-list'
 import { VarType } from '../../types'
@@ -23,7 +23,18 @@ const useConfig = (id: string, payload: LLMNodeType) => {
   const isChatMode = useIsChatMode()

   const defaultConfig = useStore(s => s.nodesDefaultConfigs)[payload.type]
-  const { inputs, setInputs } = useNodeCrud<LLMNodeType>(id, payload)
+  const [defaultRolePrefix, setDefaultRolePrefix] = useState<{ user: string; assistant: string }>({ user: '', assistant: '' })
+  const { inputs, setInputs: doSetInputs } = useNodeCrud<LLMNodeType>(id, payload)
+  const setInputs = useCallback((newInputs: LLMNodeType) => {
+    if (newInputs.memory && !newInputs.memory.role_prefix) {
+      const newPayload = produce(newInputs, (draft) => {
+        draft.memory!.role_prefix = defaultRolePrefix
+      })
+      doSetInputs(newPayload)
+      return
+    }
+    doSetInputs(newInputs)
+  }, [doSetInputs, defaultRolePrefix])
   const inputRef = useRef(inputs)
   useEffect(() => {
     inputRef.current = inputs
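The hook now wraps the raw setInputs from useNodeCrud: any write that turns memory on without a role prefix gets the remembered default prefix injected first. The same pattern outside React, as a runnable sketch (the setter stand-in and prefix strings are illustrative):

import produce from 'immer'

type MemorySketch = { role_prefix?: { user: string; assistant: string } }
type InputsSketch = { memory?: MemorySketch }

let state: InputsSketch = {}
const doSetInputs = (next: InputsSketch) => { state = next } // stand-in for useNodeCrud's setter
const defaultRolePrefix = { user: 'Human:', assistant: 'Assistant:' } // illustrative values

const setInputs = (newInputs: InputsSketch) => {
  // Memory enabled but no prefix yet: fill in the remembered default.
  if (newInputs.memory && !newInputs.memory.role_prefix) {
    doSetInputs(produce(newInputs, (draft) => {
      draft.memory!.role_prefix = defaultRolePrefix
    }))
    return
  }
  doSetInputs(newInputs)
}

setInputs({ memory: {} }) // the switch was just turned on
console.log(state.memory?.role_prefix) // { user: 'Human:', assistant: 'Assistant:' }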
@@ -68,23 +79,11 @@ const useConfig = (id: string, payload: LLMNodeType) => {
       }
       else {
         draft.prompt_template = promptTemplates.completion_model.prompt
-        if (!draft.memory) {
-          draft.memory = {
-            role_prefix: {
-              user: '',
-              assistant: '',
-            },
-            window: {
-              enabled: false,
-              size: '',
-            },
-          }
-        }

-        draft.memory.role_prefix = {
+        setDefaultRolePrefix({
          user: promptTemplates.completion_model.conversation_histories_role.user_prefix,
          assistant: promptTemplates.completion_model.conversation_histories_role.assistant_prefix,
-        }
+        })
      }
    }, [isChatModel])
  useEffect(() => {
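Instead of fabricating a full memory object (empty role prefix, disabled window) every time the model flips to completion mode, the effect now only records the model's default prefixes in state; the wrapped setter above applies them when, and only when, memory is actually enabled. Sketched without React state (the prompt data is an illustrative stand-in):

// Stand-in for the useState pair in the hook.
let defaultRolePrefix = { user: '', assistant: '' }
const setDefaultRolePrefix = (p: { user: string; assistant: string }) => {
  defaultRolePrefix = p
}

// Illustrative stand-in shaped like promptTemplates.completion_model.
const completionModel = {
  conversation_histories_role: { user_prefix: 'Human:', assistant_prefix: 'Assistant:' },
}

setDefaultRolePrefix({
  user: completionModel.conversation_histories_role.user_prefix,
  assistant: completionModel.conversation_histories_role.assistant_prefix,
})
console.log(defaultRolePrefix) // the node payload itself stays untouched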
@@ -165,7 +164,7 @@ const useConfig = (id: string, payload: LLMNodeType) => {
     setInputs(newInputs)
   }, [inputs, setInputs])

-  const handleMemoryChange = useCallback((newMemory: Memory) => {
+  const handleMemoryChange = useCallback((newMemory?: Memory) => {
     const newInputs = produce(inputs, (draft) => {
       draft.memory = newMemory
     })
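Making newMemory optional lets the switch clear the field outright: toggling memory off can call the handler with undefined, which, combined with the optional memory?: Memory type above, removes the configuration instead of leaving a stale object behind. A compact sketch:

type MemorySketch = { window: { enabled: boolean } }
type InputsSketch = { memory?: MemorySketch } // as in the type hunk above

const handleMemoryChange = (inputs: InputsSketch, newMemory?: MemorySketch): InputsSketch =>
  ({ ...inputs, memory: newMemory })

let inputs: InputsSketch = { memory: { window: { enabled: true } } }
inputs = handleMemoryChange(inputs) // switch toggled off
console.log(inputs.memory) // undefined -- memory fully cleared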