mirror of
https://github.com/langgenius/dify.git
synced 2026-04-30 23:48:04 +08:00
fix: use model parameters from memory_spec in llm_generator
This commit is contained in:
@@ -605,7 +605,7 @@ class LLMGenerator:
     LLMResult,
     model_instance.invoke_llm(
         prompt_messages=[UserPromptMessage(content=formatted_prompt)],
-        model_parameters={"temperature": 0.01, "max_tokens": 2000},
+        model_parameters=memory_spec.model.completion_params,
         stream=False,
     )
 )
||||
Reference in New Issue
Block a user