Mirror of https://github.com/langgenius/dify.git (synced 2026-04-30 15:38:08 +08:00)
Merge remote-tracking branch 'origin/main' into feat/support-agent-sandbox
@@ -1,6 +1,9 @@
 from __future__ import annotations
 
-from collections.abc import Sequence
+import json
 import logging
+import re
+from collections.abc import Mapping, Sequence
 from typing import Any, cast
+
 from sqlalchemy import select
@@ -47,6 +50,11 @@ from .exc import (
 )
+from .protocols import TemplateRenderer
 
 logger = logging.getLogger(__name__)
 
+VARIABLE_PATTERN = re.compile(r"\{\{#[^#]+#\}\}")
+MAX_RESOLVED_VALUE_LENGTH = 1024
+
+
 def fetch_model_config(*, tenant_id: str, node_data_model: ModelConfig) -> tuple[ModelInstance, Any]:
     from core.app.llm.model_access import build_dify_model_access
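Note: the VARIABLE_PATTERN regex added above matches Dify's `{{#node_id.variable#}}` reference syntax. A quick sanity check of what it does and does not match (illustrative only, not part of the diff):

    import re

    VARIABLE_PATTERN = re.compile(r"\{\{#[^#]+#\}\}")

    assert VARIABLE_PATTERN.search("{{#start.temperature#}}")         # bare reference
    assert VARIABLE_PATTERN.search("top_p is {{#sys.top_p#}} here")   # embedded in text
    assert not VARIABLE_PATTERN.search("{{ jinja_style }}")           # no #...# delimiters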
@@ -688,3 +696,61 @@ def _append_file_prompts(
         prompt_messages[-1] = UserPromptMessage(content=file_prompts + existing_contents)
     else:
         prompt_messages.append(UserPromptMessage(content=file_prompts))
+
+
+def _coerce_resolved_value(raw: str) -> int | float | bool | str:
+    """Try to restore the original type from a resolved template string.
+
+    Variable references are always resolved to text, but completion params may
+    expect numeric or boolean values (e.g. a variable that holds "0.7" mapped to
+    the ``temperature`` parameter). This helper attempts a JSON parse so that
+    ``"0.7"`` → ``0.7``, ``"true"`` → ``True``, etc. Plain strings that are not
+    valid JSON literals are returned as-is.
+    """
+    stripped = raw.strip()
+    if not stripped:
+        return raw
+
+    try:
+        parsed: object = json.loads(stripped)
+    except (json.JSONDecodeError, ValueError):
+        return raw
+
+    if isinstance(parsed, (int, float, bool)):
+        return parsed
+    return raw
+
+
+def resolve_completion_params_variables(
+    completion_params: Mapping[str, Any],
+    variable_pool: VariablePool,
+) -> dict[str, Any]:
+    """Resolve variable references (``{{#node_id.var#}}``) in string-typed completion params.
+
+    Security notes:
+    - Resolved values are length-capped to ``MAX_RESOLVED_VALUE_LENGTH`` to
+      prevent denial-of-service through excessively large variable payloads.
+    - This follows the same ``VariablePool.convert_template`` pattern used across
+      Dify (Answer Node, HTTP Request Node, Agent Node, etc.). The downstream
+      model plugin receives these values as structured JSON key-value pairs; they
+      are never concatenated into raw HTTP headers or SQL queries.
+    - Numeric/boolean coercion is applied so that variables holding ``"0.7"`` are
+      restored to their native type rather than sent as a bare string.
+    """
+    resolved: dict[str, Any] = {}
+    for key, value in completion_params.items():
+        if isinstance(value, str) and VARIABLE_PATTERN.search(value):
+            segment_group = variable_pool.convert_template(value)
+            text = segment_group.text
+            if len(text) > MAX_RESOLVED_VALUE_LENGTH:
+                logger.warning(
+                    "Resolved value for param '%s' truncated from %d to %d chars",
+                    key,
+                    len(text),
+                    MAX_RESOLVED_VALUE_LENGTH,
+                )
+                text = text[:MAX_RESOLVED_VALUE_LENGTH]
+            resolved[key] = _coerce_resolved_value(text)
+        else:
+            resolved[key] = value
+    return resolved
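Note: given the JSON-parse approach in _coerce_resolved_value, the coercion behaves as follows (an illustrative transcript, not part of the diff):

    _coerce_resolved_value("0.7")       # 0.7 (float)
    _coerce_resolved_value("42")        # 42 (int)
    _coerce_resolved_value("true")      # True
    _coerce_resolved_value("hello")     # "hello"  (not a JSON literal, returned as-is)
    _coerce_resolved_value('{"k": 1}')  # '{"k": 1}' (parses to a dict; only scalars are coerced)
    _coerce_resolved_value("   ")       # "   "  (blank input short-circuits)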
@@ -246,6 +246,13 @@ class LLMNode(Node[LLMNodeData]):
             node_data_model=self.node_data.model,
             tenant_id=self.tenant_id,
         )
+        resolved_completion_params = llm_utils.resolve_completion_params_variables(
+            model_config.parameters,
+            variable_pool,
+        )
+        model_instance.parameters = resolved_completion_params
+        model_config.parameters = resolved_completion_params
+        self.node_data.model.completion_params = resolved_completion_params
 
         # fetch memory
         memory = llm_utils.fetch_memory(
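Note: as a sketch of what the LLM Node change enables (the start.temperature variable and its value are hypothetical), a workflow author can bind a completion parameter to an upstream variable instead of a literal, and it is resolved just before the model call:

    # Hypothetical completion params as authored in the workflow editor:
    completion_params = {
        "temperature": "{{#start.temperature#}}",  # upstream variable holding "0.7"
        "max_tokens": 512,                         # non-string values pass through unchanged
    }
    resolved = llm_utils.resolve_completion_params_variables(completion_params, variable_pool)
    # resolved == {"temperature": 0.7, "max_tokens": 512}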
@@ -164,6 +164,10 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
         )
 
         model_instance = self._model_instance
+        # Resolve variable references in string-typed completion params
+        model_instance.parameters = llm_utils.resolve_completion_params_variables(
+            model_instance.parameters, variable_pool
+        )
         if not isinstance(model_instance.model_type_instance, LargeLanguageModel):
             raise InvalidModelTypeError("Model is not a Large Language Model")
 
@@ -113,6 +113,10 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]):
         query = variable.value if variable else None
         variables = {"query": query}
         model_instance = self._model_instance
+        # Resolve variable references in string-typed completion params
+        model_instance.parameters = llm_utils.resolve_completion_params_variables(
+            model_instance.parameters, variable_pool
+        )
         memory = self._memory
         # fetch instruction
         node_data.instruction = node_data.instruction or ""
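Note: the same resolution step now runs in the Parameter Extractor and Question Classifier nodes. The length cap described in the docstring can be exercised directly; in this minimal sketch, FakePool is a stand-in for VariablePool and exists only for illustration:

    class _Group:
        text = "x" * 5000

    class FakePool:
        # Stand-in for VariablePool: resolves every template to a huge string.
        def convert_template(self, value):
            return _Group()

    params = {"stop": "{{#start.stop#}}"}
    resolved = resolve_completion_params_variables(params, FakePool())
    assert len(resolved["stop"]) == MAX_RESOLVED_VALUE_LENGTH  # truncated to 1024, warning logged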