# Mirror of https://github.com/langgenius/dify.git — synced 2026-03-06 16:16:38 +08:00
from collections.abc import Mapping, Sequence
|
|
from typing import Any, Literal
|
|
|
|
from pydantic import BaseModel, Field, field_validator
|
|
|
|
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
|
|
from dify_graph.model_runtime.entities import ImagePromptMessageContent, LLMMode
|
|
from dify_graph.nodes.base import BaseNodeData
|
|
from dify_graph.nodes.base.entities import VariableSelector
|
|
|
|
|
|
class ModelConfig(BaseModel):
    """Model selection and invocation settings for an LLM node."""

    provider: str  # model provider identifier (e.g. "openai") — presumably; confirm against callers
    name: str  # model name within the provider
    mode: LLMMode  # invocation mode (chat vs. completion)
    # Free-form provider-specific parameters (temperature, top_p, ...); empty dict by default.
    completion_params: dict[str, Any] = Field(default_factory=dict)
|
|
|
|
|
|
class ContextConfig(BaseModel):
    """Configuration for injecting retrieved context into the prompt."""

    enabled: bool  # whether context injection is turned on
    # Selector path of the workflow variable holding the context; None when unset.
    variable_selector: list[str] | None = None
|
|
|
|
|
|
class VisionConfigOptions(BaseModel):
    """Options controlling how image inputs are supplied to the model."""

    # Defaults to the system files variable ["sys", "files"].
    variable_selector: Sequence[str] = Field(default_factory=lambda: ["sys", "files"])
    # Image detail level sent to the model; HIGH by default.
    detail: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.HIGH
|
|
|
|
|
|
class VisionConfig(BaseModel):
    """Vision (image input) settings for an LLM node."""

    enabled: bool = False
    configs: VisionConfigOptions = Field(default_factory=VisionConfigOptions)

    @field_validator("configs", mode="before")
    @classmethod
    def convert_none_configs(cls, value: Any):
        # Treat an explicit null in the payload the same as an omitted field.
        return VisionConfigOptions() if value is None else value
|
|
|
|
|
|
class PromptConfig(BaseModel):
    """Extra prompt-rendering configuration (Jinja2 template variables)."""

    jinja2_variables: Sequence[VariableSelector] = Field(default_factory=list)

    @field_validator("jinja2_variables", mode="before")
    @classmethod
    def convert_none_jinja2_variables(cls, value: Any):
        # Normalize an explicit null into an empty variable list.
        return value if value is not None else []
|
|
|
|
|
|
class LLMNodeChatModelMessage(ChatModelMessage):
    """Chat prompt message extended with a Jinja2 template variant."""

    text: str = ""  # plain prompt text; defaults to empty here (base class may require it — TODO confirm)
    # Alternative Jinja2 template body, presumably used when Jinja2 editing is enabled; verify against node logic.
    jinja2_text: str | None = None
|
|
|
|
|
|
class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate):
    """Completion prompt template extended with a Jinja2 template variant."""

    # Alternative Jinja2 template body, presumably used when Jinja2 editing is enabled; verify against node logic.
    jinja2_text: str | None = None
|
|
|
|
|
|
class LLMNodeData(BaseNodeData):
    """Full configuration payload for an LLM workflow node.

    Bundles model selection, prompt template(s), optional memory/context/vision
    settings, and structured-output options.
    """

    model: ModelConfig
    # Either a sequence of chat messages (chat mode) or a single completion template.
    prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate
    prompt_config: PromptConfig = Field(default_factory=PromptConfig)
    memory: MemoryConfig | None = None
    context: ContextConfig
    vision: VisionConfig = Field(default_factory=VisionConfig)
    # Mapping describing the desired structured output — presumably a JSON-schema-like
    # object; TODO confirm the expected format against consumers.
    structured_output: Mapping[str, Any] | None = None
    # We used 'structured_output_enabled' in the past, but it's not a good name.
    # The alias keeps old payloads deserializing correctly.
    # NOTE(review): alias-only population requires populate_by_name (or equivalent) if
    # callers pass the new field name — presumably configured on BaseNodeData; confirm.
    structured_output_switch_on: bool = Field(False, alias="structured_output_enabled")
    reasoning_format: Literal["separated", "tagged"] = Field(
        # Keep tagged as default for backward compatibility
        default="tagged",
        description=(
            """
            Strategy for handling model reasoning output.

            separated: Return clean text (without <think> tags) + reasoning_content field.
            Recommended for new workflows. Enables safe downstream parsing and
            workflow variable access: {{#node_id.reasoning_content#}}

            tagged : Return original text (with <think> tags) + reasoning_content field.
            Maintains full backward compatibility while still providing reasoning_content
            for workflow automation. Frontend thinking panels work as before.
            """
        ),
    )

    @field_validator("prompt_config", mode="before")
    @classmethod
    def convert_none_prompt_config(cls, v: Any):
        """Coerce an explicit ``None`` prompt_config into a default ``PromptConfig``."""
        if v is None:
            return PromptConfig()
        return v

    @property
    def structured_output_enabled(self) -> bool:
        """True only when the switch is on AND a schema mapping is actually provided."""
        return self.structured_output_switch_on and self.structured_output is not None
|