feat(sandbox): enhance sandbox management and tool artifact handling

- Introduced SandboxManager.delete_storage for cleaning up sandbox storage.
- Refactored skill loading and tool artifact handling in DifyCliInitializer and SandboxBashSession.
- Updated LLMNode to extract and compile tool artifacts, tightening its integration with skills (the merge pattern is sketched below).
- Improved attribute management in AttrMap with better error handling and retrieval methods (a toy illustration follows the diff).
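The heart of the LLMNode change is that each chat prompt message is compiled against the skill artifact set and the per-message tool artifacts are folded into a single ToolArtifact with functools.reduce before the sandbox session starts. Below is a minimal, runnable sketch of that merge pattern; ToolArtifactSketch and its tool_names field are illustrative stand-ins, not the real core.skill entities.

# Illustrative sketch only: stand-in types, not the real core.skill classes.
from dataclasses import dataclass, field
from functools import reduce


@dataclass
class ToolArtifactSketch:
    # Hypothetical shape; the real ToolArtifact likely carries full tool
    # definitions, but a set of names is enough to show the merge semantics.
    tool_names: set[str] = field(default_factory=set)

    def merge(self, other: "ToolArtifactSketch") -> "ToolArtifactSketch":
        return ToolArtifactSketch(tool_names=self.tool_names | other.tool_names)


def merge_tool_artifacts(artifacts: list[ToolArtifactSketch]) -> ToolArtifactSketch | None:
    # Mirrors the new _extract_tool_artifact: return None when nothing was
    # compiled, otherwise fold every artifact into one.
    if not artifacts:
        return None
    return reduce(lambda x, y: x.merge(y), artifacts)


merged = merge_tool_artifacts([ToolArtifactSketch({"web_search"}), ToolArtifactSketch({"bash"})])
print(merged)  # e.g. ToolArtifactSketch(tool_names={'bash', 'web_search'}); set order may vary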
Harry
2026-01-22 17:25:58 +08:00
parent e7c3e4cd21
commit 9d80770dfc
13 changed files with 147 additions and 154 deletions

View File

@@ -59,6 +59,8 @@ class PromptConfig(BaseModel):
class LLMNodeChatModelMessage(ChatModelMessage):
text: str = ""
jinja2_text: str | None = None
skill: bool = False
metadata: Mapping[str, Any] | None = None
class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate):

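For reference, the hunk above adds a skill flag and an optional metadata mapping to LLMNodeChatModelMessage. The stand-alone sketch below shows how such a message might be built; ChatMessageSketch is a stripped-down stand-in (the real class inherits from ChatModelMessage), and the field values are made up.

# Illustrative stand-in; not the real LLMNodeChatModelMessage.
from collections.abc import Mapping
from typing import Any

from pydantic import BaseModel


class ChatMessageSketch(BaseModel):
    text: str = ""
    jinja2_text: str | None = None
    skill: bool = False
    metadata: Mapping[str, Any] | None = None


# Hypothetical values, purely for illustration.
msg = ChatMessageSketch(text="Summarize the report using the bundled skill.", skill=True, metadata={"source": "skill"})
print(msg.skill, msg.metadata)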
View File

@@ -7,13 +7,16 @@ import logging
import re
import time
from collections.abc import Generator, Mapping, Sequence
from functools import reduce
from typing import TYPE_CHECKING, Any, Literal, cast
from sqlalchemy import select
from core.agent.entities import AgentEntity, AgentLog, AgentResult, AgentToolEntity, ExecutionContext
from core.agent.patterns import StrategyFactory
from core.app.entities.app_asset_entities import AppAssetFileTree
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.app_assets.constants import AppAssetsAttrs
from core.file import File, FileTransferMethod, FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.llm_generator.output_parser.errors import OutputParserError
@@ -52,6 +55,11 @@ from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
from core.sandbox import Sandbox
from core.sandbox.bash.session import SandboxBashSession
from core.skill.constants import SkillAttrs
from core.skill.entities.skill_artifact_set import SkillArtifactSet
from core.skill.entities.skill_document import SkillDocument
from core.skill.entities.tool_artifact import ToolArtifact
from core.skill.skill_compiler import SkillCompiler
from core.tools.__base.tool import Tool
from core.tools.signature import sign_upload_file
from core.tools.tool_manager import ToolManager
@@ -281,6 +289,7 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables=self.node_data.prompt_config.jinja2_variables,
tenant_id=self.tenant_id,
context_files=context_files,
sandbox=self.graph_runtime_state.sandbox,
)
# Variables for outputs
@@ -289,12 +298,14 @@ class LLMNode(Node[LLMNodeData]):
sandbox = self.graph_runtime_state.sandbox
if sandbox:
tool_artifact = self._extract_tool_artifact()
generator = self._invoke_llm_with_sandbox(
sandbox=sandbox,
model_instance=model_instance,
prompt_messages=prompt_messages,
stop=stop,
variable_pool=variable_pool,
tool_artifact=tool_artifact,
)
elif self.tool_call_enabled:
generator = self._invoke_llm_with_tools(
@@ -847,6 +858,7 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables=self.node_data.prompt_config.jinja2_variables or [],
variable_pool=variable_pool,
vision_detail_config=self.node_data.vision.configs.detail,
sandbox=self.graph_runtime_state.sandbox,
)
combined_messages.extend(processed_msgs)
static_idx += 1
@@ -1181,6 +1193,7 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables: Sequence[VariableSelector],
tenant_id: str,
context_files: list[File] | None = None,
sandbox: Sandbox | None = None,
) -> tuple[Sequence[PromptMessage], Sequence[str] | None]:
prompt_messages: list[PromptMessage] = []
@@ -1193,6 +1206,7 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables=jinja2_variables,
variable_pool=variable_pool,
vision_detail_config=vision_detail,
sandbox=sandbox,
)
)
@@ -1473,8 +1487,17 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables: Sequence[VariableSelector],
variable_pool: VariablePool,
vision_detail_config: ImagePromptMessageContent.DETAIL,
sandbox: Sandbox | None = None,
) -> Sequence[PromptMessage]:
prompt_messages: list[PromptMessage] = []
# Extract skill compilation context from sandbox if available
artifact_set: SkillArtifactSet | None = None
file_tree: AppAssetFileTree | None = None
if sandbox:
artifact_set = sandbox.attrs.get(SkillAttrs.ARTIFACT_SET)
file_tree = sandbox.attrs.get(AppAssetsAttrs.FILE_TREE)
for message in messages:
if message.edition_type == "jinja2":
result_text = _render_jinja2_message(
@@ -1482,6 +1505,16 @@ class LLMNode(Node[LLMNodeData]):
jinja2_variables=jinja2_variables,
variable_pool=variable_pool,
)
# Compile skill references after jinja2 rendering
if artifact_set is not None and file_tree is not None:
skill_artifact = SkillCompiler().compile_one(
artifact_set,
SkillDocument(skill_id="anonymous", content=result_text, metadata={}),
file_tree,
)
result_text = skill_artifact.content
prompt_message = _combine_message_content_with_role(
contents=[TextPromptMessageContent(data=result_text)], role=message.role
)
@@ -1514,6 +1547,16 @@ class LLMNode(Node[LLMNodeData]):
# Create message with text from all segments
plain_text = segment_group.text
# Compile skill references after context and variable substitution
if plain_text and artifact_set is not None and file_tree is not None:
skill_artifact = SkillCompiler().compile_one(
artifact_set,
SkillDocument(skill_id="anonymous", content=plain_text, metadata={}),
file_tree,
)
plain_text = skill_artifact.content
if plain_text:
prompt_message = _combine_message_content_with_role(
contents=[TextPromptMessageContent(data=plain_text)], role=message.role
@@ -1767,6 +1810,28 @@ class LLMNode(Node[LLMNodeData]):
generation_data,
)
def _extract_tool_artifact(self) -> ToolArtifact | None:
"""Extract tool artifact from prompt template."""
sandbox = self.graph_runtime_state.sandbox
if not sandbox:
raise LLMNodeError("Sandbox not found")
artifact_set = sandbox.attrs.get(SkillAttrs.ARTIFACT_SET)
file_tree = sandbox.attrs.get(AppAssetsAttrs.FILE_TREE)
tool_artifacts: list[ToolArtifact] = []
for prompt in self.node_data.prompt_template:
if isinstance(prompt, LLMNodeChatModelMessage):
skill_artifact = SkillCompiler().compile_one(
artifact_set, SkillDocument(skill_id="anonymous", content=prompt.text, metadata={}), file_tree
)
tool_artifacts.append(skill_artifact.tools)
if len(tool_artifacts) == 0:
return None
return reduce(lambda x, y: x.merge(y), tool_artifacts)
def _invoke_llm_with_tools(
self,
model_instance: ModelInstance,
@@ -1811,17 +1876,6 @@ class LLMNode(Node[LLMNodeData]):
result = yield from self._process_tool_outputs(outputs)
return result
def _get_allow_tools_list(self) -> list[tuple[str, str]] | None:
if not self._node_data.tools:
return None
allow_tools = []
for tool in self._node_data.tools:
if not tool.enabled:
continue
allow_tools.append((tool.provider_name, tool.tool_name))
return allow_tools or None
def _invoke_llm_with_sandbox(
self,
sandbox: Sandbox,
@@ -1829,12 +1883,11 @@ class LLMNode(Node[LLMNodeData]):
prompt_messages: Sequence[PromptMessage],
stop: Sequence[str] | None,
variable_pool: VariablePool,
tool_artifact: ToolArtifact | None,
) -> Generator[NodeEventBase, None, LLMGenerationData]:
allow_tools = self._get_allow_tools_list()
result: LLMGenerationData | None = None
with SandboxBashSession(sandbox=sandbox, node_id=self.id, allow_tools=allow_tools) as session:
with SandboxBashSession(sandbox=sandbox, node_id=self.id, tools=tool_artifact) as session:
prompt_files = self._extract_prompt_files(variable_pool)
model_features = self._get_model_features(model_instance)
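A closing note on the AttrMap item from the commit message: throughout the diff, sandbox attributes are read with sandbox.attrs.get(SkillAttrs.ARTIFACT_SET) and sandbox.attrs.get(AppAssetsAttrs.FILE_TREE), which relies on retrieval that tolerates missing keys. The toy class below is only a guess at that behaviour (a forgiving get plus a strict accessor with a descriptive error); the real AttrMap API is not shown in this diff.

# Toy illustration only; the real AttrMap implementation is not part of this diff.
from typing import Any


class AttrMapSketch:
    def __init__(self) -> None:
        self._data: dict[str, Any] = {}

    def set(self, key: str, value: Any) -> None:
        self._data[key] = value

    def get(self, key: str, default: Any = None) -> Any:
        # Forgiving retrieval: a missing attribute yields the default, never an exception.
        return self._data.get(key, default)

    def require(self, key: str) -> Any:
        # Strict retrieval: fail loudly with a descriptive error when the attribute is absent.
        if key not in self._data:
            raise KeyError(f"sandbox attribute not set: {key}")
        return self._data[key]


attrs = AttrMapSketch()
attrs.set("artifact_set", {"skills": []})
print(attrs.get("file_tree"))         # None instead of an exception
print(attrs.require("artifact_set"))  # {'skills': []}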