feat: add computer use feature to LLMNodeData

- Introduced a new boolean field `computer_use` in LLMNodeData to indicate whether the computer use feature should be enabled.
- Updated LLMNode to check the `computer_use` field when determining sandbox usage, raising an explicit error when no sandbox is available.
- Removed the obsolete `_has_skill_prompt` method to streamline the code.
This commit is contained in:
Harry
2026-01-28 18:00:33 +08:00
parent f9f3d33911
commit 2513e191fb
2 changed files with 7 additions and 12 deletions

View File

@ -392,7 +392,8 @@ class LLMNodeData(BaseNodeData):
"""
),
)
# Computer Use
computer_use: bool = Field(default=False, description="Whether to use the computer use feature")
# Tool support
tools: Sequence[ToolMetadata] = Field(default_factory=list)
tool_settings: Sequence[ToolSetting] = Field(default_factory=list)

View File

@ -297,9 +297,10 @@ class LLMNode(Node[LLMNodeData]):
generation_data: LLMGenerationData | None = None
structured_output: LLMStructuredOutput | None = None
sandbox = self.graph_runtime_state.sandbox
has_skill_prompt = self._has_skill_prompt()
if sandbox and has_skill_prompt:
if self.node_data.computer_use:
sandbox = self.graph_runtime_state.sandbox
if not sandbox:
raise LLMNodeError("computer use is enabled but no sandbox found")
tool_dependencies = self._extract_tool_dependencies()
generator = self._invoke_llm_with_sandbox(
sandbox=sandbox,
@ -422,8 +423,7 @@ class LLMNode(Node[LLMNodeData]):
# Send final chunk event to indicate streaming is complete
# For tool calls and sandbox, final events are already sent in _process_tool_outputs
sandbox_used = sandbox and has_skill_prompt
if not self.tool_call_enabled and not sandbox_used:
if not self.tool_call_enabled and not self.node_data.computer_use:
yield StreamChunkEvent(
selector=[self._node_id, "text"],
chunk="",
@ -1822,12 +1822,6 @@ class LLMNode(Node[LLMNodeData]):
generation_data,
)
def _has_skill_prompt(self) -> bool:
    """Return True when any chat-model message in the prompt template carries a skill."""
    return any(
        isinstance(message, LLMNodeChatModelMessage) and message.skill
        for message in self.node_data.prompt_template
    )
def _extract_tool_dependencies(self) -> ToolDependencies | None:
"""Extract tool artifact from prompt template."""