Merge branch 'feat/support-agent-sandbox' into sandboxed-agent-rebase

This commit is contained in:
Novice
2026-03-23 20:19:16 +08:00
48 changed files with 322 additions and 2149 deletions

View File

@@ -20,7 +20,6 @@ class BaseMemory(ABC):
@abstractmethod
def get_history_prompt_messages(
self,
*,
max_token_limit: int = 2000,
message_limit: int | None = None,
) -> Sequence[PromptMessage]:

View File

@@ -144,7 +144,6 @@ class NodeTokenBufferMemory(BaseMemory):
def get_history_prompt_messages(
self,
*,
max_token_limit: int = 2000,
message_limit: int | None = None,
) -> Sequence[PromptMessage]:

View File

@@ -116,7 +116,6 @@ class TokenBufferMemory(BaseMemory):
def get_history_prompt_messages(
self,
*,
max_token_limit: int = 2000,
message_limit: int | None = None,
) -> Sequence[PromptMessage]:

View File

@@ -118,37 +118,26 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
user=user_id,
)
if isinstance(response, Generator):
if response.usage:
deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)
def handle() -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
for chunk in response:
if chunk.delta.usage:
deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=chunk.delta.usage)
chunk.prompt_messages = []
yield chunk
def handle_non_streaming(
response: LLMResultWithStructuredOutput,
) -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
yield LLMResultChunkWithStructuredOutput(
model=response.model,
prompt_messages=[],
system_fingerprint=response.system_fingerprint,
structured_output=response.structured_output,
delta=LLMResultChunkDelta(
index=0,
message=response.message,
usage=response.usage,
finish_reason="",
),
)
return handle()
else:
if response.usage:
deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)
def handle_non_streaming(
response: LLMResultWithStructuredOutput,
) -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
yield LLMResultChunkWithStructuredOutput(
model=response.model,
prompt_messages=[],
system_fingerprint=response.system_fingerprint,
structured_output=response.structured_output,
delta=LLMResultChunkDelta(
index=0,
message=response.message,
usage=response.usage,
finish_reason="",
),
)
return handle_non_streaming(response)
return handle_non_streaming(response)
@classmethod
def invoke_text_embedding(cls, user_id: str, tenant: Tenant, payload: RequestInvokeTextEmbedding):

View File

@@ -175,11 +175,11 @@ class SandboxBuilder:
if sandbox.is_cancelled():
return
# Storage mount is part of readiness. If restore/mount fails,
# the sandbox must surface initialization failure instead of
# becoming "ready" with missing files.
if not sandbox.mount():
raise RuntimeError("Sandbox storage mount failed")
# Attempt to restore prior workspace state. mount() returns
# False when no archive exists yet (first run for this
# sandbox_id), which is a normal case — not an error.
# Actual failures (download/extract) surface as exceptions.
sandbox.mount()
sandbox.mark_ready()
except Exception as exc:
try:

View File

@@ -4,7 +4,6 @@ from __future__ import annotations
import logging
from core.virtual_environment.__base.exec import PipelineExecutionError
from core.virtual_environment.__base.helpers import pipeline
from core.virtual_environment.__base.virtual_environment import VirtualEnvironment
from extensions.storage.base_storage import BaseStorage
@@ -47,19 +46,15 @@ class ArchiveSandboxStorage(SandboxStorage):
download_url = self._storage.get_download_url(self._storage_key, _ARCHIVE_TIMEOUT)
archive = "archive.tar.gz"
try:
(
pipeline(sandbox)
.add(["curl", "-fsSL", download_url, "-o", archive], error_message="Failed to download archive")
.add(
["sh", "-c", 'tar -xzf "$1" 2>/dev/null; exit $?', "sh", archive], error_message="Failed to extract"
)
.add(["rm", archive], error_message="Failed to cleanup")
.execute(timeout=_ARCHIVE_TIMEOUT, raise_on_error=True)
(
pipeline(sandbox)
.add(["curl", "-fsSL", download_url, "-o", archive], error_message="Failed to download archive")
.add(
["sh", "-c", 'tar -xzf "$1" 2>/dev/null; exit $?', "sh", archive], error_message="Failed to extract"
)
except PipelineExecutionError:
logger.exception("Failed to mount archive for sandbox %s", self._sandbox_id)
return False
.add(["rm", archive], error_message="Failed to cleanup")
.execute(timeout=_ARCHIVE_TIMEOUT, raise_on_error=True)
)
logger.info("Mounted archive for sandbox %s", self._sandbox_id)
return True

View File

@@ -148,8 +148,7 @@ class DockerDemuxer:
to periodically check for errors and closed state instead of blocking forever.
"""
if self._error:
error = cast(BaseException, self._error)
raise TransportEOFError(f"Demuxer error: {error}") from error
raise TransportEOFError(f"Demuxer error: {self._error}") from self._error
while True:
try:
@@ -584,7 +583,7 @@ class DockerDaemonEnvironment(VirtualEnvironment):
stderr=True,
tty=False,
workdir=working_dir,
environment=environments,
environment=dict(environments) if environments else None,
),
)

View File

@@ -135,11 +135,11 @@ class CommandNode(Node[CommandNodeData]):
*,
graph_config: Mapping[str, Any],
node_id: str,
node_data: Mapping[str, Any],
node_data: CommandNodeData,
) -> Mapping[str, Sequence[str]]:
_ = graph_config
typed_node_data = CommandNodeData.model_validate(node_data)
typed_node_data = node_data
selectors: list[VariableSelector] = []
selectors += list(variable_template_parser.extract_selectors_from_template(typed_node_data.command))

View File

@@ -157,10 +157,10 @@ class FileUploadNode(Node[FileUploadNodeData]):
*,
graph_config: Mapping[str, Any],
node_id: str,
node_data: Mapping[str, Any],
node_data: FileUploadNodeData,
) -> Mapping[str, Sequence[str]]:
_ = graph_config
typed_node_data = FileUploadNodeData.model_validate(node_data)
typed_node_data = node_data
return {node_id + ".files": typed_node_data.variable_selector}
@staticmethod