fix: agent stream code was never formatted with ruff

This commit is contained in:
Harry
2026-02-03 14:42:30 +08:00
parent 49befa6d3f
commit 10f5d9e7ad
10 changed files with 25 additions and 34 deletions

View File

@@ -253,9 +253,11 @@ class BaseAgentRunner(AppRunner):
# save tool entity
tool_instances[dataset_tool.entity.identity.name] = dataset_tool
output_tools = build_agent_output_tools(tenant_id=self.tenant_id,
invoke_from=self.application_generate_entity.invoke_from,
tool_invoke_from=ToolInvokeFrom.AGENT)
output_tools = build_agent_output_tools(
tenant_id=self.tenant_id,
invoke_from=self.application_generate_entity.invoke_from,
tool_invoke_from=ToolInvokeFrom.AGENT,
)
for tool in output_tools:
tool_instances[tool.entity.identity.name] = tool

View File

@@ -194,6 +194,7 @@ class AgentResult(BaseModel):
"""
Agent execution result.
"""
output: str | dict = Field(default="", description="The generated output")
files: list[Any] = Field(default_factory=list, description="Files produced during execution")
usage: Any | None = Field(default=None, description="LLM usage statistics")

View File

@@ -101,6 +101,7 @@ def build_agent_output_tools(
message_id: str | None = None,
) -> ToolInvokeMessage:
return ToolInvokeMessage(message=ToolInvokeMessage.TextMessage(text=TERMINAL_OUTPUT_MESSAGE))
raw_tool._invoke = invoke_tool # pyright: ignore[reportPrivateUsage]
tools.append(raw_tool)

View File

@@ -57,10 +57,7 @@ class AgentPattern(ABC):
@abstractmethod
def run(
self,
prompt_messages: list[PromptMessage],
model_parameters: dict[str, Any],
stop: list[str]
self, prompt_messages: list[PromptMessage], model_parameters: dict[str, Any], stop: list[str]
) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]:
"""Execute the agent strategy."""
pass

View File

@@ -30,10 +30,7 @@ class FunctionCallStrategy(AgentPattern):
"""Function Call strategy using model's native tool calling capability."""
def run(
self,
prompt_messages: list[PromptMessage],
model_parameters: dict[str, Any],
stop: list[str]
self, prompt_messages: list[PromptMessage], model_parameters: dict[str, Any], stop: list[str]
) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]:
"""Execute the function call agent strategy."""
# Convert tools to prompt format
@@ -144,9 +141,7 @@ class FunctionCallStrategy(AgentPattern):
output_files.extend(tool_files)
if tool_response == TERMINAL_OUTPUT_MESSAGE:
function_call_state = False
final_tool_args = tool_entity.transform_tool_parameters_type(
tool_args
)
final_tool_args = tool_entity.transform_tool_parameters_type(tool_args)
yield self._finish_log(
round_log,

View File

@@ -59,11 +59,7 @@ class ReActStrategy(AgentPattern):
self.instruction = instruction
def run(
self,
prompt_messages:
list[PromptMessage],
model_parameters: dict[str, Any],
stop: list[str]
self, prompt_messages: list[PromptMessage], model_parameters: dict[str, Any], stop: list[str]
) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]:
"""Execute the ReAct agent strategy."""
# Initialize tracking

View File

@@ -43,7 +43,7 @@ class StrategyFactory:
agent_strategy: AgentEntity.Strategy | None = None,
tool_invoke_hook: ToolInvokeHook | None = None,
instruction: str = "",
structured_output_schema: Mapping[str, Any] | None = None
structured_output_schema: Mapping[str, Any] | None = None,
) -> AgentPattern:
"""
Create an appropriate strategy based on model features.
@@ -71,7 +71,7 @@ class StrategyFactory:
tenant_id=tenant_id,
invoke_from=invoke_from,
tool_invoke_from=tool_invoke_from,
structured_output_schema=structured_output_schema
structured_output_schema=structured_output_schema,
)
tools.extend(output_tools)