refactor: move execution limits from engine core to layer

Remove max_execution_time and max_execution_steps from ExecutionContext and GraphEngine since these limits are now handled by ExecutionLimitsLayer. This follows the separation of concerns principle by keeping execution limits as a cross-cutting concern handled by layers rather than embedded in core engine components.

Changes:
- Remove max_execution_time and max_execution_steps from ExecutionContext
- Remove these parameters from GraphEngine.__init__()
- Remove max_execution_time from Dispatcher
- Update workflow_entry.py to no longer pass these parameters
- Update all tests to remove these parameters
This commit is contained in:
-LAN-
2025-09-10 01:32:45 +08:00
parent e0e82fbfaa
commit a23c8fcb1a
13 changed files with 2 additions and 44 deletions

View File

@@ -52,8 +52,6 @@ def test_abort_command():
graph=mock_graph,
graph_config={},
graph_runtime_state=shared_runtime_state, # Use shared instance
max_execution_steps=100,
max_execution_time=10,
command_channel=command_channel,
)

View File

@@ -55,8 +55,6 @@ def test_streaming_output_with_blocking_equals_one():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)
@@ -162,8 +160,6 @@ def test_streaming_output_with_blocking_not_equals_one():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)

View File

@@ -470,8 +470,6 @@ def test_layer_system_basic():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)
@@ -535,8 +533,6 @@ def test_layer_chaining():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)
@@ -591,8 +587,6 @@ def test_layer_error_handling():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)

View File

@@ -625,8 +625,6 @@ class MockIterationNode(MockNodeMixin, IterationNode):
graph=iteration_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=10000, # Use default or config value
max_execution_time=600, # Use default or config value
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)
@@ -695,8 +693,6 @@ class MockLoopNode(MockNodeMixin, LoopNode):
graph=loop_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=10000, # Use default or config value
max_execution_time=600, # Use default or config value
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)

View File

@@ -128,8 +128,6 @@ def test_parallel_streaming_workflow():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)

View File

@@ -388,8 +388,6 @@ class TableTestRunner:
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=int(test_case.timeout),
command_channel=InMemoryChannel(),
min_workers=self.graph_engine_min_workers,
max_workers=self.graph_engine_max_workers,

View File

@@ -38,8 +38,6 @@ def test_tool_in_chatflow():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)