mirror of
https://github.com/langgenius/dify.git
synced 2026-05-03 08:58:09 +08:00
refactor: move execution limits from engine core to layer
Remove max_execution_time and max_execution_steps from ExecutionContext and GraphEngine, since these limits are now handled by ExecutionLimitsLayer. This follows the separation-of-concerns principle by keeping execution limits as a cross-cutting concern handled by layers rather than embedded in core engine components.

Changes:
- Remove max_execution_time and max_execution_steps from ExecutionContext
- Remove these parameters from GraphEngine.__init__()
- Remove max_execution_time from Dispatcher
- Update workflow_entry.py to no longer pass these parameters
- Update all tests to remove these parameters
This commit is contained in:
@ -52,8 +52,6 @@ def test_abort_command():
|
||||
graph=mock_graph,
|
||||
graph_config={},
|
||||
graph_runtime_state=shared_runtime_state, # Use shared instance
|
||||
max_execution_steps=100,
|
||||
max_execution_time=10,
|
||||
command_channel=command_channel,
|
||||
)
|
||||
|
||||
|
||||
@ -55,8 +55,6 @@ def test_streaming_output_with_blocking_equals_one():
|
||||
graph=graph,
|
||||
graph_config=graph_config,
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=30,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
@ -162,8 +160,6 @@ def test_streaming_output_with_blocking_not_equals_one():
|
||||
graph=graph,
|
||||
graph_config=graph_config,
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=30,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
|
||||
@ -470,8 +470,6 @@ def test_layer_system_basic():
|
||||
graph=graph,
|
||||
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=300,
|
||||
max_execution_time=60,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
@ -535,8 +533,6 @@ def test_layer_chaining():
|
||||
graph=graph,
|
||||
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=300,
|
||||
max_execution_time=60,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
@ -591,8 +587,6 @@ def test_layer_error_handling():
|
||||
graph=graph,
|
||||
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=300,
|
||||
max_execution_time=60,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
|
||||
@ -625,8 +625,6 @@ class MockIterationNode(MockNodeMixin, IterationNode):
|
||||
graph=iteration_graph,
|
||||
graph_config=self.graph_config,
|
||||
graph_runtime_state=graph_runtime_state_copy,
|
||||
max_execution_steps=10000, # Use default or config value
|
||||
max_execution_time=600, # Use default or config value
|
||||
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
|
||||
)
|
||||
|
||||
@ -695,8 +693,6 @@ class MockLoopNode(MockNodeMixin, LoopNode):
|
||||
graph=loop_graph,
|
||||
graph_config=self.graph_config,
|
||||
graph_runtime_state=graph_runtime_state_copy,
|
||||
max_execution_steps=10000, # Use default or config value
|
||||
max_execution_time=600, # Use default or config value
|
||||
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
|
||||
)
|
||||
|
||||
|
||||
@ -128,8 +128,6 @@ def test_parallel_streaming_workflow():
|
||||
graph=graph,
|
||||
graph_config=graph_config,
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=30,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
|
||||
@ -388,8 +388,6 @@ class TableTestRunner:
|
||||
graph=graph,
|
||||
graph_config=graph_config,
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=int(test_case.timeout),
|
||||
command_channel=InMemoryChannel(),
|
||||
min_workers=self.graph_engine_min_workers,
|
||||
max_workers=self.graph_engine_max_workers,
|
||||
|
||||
@ -38,8 +38,6 @@ def test_tool_in_chatflow():
|
||||
graph=graph,
|
||||
graph_config=graph_config,
|
||||
graph_runtime_state=graph_runtime_state,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=30,
|
||||
command_channel=InMemoryChannel(),
|
||||
)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user