This commit is contained in:
Joel
2025-07-02 14:17:48 +08:00
9 changed files with 297 additions and 60 deletions

View File

@ -44,6 +44,7 @@ from core.app.entities.task_entities import (
)
from core.file import FILE_MODEL_IDENTITY, File
from core.tools.tool_manager import ToolManager
from core.variables.segments import ArrayFileSegment, FileSegment, Segment
from core.workflow.entities.workflow_execution import WorkflowExecution
from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution, WorkflowNodeExecutionStatus
from core.workflow.nodes import NodeType
@ -506,7 +507,8 @@ class WorkflowResponseConverter:
# Convert to tuple to match Sequence type
return tuple(flattened_files)
def _fetch_files_from_variable_value(self, value: Union[dict, list]) -> Sequence[Mapping[str, Any]]:
@classmethod
def _fetch_files_from_variable_value(cls, value: Union[dict, list, Segment]) -> Sequence[Mapping[str, Any]]:
"""
Fetch files from variable value
:param value: variable value
@ -515,20 +517,30 @@ class WorkflowResponseConverter:
if not value:
return []
files = []
if isinstance(value, list):
files: list[Mapping[str, Any]] = []
if isinstance(value, FileSegment):
files.append(value.value.to_dict())
elif isinstance(value, ArrayFileSegment):
files.extend([i.to_dict() for i in value.value])
elif isinstance(value, File):
files.append(value.to_dict())
elif isinstance(value, list):
for item in value:
file = self._get_file_var_from_value(item)
file = cls._get_file_var_from_value(item)
if file:
files.append(file)
elif isinstance(value, dict):
file = self._get_file_var_from_value(value)
elif isinstance(
value,
dict,
):
file = cls._get_file_var_from_value(value)
if file:
files.append(file)
return files
def _get_file_var_from_value(self, value: Union[dict, list]) -> Mapping[str, Any] | None:
@classmethod
def _get_file_var_from_value(cls, value: Union[dict, list]) -> Mapping[str, Any] | None:
"""
Get file var from value
:param value: variable value

View File

@ -395,6 +395,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
message.provider_response_latency = time.perf_counter() - self._start_at
message.total_price = usage.total_price
message.currency = usage.currency
self._task_state.llm_result.usage.latency = message.provider_response_latency
message.message_metadata = self._task_state.metadata.model_dump_json()
if trace_manager:

View File

@ -2,7 +2,6 @@ import logging
from collections.abc import Generator
from typing import cast
from core.file import FILE_MODEL_IDENTITY, File
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.graph_engine.entities.event import (
GraphEngineEvent,
@ -201,44 +200,3 @@ class AnswerStreamProcessor(StreamProcessor):
stream_out_answer_node_ids.append(answer_node_id)
return stream_out_answer_node_ids
@classmethod
def _fetch_files_from_variable_value(cls, value: dict | list) -> list[dict]:
    """Collect every file mapping contained in a variable value.

    :param value: variable value — either a single candidate dict or a
        list of candidate items
    :return: list of file dicts recognized by ``_get_file_var_from_value``
    """
    if not value:
        return []
    # Normalize the input to a uniform list of candidates; any other
    # truthy type yields no candidates at all.
    if isinstance(value, list):
        candidates = value
    elif isinstance(value, dict):
        candidates = [value]
    else:
        candidates = []
    extracted = (cls._get_file_var_from_value(candidate) for candidate in candidates)
    # Keep only candidates that actually resolved to a file mapping.
    return [file_var for file_var in extracted if file_var]
@classmethod
def _get_file_var_from_value(cls, value: dict | list):
    """Extract a single file variable from *value*.

    :param value: variable value; a dict is treated as a serialized file
        only when it carries the Dify file model identity marker, and a
        ``File`` instance is serialized via ``to_dict()``
    :return: the file mapping, or ``None`` when *value* holds no file
    """
    if not value:
        return None
    if isinstance(value, dict):
        # Single lookup via dict.get instead of `in` + subscript;
        # a serialized file dict is recognized by its identity marker.
        if value.get("dify_model_identity") == FILE_MODEL_IDENTITY:
            return value
    elif isinstance(value, File):
        return value.to_dict()
    return None

View File

@ -27,6 +27,7 @@ from core.workflow.enums import SystemVariableKey
from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository
from core.workflow.repositories.workflow_node_execution_repository import WorkflowNodeExecutionRepository
from core.workflow.workflow_entry import WorkflowEntry
from libs.datetime_utils import naive_utc_now
@dataclass
@ -160,12 +161,13 @@ class WorkflowCycleManager:
exceptions_count: int = 0,
) -> WorkflowExecution:
workflow_execution = self._get_workflow_execution_or_raise_error(workflow_run_id)
now = naive_utc_now()
workflow_execution.status = WorkflowExecutionStatus(status.value)
workflow_execution.error_message = error_message
workflow_execution.total_tokens = total_tokens
workflow_execution.total_steps = total_steps
workflow_execution.finished_at = datetime.now(UTC).replace(tzinfo=None)
workflow_execution.finished_at = now
workflow_execution.exceptions_count = exceptions_count
# Use the instance repository to find running executions for a workflow run
@ -174,7 +176,6 @@ class WorkflowCycleManager:
)
# Update the domain models
now = datetime.now(UTC).replace(tzinfo=None)
for node_execution in running_node_executions:
if node_execution.node_execution_id:
# Update the domain model

View File

@ -154,7 +154,7 @@ class WorkflowDraftVariableService:
variables = (
# Do not load the `value` field.
query.options(orm.defer(WorkflowDraftVariable.value))
.order_by(WorkflowDraftVariable.id.desc())
.order_by(WorkflowDraftVariable.created_at.desc())
.limit(limit)
.offset((page - 1) * limit)
.all()
@ -168,7 +168,7 @@ class WorkflowDraftVariableService:
WorkflowDraftVariable.node_id == node_id,
)
query = self._session.query(WorkflowDraftVariable).filter(*criteria)
variables = query.order_by(WorkflowDraftVariable.id.desc()).all()
variables = query.order_by(WorkflowDraftVariable.created_at.desc()).all()
return WorkflowDraftVariableList(variables=variables)
def list_node_variables(self, app_id: str, node_id: str) -> WorkflowDraftVariableList:
@ -446,6 +446,9 @@ def _batch_upsert_draft_varaible(
stmt = stmt.on_conflict_do_update(
index_elements=WorkflowDraftVariable.unique_app_id_node_id_name(),
set_={
# Refresh creation timestamp to ensure updated variables
# appear first in chronologically sorted result sets.
"created_at": stmt.excluded.created_at,
"updated_at": stmt.excluded.updated_at,
"last_edited_at": stmt.excluded.last_edited_at,
"description": stmt.excluded.description,

View File

@ -0,0 +1,259 @@
from collections.abc import Mapping, Sequence
from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter
from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod, FileType
from core.variables.segments import ArrayFileSegment, FileSegment
class TestWorkflowResponseConverterFetchFilesFromVariableValue:
    """Test class for WorkflowResponseConverter._fetch_files_from_variable_value method.

    Covers all accepted input shapes: FileSegment, ArrayFileSegment, bare
    File objects, serialized file dicts (marked with dify_model_identity),
    lists mixing valid and invalid items, and unsupported/falsy inputs.
    """

    def create_test_file(self, file_id: str = "test_file_1") -> File:
        """Create a test File object"""
        return File(
            id=file_id,
            tenant_id="test_tenant",
            type=FileType.DOCUMENT,
            transfer_method=FileTransferMethod.LOCAL_FILE,
            related_id="related_123",
            filename=f"{file_id}.txt",
            extension=".txt",
            mime_type="text/plain",
            size=1024,
            storage_key="storage_key_123",
        )

    def create_file_dict(self, file_id: str = "test_file_dict") -> dict:
        """Create a file dictionary with correct dify_model_identity"""
        return {
            "dify_model_identity": FILE_MODEL_IDENTITY,
            "id": file_id,
            "tenant_id": "test_tenant",
            "type": "document",
            "transfer_method": "local_file",
            "related_id": "related_456",
            "filename": f"{file_id}.txt",
            "extension": ".txt",
            "mime_type": "text/plain",
            "size": 2048,
            "url": "http://example.com/file.txt",
        }

    def test_fetch_files_from_variable_value_with_none(self):
        """Test with None input"""
        # The method signature expects Union[dict, list, Segment], but implementation handles None
        # via its falsy guard, so we pass None directly (type: ignore) and expect [].
        result = WorkflowResponseConverter._fetch_files_from_variable_value(None)  # type: ignore
        assert result == []

    def test_fetch_files_from_variable_value_with_empty_dict(self):
        """Test with empty dictionary"""
        result = WorkflowResponseConverter._fetch_files_from_variable_value({})
        assert result == []

    def test_fetch_files_from_variable_value_with_empty_list(self):
        """Test with empty list"""
        result = WorkflowResponseConverter._fetch_files_from_variable_value([])
        assert result == []

    def test_fetch_files_from_variable_value_with_file_segment(self):
        """Test with valid FileSegment"""
        test_file = self.create_test_file("segment_file")
        file_segment = FileSegment(value=test_file)
        result = WorkflowResponseConverter._fetch_files_from_variable_value(file_segment)
        assert len(result) == 1
        assert isinstance(result[0], dict)
        assert result[0]["id"] == "segment_file"
        # Serialization must stamp the file-model identity marker.
        assert result[0]["dify_model_identity"] == FILE_MODEL_IDENTITY

    def test_fetch_files_from_variable_value_with_array_file_segment_single(self):
        """Test with ArrayFileSegment containing single file"""
        test_file = self.create_test_file("array_file_1")
        array_segment = ArrayFileSegment(value=[test_file])
        result = WorkflowResponseConverter._fetch_files_from_variable_value(array_segment)
        assert len(result) == 1
        assert isinstance(result[0], dict)
        assert result[0]["id"] == "array_file_1"

    def test_fetch_files_from_variable_value_with_array_file_segment_multiple(self):
        """Test with ArrayFileSegment containing multiple files"""
        test_file_1 = self.create_test_file("array_file_1")
        test_file_2 = self.create_test_file("array_file_2")
        array_segment = ArrayFileSegment(value=[test_file_1, test_file_2])
        result = WorkflowResponseConverter._fetch_files_from_variable_value(array_segment)
        # Order of the input array must be preserved in the result.
        assert len(result) == 2
        assert result[0]["id"] == "array_file_1"
        assert result[1]["id"] == "array_file_2"

    def test_fetch_files_from_variable_value_with_array_file_segment_empty(self):
        """Test with ArrayFileSegment containing empty array"""
        array_segment = ArrayFileSegment(value=[])
        result = WorkflowResponseConverter._fetch_files_from_variable_value(array_segment)
        assert result == []

    def test_fetch_files_from_variable_value_with_list_of_file_dicts(self):
        """Test with list containing file dictionaries"""
        file_dict_1 = self.create_file_dict("list_file_1")
        file_dict_2 = self.create_file_dict("list_file_2")
        test_list = [file_dict_1, file_dict_2]
        result = WorkflowResponseConverter._fetch_files_from_variable_value(test_list)
        assert len(result) == 2
        assert result[0]["id"] == "list_file_1"
        assert result[1]["id"] == "list_file_2"

    def test_fetch_files_from_variable_value_with_list_of_file_objects(self):
        """Test with list containing File objects"""
        file_obj_1 = self.create_test_file("list_obj_1")
        file_obj_2 = self.create_test_file("list_obj_2")
        test_list = [file_obj_1, file_obj_2]
        result = WorkflowResponseConverter._fetch_files_from_variable_value(test_list)
        assert len(result) == 2
        assert result[0]["id"] == "list_obj_1"
        assert result[1]["id"] == "list_obj_2"

    def test_fetch_files_from_variable_value_with_list_mixed_valid_invalid(self):
        """Test with list containing mix of valid files and invalid items"""
        file_dict = self.create_file_dict("mixed_file")
        invalid_dict = {"not_a_file": "value"}
        test_list = [file_dict, invalid_dict, "string_item", 123]
        # Only the item carrying the identity marker should survive.
        result = WorkflowResponseConverter._fetch_files_from_variable_value(test_list)
        assert len(result) == 1
        assert result[0]["id"] == "mixed_file"

    def test_fetch_files_from_variable_value_with_list_nested_structures(self):
        """Test with list containing nested structures"""
        file_dict = self.create_file_dict("nested_file")
        nested_list = [file_dict, ["inner_list"]]
        test_list = [nested_list, {"nested": "dict"}]
        result = WorkflowResponseConverter._fetch_files_from_variable_value(test_list)
        # Should not process nested structures in list items
        assert result == []

    def test_fetch_files_from_variable_value_with_dict_incorrect_identity(self):
        """Test with dictionary having incorrect dify_model_identity"""
        invalid_dict = {"dify_model_identity": "wrong_identity", "id": "invalid_file", "filename": "test.txt"}
        result = WorkflowResponseConverter._fetch_files_from_variable_value(invalid_dict)
        assert result == []

    def test_fetch_files_from_variable_value_with_dict_missing_identity(self):
        """Test with dictionary missing dify_model_identity"""
        invalid_dict = {"id": "no_identity_file", "filename": "test.txt"}
        result = WorkflowResponseConverter._fetch_files_from_variable_value(invalid_dict)
        assert result == []

    def test_fetch_files_from_variable_value_with_dict_file_object(self):
        """Test with dictionary containing File object"""
        file_obj = self.create_test_file("dict_obj_file")
        test_dict = {"file_key": file_obj}
        result = WorkflowResponseConverter._fetch_files_from_variable_value(test_dict)
        # Should not extract File objects from dict values
        assert result == []

    def test_fetch_files_from_variable_value_with_mixed_data_types(self):
        """Test with various mixed data types"""
        mixed_data = {"string": "text", "number": 42, "boolean": True, "null": None, "dify_model_identity": "wrong"}
        result = WorkflowResponseConverter._fetch_files_from_variable_value(mixed_data)
        assert result == []

    def test_fetch_files_from_variable_value_with_invalid_objects(self):
        """Test with invalid objects that are not supported types"""
        # Test with an invalid dict that doesn't match expected patterns
        invalid_dict = {"custom_key": "custom_value"}
        result = WorkflowResponseConverter._fetch_files_from_variable_value(invalid_dict)
        assert result == []

    def test_fetch_files_from_variable_value_with_string_input(self):
        """Test with string input (unsupported type)"""
        # Since method expects Union[dict, list, Segment], test with empty list instead
        result = WorkflowResponseConverter._fetch_files_from_variable_value([])
        assert result == []

    def test_fetch_files_from_variable_value_with_number_input(self):
        """Test with number input (unsupported type)"""
        # Test with list containing numbers (should be ignored)
        result = WorkflowResponseConverter._fetch_files_from_variable_value([42, "string", None])
        assert result == []

    def test_fetch_files_from_variable_value_return_type_is_sequence(self):
        """Test that return type is Sequence[Mapping[str, Any]]"""
        file_dict = self.create_file_dict("type_test_file")
        result = WorkflowResponseConverter._fetch_files_from_variable_value(file_dict)
        assert isinstance(result, Sequence)
        assert len(result) == 1
        assert isinstance(result[0], Mapping)
        # Every key of a file mapping is expected to be a string.
        assert all(isinstance(key, str) for key in result[0])

    def test_fetch_files_from_variable_value_preserves_file_properties(self):
        """Test that all file properties are preserved in the result"""
        original_file = self.create_test_file("property_test")
        file_segment = FileSegment(value=original_file)
        result = WorkflowResponseConverter._fetch_files_from_variable_value(file_segment)
        assert len(result) == 1
        file_dict = result[0]
        assert file_dict["id"] == "property_test"
        assert file_dict["tenant_id"] == "test_tenant"
        assert file_dict["type"] == "document"
        assert file_dict["transfer_method"] == "local_file"
        assert file_dict["filename"] == "property_test.txt"
        assert file_dict["extension"] == ".txt"
        assert file_dict["mime_type"] == "text/plain"
        assert file_dict["size"] == 1024

    def test_fetch_files_from_variable_value_with_complex_nested_scenario(self):
        """Test complex scenario with nested valid and invalid data"""
        file_dict = self.create_file_dict("complex_file")
        file_obj = self.create_test_file("complex_obj")
        # Complex nested structure
        complex_data = [
            file_dict,  # Valid file dict
            file_obj,  # Valid file object
            {  # Invalid dict
                "not_file": "data",
                "nested": {"deep": "value"},
            },
            [  # Nested list (should be ignored)
                self.create_file_dict("nested_file")
            ],
            "string",  # Invalid string
            None,  # None value
            42,  # Invalid number
        ]
        result = WorkflowResponseConverter._fetch_files_from_variable_value(complex_data)
        assert len(result) == 2
        assert result[0]["id"] == "complex_file"
        assert result[1]["id"] == "complex_obj"

View File

@ -11,6 +11,7 @@ export const preprocessLaTeX = (content: string) => {
const codeBlockRegex = /```[\s\S]*?```/g
const codeBlocks = content.match(codeBlockRegex) || []
const escapeReplacement = (str: string) => str.replace(/\$/g, '_TMP_REPLACE_DOLLAR_')
let processedContent = content.replace(codeBlockRegex, 'CODE_BLOCK_PLACEHOLDER')
processedContent = flow([
@ -21,9 +22,11 @@ export const preprocessLaTeX = (content: string) => {
])(processedContent)
codeBlocks.forEach((block) => {
processedContent = processedContent.replace('CODE_BLOCK_PLACEHOLDER', block)
processedContent = processedContent.replace('CODE_BLOCK_PLACEHOLDER', escapeReplacement(block))
})
processedContent = processedContent.replace(/_TMP_REPLACE_DOLLAR_/g, '$')
return processedContent
}

View File

@ -62,7 +62,7 @@ const AppInputsPanel = ({
return []
let inputFormSchema = []
if (isBasicApp) {
inputFormSchema = currentApp.model_config.user_input_form.filter((item: any) => !item.external_data_tool).map((item: any) => {
inputFormSchema = currentApp.model_config?.user_input_form?.filter((item: any) => !item.external_data_tool).map((item: any) => {
if (item.paragraph) {
return {
...item.paragraph,
@ -108,10 +108,10 @@ const AppInputsPanel = ({
type: 'text-input',
required: false,
}
})
}) || []
}
else {
const startNode = currentWorkflow?.graph.nodes.find(node => node.data.type === BlockEnum.Start) as any
const startNode = currentWorkflow?.graph?.nodes.find(node => node.data.type === BlockEnum.Start) as any
inputFormSchema = startNode?.data.variables.map((variable: any) => {
if (variable.type === InputVarType.multiFiles) {
return {
@ -132,7 +132,7 @@ const AppInputsPanel = ({
...variable,
required: false,
}
})
}) || []
}
if ((currentApp.mode === 'completion' || currentApp.mode === 'workflow') && basicAppFileConfig.enabled) {
inputFormSchema.push({
@ -144,7 +144,7 @@ const AppInputsPanel = ({
fileUploadConfig,
})
}
return inputFormSchema
return inputFormSchema || []
}, [basicAppFileConfig, currentApp, currentWorkflow, fileUploadConfig, isBasicApp])
const handleFormChange = (value: Record<string, any>) => {

View File

@ -117,7 +117,7 @@ export const useInspectVarsCrud = () => {
if (nodeInfo) {
const index = draft.findIndex(node => node.nodeId === nodeId)
if (index === -1) {
draft.push({
draft.unshift({
nodeId,
nodeType: nodeInfo.data.type,
title: nodeInfo.data.title,