fix(api): Preserving the content transform logic in fetch_prompt_messages (#33666)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
QuantumGhost authored 2026-03-18 19:41:25 +08:00, committed by GitHub
parent 7c99c9f3e8
commit 29c70736dc
2 changed files with 112 additions and 2 deletions


@@ -256,9 +256,13 @@ def fetch_prompt_messages(
                 ):
                     continue
                 prompt_message_content.append(content_item)
-            if prompt_message_content:
+            if not prompt_message_content:
+                continue
+            if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
+                prompt_message.content = prompt_message_content[0].data
+            else:
                 prompt_message.content = prompt_message_content
-                filtered_prompt_messages.append(prompt_message)
+            filtered_prompt_messages.append(prompt_message)
         elif not prompt_message.is_empty():
             filtered_prompt_messages.append(prompt_message)
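
In plain terms, the restored branch flattens a lone surviving text item back to a plain string, keeps a list when several items survive filtering, and skips the message when nothing survives. A minimal standalone sketch of that rule, using a hypothetical TextContent stand-in rather than the real prompt message content types:

from dataclasses import dataclass


# Hypothetical stand-in for the real text content type; only the shape matters here.
@dataclass
class TextContent:
    type: str = "text"
    data: str = ""


def transform_content(items):
    """Sketch of the restored branch: flatten a lone text item, otherwise keep the list."""
    if not items:
        return None  # caller drops the message entirely
    if len(items) == 1 and items[0].type == "text":
        return items[0].data
    return items


# A single surviving text item collapses to its string payload.
assert transform_content([TextContent(data="You are a classifier.")]) == "You are a classifier."
# Several surviving items stay a list.
assert isinstance(transform_content([TextContent(data="You are"), TextContent(data=" a classifier.")]), list)
# Nothing survived filtering: the message is skipped.
assert transform_content([]) is None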


@@ -0,0 +1,106 @@
from unittest import mock

import pytest

from core.model_manager import ModelInstance
from dify_graph.model_runtime.entities import ImagePromptMessageContent, PromptMessageRole, TextPromptMessageContent
from dify_graph.model_runtime.entities.message_entities import SystemPromptMessage
from dify_graph.nodes.llm import llm_utils
from dify_graph.nodes.llm.entities import LLMNodeChatModelMessage
from dify_graph.nodes.llm.exc import NoPromptFoundError
from dify_graph.runtime import VariablePool


def _fetch_prompt_messages_with_mocked_content(content):
    """Invoke fetch_prompt_messages with collaborators stubbed so the rendered
    prompt is a single system message carrying ``content``."""
    variable_pool = VariablePool.empty()
    model_instance = mock.MagicMock(spec=ModelInstance)
    prompt_template = [
        LLMNodeChatModelMessage(
            text="You are a classifier.",
            role=PromptMessageRole.SYSTEM,
            edition_type="basic",
        )
    ]
    with (
        # The mocked model schema advertises no features, so non-text content gets filtered out.
        mock.patch(
            "dify_graph.nodes.llm.llm_utils.fetch_model_schema",
            return_value=mock.MagicMock(features=[]),
        ),
        mock.patch(
            "dify_graph.nodes.llm.llm_utils.handle_list_messages",
            return_value=[SystemPromptMessage(content=content)],
        ),
        mock.patch(
            "dify_graph.nodes.llm.llm_utils.handle_memory_chat_mode",
            return_value=[],
        ),
    ):
        return llm_utils.fetch_prompt_messages(
            sys_query=None,
            sys_files=[],
            context=None,
            memory=None,
            model_instance=model_instance,
            prompt_template=prompt_template,
            stop=["END"],
            memory_config=None,
            vision_enabled=False,
            vision_detail=ImagePromptMessageContent.DETAIL.HIGH,
            variable_pool=variable_pool,
            jinja2_variables=[],
            template_renderer=None,
        )


def test_fetch_prompt_messages_skips_messages_when_all_contents_are_filtered_out():
    with pytest.raises(NoPromptFoundError):
        _fetch_prompt_messages_with_mocked_content(
            [
                ImagePromptMessageContent(
                    format="url",
                    url="https://example.com/image.png",
                    mime_type="image/png",
                ),
            ]
        )


def test_fetch_prompt_messages_flattens_single_text_content_after_filtering_unsupported_multimodal_items():
    prompt_messages, stop = _fetch_prompt_messages_with_mocked_content(
        [
            TextPromptMessageContent(data="You are a classifier."),
            ImagePromptMessageContent(
                format="url",
                url="https://example.com/image.png",
                mime_type="image/png",
            ),
        ]
    )
    assert stop == ["END"]
    assert prompt_messages == [SystemPromptMessage(content="You are a classifier.")]


def test_fetch_prompt_messages_keeps_list_content_when_multiple_supported_items_remain():
    prompt_messages, stop = _fetch_prompt_messages_with_mocked_content(
        [
            TextPromptMessageContent(data="You are"),
            TextPromptMessageContent(data=" a classifier."),
            ImagePromptMessageContent(
                format="url",
                url="https://example.com/image.png",
                mime_type="image/png",
            ),
        ]
    )
    assert stop == ["END"]
    assert prompt_messages == [
        SystemPromptMessage(
            content=[
                TextPromptMessageContent(data="You are"),
                TextPromptMessageContent(data=" a classifier."),
            ]
        )
    ]