feat: add prompt optimization metaprompt

This commit is contained in:
stream
2025-07-08 14:01:56 +08:00
committed by Stream
parent ccc0e58e64
commit 2a54cec5a7
2 changed files with 107 additions and 1 deletions

View File

@ -4,6 +4,7 @@ import re
from typing import Optional, cast
import json_repair
from pydantic import BaseModel
from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
@ -13,7 +14,7 @@ from core.llm_generator.prompts import (
JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE,
PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE,
SYSTEM_STRUCTURED_OUTPUT_GENERATE,
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, PROMPT_OPTIMIZATION_METAPROMPT_SYSTEM,
)
from core.model_manager import ModelManager
from core.model_runtime.entities.llm_entities import LLMResult
@ -394,3 +395,55 @@ class LLMGenerator:
except Exception as e:
logging.exception(f"Failed to invoke LLM model, model: {model_config.get('name')}")
return {"output": "", "error": f"An unexpected error occurred: {str(e)}"}
@staticmethod
def generate_prompt_optimization(
    tenant_id: str,
    message: str,
    last_run: dict,
    current: str,
    model_config: dict,
) -> dict:
    """Ask an LLM to rewrite a prompt based on the user's feedback.

    Sends ``PROMPT_OPTIMIZATION_METAPROMPT_SYSTEM`` as the system prompt and a
    JSON payload of the current prompt, its last run, and the user's feedback
    as the user message, then parses the model's JSON reply.

    :param tenant_id: tenant whose model credentials are used
    :param message: user's feedback on the current prompt/output
    :param last_run: last run output produced with the current prompt
    :param current: the prompt text being optimized
    :param model_config: dict with "provider", "name", and optional
        "model_parameters" selecting the LLM to invoke
    :return: ``{"message": ..., "modified": ...}`` on success, or
        ``{"error": ...}`` on failure (callers check for the "error" key)
    """
    prompt_messages = [
        SystemPromptMessage(content=PROMPT_OPTIMIZATION_METAPROMPT_SYSTEM),
        UserPromptMessage(
            content=json.dumps(
                {
                    "current": current,
                    "last_run": last_run,
                    "message": message,
                }
            )
        ),
    ]
    model_instance = ModelManager().get_model_instance(
        tenant_id=tenant_id,
        model_type=ModelType.LLM,
        provider=model_config.get("provider", ""),
        model=model_config.get("name", ""),
    )
    model_parameters = model_config.get("model_parameters", {})
    try:
        response = cast(
            LLMResult,
            model_instance.invoke_llm(
                prompt_messages=prompt_messages,
                model_parameters=model_parameters,
                stream=False,
            ),
        )
        raw_content = response.message.content
        if not isinstance(raw_content, str):
            raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
        # Strip any prose the model wrapped around the JSON body; if there is
        # no {...} span the content passes through unchanged and json.loads
        # raises, which the generic handler below reports as an error.
        cleaned_content = re.sub(r"^[^{]*({.*})[^}]*$", r"\1", raw_content, flags=re.DOTALL)
        result = json.loads(cleaned_content)
        # json.loads can yield a list/scalar; fail explicitly instead of
        # letting result.get raise AttributeError into the generic handler.
        if not isinstance(result, dict):
            raise ValueError(f"LLM response must be a JSON object, got: {type(result)}")
        return {
            "message": result.get("message", ""),
            "modified": result.get("modified", ""),
        }
    except InvokeError as e:
        return {
            "error": f"Failed to invoke LLM model for prompt optimization. Error: {e}",
        }
    except Exception as e:
        # Lazy %-formatting: the message is only built if the record is emitted.
        logging.exception(
            "Failed to invoke LLM model for prompt optimization, model: %s",
            model_config.get("name"),
        )
        return {
            "error": f"An unexpected error occurred: {str(e)}",
        }

View File

@ -309,3 +309,56 @@ eg:
Here is the JSON schema:
{{schema}}
""" # noqa: E501
# Metaprompt for the prompt-optimization feature: instructs the model to read a
# JSON payload ({current, last_run, message}) and reply with a JSON object
# ({modified, message}). Fix: the input schema previously contained a trailing
# comma after last_run's "description", making the example schema invalid JSON
# while the prompt demands strict JSON — both embedded schemas now parse.
PROMPT_OPTIMIZATION_METAPROMPT_SYSTEM = """
Both your input and output should be in JSON format.
! Below is the schema for input content !
{
    "type": "object",
    "description": "The user is trying to process some content with a prompt, but the output is not as expected. They hope to achieve their goal by modifying the prompt.",
    "properties": {
        "current": {
            "type": "string",
            "description": "The prompt before modification, where placeholders {{}} will be replaced with actual values for the large language model."
        },
        "last_run": {
            "type": "object",
            "description": "Last running process from the large language model after receiving the prompt."
        },
        "message": {
            "type": "string",
            "description": "User's feedback on the current prompt, input, and output."
        }
    },
    "required": [
        "current",
        "last_run",
        "message"
    ]
}
! Above is the schema for input content !
! Below is the schema for output content !
{
    "type": "object",
    "description": "Your feedback to the user after they provide modification suggestions.",
    "properties": {
        "modified": {
            "type": "string",
            "description": "Your modified prompt. You should change the original prompt as little as possible to achieve the goal."
        },
        "message": {
            "type": "string",
            "description": "Your feedback to the user, explaining what you did and your thought process in text, providing sufficient emotional value to the user."
        }
    },
    "required": [
        "modified",
        "message"
    ]
}
! Above is the schema for output content !
Your output must strictly follow the schema format, do not output any content outside of the JSON body.
"""  # noqa: E501