mirror of https://github.com/langgenius/dify.git (synced 2026-05-05 01:48:04 +08:00)
merge main
@@ -69,7 +69,7 @@ class BaseNode(Generic[GenericNodeData]):
         try:
             result = self._run()
         except Exception as e:
-            logger.error(f"Node {self.node_id} failed to run: {e}")
+            logger.exception(f"Node {self.node_id} failed to run: {e}")
             result = NodeRunResult(
                 status=WorkflowNodeExecutionStatus.FAILED,
                 error=str(e),
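Note: the only functional change in this hunk is swapping logger.error for logger.exception. A minimal illustration of the difference (run_node is a hypothetical stand-in, not dify code):

    import logging

    logger = logging.getLogger(__name__)

    def run_node(node_id: str, fn) -> None:
        try:
            fn()
        except Exception as e:
            # logger.error() records only the formatted message; called inside
            # an except block, logger.exception() logs at ERROR level and also
            # attaches the active traceback, making the failure debuggable.
            logger.exception(f"Node {node_id} failed to run: {e}")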
@@ -97,15 +97,6 @@ class Executor:
         headers = self.variable_pool.convert_template(self.node_data.headers).text
         self.headers = _plain_text_to_dict(headers)

-        body = self.node_data.body
-        if body is None:
-            return
-        if "content-type" not in (k.lower() for k in self.headers) and body.type in BODY_TYPE_TO_CONTENT_TYPE:
-            self.headers["Content-Type"] = BODY_TYPE_TO_CONTENT_TYPE[body.type]
-        if body.type == "form-data":
-            self.boundary = f"----WebKitFormBoundary{_generate_random_string(16)}"
-            self.headers["Content-Type"] = f"multipart/form-data; boundary={self.boundary}"
-
     def _init_body(self):
         body = self.node_data.body
         if body is not None:
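The removed block moves Content-Type and multipart-boundary setup out of header initialization (it reappears in the raw-request assembly further down). For context, a rough sketch of what a helper like _plain_text_to_dict plausibly does; the exact parsing rules here are an assumption, not taken from the source:

    def _plain_text_to_dict(text: str) -> dict[str, str]:
        # Assumed behavior: one "Key: Value" header per line; lines without
        # a colon are skipped and surrounding whitespace is trimmed.
        headers: dict[str, str] = {}
        for line in text.splitlines():
            if ":" in line:
                key, _, value = line.partition(":")
                headers[key.strip()] = value.strip()
        return headers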
@@ -154,9 +145,8 @@ class Executor:
                 for k, v in files.items()
                 if v.related_id is not None
             }

             self.data = form_data
-            self.files = files
+            self.files = files or None

     def _assembling_headers(self) -> dict[str, Any]:
         authorization = deepcopy(self.auth)
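The files or None change normalizes an empty mapping to None, so every later check sees "no files" in one canonical form. The pattern in isolation:

    files: dict[str, tuple] = {}  # nothing survived the related_id filter

    assert (files or None) is None  # empty dict collapses to None

    files["field"] = ("a.txt", b"data", "text/plain")
    assert (files or None) is files  # non-empty dict passes through unchanged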
@@ -217,6 +207,7 @@ class Executor:
             "timeout": (self.timeout.connect, self.timeout.read, self.timeout.write),
+            "follow_redirects": True,
         }
         # request_args = {k: v for k, v in request_args.items() if v is not None}

         response = getattr(ssrf_proxy, self.method)(**request_args)
         return response
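getattr(ssrf_proxy, self.method)(**request_args) dispatches to a function named after the HTTP verb. A hedged sketch of the same pattern against plain httpx (the argument values are illustrative, not dify's ssrf_proxy API):

    import httpx

    method = "get"  # e.g. taken from node configuration
    request_args = {
        "url": "https://example.com",
        "timeout": httpx.Timeout(connect=10, read=60, write=20, pool=None),
        "follow_redirects": True,
    }
    # Look up the verb by name on the module, then call it with the kwargs.
    response = getattr(httpx, method)(**request_args)
    print(response.status_code)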
@@ -244,6 +235,13 @@ class Executor:
         raw += f"Host: {url_parts.netloc}\r\n"

         headers = self._assembling_headers()
+        body = self.node_data.body
+        boundary = f"----WebKitFormBoundary{_generate_random_string(16)}"
+        if body:
+            if "content-type" not in (k.lower() for k in self.headers) and body.type in BODY_TYPE_TO_CONTENT_TYPE:
+                headers["Content-Type"] = BODY_TYPE_TO_CONTENT_TYPE[body.type]
+            if body.type == "form-data":
+                headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
         for k, v in headers.items():
             if self.auth.type == "api-key":
                 authorization_header = "Authorization"
@@ -256,7 +254,6 @@ class Executor:

         body = ""
         if self.files:
-            boundary = self.boundary
             for k, v in self.files.items():
                 body += f"--{boundary}\r\n"
                 body += f'Content-Disposition: form-data; name="{k}"\r\n\r\n'
@@ -271,7 +268,6 @@ class Executor:
         elif self.data and self.node_data.body.type == "x-www-form-urlencoded":
             body = urlencode(self.data)
         elif self.data and self.node_data.body.type == "form-data":
-            boundary = self.boundary
             for key, value in self.data.items():
                 body += f"--{boundary}\r\n"
                 body += f'Content-Disposition: form-data; name="{key}"\r\n\r\n'
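Both hunks above drop the shared self.boundary in favor of the boundary generated locally for the raw-request log. For reference, a complete multipart/form-data payload is also terminated by a closing boundary, which a hand-rolled encoder has to append itself, roughly:

    def build_multipart_body(fields: dict[str, str], boundary: str) -> str:
        # Minimal sketch matching the shape of the raw-request log above;
        # real encoders also emit per-part Content-Type headers and handle
        # binary file payloads.
        body = ""
        for name, value in fields.items():
            body += f"--{boundary}\r\n"
            body += f'Content-Disposition: form-data; name="{name}"\r\n\r\n'
            body += f"{value}\r\n"
        body += f"--{boundary}--\r\n"  # terminating boundary closes the payload
        return body

    print(build_multipart_body({"field": "hello"}, "----WebKitFormBoundaryabc123"))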
@@ -14,6 +14,7 @@ from core.model_runtime.entities import (
     PromptMessage,
     PromptMessageContentType,
     TextPromptMessageContent,
+    VideoPromptMessageContent,
 )
 from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
 from core.model_runtime.entities.model_entities import ModelType
@@ -560,7 +561,9 @@ class LLMNode(BaseNode[LLMNodeData]):
                     # cuz vision detail is related to the configuration from FileUpload feature.
                     content_item.detail = vision_detail
                     prompt_message_content.append(content_item)
-                elif isinstance(content_item, TextPromptMessageContent | AudioPromptMessageContent):
+                elif isinstance(
+                    content_item, TextPromptMessageContent | AudioPromptMessageContent | VideoPromptMessageContent
+                ):
                     prompt_message_content.append(content_item)

         if len(prompt_message_content) > 1:
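The widened check relies on isinstance accepting PEP 604 unions (X | Y), available since Python 3.10. A self-contained equivalent with stand-in classes:

    class TextContent: ...
    class AudioContent: ...
    class VideoContent: ...

    item = VideoContent()
    # isinstance accepts int | str style unions directly on Python 3.10+,
    # so adding VideoContent to the union is all the check needs to pass
    # video parts through alongside text and audio.
    assert isinstance(item, TextContent | AudioContent | VideoContent)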
@@ -49,11 +49,13 @@ class QuestionClassifierNode(LLMNode):
         variable_pool = self.graph_runtime_state.variable_pool

         # extract variables
-        variable = variable_pool.get(node_data.query_variable_selector) if node_data.query_variable_selector else None
+        variable = variable_pool.get(
+            node_data.query_variable_selector) if node_data.query_variable_selector else None
         query = variable.value if variable else None
         variables = {"query": query}
         # fetch model config
-        model_instance, model_config = self._fetch_model_config(node_data.model)
+        model_instance, model_config = self._fetch_model_config(
+            node_data.model)
         # fetch memory
         memory = self._fetch_memory(
             node_data_memory=node_data.memory,
@@ -61,7 +63,8 @@ class QuestionClassifierNode(LLMNode):
         )
         # fetch instruction
         node_data.instruction = node_data.instruction or ""
-        node_data.instruction = variable_pool.convert_template(node_data.instruction).text
+        node_data.instruction = variable_pool.convert_template(
+            node_data.instruction).text

         files: Sequence[File] = (
             self._fetch_files(
@@ -127,7 +130,7 @@ class QuestionClassifierNode(LLMNode):
             category_id = category_id_result

         except OutputParserError:
-            logging.error(f"Failed to parse result text: {result_text}")
+            logging.exception(f"Failed to parse result text: {result_text}")
         try:
             process_data = {
                 "model_mode": model_config.mode,
@@ -184,12 +187,15 @@ class QuestionClassifierNode(LLMNode):
         variable_mapping = {"query": node_data.query_variable_selector}
         variable_selectors = []
         if node_data.instruction:
-            variable_template_parser = VariableTemplateParser(template=node_data.instruction)
-            variable_selectors.extend(variable_template_parser.extract_variable_selectors())
+            variable_template_parser = VariableTemplateParser(
+                template=node_data.instruction)
+            variable_selectors.extend(
+                variable_template_parser.extract_variable_selectors())
         for variable_selector in variable_selectors:
             variable_mapping[variable_selector.variable] = variable_selector.value_selector

-        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}
+        variable_mapping = {node_id + "." + key: value for key,
+                            value in variable_mapping.items()}

         return variable_mapping

@@ -210,7 +216,8 @@ class QuestionClassifierNode(LLMNode):
         context: Optional[str],
     ) -> int:
         prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True)
-        prompt_template = self._get_prompt_template(node_data, query, None, 2000)
+        prompt_template = self._get_prompt_template(
+            node_data, query, None, 2000)
         prompt_messages = prompt_transform.get_prompt(
             prompt_template=prompt_template,
             inputs={},
@@ -223,13 +230,15 @@ class QuestionClassifierNode(LLMNode):
         )
         rest_tokens = 2000

-        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
+        model_context_tokens = model_config.model_schema.model_properties.get(
+            ModelPropertyKey.CONTEXT_SIZE)
         if model_context_tokens:
             model_instance = ModelInstance(
                 provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
             )

-            curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
+            curr_message_tokens = model_instance.get_llm_num_tokens(
+                prompt_messages)

             max_tokens = 0
             for parameter_rule in model_config.model_schema.parameter_rules:
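The surrounding code derives a remaining-token budget from the model's context size. Assuming the usual formula (context size minus prompt tokens minus the completion reservation; an assumption not confirmed beyond this hunk), the arithmetic is:

    def compute_rest_tokens(context_size: int, prompt_tokens: int, max_tokens: int) -> int:
        # Remaining budget = context window minus what the prompt already
        # uses minus the tokens reserved for the completion, floored at zero.
        return max(context_size - prompt_tokens - max_tokens, 0)

    # e.g. an 8192-token context, a 1200-token prompt, 512 reserved for output
    assert compute_rest_tokens(8192, 1200, 512) == 6480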
@@ -270,7 +279,8 @@ class QuestionClassifierNode(LLMNode):
         prompt_messages: list[LLMNodeChatModelMessage] = []
         if model_mode == ModelMode.CHAT:
             system_prompt_messages = LLMNodeChatModelMessage(
-                role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(histories=memory_str)
+                role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(
+                    histories=memory_str)
             )
             prompt_messages.append(system_prompt_messages)
             user_prompt_message_1 = LLMNodeChatModelMessage(
@@ -311,4 +321,5 @@ class QuestionClassifierNode(LLMNode):
             )

         else:
-            raise InvalidModelTypeError(f"Model mode {model_mode} not support.")
+            raise InvalidModelTypeError(
+                f"Model mode {model_mode} not support.")