mirror of https://github.com/langgenius/dify.git (synced 2026-05-02 00:18:03 +08:00)
Fix: style checks and unittests (#12603)
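Every hunk below makes the same kind of style fix inside QuestionClassifierNode: a statement that had been wrapped across two lines is joined back into a single line, presumably to satisfy the line-length rules behind the "style checks" in the commit title. In the hunks shown here only the wrapping changes; behavior is unchanged. (The file path is not visible on this page; the hunk headers identify the class.)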
@@ -44,13 +44,11 @@ class QuestionClassifierNode(LLMNode):
         variable_pool = self.graph_runtime_state.variable_pool
 
         # extract variables
-        variable = variable_pool.get(
-            node_data.query_variable_selector) if node_data.query_variable_selector else None
+        variable = variable_pool.get(node_data.query_variable_selector) if node_data.query_variable_selector else None
         query = variable.value if variable else None
         variables = {"query": query}
         # fetch model config
-        model_instance, model_config = self._fetch_model_config(
-            node_data.model)
+        model_instance, model_config = self._fetch_model_config(node_data.model)
         # fetch memory
         memory = self._fetch_memory(
             node_data_memory=node_data.memory,
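For context, the joined conditional reads: look up the node's query variable only when a selector is configured. A minimal stand-in for the lookup pattern (this VariablePool is a hypothetical sketch, not dify's actual class):

    # Minimal stand-in for the variable_pool.get(...) pattern above.
    # This VariablePool is a hypothetical sketch, not dify's implementation.
    from typing import Any, Optional

    class VariablePool:
        def __init__(self, data: dict[tuple[str, ...], Any]):
            self._data = data

        def get(self, selector: list[str]) -> Optional[Any]:
            # selectors are [node_id, variable_name] paths
            return self._data.get(tuple(selector))

    pool = VariablePool({("start", "query"): "How do I reset my password?"})
    selector = ["start", "query"]
    variable = pool.get(selector) if selector else None
    print(variable)  # How do I reset my password?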
@@ -58,8 +56,7 @@ class QuestionClassifierNode(LLMNode):
         )
         # fetch instruction
         node_data.instruction = node_data.instruction or ""
-        node_data.instruction = variable_pool.convert_template(
-            node_data.instruction).text
+        node_data.instruction = variable_pool.convert_template(node_data.instruction).text
 
         files = (
             self._fetch_files(
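The instruction string is rendered through the variable pool before it is used. A toy resolver with the same call shape (the {{#node.variable#}} reference syntax matches dify's templates, but this implementation is a simplified assumption):

    # Toy stand-in for variable_pool.convert_template(...).text.
    # The resolver below is an assumption for illustration, not the library code.
    import re
    from dataclasses import dataclass

    @dataclass
    class Segment:
        text: str

    def convert_template(template: str, values: dict[str, str]) -> Segment:
        # replace each {{#selector#}} reference with its value from the pool
        rendered = re.sub(r"\{\{#([^#]+)#\}\}", lambda m: values.get(m.group(1), ""), template)
        return Segment(text=rendered)

    instruction = "Classify questions about {{#start.topic#}}."
    print(convert_template(instruction, {"start.topic": "billing"}).text)
    # Classify questions about billing.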
@@ -181,15 +178,12 @@ class QuestionClassifierNode(LLMNode):
         variable_mapping = {"query": node_data.query_variable_selector}
         variable_selectors = []
         if node_data.instruction:
-            variable_template_parser = VariableTemplateParser(
-                template=node_data.instruction)
-            variable_selectors.extend(
-                variable_template_parser.extract_variable_selectors())
+            variable_template_parser = VariableTemplateParser(template=node_data.instruction)
+            variable_selectors.extend(variable_template_parser.extract_variable_selectors())
         for variable_selector in variable_selectors:
             variable_mapping[variable_selector.variable] = variable_selector.value_selector
 
-        variable_mapping = {node_id + "." + key: value for key,
-                            value in variable_mapping.items()}
+        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}
 
         return variable_mapping
 
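The re-joined dict comprehension namespaces every mapping key with the node id. A short worked example (node id and selectors are made up):

    # Worked example of the key-namespacing comprehension above;
    # node_id and the selectors are invented for illustration.
    node_id = "1711528915456"
    variable_mapping = {
        "query": ["start", "query"],
        "topic": ["start", "topic"],
    }
    variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}
    print(variable_mapping)
    # {'1711528915456.query': ['start', 'query'], '1711528915456.topic': ['start', 'topic']}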
@@ -210,8 +204,7 @@ class QuestionClassifierNode(LLMNode):
         context: Optional[str],
     ) -> int:
         prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True)
-        prompt_template = self._get_prompt_template(
-            node_data, query, None, 2000)
+        prompt_template = self._get_prompt_template(node_data, query, None, 2000)
         prompt_messages = prompt_transform.get_prompt(
             prompt_template=prompt_template,
             inputs={},
@@ -224,15 +217,13 @@ class QuestionClassifierNode(LLMNode):
         )
         rest_tokens = 2000
 
-        model_context_tokens = model_config.model_schema.model_properties.get(
-            ModelPropertyKey.CONTEXT_SIZE)
+        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
         if model_context_tokens:
             model_instance = ModelInstance(
                 provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
             )
 
-            curr_message_tokens = model_instance.get_llm_num_tokens(
-                prompt_messages)
+            curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
 
             max_tokens = 0
             for parameter_rule in model_config.model_schema.parameter_rules:
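This hunk sits inside the rest-token budget calculation; the diff stops before the arithmetic that uses these values. The sketch below fills that in as an assumption (the usual pattern, with invented numbers): remaining budget is context size minus the model's max output tokens minus the prompt's token count, clamped at zero.

    # Hedged sketch of the rest-token arithmetic around this hunk.
    # All three inputs are invented; the clamping is an assumption, since the
    # diff does not show the lines that consume these values.
    model_context_tokens = 8192  # ModelPropertyKey.CONTEXT_SIZE lookup
    curr_message_tokens = 1500   # model_instance.get_llm_num_tokens(prompt_messages)
    max_tokens = 512             # from the model's parameter rules

    rest_tokens = max(model_context_tokens - max_tokens - curr_message_tokens, 0)
    print(rest_tokens)  # 6180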
@@ -273,8 +264,7 @@ class QuestionClassifierNode(LLMNode):
         prompt_messages: list[LLMNodeChatModelMessage] = []
         if model_mode == ModelMode.CHAT:
             system_prompt_messages = LLMNodeChatModelMessage(
-                role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(
-                    histories=memory_str)
+                role=PromptMessageRole.SYSTEM, text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(histories=memory_str)
             )
             prompt_messages.append(system_prompt_messages)
             user_prompt_message_1 = LLMNodeChatModelMessage(
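In chat mode the classifier's system prompt is formatted with the serialized conversation memory. A reduced sketch (the template text and role literal are placeholders, not dify's actual QUESTION_CLASSIFIER_SYSTEM_PROMPT or PromptMessageRole):

    # Reduced sketch of the system-message construction above.
    # The template string is a placeholder; dify's real prompt is much longer.
    from dataclasses import dataclass

    QUESTION_CLASSIFIER_SYSTEM_PROMPT = (
        "You are a question classifier.\nConversation so far:\n{histories}"
    )

    @dataclass
    class LLMNodeChatModelMessage:
        role: str
        text: str

    memory_str = "Human: hi\nAssistant: hello"
    system_prompt_messages = LLMNodeChatModelMessage(
        role="system",
        text=QUESTION_CLASSIFIER_SYSTEM_PROMPT.format(histories=memory_str),
    )
    print(system_prompt_messages.text)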
@@ -315,5 +305,4 @@ class QuestionClassifierNode(LLMNode):
             )
 
         else:
-            raise InvalidModelTypeError(
-                f"Model mode {model_mode} not support.")
+            raise InvalidModelTypeError(f"Model mode {model_mode} not support.")