mirror of https://github.com/langgenius/dify.git
refactor: consolidate LLM runtime model state on ModelInstance (#32746)
Signed-off-by: -LAN- <laipz8200@outlook.com>
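In short: the shared prompt-transform helpers previously required a ModelConfigWithCredentialsEntity; they now take keyword-only model_config and model_instance parameters, both optional, and resolve whichever is supplied onto a ModelInstance that carries the credentials, parameters, and stop sequences. A minimal standalone sketch of the new calling convention (stub types, not the dify classes):

    class StubTransform:
        def _calculate_rest_token(self, prompt_messages: list, *, model_config=None, model_instance=None) -> int:
            # Same shape as the new signature: at least one of the two must be given.
            if model_config is None and model_instance is None:
                raise ValueError("Either model_config or model_instance must be provided.")
            return 2000  # default budget, as in the diff below

    transform = StubTransform()
    transform._calculate_rest_token([], model_config="cfg")         # legacy style, now keyword-only
    transform._calculate_rest_token([], model_instance="instance")  # new style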
@@ -4,6 +4,7 @@ from typing import cast
 from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.helper.code_executor.jinja2.jinja2_formatter import Jinja2Formatter
 from core.memory.token_buffer_memory import TokenBufferMemory
+from core.model_manager import ModelInstance
 from core.model_runtime.entities import (
     AssistantPromptMessage,
     PromptMessage,
@@ -44,7 +45,8 @@ class AdvancedPromptTransform(PromptTransform):
         context: str | None,
         memory_config: MemoryConfig | None,
         memory: TokenBufferMemory | None,
-        model_config: ModelConfigWithCredentialsEntity,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
         image_detail_config: ImagePromptMessageContent.DETAIL | None = None,
     ) -> list[PromptMessage]:
         prompt_messages = []
@@ -59,6 +61,7 @@ class AdvancedPromptTransform(PromptTransform):
                 memory_config=memory_config,
                 memory=memory,
                 model_config=model_config,
+                model_instance=model_instance,
                 image_detail_config=image_detail_config,
             )
         elif isinstance(prompt_template, list) and all(isinstance(item, ChatModelMessage) for item in prompt_template):
@@ -71,6 +74,7 @@ class AdvancedPromptTransform(PromptTransform):
                 memory_config=memory_config,
                 memory=memory,
                 model_config=model_config,
+                model_instance=model_instance,
                 image_detail_config=image_detail_config,
             )
 
@@ -85,7 +89,8 @@ class AdvancedPromptTransform(PromptTransform):
         context: str | None,
         memory_config: MemoryConfig | None,
         memory: TokenBufferMemory | None,
-        model_config: ModelConfigWithCredentialsEntity,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
         image_detail_config: ImagePromptMessageContent.DETAIL | None = None,
     ) -> list[PromptMessage]:
         """
@@ -111,6 +116,7 @@ class AdvancedPromptTransform(PromptTransform):
                 parser=parser,
                 prompt_inputs=prompt_inputs,
                 model_config=model_config,
+                model_instance=model_instance,
             )
 
         if query:
@@ -146,7 +152,8 @@ class AdvancedPromptTransform(PromptTransform):
         context: str | None,
         memory_config: MemoryConfig | None,
         memory: TokenBufferMemory | None,
-        model_config: ModelConfigWithCredentialsEntity,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
         image_detail_config: ImagePromptMessageContent.DETAIL | None = None,
     ) -> list[PromptMessage]:
         """
@@ -198,8 +205,13 @@ class AdvancedPromptTransform(PromptTransform):
 
         prompt_message_contents: list[PromptMessageContentUnionTypes] = []
         if memory and memory_config:
-            prompt_messages = self._append_chat_histories(memory, memory_config, prompt_messages, model_config)
-
+            prompt_messages = self._append_chat_histories(
+                memory,
+                memory_config,
+                prompt_messages,
+                model_config=model_config,
+                model_instance=model_instance,
+            )
         if files and query is not None:
             for file in files:
                 prompt_message_contents.append(
@@ -276,7 +288,8 @@ class AdvancedPromptTransform(PromptTransform):
         role_prefix: MemoryConfig.RolePrefix,
         parser: PromptTemplateParser,
         prompt_inputs: Mapping[str, str],
-        model_config: ModelConfigWithCredentialsEntity,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
     ) -> Mapping[str, str]:
         prompt_inputs = dict(prompt_inputs)
         if "#histories#" in parser.variable_keys:
@@ -286,7 +299,11 @@ class AdvancedPromptTransform(PromptTransform):
             prompt_inputs = {k: inputs[k] for k in parser.variable_keys if k in inputs}
             tmp_human_message = UserPromptMessage(content=parser.format(prompt_inputs))
 
-            rest_tokens = self._calculate_rest_token([tmp_human_message], model_config)
+            rest_tokens = self._calculate_rest_token(
+                [tmp_human_message],
+                model_config=model_config,
+                model_instance=model_instance,
+            )
 
             histories = self._get_history_messages_from_memory(
                 memory=memory,
@@ -41,7 +41,7 @@ class AgentHistoryPromptTransform(PromptTransform):
         if not self.memory:
             return prompt_messages
 
-        max_token_limit = self._calculate_rest_token(self.prompt_messages, self.model_config)
+        max_token_limit = self._calculate_rest_token(self.prompt_messages, model_config=self.model_config)
 
         model_type_instance = self.model_config.provider_model_bundle.model_type_instance
         model_type_instance = cast(LargeLanguageModel, model_type_instance)
@@ -4,45 +4,83 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
 from core.memory.token_buffer_memory import TokenBufferMemory
+from core.model_manager import ModelInstance
 from core.model_runtime.entities.message_entities import PromptMessage
-from core.model_runtime.entities.model_entities import ModelPropertyKey
+from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey
 from core.prompt.entities.advanced_prompt_entities import MemoryConfig
 
 
 class PromptTransform:
+    def _resolve_model_runtime(
+        self,
+        *,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
+    ) -> tuple[ModelInstance, AIModelEntity]:
+        if model_instance is None:
+            if model_config is None:
+                raise ValueError("Either model_config or model_instance must be provided.")
+            model_instance = ModelInstance(
+                provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
+            )
+            model_instance.credentials = model_config.credentials
+            model_instance.parameters = model_config.parameters
+            model_instance.stop = model_config.stop
+
+        model_schema = model_instance.model_type_instance.get_model_schema(
+            model=model_instance.model_name,
+            credentials=model_instance.credentials,
+        )
+        if model_schema is None:
+            if model_config is None:
+                raise ValueError("Model schema not found for the provided model instance.")
+            model_schema = model_config.model_schema
+
+        return model_instance, model_schema
+
     def _append_chat_histories(
         self,
         memory: TokenBufferMemory,
         memory_config: MemoryConfig,
         prompt_messages: list[PromptMessage],
-        model_config: ModelConfigWithCredentialsEntity,
+        *,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
     ) -> list[PromptMessage]:
-        rest_tokens = self._calculate_rest_token(prompt_messages, model_config)
+        rest_tokens = self._calculate_rest_token(
+            prompt_messages,
+            model_config=model_config,
+            model_instance=model_instance,
+        )
         histories = self._get_history_messages_list_from_memory(memory, memory_config, rest_tokens)
         prompt_messages.extend(histories)
 
         return prompt_messages
 
     def _calculate_rest_token(
-        self, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
+        self,
+        prompt_messages: list[PromptMessage],
+        *,
+        model_config: ModelConfigWithCredentialsEntity | None = None,
+        model_instance: ModelInstance | None = None,
     ) -> int:
+        model_instance, model_schema = self._resolve_model_runtime(
+            model_config=model_config,
+            model_instance=model_instance,
+        )
+        model_parameters = model_instance.parameters
         rest_tokens = 2000
 
-        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
+        model_context_tokens = model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
         if model_context_tokens:
-            model_instance = ModelInstance(
-                provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
-            )
-
             curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)
 
             max_tokens = 0
-            for parameter_rule in model_config.model_schema.parameter_rules:
+            for parameter_rule in model_schema.parameter_rules:
                 if parameter_rule.name == "max_tokens" or (
                     parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
                 ):
                     max_tokens = (
-                        model_config.parameters.get(parameter_rule.name)
-                        or model_config.parameters.get(parameter_rule.use_template or "")
+                        model_parameters.get(parameter_rule.name)
+                        or model_parameters.get(parameter_rule.use_template or "")
                     ) or 0
 
             rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
 
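The token budget formula itself is unchanged: remaining context = context window minus the configured max_tokens minus the tokens already consumed by the prompt, with 2000 as the fallback when no context size is known. A worked sketch with made-up numbers (the real code reads CONTEXT_SIZE and the max_tokens parameter rule from the model schema):

    def remaining_context(context_size: int | None, max_tokens: int, prompt_tokens: int) -> int:
        # Mirrors _calculate_rest_token: fall back to 2000 without a known context size.
        if not context_size:
            return 2000
        return context_size - max_tokens - prompt_tokens

    assert remaining_context(8192, 1024, 3000) == 4168  # 8192 - 1024 - 3000
    assert remaining_context(None, 1024, 3000) == 2000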
@@ -252,7 +252,7 @@ class SimplePromptTransform(PromptTransform):
         if memory:
             tmp_human_message = UserPromptMessage(content=prompt)
 
-            rest_tokens = self._calculate_rest_token([tmp_human_message], model_config)
+            rest_tokens = self._calculate_rest_token([tmp_human_message], model_config=model_config)
             histories = self._get_history_messages_from_memory(
                 memory=memory,
                 memory_config=MemoryConfig(
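Note the bare `*,` in the new PromptTransform signatures: model_config and model_instance are keyword-only, so the old positional style seen on the removed lines would now raise a TypeError, which is why every call site in this commit passes them by name. A small stand-in illustration:

    def calc(prompt_messages: list, *, model_config=None, model_instance=None) -> int:
        return 0  # stand-in for the real computation

    calc([], model_config="cfg")  # fine
    try:
        calc([], "cfg")  # old positional form
    except TypeError as exc:
        print(exc)  # calc() takes 1 positional argument but 2 were given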