Compare commits

..

15 Commits

Author SHA1 Message Date
f101956d0f fix: remove proxy 2024-12-20 18:33:45 +08:00
5c5221f2cc revert: these 2 settings 2024-12-20 17:53:33 +08:00
ef7e47d162 fix: rerank switch (#11897) 2024-12-20 16:12:34 +08:00
4211b9abbd chore: translate i18n files (#11892)
Co-authored-by: zxhlyh <16177003+zxhlyh@users.noreply.github.com>
2024-12-20 16:12:01 +08:00
0c0120ef27 Feat/workflow retry (#11885) 2024-12-20 15:44:37 +08:00
dacd457478 feat: add workflow parallel depth limit configuration (#11460)
Signed-off-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: zxhlyh <jasonapring2015@outlook.com>
2024-12-20 14:52:20 +08:00
7b03a0316d fix: better memory usage from 800+ to 500+ (#11796)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2024-12-20 14:51:43 +08:00
52201d95b1 chore: add retry index migration (#11887)
Co-authored-by: Novice Lee <novicelee@NoviPro.local>
2024-12-20 14:40:33 +08:00
e2cde628bb chore: translate i18n files (#11855)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-12-20 14:19:47 +08:00
3335fa78fc fix: node 22 build (#11883) 2024-12-20 14:14:27 +08:00
7abc7fa573 Feat: Retry on node execution errors (#11871)
Co-authored-by: Novice Lee <novicelee@NoviPro.local>
2024-12-20 14:14:06 +08:00
f6247fe67c Feat: Add partial success status to the app log (#11869)
Co-authored-by: Novice Lee <novicelee@NoviPro.local>
2024-12-20 14:13:44 +08:00
996a9135f6 feat(llm_node): support order in text and files (#11837)
Signed-off-by: -LAN- <laipz8200@outlook.com>
2024-12-20 14:12:50 +08:00
3599751f93 chore(db): use a better way to export models and remove unused table (#11838)
Signed-off-by: -LAN- <laipz8200@outlook.com>
2024-12-20 14:12:29 +08:00
2d186e1e76 chore(deps): bump nanoid from 3.3.7 to 3.3.8 in /web (#11876)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-20 13:46:54 +08:00
124 changed files with 1607 additions and 384 deletions

View File

@ -399,6 +399,7 @@ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
MAX_VARIABLE_SIZE=204800
# App configuration

View File

@ -433,6 +433,11 @@ class WorkflowConfig(BaseSettings):
default=5,
)
WORKFLOW_PARALLEL_DEPTH_LIMIT: PositiveInt = Field(
description="Maximum allowed depth for nested parallel executions",
default=3,
)
MAX_VARIABLE_SIZE: PositiveInt = Field(
description="Maximum size in bytes for a single variable in workflows. Default to 200 KB.",
default=200 * 1024,

View File

@ -6,6 +6,7 @@ from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
import services
from configs import dify_config
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist, DraftWorkflowNotSync
from controllers.console.app.wraps import get_app_model
@ -426,7 +427,21 @@ class ConvertToWorkflowApi(Resource):
}
class WorkflowConfigApi(Resource):
    """Read-only endpoint exposing workflow configuration to the console UI."""

    @setup_required
    @login_required
    @account_initialization_required
    @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
    def get(self, app_model: App):
        # Currently only the nested-parallel depth limit is surfaced.
        config = {
            "parallel_depth_limit": dify_config.WORKFLOW_PARALLEL_DEPTH_LIMIT,
        }
        return config
api.add_resource(DraftWorkflowApi, "/apps/<uuid:app_id>/workflows/draft")
api.add_resource(WorkflowConfigApi, "/apps/<uuid:app_id>/workflows/draft/config")
api.add_resource(AdvancedChatDraftWorkflowRunApi, "/apps/<uuid:app_id>/advanced-chat/workflows/draft/run")
api.add_resource(DraftWorkflowRunApi, "/apps/<uuid:app_id>/workflows/draft/run")
api.add_resource(WorkflowTaskStopApi, "/apps/<uuid:app_id>/workflow-runs/tasks/<string:task_id>/stop")

View File

@ -5,8 +5,7 @@ from typing import Optional, Union
from controllers.console.app.error import AppNotFoundError
from extensions.ext_database import db
from libs.login import current_user
from models import App
from models.model import AppMode
from models import App, AppMode
def get_app_model(view: Optional[Callable] = None, *, mode: Union[AppMode, list[AppMode]] = None):

View File

@ -1,15 +1,14 @@
import base64
from configs import dify_config
from core.file import file_repository
from core.helper import ssrf_proxy
from core.model_runtime.entities import (
AudioPromptMessageContent,
DocumentPromptMessageContent,
ImagePromptMessageContent,
MultiModalPromptMessageContent,
VideoPromptMessageContent,
)
from extensions.ext_database import db
from extensions.ext_storage import storage
from . import helpers
@ -41,7 +40,7 @@ def to_prompt_message_content(
/,
*,
image_detail_config: ImagePromptMessageContent.DETAIL | None = None,
):
) -> MultiModalPromptMessageContent:
if f.extension is None:
raise ValueError("Missing file extension")
if f.mime_type is None:
@ -70,16 +69,13 @@ def to_prompt_message_content(
def download(f: File, /):
if f.transfer_method == FileTransferMethod.TOOL_FILE:
tool_file = file_repository.get_tool_file(session=db.session(), file=f)
return _download_file_content(tool_file.file_key)
elif f.transfer_method == FileTransferMethod.LOCAL_FILE:
upload_file = file_repository.get_upload_file(session=db.session(), file=f)
return _download_file_content(upload_file.key)
# remote file
response = ssrf_proxy.get(f.remote_url, follow_redirects=True)
response.raise_for_status()
return response.content
if f.transfer_method in (FileTransferMethod.TOOL_FILE, FileTransferMethod.LOCAL_FILE):
return _download_file_content(f._storage_key)
elif f.transfer_method == FileTransferMethod.REMOTE_URL:
response = ssrf_proxy.get(f.remote_url, follow_redirects=True)
response.raise_for_status()
return response.content
raise ValueError(f"unsupported transfer method: {f.transfer_method}")
def _download_file_content(path: str, /):
@ -110,11 +106,9 @@ def _get_encoded_string(f: File, /):
response.raise_for_status()
data = response.content
case FileTransferMethod.LOCAL_FILE:
upload_file = file_repository.get_upload_file(session=db.session(), file=f)
data = _download_file_content(upload_file.key)
data = _download_file_content(f._storage_key)
case FileTransferMethod.TOOL_FILE:
tool_file = file_repository.get_tool_file(session=db.session(), file=f)
data = _download_file_content(tool_file.file_key)
data = _download_file_content(f._storage_key)
encoded_string = base64.b64encode(data).decode("utf-8")
return encoded_string

View File

@ -1,32 +0,0 @@
from sqlalchemy import select
from sqlalchemy.orm import Session
from models import ToolFile, UploadFile
from .models import File
def get_upload_file(*, session: Session, file: File):
    """Fetch the UploadFile row backing *file*, scoped to the file's tenant.

    Raises ValueError when the file has no related_id or no row matches.
    """
    if file.related_id is None:
        raise ValueError("Missing file related_id")

    query = select(UploadFile).filter(
        UploadFile.tenant_id == file.tenant_id,
        UploadFile.id == file.related_id,
    )
    found = session.scalar(query)
    if found is None:
        raise ValueError(f"upload file {file.related_id} not found")
    return found
def get_tool_file(*, session: Session, file: File):
    """Fetch the ToolFile row backing *file*, scoped to the file's tenant.

    Raises ValueError when the file has no related_id or no row matches.
    """
    if file.related_id is None:
        raise ValueError("Missing file related_id")

    query = select(ToolFile).filter(
        ToolFile.tenant_id == file.tenant_id,
        ToolFile.id == file.related_id,
    )
    found = session.scalar(query)
    if found is None:
        raise ValueError(f"tool file {file.related_id} not found")
    return found

View File

@ -47,6 +47,38 @@ class File(BaseModel):
mime_type: Optional[str] = None
size: int = -1
# Those properties are private, should not be exposed to the outside.
_storage_key: str
def __init__(
    self,
    *,
    id: Optional[str] = None,
    tenant_id: str,
    type: FileType,
    transfer_method: FileTransferMethod,
    remote_url: Optional[str] = None,
    related_id: Optional[str] = None,
    filename: Optional[str] = None,
    extension: Optional[str] = None,
    mime_type: Optional[str] = None,
    size: int = -1,
    storage_key: str,
):
    """Construct a File.

    ``storage_key`` is accepted as a keyword-only constructor argument and
    stored on the private ``_storage_key`` attribute rather than passed to
    the model, keeping it out of the model's declared public fields.
    """
    # Initialize the declared public fields via the pydantic BaseModel.
    super().__init__(
        id=id,
        tenant_id=tenant_id,
        type=type,
        transfer_method=transfer_method,
        remote_url=remote_url,
        related_id=related_id,
        filename=filename,
        extension=extension,
        mime_type=mime_type,
        size=size,
    )
    # Private attribute — per the class note, not exposed to the outside.
    self._storage_key = storage_key
def to_dict(self) -> Mapping[str, str | int | None]:
data = self.model_dump(mode="json")
return {

View File

@ -1,6 +1,5 @@
import base64
from extensions.ext_database import db
from libs import rsa
@ -14,6 +13,7 @@ def obfuscated_token(token: str):
def encrypt_token(tenant_id: str, token: str):
from models.account import Tenant
from models.engine import db
if not (tenant := db.session.query(Tenant).filter(Tenant.id == tenant_id).first()):
raise ValueError(f"Tenant with id {tenant_id} not found")

View File

@ -4,6 +4,7 @@ from .message_entities import (
AudioPromptMessageContent,
DocumentPromptMessageContent,
ImagePromptMessageContent,
MultiModalPromptMessageContent,
PromptMessage,
PromptMessageContent,
PromptMessageContentType,
@ -27,6 +28,7 @@ __all__ = [
"LLMResultChunkDelta",
"LLMUsage",
"ModelPropertyKey",
"MultiModalPromptMessageContent",
"PromptMessage",
"PromptMessage",
"PromptMessageContent",

View File

@ -84,10 +84,10 @@ class MultiModalPromptMessageContent(PromptMessageContent):
"""
type: PromptMessageContentType
format: str = Field(..., description="the format of multi-modal file")
base64_data: str = Field("", description="the base64 data of multi-modal file")
url: str = Field("", description="the url of multi-modal file")
mime_type: str = Field(..., description="the mime type of multi-modal file")
format: str = Field(default=..., description="the format of multi-modal file")
base64_data: str = Field(default="", description="the base64 data of multi-modal file")
url: str = Field(default="", description="the url of multi-modal file")
mime_type: str = Field(default=..., description="the mime type of multi-modal file")
@computed_field(return_type=str)
@property

View File

@ -4,11 +4,10 @@ import json
import logging
import time
from collections.abc import Generator
from typing import Optional, Union, cast
from typing import TYPE_CHECKING, Optional, Union, cast
import google.auth.transport.requests
import requests
import vertexai.generative_models as glm
from anthropic import AnthropicVertex, Stream
from anthropic.types import (
ContentBlockDeltaEvent,
@ -19,8 +18,6 @@ from anthropic.types import (
MessageStreamEvent,
)
from google.api_core import exceptions
from google.cloud import aiplatform
from google.oauth2 import service_account
from PIL import Image
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
@ -47,6 +44,9 @@ from core.model_runtime.errors.invoke import (
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
if TYPE_CHECKING:
import vertexai.generative_models as glm
logger = logging.getLogger(__name__)
@ -102,6 +102,8 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
:param stream: is stream response
:return: full response or stream response chunk generator result
"""
from google.oauth2 import service_account
# use Anthropic official SDK references
# - https://github.com/anthropics/anthropic-sdk-python
service_account_key = credentials.get("vertex_service_account_key", "")
@ -406,13 +408,15 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
return text.rstrip()
def _convert_tools_to_glm_tool(self, tools: list[PromptMessageTool]) -> glm.Tool:
def _convert_tools_to_glm_tool(self, tools: list[PromptMessageTool]) -> "glm.Tool":
"""
Convert tool messages to glm tools
:param tools: tool messages
:return: glm tools
"""
import vertexai.generative_models as glm
return glm.Tool(
function_declarations=[
glm.FunctionDeclaration(
@ -473,6 +477,10 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
:param user: unique user id
:return: full response or stream response chunk generator result
"""
import vertexai.generative_models as glm
from google.cloud import aiplatform
from google.oauth2 import service_account
config_kwargs = model_parameters.copy()
config_kwargs["max_output_tokens"] = config_kwargs.pop("max_tokens_to_sample", None)
@ -522,7 +530,7 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
return self._handle_generate_response(model, credentials, response, prompt_messages)
def _handle_generate_response(
self, model: str, credentials: dict, response: glm.GenerationResponse, prompt_messages: list[PromptMessage]
self, model: str, credentials: dict, response: "glm.GenerationResponse", prompt_messages: list[PromptMessage]
) -> LLMResult:
"""
Handle llm response
@ -554,7 +562,7 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
return result
def _handle_generate_stream_response(
self, model: str, credentials: dict, response: glm.GenerationResponse, prompt_messages: list[PromptMessage]
self, model: str, credentials: dict, response: "glm.GenerationResponse", prompt_messages: list[PromptMessage]
) -> Generator:
"""
Handle llm stream response
@ -638,13 +646,15 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
return message_text
def _format_message_to_glm_content(self, message: PromptMessage) -> glm.Content:
def _format_message_to_glm_content(self, message: PromptMessage) -> "glm.Content":
"""
Format a single message into glm.Content for Google API
:param message: one PromptMessage
:return: glm Content representation of message
"""
import vertexai.generative_models as glm
if isinstance(message, UserPromptMessage):
glm_content = glm.Content(role="user", parts=[])

View File

@ -2,12 +2,9 @@ import base64
import json
import time
from decimal import Decimal
from typing import Optional
from typing import TYPE_CHECKING, Optional
import tiktoken
from google.cloud import aiplatform
from google.oauth2 import service_account
from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel
from core.entities.embedding_type import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
@ -24,6 +21,11 @@ from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.model_runtime.model_providers.vertex_ai._common import _CommonVertexAi
if TYPE_CHECKING:
from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel
else:
VertexTextEmbeddingModel = None
class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
"""
@ -48,6 +50,10 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
:param input_type: input type
:return: embeddings result
"""
from google.cloud import aiplatform
from google.oauth2 import service_account
from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]
location = credentials["vertex_location"]
@ -100,6 +106,10 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
:param credentials: model credentials
:return:
"""
from google.cloud import aiplatform
from google.oauth2 import service_account
from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel
try:
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]

View File

@ -1,18 +1,19 @@
import re
from typing import Optional
import jieba
from jieba.analyse import default_tfidf
from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS
class JiebaKeywordTableHandler:
def __init__(self):
    # Imports are deferred to construction time rather than module import —
    # presumably to cut process startup cost/memory; confirm against the
    # surrounding refactor before relying on this.
    import jieba.analyse

    from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS

    # Configure jieba's default TF-IDF extractor to ignore stopwords.
    jieba.analyse.default_tfidf.stop_words = STOPWORDS
def extract_keywords(self, text: str, max_keywords_per_chunk: Optional[int] = 10) -> set[str]:
"""Extract keywords with JIEBA tfidf."""
import jieba
keywords = jieba.analyse.extract_tags(
sentence=text,
topK=max_keywords_per_chunk,
@ -22,6 +23,8 @@ class JiebaKeywordTableHandler:
def _expand_tokens_with_subtokens(self, tokens: set[str]) -> set[str]:
"""Get subtokens from a list of tokens., filtering for stopwords."""
from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS
results = set()
for token in tokens:
results.add(token)

View File

@ -6,10 +6,8 @@ from contextlib import contextmanager
from typing import Any
import jieba.posseg as pseg
import nltk
import numpy
import oracledb
from nltk.corpus import stopwords
from pydantic import BaseModel, model_validator
from configs import dify_config
@ -202,6 +200,10 @@ class OracleVector(BaseVector):
return docs
def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
# lazy import
import nltk
from nltk.corpus import stopwords
top_k = kwargs.get("top_k", 5)
# just not implement fetch by score_threshold now, may be later
score_threshold = float(kwargs.get("score_threshold") or 0.0)

View File

@ -4,6 +4,7 @@ from typing import Any, Optional, cast
from pydantic import BaseModel, Field
from configs import dify_config
from core.workflow.graph_engine.entities.run_condition import RunCondition
from core.workflow.nodes import NodeType
from core.workflow.nodes.answer.answer_stream_generate_router import AnswerStreamGeneratorRouter
@ -170,7 +171,9 @@ class Graph(BaseModel):
for parallel in parallel_mapping.values():
if parallel.parent_parallel_id:
cls._check_exceed_parallel_limit(
parallel_mapping=parallel_mapping, level_limit=3, parent_parallel_id=parallel.parent_parallel_id
parallel_mapping=parallel_mapping,
level_limit=dify_config.WORKFLOW_PARALLEL_DEPTH_LIMIT,
parent_parallel_id=parallel.parent_parallel_id,
)
# init answer stream generate routes

View File

@ -8,12 +8,6 @@ import docx
import pandas as pd
import pypdfium2 # type: ignore
import yaml # type: ignore
from unstructured.partition.api import partition_via_api
from unstructured.partition.email import partition_email
from unstructured.partition.epub import partition_epub
from unstructured.partition.msg import partition_msg
from unstructured.partition.ppt import partition_ppt
from unstructured.partition.pptx import partition_pptx
from configs import dify_config
from core.file import File, FileTransferMethod, file_manager
@ -256,6 +250,8 @@ def _extract_text_from_excel(file_content: bytes) -> str:
def _extract_text_from_ppt(file_content: bytes) -> str:
from unstructured.partition.ppt import partition_ppt
try:
with io.BytesIO(file_content) as file:
elements = partition_ppt(file=file)
@ -265,6 +261,9 @@ def _extract_text_from_ppt(file_content: bytes) -> str:
def _extract_text_from_pptx(file_content: bytes) -> str:
from unstructured.partition.api import partition_via_api
from unstructured.partition.pptx import partition_pptx
try:
if dify_config.UNSTRUCTURED_API_URL and dify_config.UNSTRUCTURED_API_KEY:
with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as temp_file:
@ -287,6 +286,8 @@ def _extract_text_from_pptx(file_content: bytes) -> str:
def _extract_text_from_epub(file_content: bytes) -> str:
from unstructured.partition.epub import partition_epub
try:
with io.BytesIO(file_content) as file:
elements = partition_epub(file=file)
@ -296,6 +297,8 @@ def _extract_text_from_epub(file_content: bytes) -> str:
def _extract_text_from_eml(file_content: bytes) -> str:
from unstructured.partition.email import partition_email
try:
with io.BytesIO(file_content) as file:
elements = partition_email(file=file)
@ -305,6 +308,8 @@ def _extract_text_from_eml(file_content: bytes) -> str:
def _extract_text_from_msg(file_content: bytes) -> str:
from unstructured.partition.msg import partition_msg
try:
with io.BytesIO(file_content) as file:
elements = partition_msg(file=file)

View File

@ -50,6 +50,7 @@ class PromptConfig(BaseModel):
class LLMNodeChatModelMessage(ChatModelMessage):
    # Plain-text template body; defaults to the empty string.
    text: str = ""
    # Jinja2 template body, consumed when the message's edition_type is "jinja2".
    jinja2_text: Optional[str] = None

View File

@ -145,8 +145,8 @@ class LLMNode(BaseNode[LLMNodeData]):
query = query_variable.text
prompt_messages, stop = self._fetch_prompt_messages(
user_query=query,
user_files=files,
sys_query=query,
sys_files=files,
context=context,
memory=memory,
model_config=model_config,
@ -545,8 +545,8 @@ class LLMNode(BaseNode[LLMNodeData]):
def _fetch_prompt_messages(
self,
*,
user_query: str | None = None,
user_files: Sequence["File"],
sys_query: str | None = None,
sys_files: Sequence["File"],
context: str | None = None,
memory: TokenBufferMemory | None = None,
model_config: ModelConfigWithCredentialsEntity,
@ -562,7 +562,7 @@ class LLMNode(BaseNode[LLMNodeData]):
if isinstance(prompt_template, list):
# For chat model
prompt_messages.extend(
_handle_list_messages(
self._handle_list_messages(
messages=prompt_template,
context=context,
jinja2_variables=jinja2_variables,
@ -581,14 +581,14 @@ class LLMNode(BaseNode[LLMNodeData]):
prompt_messages.extend(memory_messages)
# Add current query to the prompt messages
if user_query:
if sys_query:
message = LLMNodeChatModelMessage(
text=user_query,
text=sys_query,
role=PromptMessageRole.USER,
edition_type="basic",
)
prompt_messages.extend(
_handle_list_messages(
self._handle_list_messages(
messages=[message],
context="",
jinja2_variables=[],
@ -635,24 +635,27 @@ class LLMNode(BaseNode[LLMNodeData]):
raise ValueError("Invalid prompt content type")
# Add current query to the prompt message
if user_query:
if sys_query:
if prompt_content_type == str:
prompt_content = prompt_messages[0].content.replace("#sys.query#", user_query)
prompt_content = prompt_messages[0].content.replace("#sys.query#", sys_query)
prompt_messages[0].content = prompt_content
elif prompt_content_type == list:
for content_item in prompt_content:
if content_item.type == PromptMessageContentType.TEXT:
content_item.data = user_query + "\n" + content_item.data
content_item.data = sys_query + "\n" + content_item.data
else:
raise ValueError("Invalid prompt content type")
else:
raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))
if vision_enabled and user_files:
# The sys_files will be deprecated later
if vision_enabled and sys_files:
file_prompts = []
for file in user_files:
for file in sys_files:
file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
file_prompts.append(file_prompt)
# If last prompt is a user prompt, add files into its contents,
# otherwise append a new user prompt
if (
len(prompt_messages) > 0
and isinstance(prompt_messages[-1], UserPromptMessage)
@ -662,7 +665,7 @@ class LLMNode(BaseNode[LLMNodeData]):
else:
prompt_messages.append(UserPromptMessage(content=file_prompts))
# Filter prompt messages
# Remove empty messages and filter unsupported content
filtered_prompt_messages = []
for prompt_message in prompt_messages:
if isinstance(prompt_message.content, list):
@ -846,6 +849,58 @@ class LLMNode(BaseNode[LLMNodeData]):
},
}
def _handle_list_messages(
    self,
    *,
    messages: Sequence[LLMNodeChatModelMessage],
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
    vision_detail_config: ImagePromptMessageContent.DETAIL,
) -> Sequence[PromptMessage]:
    """Render template messages into PromptMessage objects.

    For each input message, either renders its Jinja2 template to text, or
    resolves its basic template against the variable pool — expanding file
    segments into multi-modal content parts. One PromptMessage is emitted
    per input message, combining all of its content parts with its role.
    """
    prompt_messages: list[PromptMessage] = []
    for message in messages:
        contents: list[PromptMessageContent] = []
        if message.edition_type == "jinja2":
            # Jinja2 edition: render the template to a single text part.
            result_text = _render_jinja2_message(
                template=message.jinja2_text or "",
                jinjia2_variables=jinja2_variables,
                variable_pool=variable_pool,
            )
            contents.append(TextPromptMessageContent(data=result_text))
        else:
            # Get segment group from basic message
            if context:
                # "{#context#}" is a literal placeholder in the template text.
                template = message.text.replace("{#context#}", context)
            else:
                template = message.text
            segment_group = variable_pool.convert_template(template)

            # Process segments for images
            for segment in segment_group.value:
                if isinstance(segment, ArrayFileSegment):
                    # A list-of-files variable: one content part per supported file.
                    for file in segment.value:
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            contents.append(file_content)
                elif isinstance(segment, FileSegment):
                    # A single-file variable.
                    file = segment.value
                    if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                        file_content = file_manager.to_prompt_message_content(
                            file, image_detail_config=vision_detail_config
                        )
                        contents.append(file_content)
                else:
                    # Any other segment contributes its markdown text, if non-empty.
                    plain_text = segment.markdown.strip()
                    if plain_text:
                        contents.append(TextPromptMessageContent(data=plain_text))

        # Combine all parts of this message under its role.
        prompt_message = _combine_message_content_with_role(contents=contents, role=message.role)
        prompt_messages.append(prompt_message)

    return prompt_messages
def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
match role:
@ -880,68 +935,6 @@ def _render_jinja2_message(
return result_text
def _handle_list_messages(
    *,
    messages: Sequence[LLMNodeChatModelMessage],
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
    vision_detail_config: ImagePromptMessageContent.DETAIL,
) -> Sequence[PromptMessage]:
    """Render template messages into PromptMessage objects.

    Jinja2-edition messages are rendered to a single text message. Basic
    messages are resolved against the variable pool; file segments are
    collected into a separate message, so one input message may produce up
    to two PromptMessages here (text first, then file contents).
    """
    prompt_messages = []
    for message in messages:
        if message.edition_type == "jinja2":
            result_text = _render_jinja2_message(
                template=message.jinja2_text or "",
                jinjia2_variables=jinja2_variables,
                variable_pool=variable_pool,
            )
            prompt_message = _combine_message_content_with_role(
                contents=[TextPromptMessageContent(data=result_text)], role=message.role
            )
            prompt_messages.append(prompt_message)
        else:
            # Get segment group from basic message
            if context:
                # "{#context#}" is a literal placeholder in the template text.
                template = message.text.replace("{#context#}", context)
            else:
                template = message.text
            segment_group = variable_pool.convert_template(template)

            # Process segments for images
            file_contents = []
            for segment in segment_group.value:
                if isinstance(segment, ArrayFileSegment):
                    for file in segment.value:
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)
                # NOTE(review): not `elif` — a segment matching ArrayFileSegment
                # would also be tested here; FileSegment/ArrayFileSegment are
                # presumably disjoint types, so this behaves like elif in practice.
                if isinstance(segment, FileSegment):
                    file = segment.value
                    if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                        file_content = file_manager.to_prompt_message_content(
                            file, image_detail_config=vision_detail_config
                        )
                        file_contents.append(file_content)

            # Create message with text from all segments
            plain_text = segment_group.text
            if plain_text:
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                )
                prompt_messages.append(prompt_message)

            if file_contents:
                # Create message with image contents
                prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                prompt_messages.append(prompt_message)

    return prompt_messages
def _calculate_rest_token(
*, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:

View File

@ -86,10 +86,10 @@ class QuestionClassifierNode(LLMNode):
)
prompt_messages, stop = self._fetch_prompt_messages(
prompt_template=prompt_template,
user_query=query,
sys_query=query,
memory=memory,
model_config=model_config,
user_files=files,
sys_files=files,
vision_enabled=node_data.vision.enabled,
vision_detail=node_data.vision.configs.detail,
variable_pool=variable_pool,

View File

@ -1,18 +1,5 @@
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import MetaData
from dify_app import DifyApp
# Naming convention applied to generated constraint/index names so they are
# deterministic and Postgres-style rather than SQLAlchemy's defaults.
POSTGRES_INDEXES_NAMING_CONVENTION = {
    "ix": "%(column_0_label)s_idx",
    "uq": "%(table_name)s_%(column_0_name)s_key",
    "ck": "%(table_name)s_%(constraint_name)s_check",
    "fk": "%(table_name)s_%(column_0_name)s_fkey",
    "pk": "%(table_name)s_pkey",
}

metadata = MetaData(naming_convention=POSTGRES_INDEXES_NAMING_CONVENTION)
# Shared Flask-SQLAlchemy handle bound to the metadata above.
db = SQLAlchemy(metadata=metadata)
from models import db
def init_app(app: DifyApp):

View File

@ -3,4 +3,3 @@ from dify_app import DifyApp
def init_app(app: DifyApp):
    # Imported purely for side effects: importing these modules registers
    # the event handlers and the model modules with the application.
    from events import event_handlers  # noqa: F401
    from models import account, dataset, model, source, task, tool, tools, web  # noqa: F401

View File

@ -139,6 +139,7 @@ def _build_from_local_file(
remote_url=row.source_url,
related_id=mapping.get("upload_file_id"),
size=row.size,
storage_key=row.key,
)
@ -168,6 +169,7 @@ def _build_from_remote_url(
mime_type=mime_type,
extension=extension,
size=file_size,
storage_key="",
)
@ -220,6 +222,7 @@ def _build_from_tool_file(
extension=extension,
mime_type=tool_file.mimetype,
size=tool_file.size,
storage_key=tool_file.file_key,
)

View File

@ -85,7 +85,7 @@ message_detail_fields = {
}
feedback_stat_fields = {"like": fields.Integer, "dislike": fields.Integer}
status_count_fields = {"success": fields.Integer, "failed": fields.Integer, "partial_success": fields.Integer}
model_config_fields = {
"opening_statement": fields.String,
"suggested_questions": fields.Raw,
@ -166,6 +166,7 @@ conversation_with_summary_fields = {
"message_count": fields.Integer,
"user_feedback_stats": fields.Nested(feedback_stat_fields),
"admin_feedback_stats": fields.Nested(feedback_stat_fields),
"status_count": fields.Nested(status_count_fields),
}
conversation_with_summary_pagination_fields = {

View File

@ -13,7 +13,7 @@ from typing import Any, Optional, Union, cast
from zoneinfo import available_timezones
from flask import Response, stream_with_context
from flask_restful import fields # type: ignore
from flask_restful import fields
from configs import dify_config
from core.app.features.rate_limiting.rate_limit import RateLimitGenerator

View File

@ -0,0 +1,39 @@
"""remove unused tool_providers
Revision ID: 11b07f66c737
Revises: cf8f4fc45278
Create Date: 2024-12-19 17:46:25.780116
"""
from alembic import op
import models as models
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '11b07f66c737'
down_revision = 'cf8f4fc45278'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the legacy `tool_providers` table (unused; see migration docstring).
    op.drop_table('tool_providers')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate `tool_providers` exactly as it existed before the upgrade,
    # including server defaults and the tenant/tool-name unique constraint.
    op.create_table('tool_providers',
    sa.Column('id', sa.UUID(), server_default=sa.text('uuid_generate_v4()'), autoincrement=False, nullable=False),
    sa.Column('tenant_id', sa.UUID(), autoincrement=False, nullable=False),
    sa.Column('tool_name', sa.VARCHAR(length=40), autoincrement=False, nullable=False),
    sa.Column('encrypted_credentials', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column('is_enabled', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False),
    sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), autoincrement=False, nullable=False),
    sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', name='tool_provider_pkey'),
    sa.UniqueConstraint('tenant_id', 'tool_name', name='unique_tool_provider_tool_name')
    )
    # ### end Alembic commands ###

View File

@ -1,8 +1,8 @@
"""add retry_index field to node-execution model
"""add retry_index field to node-execution model
Revision ID: 348cb0a93d53
Revises: cf8f4fc45278
Create Date: 2024-12-16 01:23:13.093432
Revision ID: e1944c35e15e
Revises: 11b07f66c737
Create Date: 2024-12-20 06:28:30.287197
"""
from alembic import op
@ -11,8 +11,8 @@ import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '348cb0a93d53'
down_revision = 'cf8f4fc45278'
revision = 'e1944c35e15e'
down_revision = '11b07f66c737'
branch_labels = None
depends_on = None

View File

@ -1,53 +1,187 @@
from .account import Account, AccountIntegrate, InvitationCode, Tenant
from .dataset import Dataset, DatasetProcessRule, Document, DocumentSegment
from .account import (
Account,
AccountIntegrate,
AccountStatus,
InvitationCode,
Tenant,
TenantAccountJoin,
TenantAccountJoinRole,
TenantAccountRole,
TenantStatus,
)
from .api_based_extension import APIBasedExtension, APIBasedExtensionPoint
from .dataset import (
AppDatasetJoin,
Dataset,
DatasetCollectionBinding,
DatasetKeywordTable,
DatasetPermission,
DatasetPermissionEnum,
DatasetProcessRule,
DatasetQuery,
Document,
DocumentSegment,
Embedding,
ExternalKnowledgeApis,
ExternalKnowledgeBindings,
TidbAuthBinding,
Whitelist,
)
from .engine import db
from .enums import CreatedByRole, UserFrom, WorkflowRunTriggeredFrom
from .model import (
ApiRequest,
ApiToken,
App,
AppAnnotationHitHistory,
AppAnnotationSetting,
AppMode,
AppModelConfig,
Conversation,
DatasetRetrieverResource,
DifySetup,
EndUser,
IconType,
InstalledApp,
Message,
MessageAgentThought,
MessageAnnotation,
MessageChain,
MessageFeedback,
MessageFile,
OperationLog,
RecommendedApp,
Site,
Tag,
TagBinding,
TraceAppConfig,
UploadFile,
)
from .source import DataSourceOauthBinding
from .tools import ToolFile
from .provider import (
LoadBalancingModelConfig,
Provider,
ProviderModel,
ProviderModelSetting,
ProviderOrder,
ProviderQuotaType,
ProviderType,
TenantDefaultModel,
TenantPreferredModelProvider,
)
from .source import DataSourceApiKeyAuthBinding, DataSourceOauthBinding
from .task import CeleryTask, CeleryTaskSet
from .tools import (
ApiToolProvider,
BuiltinToolProvider,
PublishedAppTool,
ToolConversationVariables,
ToolFile,
ToolLabelBinding,
ToolModelInvoke,
WorkflowToolProvider,
)
from .web import PinnedConversation, SavedMessage
from .workflow import (
ConversationVariable,
Workflow,
WorkflowAppLog,
WorkflowAppLogCreatedFrom,
WorkflowNodeExecution,
WorkflowNodeExecutionStatus,
WorkflowNodeExecutionTriggeredFrom,
WorkflowRun,
WorkflowRunStatus,
WorkflowType,
)
__all__ = [
"APIBasedExtension",
"APIBasedExtensionPoint",
"Account",
"AccountIntegrate",
"AccountStatus",
"ApiRequest",
"ApiToken",
"ApiToolProvider", # Added
"App",
"AppAnnotationHitHistory",
"AppAnnotationSetting",
"AppDatasetJoin",
"AppMode",
"AppModelConfig",
"BuiltinToolProvider", # Added
"CeleryTask",
"CeleryTaskSet",
"Conversation",
"ConversationVariable",
"CreatedByRole",
"DataSourceApiKeyAuthBinding",
"DataSourceOauthBinding",
"Dataset",
"DatasetCollectionBinding",
"DatasetKeywordTable",
"DatasetPermission",
"DatasetPermissionEnum",
"DatasetProcessRule",
"DatasetQuery",
"DatasetRetrieverResource",
"DifySetup",
"Document",
"DocumentSegment",
"Embedding",
"EndUser",
"ExternalKnowledgeApis",
"ExternalKnowledgeBindings",
"IconType",
"InstalledApp",
"InvitationCode",
"LoadBalancingModelConfig",
"Message",
"MessageAgentThought",
"MessageAnnotation",
"MessageChain",
"MessageFeedback",
"MessageFile",
"OperationLog",
"PinnedConversation",
"Provider",
"ProviderModel",
"ProviderModelSetting",
"ProviderOrder",
"ProviderQuotaType",
"ProviderType",
"PublishedAppTool",
"RecommendedApp",
"SavedMessage",
"Site",
"Tag",
"TagBinding",
"Tenant",
"TenantAccountJoin",
"TenantAccountJoinRole",
"TenantAccountRole",
"TenantDefaultModel",
"TenantPreferredModelProvider",
"TenantStatus",
"TidbAuthBinding",
"ToolConversationVariables",
"ToolFile",
"ToolLabelBinding",
"ToolModelInvoke",
"TraceAppConfig",
"UploadFile",
"UserFrom",
"Whitelist",
"Workflow",
"WorkflowAppLog",
"WorkflowAppLogCreatedFrom",
"WorkflowNodeExecution",
"WorkflowNodeExecutionStatus",
"WorkflowNodeExecutionTriggeredFrom",
"WorkflowRun",
"WorkflowRunStatus",
"WorkflowRunTriggeredFrom",
"WorkflowToolProvider",
"WorkflowType",
"db",
]

View File

@ -3,8 +3,7 @@ import json
from flask_login import UserMixin
from extensions.ext_database import db
from .engine import db
from .types import StringUUID

View File

@ -1,7 +1,6 @@
import enum
from extensions.ext_database import db
from .engine import db
from .types import StringUUID

View File

@ -15,10 +15,10 @@ from sqlalchemy.dialects.postgresql import JSONB
from configs import dify_config
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from extensions.ext_storage import storage
from .account import Account
from .engine import db
from .model import App, Tag, TagBinding, UploadFile
from .types import StringUUID

13
api/models/engine.py Normal file
View File

@ -0,0 +1,13 @@
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import MetaData
# Explicit naming convention for PostgreSQL indexes and constraints.
# Without this, the backend invents names, which makes Alembic autogenerate
# non-deterministic and constraints impossible to address by name in
# later migrations.
POSTGRES_INDEXES_NAMING_CONVENTION = {
# index
"ix": "%(column_0_label)s_idx",
# unique constraint
"uq": "%(table_name)s_%(column_0_name)s_key",
# check constraint
"ck": "%(table_name)s_%(constraint_name)s_check",
# foreign key constraint
"fk": "%(table_name)s_%(column_0_name)s_fkey",
# primary key constraint
"pk": "%(table_name)s_pkey",
}
metadata = MetaData(naming_convention=POSTGRES_INDEXES_NAMING_CONVENTION)
# Shared SQLAlchemy handle; model modules import `db` from here to avoid
# circular imports with the Flask extension setup.
db = SQLAlchemy(metadata=metadata)

View File

@ -16,11 +16,12 @@ from configs import dify_config
from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod, FileType
from core.file import helpers as file_helpers
from core.file.tool_file_parser import ToolFileParser
from extensions.ext_database import db
from libs.helper import generate_string
from models.enums import CreatedByRole
from models.workflow import WorkflowRunStatus
from .account import Account, Tenant
from .engine import db
from .types import StringUUID
@ -560,13 +561,29 @@ class Conversation(db.Model):
@property
def inputs(self):
inputs = self._inputs.copy()
# Convert file mapping to File object
for key, value in inputs.items():
# NOTE: It's not the best way to implement this, but it's the only way to avoid circular import for now.
from factories import file_factory
if isinstance(value, dict) and value.get("dify_model_identity") == FILE_MODEL_IDENTITY:
inputs[key] = File.model_validate(value)
if value["transfer_method"] == FileTransferMethod.TOOL_FILE:
value["tool_file_id"] = value["related_id"]
elif value["transfer_method"] == FileTransferMethod.LOCAL_FILE:
value["upload_file_id"] = value["related_id"]
inputs[key] = file_factory.build_from_mapping(mapping=value, tenant_id=value["tenant_id"])
elif isinstance(value, list) and all(
isinstance(item, dict) and item.get("dify_model_identity") == FILE_MODEL_IDENTITY for item in value
):
inputs[key] = [File.model_validate(item) for item in value]
inputs[key] = []
for item in value:
if item["transfer_method"] == FileTransferMethod.TOOL_FILE:
item["tool_file_id"] = item["related_id"]
elif item["transfer_method"] == FileTransferMethod.LOCAL_FILE:
item["upload_file_id"] = item["related_id"]
inputs[key].append(file_factory.build_from_mapping(mapping=item, tenant_id=item["tenant_id"]))
return inputs
@inputs.setter
@ -679,6 +696,29 @@ class Conversation(db.Model):
return {"like": like, "dislike": dislike}
@property
def status_count(self):
    """Aggregate workflow-run outcomes over this conversation's messages.

    Returns a dict with ``success``/``failed``/``partial_success`` counts,
    or ``None`` when the conversation has no messages at all.
    """
    messages = db.session.query(Message).filter(Message.conversation_id == self.id).all()
    status_counts = {
        WorkflowRunStatus.SUCCEEDED: 0,
        WorkflowRunStatus.FAILED: 0,
        WorkflowRunStatus.PARTIAL_SUCCESSED: 0,
    }
    for message in messages:
        if message.workflow_run:
            # Guard the lookup: a run's status may also be a non-terminal
            # value (running/stopped per the WorkflowRun.status column),
            # which an unguarded `+= 1` would KeyError on.
            status = message.workflow_run.status
            if status in status_counts:
                status_counts[status] += 1
    return (
        {
            "success": status_counts[WorkflowRunStatus.SUCCEEDED],
            "failed": status_counts[WorkflowRunStatus.FAILED],
            "partial_success": status_counts[WorkflowRunStatus.PARTIAL_SUCCESSED],
        }
        if messages
        else None
    )
@property
def first_message(self):
return db.session.query(Message).filter(Message.conversation_id == self.id).first()
@ -758,12 +798,25 @@ class Message(db.Model):
def inputs(self):
inputs = self._inputs.copy()
for key, value in inputs.items():
# NOTE: It's not the best way to implement this, but it's the only way to avoid circular import for now.
from factories import file_factory
if isinstance(value, dict) and value.get("dify_model_identity") == FILE_MODEL_IDENTITY:
inputs[key] = File.model_validate(value)
if value["transfer_method"] == FileTransferMethod.TOOL_FILE:
value["tool_file_id"] = value["related_id"]
elif value["transfer_method"] == FileTransferMethod.LOCAL_FILE:
value["upload_file_id"] = value["related_id"]
inputs[key] = file_factory.build_from_mapping(mapping=value, tenant_id=value["tenant_id"])
elif isinstance(value, list) and all(
isinstance(item, dict) and item.get("dify_model_identity") == FILE_MODEL_IDENTITY for item in value
):
inputs[key] = [File.model_validate(item) for item in value]
inputs[key] = []
for item in value:
if item["transfer_method"] == FileTransferMethod.TOOL_FILE:
item["tool_file_id"] = item["related_id"]
elif item["transfer_method"] == FileTransferMethod.LOCAL_FILE:
item["upload_file_id"] = item["related_id"]
inputs[key].append(file_factory.build_from_mapping(mapping=item, tenant_id=item["tenant_id"]))
return inputs
@inputs.setter

View File

@ -1,7 +1,6 @@
from enum import Enum
from extensions.ext_database import db
from .engine import db
from .types import StringUUID

View File

@ -2,8 +2,7 @@ import json
from sqlalchemy.dialects.postgresql import JSONB
from extensions.ext_database import db
from .engine import db
from .types import StringUUID

View File

@ -2,7 +2,7 @@ from datetime import UTC, datetime
from celery import states
from extensions.ext_database import db
from .engine import db
class CeleryTask(db.Model):

View File

@ -1,47 +0,0 @@
import json
from enum import Enum
from extensions.ext_database import db
from .types import StringUUID
class ToolProviderName(Enum):
    """Identifiers of the built-in tool providers."""

    SERPAPI = "serpapi"

    @staticmethod
    def value_of(value):
        """Return the member whose value equals *value*.

        Raises ValueError when no member matches.
        """
        found = next((member for member in ToolProviderName if member.value == value), None)
        if found is None:
            raise ValueError(f"No matching enum found for value '{value}'")
        return found
class ToolProvider(db.Model):
    """Per-tenant credential record for a configured tool provider."""

    __tablename__ = "tool_providers"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="tool_provider_pkey"),
        # One credential row per (tenant, tool) pair.
        db.UniqueConstraint("tenant_id", "tool_name", name="unique_tool_provider_tool_name"),
    )

    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"))
    tenant_id = db.Column(StringUUID, nullable=False)
    tool_name = db.Column(db.String(40), nullable=False)
    # JSON document stored encrypted; NULL until the tenant configures the tool.
    encrypted_credentials = db.Column(db.Text, nullable=True)
    is_enabled = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))

    @property
    def credentials_is_set(self):
        """True when credentials have been stored for this provider."""
        return self.encrypted_credentials is not None

    @property
    def credentials(self):
        """JSON-decoded credentials, or None when none are stored."""
        if self.encrypted_credentials is None:
            return None
        return json.loads(self.encrypted_credentials)

View File

@ -8,8 +8,8 @@ from sqlalchemy.orm import Mapped, mapped_column
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_bundle import ApiToolBundle
from core.tools.entities.tool_entities import ApiProviderSchemaType, WorkflowToolParameterConfiguration
from extensions.ext_database import db
from .engine import db
from .model import Account, App, Tenant
from .types import StringUUID
@ -82,7 +82,7 @@ class PublishedAppTool(db.Model):
return I18nObject(**json.loads(self.description))
@property
def app(self) -> App:
def app(self):
return db.session.query(App).filter(App.id == self.app_id).first()
@ -201,10 +201,6 @@ class WorkflowToolProvider(db.Model):
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
@property
def schema_type(self) -> ApiProviderSchemaType:
return ApiProviderSchemaType.value_of(self.schema_type_str)
@property
def user(self) -> Account | None:
return db.session.query(Account).filter(Account.id == self.user_id).first()

View File

@ -1,5 +1,4 @@
from extensions.ext_database import db
from .engine import db
from .model import Message
from .types import StringUUID

View File

@ -12,12 +12,12 @@ import contexts
from constants import HIDDEN_VALUE
from core.helper import encrypter
from core.variables import SecretVariable, Variable
from extensions.ext_database import db
from factories import variable_factory
from libs import helper
from models.enums import CreatedByRole
from .account import Account
from .engine import db
from .types import StringUUID
@ -399,7 +399,7 @@ class WorkflowRun(db.Model):
graph = db.Column(db.Text)
inputs = db.Column(db.Text)
status = db.Column(db.String(255), nullable=False) # running, succeeded, failed, stopped, partial-succeeded
outputs: Mapped[str] = mapped_column(sa.Text, default="{}")
outputs: Mapped[Optional[str]] = mapped_column(sa.Text, default="{}")
error = db.Column(db.Text)
elapsed_time = db.Column(db.Float, nullable=False, server_default=db.text("0"))
total_tokens = db.Column(db.Integer, nullable=False, server_default=db.text("0"))

View File

@ -59,6 +59,8 @@ def test_dify_config(example_env_file):
# annotated field with configured value
assert config.HTTP_REQUEST_MAX_WRITE_TIMEOUT == 30
assert config.WORKFLOW_PARALLEL_DEPTH_LIMIT == 3
# NOTE: If there is a `.env` file in your Workspace, this test might not succeed as expected.
# This is due to `pymilvus` loading all the variables from the `.env` file into `os.environ`.

View File

@ -136,6 +136,7 @@ def test__get_chat_model_prompt_messages_with_files_no_memory(get_chat_model_arg
type=FileType.IMAGE,
transfer_method=FileTransferMethod.REMOTE_URL,
remote_url="https://example.com/image1.jpg",
storage_key="",
)
]

View File

@ -1,34 +1,9 @@
import json
from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod, FileType, FileUploadConfig
from core.file import File, FileTransferMethod, FileType, FileUploadConfig
from models.workflow import Workflow
def test_file_loads_and_dumps():
file = File(
id="file1",
tenant_id="tenant1",
type=FileType.IMAGE,
transfer_method=FileTransferMethod.REMOTE_URL,
remote_url="https://example.com/image1.jpg",
)
file_dict = file.model_dump()
assert file_dict["dify_model_identity"] == FILE_MODEL_IDENTITY
assert file_dict["type"] == file.type.value
assert isinstance(file_dict["type"], str)
assert file_dict["transfer_method"] == file.transfer_method.value
assert isinstance(file_dict["transfer_method"], str)
assert "_extra_config" not in file_dict
file_obj = File.model_validate(file_dict)
assert file_obj.id == file.id
assert file_obj.tenant_id == file.tenant_id
assert file_obj.type == file.type
assert file_obj.transfer_method == file.transfer_method
assert file_obj.remote_url == file.remote_url
def test_file_to_dict():
file = File(
id="file1",
@ -36,10 +11,11 @@ def test_file_to_dict():
type=FileType.IMAGE,
transfer_method=FileTransferMethod.REMOTE_URL,
remote_url="https://example.com/image1.jpg",
storage_key="storage_key",
)
file_dict = file.to_dict()
assert "_extra_config" not in file_dict
assert "_storage_key" not in file_dict
assert "url" in file_dict

View File

@ -51,6 +51,7 @@ def test_http_request_node_binary_file(monkeypatch):
type=FileType.IMAGE,
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="1111",
storage_key="",
),
),
)
@ -138,6 +139,7 @@ def test_http_request_node_form_with_file(monkeypatch):
type=FileType.IMAGE,
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="1111",
storage_key="",
),
),
)

View File

@ -21,7 +21,8 @@ from core.model_runtime.entities.message_entities import (
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelFeature, ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment
from core.variables import ArrayAnySegment, ArrayFileSegment, NoneSegment, StringSegment
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.graph_engine import Graph, GraphInitParams, GraphRuntimeState
from core.workflow.nodes.answer import AnswerStreamGenerateRoute
@ -157,6 +158,7 @@ def test_fetch_files_with_file_segment(llm_node):
filename="test.jpg",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="1",
storage_key="",
)
llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], file)
@ -173,6 +175,7 @@ def test_fetch_files_with_array_file_segment(llm_node):
filename="test1.jpg",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="1",
storage_key="",
),
File(
id="2",
@ -181,6 +184,7 @@ def test_fetch_files_with_array_file_segment(llm_node):
filename="test2.jpg",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="2",
storage_key="",
),
]
llm_node.graph_runtime_state.variable_pool.add(["sys", "files"], ArrayFileSegment(value=files))
@ -224,14 +228,15 @@ def test_fetch_prompt_messages__vison_disabled(faker, llm_node, model_config):
filename="test1.jpg",
transfer_method=FileTransferMethod.REMOTE_URL,
remote_url=fake_remote_url,
storage_key="",
)
]
fake_query = faker.sentence()
prompt_messages, _ = llm_node._fetch_prompt_messages(
user_query=fake_query,
user_files=files,
sys_query=fake_query,
sys_files=files,
context=None,
memory=None,
model_config=model_config,
@ -283,8 +288,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
test_scenarios = [
LLMNodeTestScenario(
description="No files",
user_query=fake_query,
user_files=[],
sys_query=fake_query,
sys_files=[],
features=[],
vision_enabled=False,
vision_detail=None,
@ -318,8 +323,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
),
LLMNodeTestScenario(
description="User files",
user_query=fake_query,
user_files=[
sys_query=fake_query,
sys_files=[
File(
tenant_id="test",
type=FileType.IMAGE,
@ -328,6 +333,7 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
remote_url=fake_remote_url,
extension=".jpg",
mime_type="image/jpg",
storage_key="",
)
],
vision_enabled=True,
@ -370,8 +376,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
),
LLMNodeTestScenario(
description="Prompt template with variable selector of File",
user_query=fake_query,
user_files=[],
sys_query=fake_query,
sys_files=[],
vision_enabled=False,
vision_detail=fake_vision_detail,
features=[ModelFeature.VISION],
@ -403,6 +409,7 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
remote_url=fake_remote_url,
extension=".jpg",
mime_type="image/jpg",
storage_key="",
)
},
),
@ -417,8 +424,8 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
# Call the method under test
prompt_messages, _ = llm_node._fetch_prompt_messages(
user_query=scenario.user_query,
user_files=scenario.user_files,
sys_query=scenario.sys_query,
sys_files=scenario.sys_files,
context=fake_context,
memory=memory,
model_config=model_config,
@ -435,3 +442,29 @@ def test_fetch_prompt_messages__basic(faker, llm_node, model_config):
assert (
prompt_messages == scenario.expected_messages
), f"Message content mismatch in scenario: {scenario.description}"
def test_handle_list_messages_basic(llm_node):
    """A basic-edition user message has {#context#} substituted and is
    rendered as a single UserPromptMessage with text content."""
    prompt_template = [
        LLMNodeChatModelMessage(
            text="Hello, {#context#}",
            role=PromptMessageRole.USER,
            edition_type="basic",
        )
    ]

    rendered = llm_node._handle_list_messages(
        messages=prompt_template,
        context="world",
        jinja2_variables=[],
        variable_pool=llm_node.graph_runtime_state.variable_pool,
        vision_detail_config=ImagePromptMessageContent.DETAIL.HIGH,
    )

    assert len(rendered) == 1
    assert isinstance(rendered[0], UserPromptMessage)
    assert rendered[0].content == [TextPromptMessageContent(data="Hello, world")]

View File

@ -12,8 +12,8 @@ class LLMNodeTestScenario(BaseModel):
"""Test scenario for LLM node testing."""
description: str = Field(..., description="Description of the test scenario")
user_query: str = Field(..., description="User query input")
user_files: Sequence[File] = Field(default_factory=list, description="List of user files")
sys_query: str = Field(..., description="User query input")
sys_files: Sequence[File] = Field(default_factory=list, description="List of user files")
vision_enabled: bool = Field(default=False, description="Whether vision is enabled")
vision_detail: str | None = Field(None, description="Vision detail level if vision is enabled")
features: Sequence[ModelFeature] = Field(default_factory=list, description="List of model features")

View File

@ -248,6 +248,7 @@ def test_array_file_contains_file_name():
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="1",
filename="ab",
storage_key="",
),
],
)

View File

@ -57,6 +57,7 @@ def test_filter_files_by_type(list_operator_node):
tenant_id="tenant1",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="related1",
storage_key="",
),
File(
filename="document1.pdf",
@ -64,6 +65,7 @@ def test_filter_files_by_type(list_operator_node):
tenant_id="tenant1",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="related2",
storage_key="",
),
File(
filename="image2.png",
@ -71,6 +73,7 @@ def test_filter_files_by_type(list_operator_node):
tenant_id="tenant1",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="related3",
storage_key="",
),
File(
filename="audio1.mp3",
@ -78,6 +81,7 @@ def test_filter_files_by_type(list_operator_node):
tenant_id="tenant1",
transfer_method=FileTransferMethod.LOCAL_FILE,
related_id="related4",
storage_key="",
),
]
variable = ArrayFileSegment(value=files)
@ -130,6 +134,7 @@ def test_get_file_extract_string_func():
mime_type="text/plain",
remote_url="https://example.com/test_file.txt",
related_id="test_related_id",
storage_key="",
)
# Test each case
@ -150,6 +155,7 @@ def test_get_file_extract_string_func():
mime_type=None,
remote_url=None,
related_id="test_related_id",
storage_key="",
)
assert _get_file_extract_string_func(key="name")(empty_file) == ""

View File

@ -19,6 +19,7 @@ def file():
related_id="test_related_id",
remote_url="test_url",
filename="test_file.txt",
storage_key="",
)

View File

@ -699,6 +699,7 @@ WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
WORKFLOW_FILE_UPLOAD_LIMIT=10
# HTTP request node in workflow configuration
@ -921,7 +922,3 @@ CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# Proxy
HTTP_PROXY=
HTTPS_PROXY=

View File

@ -18,7 +18,6 @@ x-shared-env: &shared-api-worker-env
LOG_DATEFORMAT: ${LOG_DATEFORMAT:-"%Y-%m-%d %H:%M:%S"}
LOG_TZ: ${LOG_TZ:-UTC}
DEBUG: ${DEBUG:-false}
SENTRY_DSN: ${SENTRY_DSN:-}
FLASK_DEBUG: ${FLASK_DEBUG:-false}
SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
INIT_PASSWORD: ${INIT_PASSWORD:-}
@ -260,6 +259,7 @@ x-shared-env: &shared-api-worker-env
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
SENTRY_DSN: ${SENTRY_DSN:-}
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
@ -299,6 +299,7 @@ x-shared-env: &shared-api-worker-env
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
@ -385,8 +386,6 @@ x-shared-env: &shared-api-worker-env
CSP_WHITELIST: ${CSP_WHITELIST:-}
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
HTTP_PROXY: ${HTTP_PROXY:-}
HTTPS_PROXY: ${HTTPS_PROXY:-}
services:
# API service

View File

@ -16,6 +16,7 @@ import { createContext, useContext } from 'use-context-selector'
import { useShallow } from 'zustand/react/shallow'
import { useTranslation } from 'react-i18next'
import type { ChatItemInTree } from '../../base/chat/types'
import Indicator from '../../header/indicator'
import VarPanel from './var-panel'
import type { FeedbackFunc, FeedbackType, IChatItem, SubmitAnnotationFunc } from '@/app/components/base/chat/chat/type'
import type { Annotation, ChatConversationGeneralDetail, ChatConversationsResponse, ChatMessage, ChatMessagesRequest, CompletionConversationGeneralDetail, CompletionConversationsResponse, LogAnnotation } from '@/models/log'
@ -57,6 +58,12 @@ type IDrawerContext = {
appDetail?: App
}
// Workflow-run outcome counts attached to a conversation log row
// (`status_count` on the chat-conversations API response).
type StatusCount = {
success: number
failed: number
partial_success: number
}
const DrawerContext = createContext<IDrawerContext>({} as IDrawerContext)
/**
@ -71,6 +78,33 @@ const HandThumbIconWithCount: FC<{ count: number; iconType: 'up' | 'down' }> = (
</div>
}
// Render the status cell for a conversation row: red with a failure count
// when any run failed, otherwise green with Success / Partial Success.
const statusTdRender = (statusCount: StatusCount) => {
  const { failed, partial_success } = statusCount
  if (failed > 0) {
    return (
      <div className='inline-flex items-center gap-1 system-xs-semibold-uppercase'>
        <Indicator color={'red'} />
        <span className='text-util-colors-red-red-600'>{failed} {`${failed > 1 ? 'Failures' : 'Failure'}`}</span>
      </div>
    )
  }
  const label = partial_success === 0 ? 'Success' : 'Partial Success'
  return (
    <div className='inline-flex items-center gap-1 system-xs-semibold-uppercase'>
      <Indicator color={'green'} />
      <span className='text-util-colors-green-green-600'>{label}</span>
    </div>
  )
}
const getFormattedChatList = (messages: ChatMessage[], conversationId: string, timezone: string, format: string) => {
const newChatList: IChatItem[] = []
messages.forEach((item: ChatMessage) => {
@ -496,8 +530,8 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
}
/**
* Text App Conversation Detail Component
*/
* Text App Conversation Detail Component
*/
const CompletionConversationDetailComp: FC<{ appId?: string; conversationId?: string }> = ({ appId, conversationId }) => {
// Text Generator App Session Details Including Message List
const detailParams = ({ url: `/apps/${appId}/completion-conversations/${conversationId}` })
@ -542,8 +576,8 @@ const CompletionConversationDetailComp: FC<{ appId?: string; conversationId?: st
}
/**
* Chat App Conversation Detail Component
*/
* Chat App Conversation Detail Component
*/
const ChatConversationDetailComp: FC<{ appId?: string; conversationId?: string }> = ({ appId, conversationId }) => {
const detailParams = { url: `/apps/${appId}/chat-conversations/${conversationId}` }
const { data: conversationDetail } = useSWR(() => (appId && conversationId) ? detailParams : null, fetchChatConversationDetail)
@ -585,8 +619,8 @@ const ChatConversationDetailComp: FC<{ appId?: string; conversationId?: string }
}
/**
* Conversation list component including basic information
*/
* Conversation list component including basic information
*/
const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh }) => {
const { t } = useTranslation()
const { formatTime } = useTimestamp()
@ -597,6 +631,7 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
const [showDrawer, setShowDrawer] = useState<boolean>(false) // Whether to display the chat details drawer
const [currentConversation, setCurrentConversation] = useState<ChatConversationGeneralDetail | CompletionConversationGeneralDetail | undefined>() // Currently selected conversation
const isChatMode = appDetail.mode !== 'completion' // Whether the app is a chat app
const isChatflow = appDetail.mode === 'advanced-chat' // Whether the app is a chatflow app
const { setShowPromptLogModal, setShowAgentLogModal } = useAppStore(useShallow(state => ({
setShowPromptLogModal: state.setShowPromptLogModal,
setShowAgentLogModal: state.setShowAgentLogModal,
@ -639,6 +674,7 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
<td className='pl-2 pr-1 w-5 rounded-l-lg bg-background-section-burn whitespace-nowrap'></td>
<td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{isChatMode ? t('appLog.table.header.summary') : t('appLog.table.header.input')}</td>
<td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{t('appLog.table.header.endUser')}</td>
{isChatflow && <td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{t('appLog.table.header.status')}</td>}
<td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{isChatMode ? t('appLog.table.header.messageCount') : t('appLog.table.header.output')}</td>
<td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{t('appLog.table.header.userRate')}</td>
<td className='pl-3 py-1.5 bg-background-section-burn whitespace-nowrap'>{t('appLog.table.header.adminRate')}</td>
@ -669,6 +705,9 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
{renderTdValue(leftValue || t('appLog.table.empty.noChat'), !leftValue, isChatMode && log.annotated)}
</td>
<td className='p-3 pr-2'>{renderTdValue(endUser || defaultValue, !endUser)}</td>
{isChatflow && <td className='p-3 pr-2 w-[160px]' style={{ maxWidth: isChatMode ? 300 : 200 }}>
{statusTdRender(log.status_count)}
</td>}
<td className='p-3 pr-2' style={{ maxWidth: isChatMode ? 100 : 200 }}>
{renderTdValue(rightValue === 0 ? 0 : (rightValue || t('appLog.table.empty.noOutput')), !rightValue, !isChatMode && !!log.annotation?.content, log.annotation)}
</td>

View File

@ -63,6 +63,14 @@ const WorkflowAppLogList: FC<ILogs> = ({ logs, appDetail, onRefresh }) => {
</div>
)
}
if (status === 'partial-succeeded') {
return (
<div className='inline-flex items-center gap-1 system-xs-semibold-uppercase'>
<Indicator color={'green'} />
<span className='text-util-colors-green-green-600'>Partial Success</span>
</div>
)
}
}
const onCloseDrawer = () => {

View File

@ -64,6 +64,12 @@ const WorkflowProcessItem = ({
setShowMessageLogModal(true)
}, [item, setCurrentLogItem, setCurrentLogModalActiveTab, setShowMessageLogModal])
const showRetryDetail = useCallback(() => {
setCurrentLogItem(item)
setCurrentLogModalActiveTab('TRACING')
setShowMessageLogModal(true)
}, [item, setCurrentLogItem, setCurrentLogModalActiveTab, setShowMessageLogModal])
return (
<div
className={cn(
@ -105,6 +111,7 @@ const WorkflowProcessItem = ({
<TracingPanel
list={data.tracing}
onShowIterationDetail={showIterationDetail}
onShowRetryDetail={showRetryDetail}
hideNodeInfo={hideInfo}
hideNodeProcessDetail={hideProcessDetail}
/>

View File

@ -28,6 +28,7 @@ export type InputProps = {
destructive?: boolean
wrapperClassName?: string
styleCss?: CSSProperties
unit?: string
} & React.InputHTMLAttributes<HTMLInputElement> & VariantProps<typeof inputVariants>
const Input = ({
@ -43,6 +44,7 @@ const Input = ({
value,
placeholder,
onChange,
unit,
...props
}: InputProps) => {
const { t } = useTranslation()
@ -80,6 +82,13 @@ const Input = ({
{destructive && (
<RiErrorWarningLine className='absolute right-2 top-1/2 -translate-y-1/2 w-4 h-4 text-text-destructive-secondary' />
)}
{
unit && (
<div className='absolute right-2 top-1/2 -translate-y-1/2 system-sm-regular text-text-tertiary'>
{unit}
</div>
)
}
</div>
)
}

View File

@ -506,3 +506,5 @@ export const WORKFLOW_DATA_UPDATE = 'WORKFLOW_DATA_UPDATE'
export const CUSTOM_NODE = 'custom'
export const CUSTOM_EDGE = 'custom'
export const DSL_EXPORT_CHECK = 'DSL_EXPORT_CHECK'
export const DEFAULT_RETRY_MAX = 3
export const DEFAULT_RETRY_INTERVAL = 100

View File

@ -28,6 +28,7 @@ import {
getFilesInLogs,
} from '@/app/components/base/file-uploader/utils'
import { ErrorHandleTypeEnum } from '@/app/components/workflow/nodes/_base/components/error-handle/types'
import type { NodeTracing } from '@/types/workflow'
export const useWorkflowRun = () => {
const store = useStoreApi()
@ -114,6 +115,7 @@ export const useWorkflowRun = () => {
onIterationStart,
onIterationNext,
onIterationFinish,
onNodeRetry,
onError,
...restCallback
} = callback || {}
@ -440,10 +442,13 @@ export const useWorkflowRun = () => {
})
if (currentIndex > -1 && draft.tracing) {
draft.tracing[currentIndex] = {
...data,
...(draft.tracing[currentIndex].extras
? { extras: draft.tracing[currentIndex].extras }
: {}),
...data,
...(draft.tracing[currentIndex].retryDetail
? { retryDetail: draft.tracing[currentIndex].retryDetail }
: {}),
} as any
}
}))
@ -616,6 +621,41 @@ export const useWorkflowRun = () => {
if (onIterationFinish)
onIterationFinish(params)
},
onNodeRetry: (params) => {
const { data } = params
const {
workflowRunningData,
setWorkflowRunningData,
} = workflowStore.getState()
const {
getNodes,
setNodes,
} = store.getState()
const nodes = getNodes()
setWorkflowRunningData(produce(workflowRunningData!, (draft) => {
const tracing = draft.tracing!
const currentRetryNodeIndex = tracing.findIndex(trace => trace.node_id === data.node_id)
if (currentRetryNodeIndex > -1) {
const currentRetryNode = tracing[currentRetryNodeIndex]
if (currentRetryNode.retryDetail)
draft.tracing![currentRetryNodeIndex].retryDetail!.push(data as NodeTracing)
else
draft.tracing![currentRetryNodeIndex].retryDetail = [data as NodeTracing]
}
}))
const newNodes = produce(nodes, (draft) => {
const currentNode = draft.find(node => node.id === data.node_id)!
currentNode.data._retryIndex = data.retry_index
})
setNodes(newNodes)
if (onNodeRetry)
onNodeRetry(params)
},
onParallelBranchStarted: (params) => {
// console.log(params, 'parallel start')
},

View File

@ -57,6 +57,7 @@ import {
import I18n from '@/context/i18n'
import { CollectionType } from '@/app/components/tools/types'
import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants'
import { useWorkflowConfig } from '@/service/use-workflow'
export const useIsChatMode = () => {
const appDetail = useAppStore(s => s.appDetail)
@ -69,7 +70,9 @@ export const useWorkflow = () => {
const { locale } = useContext(I18n)
const store = useStoreApi()
const workflowStore = useWorkflowStore()
const appId = useStore(s => s.appId)
const nodesExtraData = useNodesExtraData()
const { data: workflowConfig } = useWorkflowConfig(appId)
const setPanelWidth = useCallback((width: number) => {
localStorage.setItem('workflow-node-panel-width', `${width}`)
workflowStore.setState({ panelWidth: width })
@ -336,15 +339,15 @@ export const useWorkflow = () => {
for (let i = 0; i < parallelList.length; i++) {
const parallel = parallelList[i]
if (parallel.depth > PARALLEL_DEPTH_LIMIT) {
if (parallel.depth > (workflowConfig?.parallel_depth_limit || PARALLEL_DEPTH_LIMIT)) {
const { setShowTips } = workflowStore.getState()
setShowTips(t('workflow.common.parallelTip.depthLimit', { num: PARALLEL_DEPTH_LIMIT }))
setShowTips(t('workflow.common.parallelTip.depthLimit', { num: (workflowConfig?.parallel_depth_limit || PARALLEL_DEPTH_LIMIT) }))
return false
}
}
return true
}, [t, workflowStore])
}, [t, workflowStore, workflowConfig?.parallel_depth_limit])
const isValidConnection = useCallback(({ source, sourceHandle, target }: Connection) => {
const {

View File

@ -17,17 +17,25 @@ import ResultPanel from '@/app/components/workflow/run/result-panel'
import Toast from '@/app/components/base/toast'
import { TransferMethod } from '@/types/app'
import { getProcessedFiles } from '@/app/components/base/file-uploader/utils'
import type { NodeTracing } from '@/types/workflow'
import RetryResultPanel from '@/app/components/workflow/run/retry-result-panel'
import type { BlockEnum } from '@/app/components/workflow/types'
import type { Emoji } from '@/app/components/tools/types'
const i18nPrefix = 'workflow.singleRun'
type BeforeRunFormProps = {
nodeName: string
nodeType?: BlockEnum
toolIcon?: string | Emoji
onHide: () => void
onRun: (submitData: Record<string, any>) => void
onStop: () => void
runningStatus: NodeRunningStatus
result?: JSX.Element
forms: FormProps[]
retryDetails?: NodeTracing[]
onRetryDetailBack?: any
}
function formatValue(value: string | any, type: InputVarType) {
@ -50,12 +58,16 @@ function formatValue(value: string | any, type: InputVarType) {
}
const BeforeRunForm: FC<BeforeRunFormProps> = ({
nodeName,
nodeType,
toolIcon,
onHide,
onRun,
onStop,
runningStatus,
result,
forms,
retryDetails,
onRetryDetailBack = () => { },
}) => {
const { t } = useTranslation()
@ -122,48 +134,69 @@ const BeforeRunForm: FC<BeforeRunFormProps> = ({
<div className='text-base font-semibold text-gray-900 truncate'>
{t(`${i18nPrefix}.testRun`)} {nodeName}
</div>
<div className='ml-2 shrink-0 p-1 cursor-pointer' onClick={onHide}>
<div className='ml-2 shrink-0 p-1 cursor-pointer' onClick={() => {
onHide()
}}>
<RiCloseLine className='w-4 h-4 text-gray-500 ' />
</div>
</div>
<div className='h-0 grow overflow-y-auto pb-4'>
<div className='mt-3 px-4 space-y-4'>
{forms.map((form, index) => (
<div key={index}>
<Form
key={index}
className={cn(index < forms.length - 1 && 'mb-4')}
{...form}
/>
{index < forms.length - 1 && <Split />}
{
retryDetails?.length && (
<div className='h-0 grow overflow-y-auto pb-4'>
<RetryResultPanel
list={retryDetails.map((item, index) => ({
...item,
title: `${t('workflow.nodes.common.retry.retry')} ${index + 1}`,
node_type: nodeType!,
extras: {
icon: toolIcon!,
},
}))}
onBack={onRetryDetailBack}
/>
</div>
)
}
{
!retryDetails?.length && (
<div className='h-0 grow overflow-y-auto pb-4'>
<div className='mt-3 px-4 space-y-4'>
{forms.map((form, index) => (
<div key={index}>
<Form
key={index}
className={cn(index < forms.length - 1 && 'mb-4')}
{...form}
/>
{index < forms.length - 1 && <Split />}
</div>
))}
</div>
))}
</div>
<div className='mt-4 flex justify-between space-x-2 px-4' >
{isRunning && (
<div
className='p-2 rounded-lg border border-gray-200 bg-white shadow-xs cursor-pointer'
onClick={onStop}
>
<StopCircle className='w-4 h-4 text-gray-500' />
<div className='mt-4 flex justify-between space-x-2 px-4' >
{isRunning && (
<div
className='p-2 rounded-lg border border-gray-200 bg-white shadow-xs cursor-pointer'
onClick={onStop}
>
<StopCircle className='w-4 h-4 text-gray-500' />
</div>
)}
<Button disabled={!isFileLoaded || isRunning} variant='primary' className='w-0 grow space-x-2' onClick={handleRun}>
{isRunning && <RiLoader2Line className='animate-spin w-4 h-4 text-white' />}
<div>{t(`${i18nPrefix}.${isRunning ? 'running' : 'startRun'}`)}</div>
</Button>
</div>
)}
<Button disabled={!isFileLoaded || isRunning} variant='primary' className='w-0 grow space-x-2' onClick={handleRun}>
{isRunning && <RiLoader2Line className='animate-spin w-4 h-4 text-white' />}
<div>{t(`${i18nPrefix}.${isRunning ? 'running' : 'startRun'}`)}</div>
</Button>
</div>
{isRunning && (
<ResultPanel status='running' showSteps={false} />
)}
{isFinished && (
<>
{result}
</>
)}
</div>
{isRunning && (
<ResultPanel status='running' showSteps={false} />
)}
{isFinished && (
<>
{result}
</>
)}
</div>
)
}
</div>
</div>
)

View File

@ -14,7 +14,6 @@ import type {
CommonNodeType,
Node,
} from '@/app/components/workflow/types'
import Split from '@/app/components/workflow/nodes/_base/components/split'
import Tooltip from '@/app/components/base/tooltip'
type ErrorHandleProps = Pick<Node, 'id' | 'data'>
@ -45,7 +44,6 @@ const ErrorHandle = ({
return (
<>
<Split />
<div className='py-4'>
<Collapse
disabled={!error_strategy}

View File

@ -0,0 +1,41 @@
import {
useCallback,
useState,
} from 'react'
import type { WorkflowRetryConfig } from './types'
import {
useNodeDataUpdate,
} from '@/app/components/workflow/hooks'
import type { NodeTracing } from '@/types/workflow'
// Hook exposing a single callback that persists a node's retry
// configuration into the workflow draft (synced via the draft updater).
export const useRetryConfig = (
  id: string,
) => {
  const { handleNodeDataUpdateWithSyncDraft } = useNodeDataUpdate()

  // Passing `undefined` clears the retry configuration for the node.
  const handleRetryConfigChange = useCallback(
    (value?: WorkflowRetryConfig) => {
      const patch = { retry_config: value }
      handleNodeDataUpdateWithSyncDraft({ id, data: patch })
    },
    [id, handleNodeDataUpdateWithSyncDraft],
  )

  return { handleRetryConfigChange }
}
// Local UI state for the retry-detail view inside the single-run modal.
// `retryDetails === undefined` means the detail panel is hidden; the
// setter doubles as both "show" (with a list) and "hide" (with undefined).
export const useRetryDetailShowInSingleRun = () => {
  const [retryDetails, setRetryDetails] = useState<NodeTracing[] | undefined>()

  const handleRetryDetailsChange = useCallback(
    (details: NodeTracing[] | undefined) => setRetryDetails(details),
    [],
  )

  return { retryDetails, handleRetryDetailsChange }
}

View File

@ -0,0 +1,88 @@
import { useMemo } from 'react'
import { useTranslation } from 'react-i18next'
import {
RiAlertFill,
RiCheckboxCircleFill,
RiLoader2Line,
} from '@remixicon/react'
import type { Node } from '@/app/components/workflow/types'
import { NodeRunningStatus } from '@/app/components/workflow/types'
import cn from '@/utils/classnames'
type RetryOnNodeProps = Pick<Node, 'id' | 'data'>

// Compact status strip rendered on the canvas node: shows the configured
// retry count when idle, and live retry progress/outcome while running.
const RetryOnNode = ({
  data,
}: RetryOnNodeProps) => {
  const { t } = useTranslation()
  const { retry_config } = data
  // Status colouring is suppressed while the node is selected/bundled/entering.
  const showSelectedBorder = data.selected || data._isBundled || data._isEntering
  const {
    isRunning,
    isSuccessful,
    isException,
    isFailed,
  } = useMemo(() => {
    const status = data._runningStatus
    const active = !showSelectedBorder
    return {
      isRunning: active && status === NodeRunningStatus.Running,
      isSuccessful: active && status === NodeRunningStatus.Succeeded,
      isFailed: active && status === NodeRunningStatus.Failed,
      isException: active && status === NodeRunningStatus.Exception,
    }
  }, [data._runningStatus, showSelectedBorder])
  const showDefault = !isRunning && !isSuccessful && !isException && !isFailed

  // Nothing to render for nodes without a retry configuration.
  if (!retry_config)
    return null

  const stripClassName = cn(
    'flex items-center justify-between px-[5px] py-1 bg-workflow-block-parma-bg border-[0.5px] border-transparent rounded-md system-xs-medium-uppercase text-text-tertiary',
    isRunning && 'bg-state-accent-hover border-state-accent-active text-text-accent',
    isSuccessful && 'bg-state-success-hover border-state-success-active text-text-success',
    (isException || isFailed) && 'bg-state-warning-hover border-state-warning-active text-text-warning',
  )

  return (
    <div className='px-3'>
      <div className={stripClassName}>
        <div className='flex items-center'>
          {showDefault && t('workflow.nodes.common.retry.retryTimes', { times: retry_config.max_retries })}
          {isRunning && (
            <>
              <RiLoader2Line className='animate-spin mr-1 w-3.5 h-3.5' />
              {t('workflow.nodes.common.retry.retrying')}
            </>
          )}
          {isSuccessful && (
            <>
              <RiCheckboxCircleFill className='mr-1 w-3.5 h-3.5' />
              {t('workflow.nodes.common.retry.retrySuccessful')}
            </>
          )}
          {(isFailed || isException) && (
            <>
              <RiAlertFill className='mr-1 w-3.5 h-3.5' />
              {t('workflow.nodes.common.retry.retryFailed')}
            </>
          )}
        </div>
        {!showDefault && (
          <div>
            {data._retryIndex}/{data.retry_config?.max_retries}
          </div>
        )}
      </div>
    </div>
  )
}

export default RetryOnNode

View File

@ -0,0 +1,117 @@
import { useTranslation } from 'react-i18next'
import { useRetryConfig } from './hooks'
import s from './style.module.css'
import Switch from '@/app/components/base/switch'
import Slider from '@/app/components/base/slider'
import Input from '@/app/components/base/input'
import type {
Node,
} from '@/app/components/workflow/types'
import Split from '@/app/components/workflow/nodes/_base/components/split'
type RetryOnPanelProps = Pick<Node, 'id' | 'data'>

// Bounds shared by the sliders and number inputs below.
const MAX_RETRIES_MIN = 1
const MAX_RETRIES_MAX = 10
const RETRY_INTERVAL_MIN = 100
const RETRY_INTERVAL_MAX = 5000

// Clamp a numeric value into [min, max]; non-finite input (e.g. a cleared
// number field coerced to NaN) falls back to the minimum.
const clamp = (value: number, min: number, max: number) => {
  if (!Number.isFinite(value))
    return min
  return Math.min(max, Math.max(min, value))
}

// Settings panel for a node's retry-on-failure behaviour: an enable
// switch plus clamped max-retries and retry-interval controls.
const RetryOnPanel = ({
  id,
  data,
}: RetryOnPanelProps) => {
  const { t } = useTranslation()
  const { handleRetryConfigChange } = useRetryConfig(id)
  const { retry_config } = data

  // Toggle retry on/off; numeric fields keep their current values or
  // fall back to the panel's defaults.
  const handleRetryEnabledChange = (value: boolean) => {
    handleRetryConfigChange({
      retry_enabled: value,
      max_retries: retry_config?.max_retries || 3,
      retry_interval: retry_config?.retry_interval || 1000,
    })
  }

  // FIX: the number inputs previously passed `e.target.value as any`
  // (a string) straight through; out-of-range values were clamped via
  // coerced comparisons, but in-range strings like '5' were stored
  // verbatim in retry_config. Values are now coerced with Number() at
  // the call sites and clamped here.
  const handleMaxRetriesChange = (value: number) => {
    handleRetryConfigChange({
      retry_enabled: true,
      max_retries: clamp(value, MAX_RETRIES_MIN, MAX_RETRIES_MAX),
      retry_interval: retry_config?.retry_interval || 1000,
    })
  }

  const handleRetryIntervalChange = (value: number) => {
    handleRetryConfigChange({
      retry_enabled: true,
      max_retries: retry_config?.max_retries || 3,
      retry_interval: clamp(value, RETRY_INTERVAL_MIN, RETRY_INTERVAL_MAX),
    })
  }

  return (
    <>
      <div className='pt-2'>
        <div className='flex items-center justify-between px-4 py-2 h-10'>
          <div className='flex items-center'>
            <div className='mr-0.5 system-sm-semibold-uppercase text-text-secondary'>{t('workflow.nodes.common.retry.retryOnFailure')}</div>
          </div>
          <Switch
            defaultValue={retry_config?.retry_enabled}
            onChange={v => handleRetryEnabledChange(v)}
          />
        </div>
        {
          retry_config?.retry_enabled && (
            <div className='px-4 pb-2'>
              <div className='flex items-center mb-1 w-full'>
                <div className='grow mr-2 system-xs-medium-uppercase'>{t('workflow.nodes.common.retry.maxRetries')}</div>
                <Slider
                  className='mr-3 w-[108px]'
                  value={retry_config?.max_retries || 3}
                  onChange={handleMaxRetriesChange}
                  min={MAX_RETRIES_MIN}
                  max={MAX_RETRIES_MAX}
                />
                <Input
                  type='number'
                  wrapperClassName='w-[80px]'
                  value={retry_config?.max_retries || 3}
                  onChange={e => handleMaxRetriesChange(Number(e.target.value))}
                  min={MAX_RETRIES_MIN}
                  max={MAX_RETRIES_MAX}
                  unit={t('workflow.nodes.common.retry.times') || ''}
                  className={s.input}
                />
              </div>
              <div className='flex items-center'>
                <div className='grow mr-2 system-xs-medium-uppercase'>{t('workflow.nodes.common.retry.retryInterval')}</div>
                <Slider
                  className='mr-3 w-[108px]'
                  value={retry_config?.retry_interval || 1000}
                  onChange={handleRetryIntervalChange}
                  min={RETRY_INTERVAL_MIN}
                  max={RETRY_INTERVAL_MAX}
                />
                <Input
                  type='number'
                  wrapperClassName='w-[80px]'
                  value={retry_config?.retry_interval || 1000}
                  onChange={e => handleRetryIntervalChange(Number(e.target.value))}
                  min={RETRY_INTERVAL_MIN}
                  max={RETRY_INTERVAL_MAX}
                  unit={t('workflow.nodes.common.retry.ms') || ''}
                  className={s.input}
                />
              </div>
            </div>
          )
        }
      </div>
      <Split className='mx-4 mt-2' />
    </>
  )
}

export default RetryOnPanel

View File

@ -0,0 +1,5 @@
/* Hide the native number-input spin buttons on WebKit/Blink —
   presumably so they don't overlap the unit suffix ("times"/"ms")
   that the Input component renders inside the field's right edge. */
.input::-webkit-inner-spin-button,
.input::-webkit-outer-spin-button {
-webkit-appearance: none;
margin: 0;
}

View File

@ -0,0 +1,5 @@
// Per-node retry settings stored in the node's `retry_config`.
export type WorkflowRetryConfig = {
  max_retries: number // upper bound on retry attempts (panel UI clamps to 1–10)
  retry_interval: number // wait between attempts in ms (panel UI clamps to 100–5000)
  retry_enabled: boolean // master switch for retry-on-failure
}

View File

@ -25,7 +25,10 @@ import {
useNodesReadOnly,
useToolIcon,
} from '../../hooks'
import { hasErrorHandleNode } from '../../utils'
import {
hasErrorHandleNode,
hasRetryNode,
} from '../../utils'
import { useNodeIterationInteractions } from '../iteration/use-interactions'
import type { IterationNodeType } from '../iteration/types'
import {
@ -35,6 +38,7 @@ import {
import NodeResizer from './components/node-resizer'
import NodeControl from './components/node-control'
import ErrorHandleOnNode from './components/error-handle/error-handle-on-node'
import RetryOnNode from './components/retry/retry-on-node'
import AddVariablePopupWithPosition from './components/add-variable-popup-with-position'
import cn from '@/utils/classnames'
import BlockIcon from '@/app/components/workflow/block-icon'
@ -237,6 +241,14 @@ const BaseNode: FC<BaseNodeProps> = ({
</div>
)
}
{
hasRetryNode(data.type) && (
<RetryOnNode
id={id}
data={data}
/>
)
}
{
hasErrorHandleNode(data.type) && (
<ErrorHandleOnNode

View File

@ -21,9 +21,11 @@ import {
TitleInput,
} from './components/title-description-input'
import ErrorHandleOnPanel from './components/error-handle/error-handle-on-panel'
import RetryOnPanel from './components/retry/retry-on-panel'
import { useResizePanel } from './hooks/use-resize-panel'
import cn from '@/utils/classnames'
import BlockIcon from '@/app/components/workflow/block-icon'
import Split from '@/app/components/workflow/nodes/_base/components/split'
import {
WorkflowHistoryEvent,
useAvailableBlocks,
@ -38,6 +40,7 @@ import {
import {
canRunBySingle,
hasErrorHandleNode,
hasRetryNode,
} from '@/app/components/workflow/utils'
import Tooltip from '@/app/components/base/tooltip'
import type { Node } from '@/app/components/workflow/types'
@ -168,6 +171,15 @@ const BasePanel: FC<BasePanelProps> = ({
<div>
{cloneElement(children, { id, data })}
</div>
<Split />
{
hasRetryNode(data.type) && (
<RetryOnPanel
id={id}
data={data}
/>
)
}
{
hasErrorHandleNode(data.type) && (
<ErrorHandleOnPanel

View File

@ -2,7 +2,10 @@ import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { AuthorizationType, BodyType, Method } from './types'
import type { BodyPayload, HttpNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import {
ALL_CHAT_AVAILABLE_BLOCKS,
ALL_COMPLETION_AVAILABLE_BLOCKS,
} from '@/app/components/workflow/constants'
const nodeDefault: NodeDefault<HttpNodeType> = {
defaultValue: {
@ -24,6 +27,11 @@ const nodeDefault: NodeDefault<HttpNodeType> = {
max_read_timeout: 0,
max_write_timeout: 0,
},
retry_config: {
retry_enabled: true,
max_retries: 3,
retry_interval: 100,
},
},
getAvailablePrevNodes(isChatMode: boolean) {
const nodes = isChatMode

View File

@ -1,5 +1,5 @@
import type { FC } from 'react'
import React from 'react'
import { memo } from 'react'
import { useTranslation } from 'react-i18next'
import useConfig from './use-config'
import ApiInput from './components/api-input'
@ -18,6 +18,7 @@ import { FileArrow01 } from '@/app/components/base/icons/src/vender/line/files'
import type { NodePanelProps } from '@/app/components/workflow/types'
import BeforeRunForm from '@/app/components/workflow/nodes/_base/components/before-run-form'
import ResultPanel from '@/app/components/workflow/run/result-panel'
import { useRetryDetailShowInSingleRun } from '@/app/components/workflow/nodes/_base/components/retry/hooks'
const i18nPrefix = 'workflow.nodes.http'
@ -60,6 +61,10 @@ const Panel: FC<NodePanelProps<HttpNodeType>> = ({
hideCurlPanel,
handleCurlImport,
} = useConfig(id, data)
const {
retryDetails,
handleRetryDetailsChange,
} = useRetryDetailShowInSingleRun()
// To prevent prompt editor in body not update data.
if (!isDataReady)
return null
@ -181,6 +186,7 @@ const Panel: FC<NodePanelProps<HttpNodeType>> = ({
{isShowSingleRun && (
<BeforeRunForm
nodeName={inputs.title}
nodeType={inputs.type}
onHide={hideSingleRun}
forms={[
{
@ -192,7 +198,9 @@ const Panel: FC<NodePanelProps<HttpNodeType>> = ({
runningStatus={runningStatus}
onRun={handleRun}
onStop={handleStop}
result={<ResultPanel {...runResult} showSteps={false} />}
retryDetails={retryDetails}
onRetryDetailBack={handleRetryDetailsChange}
result={<ResultPanel {...runResult} showSteps={false} onShowRetryDetail={handleRetryDetailsChange} />}
/>
)}
{(isShowCurlPanel && !readOnly) && (
@ -207,4 +215,4 @@ const Panel: FC<NodePanelProps<HttpNodeType>> = ({
)
}
export default React.memo(Panel)
export default memo(Panel)

View File

@ -129,9 +129,6 @@ export const getMultipleRetrievalConfig = (
reranking_enable: ((allInternal && allEconomic) || allExternal) ? reranking_enable : true,
}
if (!rerankModelIsValid)
result.reranking_model = undefined
const setDefaultWeights = () => {
result.weights = {
vector_setting: {
@ -198,7 +195,6 @@ export const getMultipleRetrievalConfig = (
setDefaultWeights()
}
}
if (reranking_mode === RerankingModeEnum.RerankingModel && !rerankModelIsValid && shouldSetWeightDefaultValue) {
result.reranking_mode = RerankingModeEnum.WeightedScore
setDefaultWeights()

View File

@ -19,6 +19,7 @@ import type { Props as FormProps } from '@/app/components/workflow/nodes/_base/c
import ResultPanel from '@/app/components/workflow/run/result-panel'
import Tooltip from '@/app/components/base/tooltip'
import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
import { useRetryDetailShowInSingleRun } from '@/app/components/workflow/nodes/_base/components/retry/hooks'
const i18nPrefix = 'workflow.nodes.llm'
@ -69,6 +70,10 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
runResult,
filterJinjia2InputVar,
} = useConfig(id, data)
const {
retryDetails,
handleRetryDetailsChange,
} = useRetryDetailShowInSingleRun()
const model = inputs.model
@ -282,12 +287,15 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
{isShowSingleRun && (
<BeforeRunForm
nodeName={inputs.title}
nodeType={inputs.type}
onHide={hideSingleRun}
forms={singleRunForms}
runningStatus={runningStatus}
onRun={handleRun}
onStop={handleStop}
result={<ResultPanel {...runResult} showSteps={false} />}
retryDetails={retryDetails}
onRetryDetailBack={handleRetryDetailsChange}
result={<ResultPanel {...runResult} showSteps={false} onShowRetryDetail={handleRetryDetailsChange} />}
/>
)}
</div>

View File

@ -14,6 +14,8 @@ import Loading from '@/app/components/base/loading'
import BeforeRunForm from '@/app/components/workflow/nodes/_base/components/before-run-form'
import OutputVars, { VarItem } from '@/app/components/workflow/nodes/_base/components/output-vars'
import ResultPanel from '@/app/components/workflow/run/result-panel'
import { useRetryDetailShowInSingleRun } from '@/app/components/workflow/nodes/_base/components/retry/hooks'
import { useToolIcon } from '@/app/components/workflow/hooks'
const i18nPrefix = 'workflow.nodes.tool'
@ -48,6 +50,11 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
handleStop,
runResult,
} = useConfig(id, data)
const toolIcon = useToolIcon(data)
const {
retryDetails,
handleRetryDetailsChange,
} = useRetryDetailShowInSingleRun()
if (isLoading) {
return <div className='flex h-[200px] items-center justify-center'>
@ -143,12 +150,16 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
{isShowSingleRun && (
<BeforeRunForm
nodeName={inputs.title}
nodeType={inputs.type}
toolIcon={toolIcon}
onHide={hideSingleRun}
forms={singleRunForms}
runningStatus={runningStatus}
onRun={handleRun}
onStop={handleStop}
result={<ResultPanel {...runResult} showSteps={false} />}
retryDetails={retryDetails}
onRetryDetailBack={handleRetryDetailsChange}
result={<ResultPanel {...runResult} showSteps={false} onShowRetryDetail={handleRetryDetailsChange} />}
/>
)}
</div>

View File

@ -27,6 +27,7 @@ import {
getProcessedFilesFromResponse,
} from '@/app/components/base/file-uploader/utils'
import type { FileEntity } from '@/app/components/base/file-uploader/types'
import type { NodeTracing } from '@/types/workflow'
type GetAbortController = (abortController: AbortController) => void
type SendCallback = {
@ -381,6 +382,28 @@ export const useChat = (
}
}))
},
onNodeRetry: ({ data }) => {
if (data.iteration_id)
return
const currentIndex = responseItem.workflowProcess!.tracing!.findIndex((item) => {
if (!item.execution_metadata?.parallel_id)
return item.node_id === data.node_id
return item.node_id === data.node_id && (item.execution_metadata?.parallel_id === data.execution_metadata?.parallel_id || item.parallel_id === data.execution_metadata?.parallel_id)
})
if (responseItem.workflowProcess!.tracing[currentIndex].retryDetail)
responseItem.workflowProcess!.tracing[currentIndex].retryDetail?.push(data as NodeTracing)
else
responseItem.workflowProcess!.tracing[currentIndex].retryDetail = [data as NodeTracing]
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
},
onNodeFinished: ({ data }) => {
if (data.iteration_id)
return
@ -394,6 +417,9 @@ export const useChat = (
...(responseItem.workflowProcess!.tracing[currentIndex]?.extras
? { extras: responseItem.workflowProcess!.tracing[currentIndex].extras }
: {}),
...(responseItem.workflowProcess!.tracing[currentIndex]?.retryDetail
? { retryDetail: responseItem.workflowProcess!.tracing[currentIndex].retryDetail }
: {}),
...data,
} as any
handleUpdateChatList(produce(chatListRef.current, (draft) => {

View File

@ -25,6 +25,7 @@ import {
import { SimpleBtn } from '../../app/text-generate/item'
import Toast from '../../base/toast'
import IterationResultPanel from '../run/iteration-result-panel'
import RetryResultPanel from '../run/retry-result-panel'
import InputsPanel from './inputs-panel'
import cn from '@/utils/classnames'
import Loading from '@/app/components/base/loading'
@ -53,11 +54,16 @@ const WorkflowPreview = () => {
}, [workflowRunningData])
const [iterationRunResult, setIterationRunResult] = useState<NodeTracing[][]>([])
const [retryRunResult, setRetryRunResult] = useState<NodeTracing[]>([])
const [iterDurationMap, setIterDurationMap] = useState<IterationDurationMap>({})
const [isShowIterationDetail, {
setTrue: doShowIterationDetail,
setFalse: doHideIterationDetail,
}] = useBoolean(false)
const [isShowRetryDetail, {
setTrue: doShowRetryDetail,
setFalse: doHideRetryDetail,
}] = useBoolean(false)
const handleShowIterationDetail = useCallback((detail: NodeTracing[][], iterationDurationMap: IterationDurationMap) => {
setIterDurationMap(iterationDurationMap)
@ -65,6 +71,11 @@ const WorkflowPreview = () => {
doShowIterationDetail()
}, [doShowIterationDetail])
const handleRetryDetail = useCallback((detail: NodeTracing[]) => {
setRetryRunResult(detail)
doShowRetryDetail()
}, [doShowRetryDetail])
if (isShowIterationDetail) {
return (
<div className={`
@ -201,11 +212,12 @@ const WorkflowPreview = () => {
<Loading />
</div>
)}
{currentTab === 'TRACING' && (
{currentTab === 'TRACING' && !isShowRetryDetail && (
<TracingPanel
className='bg-background-section-burn'
list={workflowRunningData?.tracing || []}
onShowIterationDetail={handleShowIterationDetail}
onShowRetryDetail={handleRetryDetail}
/>
)}
{currentTab === 'TRACING' && !workflowRunningData?.tracing?.length && (
@ -213,7 +225,14 @@ const WorkflowPreview = () => {
<Loading />
</div>
)}
{
currentTab === 'TRACING' && isShowRetryDetail && (
<RetryResultPanel
list={retryRunResult}
onBack={doHideRetryDetail}
/>
)
}
</div>
</>
)}

View File

@ -9,6 +9,7 @@ import OutputPanel from './output-panel'
import ResultPanel from './result-panel'
import TracingPanel from './tracing-panel'
import IterationResultPanel from './iteration-result-panel'
import RetryResultPanel from './retry-result-panel'
import cn from '@/utils/classnames'
import { ToastContext } from '@/app/components/base/toast'
import Loading from '@/app/components/base/loading'
@ -107,6 +108,18 @@ const RunPanel: FC<RunProps> = ({ hideResult, activeTab = 'RESULT', runID, getRe
const processNonIterationNode = (item: NodeTracing) => {
const { execution_metadata } = item
if (!execution_metadata?.iteration_id) {
if (item.status === 'retry') {
const retryNode = result.find(node => node.node_id === item.node_id)
if (retryNode) {
if (retryNode?.retryDetail)
retryNode.retryDetail.push(item)
else
retryNode.retryDetail = [item]
}
return
}
result.push(item)
return
}
@ -181,10 +194,15 @@ const RunPanel: FC<RunProps> = ({ hideResult, activeTab = 'RESULT', runID, getRe
const [iterationRunResult, setIterationRunResult] = useState<NodeTracing[][]>([])
const [iterDurationMap, setIterDurationMap] = useState<IterationDurationMap>({})
const [retryRunResult, setRetryRunResult] = useState<NodeTracing[]>([])
const [isShowIterationDetail, {
setTrue: doShowIterationDetail,
setFalse: doHideIterationDetail,
}] = useBoolean(false)
const [isShowRetryDetail, {
setTrue: doShowRetryDetail,
setFalse: doHideRetryDetail,
}] = useBoolean(false)
const handleShowIterationDetail = useCallback((detail: NodeTracing[][], iterDurationMap: IterationDurationMap) => {
setIterationRunResult(detail)
@ -192,6 +210,11 @@ const RunPanel: FC<RunProps> = ({ hideResult, activeTab = 'RESULT', runID, getRe
setIterDurationMap(iterDurationMap)
}, [doShowIterationDetail, setIterationRunResult, setIterDurationMap])
const handleShowRetryDetail = useCallback((detail: NodeTracing[]) => {
setRetryRunResult(detail)
doShowRetryDetail()
}, [doShowRetryDetail, setRetryRunResult])
if (isShowIterationDetail) {
return (
<div className='grow relative flex flex-col'>
@ -261,13 +284,22 @@ const RunPanel: FC<RunProps> = ({ hideResult, activeTab = 'RESULT', runID, getRe
exceptionCounts={runDetail.exceptions_count}
/>
)}
{!loading && currentTab === 'TRACING' && (
{!loading && currentTab === 'TRACING' && !isShowRetryDetail && (
<TracingPanel
className='bg-background-section-burn'
list={list}
onShowIterationDetail={handleShowIterationDetail}
onShowRetryDetail={handleShowRetryDetail}
/>
)}
{
!loading && currentTab === 'TRACING' && isShowRetryDetail && (
<RetryResultPanel
list={retryRunResult}
onBack={doHideRetryDetail}
/>
)
}
</div>
</div>
)

View File

@ -8,6 +8,7 @@ import {
RiCheckboxCircleFill,
RiErrorWarningLine,
RiLoader2Line,
RiRestartFill,
} from '@remixicon/react'
import BlockIcon from '../block-icon'
import { BlockEnum } from '../types'
@ -20,6 +21,7 @@ import Button from '@/app/components/base/button'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import type { IterationDurationMap, NodeTracing } from '@/types/workflow'
import ErrorHandleTip from '@/app/components/workflow/nodes/_base/components/error-handle/error-handle-tip'
import { hasRetryNode } from '@/app/components/workflow/utils'
type Props = {
className?: string
@ -28,8 +30,10 @@ type Props = {
hideInfo?: boolean
hideProcessDetail?: boolean
onShowIterationDetail?: (detail: NodeTracing[][], iterDurationMap: IterationDurationMap) => void
onShowRetryDetail?: (detail: NodeTracing[]) => void
notShowIterationNav?: boolean
justShowIterationNavArrow?: boolean
justShowRetryNavArrow?: boolean
}
const NodePanel: FC<Props> = ({
@ -39,6 +43,7 @@ const NodePanel: FC<Props> = ({
hideInfo = false,
hideProcessDetail,
onShowIterationDetail,
onShowRetryDetail,
notShowIterationNav,
justShowIterationNavArrow,
}) => {
@ -88,11 +93,17 @@ const NodePanel: FC<Props> = ({
}, [nodeInfo.expand, setCollapseState])
const isIterationNode = nodeInfo.node_type === BlockEnum.Iteration
const isRetryNode = hasRetryNode(nodeInfo.node_type) && nodeInfo.retryDetail
const handleOnShowIterationDetail = (e: React.MouseEvent<HTMLButtonElement>) => {
e.stopPropagation()
e.nativeEvent.stopImmediatePropagation()
onShowIterationDetail?.(nodeInfo.details || [], nodeInfo?.iterDurationMap || nodeInfo.execution_metadata?.iteration_duration_map || {})
}
const handleOnShowRetryDetail = (e: React.MouseEvent<HTMLButtonElement>) => {
e.stopPropagation()
e.nativeEvent.stopImmediatePropagation()
onShowRetryDetail?.(nodeInfo.retryDetail || [])
}
return (
<div className={cn('px-2 py-1', className)}>
<div className='group transition-all bg-background-default border border-components-panel-border rounded-[10px] shadow-xs hover:shadow-md'>
@ -169,6 +180,19 @@ const NodePanel: FC<Props> = ({
<Split className='mt-2' />
</div>
)}
{isRetryNode && (
<Button
className='flex items-center justify-between mb-1 w-full'
variant='tertiary'
onClick={handleOnShowRetryDetail}
>
<div className='flex items-center'>
<RiRestartFill className='mr-0.5 w-4 h-4 text-components-button-tertiary-text flex-shrink-0' />
{t('workflow.nodes.common.retry.retries', { num: nodeInfo.retryDetail?.length })}
</div>
<RiArrowRightSLine className='w-4 h-4 text-components-button-tertiary-text flex-shrink-0' />
</Button>
)}
<div className={cn('mb-1', hideInfo && '!px-2 !py-0.5')}>
{(nodeInfo.status === 'stopped') && (
<StatusContainer status='stopped'>

View File

@ -1,11 +1,17 @@
'use client'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import {
RiArrowRightSLine,
RiRestartFill,
} from '@remixicon/react'
import StatusPanel from './status'
import MetaData from './meta'
import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import ErrorHandleTip from '@/app/components/workflow/nodes/_base/components/error-handle/error-handle-tip'
import type { NodeTracing } from '@/types/workflow'
import Button from '@/app/components/base/button'
type ResultPanelProps = {
inputs?: string
@ -22,6 +28,8 @@ type ResultPanelProps = {
showSteps?: boolean
exceptionCounts?: number
execution_metadata?: any
retry_events?: NodeTracing[]
onShowRetryDetail?: (retries: NodeTracing[]) => void
}
const ResultPanel: FC<ResultPanelProps> = ({
@ -38,8 +46,11 @@ const ResultPanel: FC<ResultPanelProps> = ({
showSteps,
exceptionCounts,
execution_metadata,
retry_events,
onShowRetryDetail,
}) => {
const { t } = useTranslation()
return (
<div className='bg-components-panel-bg py-2'>
<div className='px-4 py-2'>
@ -51,6 +62,23 @@ const ResultPanel: FC<ResultPanelProps> = ({
exceptionCounts={exceptionCounts}
/>
</div>
{
retry_events?.length && onShowRetryDetail && (
<div className='px-4'>
<Button
className='flex items-center justify-between w-full'
variant='tertiary'
onClick={() => onShowRetryDetail(retry_events)}
>
<div className='flex items-center'>
<RiRestartFill className='mr-0.5 w-4 h-4 text-components-button-tertiary-text flex-shrink-0' />
{t('workflow.nodes.common.retry.retries', { num: retry_events?.length })}
</div>
<RiArrowRightSLine className='w-4 h-4 text-components-button-tertiary-text flex-shrink-0' />
</Button>
</div>
)
}
<div className='px-4 py-2 flex flex-col gap-2'>
<CodeEditor
readOnly

View File

@ -0,0 +1,46 @@
'use client'
import type { FC } from 'react'
import { memo } from 'react'
import { useTranslation } from 'react-i18next'
import {
  RiArrowLeftLine,
} from '@remixicon/react'
import TracingPanel from './tracing-panel'
import type { NodeTracing } from '@/types/workflow'

type Props = {
  // One trace entry per retry attempt, in execution order.
  list: NodeTracing[]
  // Invoked when the user clicks the back header to return to the parent view.
  onBack: () => void
}

/**
 * Detail panel listing the trace of every retry attempt of a node.
 * Renders a clickable "back" header followed by a TracingPanel in which
 * each attempt is titled "Retry 1", "Retry 2", ...
 */
const RetryResultPanel: FC<Props> = ({
  list,
  onBack,
}) => {
  const { t } = useTranslation()

  return (
    <div>
      <div
        className='flex items-center px-4 h-8 text-text-accent-secondary bg-components-panel-bg system-sm-medium cursor-pointer'
        onClick={(e) => {
          // Stop both React and native propagation so the click does not
          // toggle/collapse any ancestor panel before onBack runs.
          e.stopPropagation()
          e.nativeEvent.stopImmediatePropagation()
          onBack()
        }}
      >
        <RiArrowLeftLine className='mr-1 w-4 h-4' />
        {t('workflow.singleRun.back')}
      </div>
      <TracingPanel
        list={list.map((item, index) => ({
          ...item,
          // Attempt numbering is 1-based for display.
          title: `${t('workflow.nodes.common.retry.retry')} ${index + 1}`,
        }))}
        className='bg-background-section-burn'
      />
    </div>
  )
}

export default memo(RetryResultPanel)

View File

@ -21,6 +21,7 @@ import type { IterationDurationMap, NodeTracing } from '@/types/workflow'
type TracingPanelProps = {
list: NodeTracing[]
onShowIterationDetail?: (detail: NodeTracing[][], iterDurationMap: IterationDurationMap) => void
onShowRetryDetail?: (detail: NodeTracing[]) => void
className?: string
hideNodeInfo?: boolean
hideNodeProcessDetail?: boolean
@ -160,6 +161,7 @@ function buildLogTree(nodes: NodeTracing[], t: (key: string) => string): Tracing
const TracingPanel: FC<TracingPanelProps> = ({
list,
onShowIterationDetail,
onShowRetryDetail,
className,
hideNodeInfo = false,
hideNodeProcessDetail = false,
@ -251,7 +253,9 @@ const TracingPanel: FC<TracingPanelProps> = ({
<NodePanel
nodeInfo={node.data!}
onShowIterationDetail={onShowIterationDetail}
onShowRetryDetail={onShowRetryDetail}
justShowIterationNavArrow={true}
justShowRetryNavArrow={true}
hideInfo={hideNodeInfo}
hideProcessDetail={hideNodeProcessDetail}
/>

View File

@ -13,6 +13,7 @@ import type {
DefaultValueForm,
ErrorHandleTypeEnum,
} from '@/app/components/workflow/nodes/_base/components/error-handle/types'
import type { WorkflowRetryConfig } from '@/app/components/workflow/nodes/_base/components/retry/types'
export enum BlockEnum {
Start = 'start',
@ -68,6 +69,7 @@ export type CommonNodeType<T = {}> = {
_iterationIndex?: number
_inParallelHovering?: boolean
_waitingRun?: boolean
_retryIndex?: number
isInIteration?: boolean
iteration_id?: string
selected?: boolean
@ -77,6 +79,7 @@ export type CommonNodeType<T = {}> = {
width?: number
height?: number
error_strategy?: ErrorHandleTypeEnum
retry_config?: WorkflowRetryConfig
default_value?: DefaultValueForm[]
} & T & Partial<Pick<ToolDefaultValue, 'provider_id' | 'provider_type' | 'provider_name' | 'tool_name'>>
@ -293,6 +296,7 @@ export enum NodeRunningStatus {
Succeeded = 'succeeded',
Failed = 'failed',
Exception = 'exception',
Retry = 'retry',
}
export type OnNodeAdd = (

View File

@ -26,6 +26,8 @@ import {
} from './types'
import {
CUSTOM_NODE,
DEFAULT_RETRY_INTERVAL,
DEFAULT_RETRY_MAX,
ITERATION_CHILDREN_Z_INDEX,
ITERATION_NODE_Z_INDEX,
NODE_WIDTH_X_OFFSET,
@ -279,6 +281,14 @@ export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => {
iterationNodeData.error_handle_mode = iterationNodeData.error_handle_mode || ErrorHandleMode.Terminated
}
if (node.data.type === BlockEnum.HttpRequest && !node.data.retry_config) {
node.data.retry_config = {
retry_enabled: true,
max_retries: DEFAULT_RETRY_MAX,
retry_interval: DEFAULT_RETRY_INTERVAL,
}
}
return node
})
}
@ -797,3 +807,7 @@ export const isExceptionVariable = (variable: string, nodeType?: BlockEnum) => {
return false
}
/**
 * Whether the given node type supports the retry-on-failure feature.
 * Only LLM, Tool, HTTP Request and Code nodes are retryable.
 */
export const hasRetryNode = (nodeType?: BlockEnum) => {
  const retryableTypes = [BlockEnum.LLM, BlockEnum.Tool, BlockEnum.HttpRequest, BlockEnum.Code]
  return !!nodeType && retryableTypes.includes(nodeType)
}

View File

@ -144,6 +144,8 @@ const translation = {
emptyTitle: 'Kein Workflow-Tool verfügbar',
type: 'Art',
emptyTip: 'Gehen Sie zu "Workflow -> Als Tool veröffentlichen"',
emptyTitleCustom: 'Kein benutzerdefiniertes Tool verfügbar',
emptyTipCustom: 'Erstellen eines benutzerdefinierten Werkzeugs',
},
toolNameUsageTip: 'Name des Tool-Aufrufs für die Argumentation und Aufforderung des Agenten',
customToolTip: 'Erfahren Sie mehr über benutzerdefinierte Dify-Tools',

View File

@ -322,6 +322,20 @@ const translation = {
title: 'Fehlerbehandlung',
tip: 'Ausnahmebehandlungsstrategie, die ausgelöst wird, wenn ein Knoten auf eine Ausnahme stößt.',
},
retry: {
retry: 'Wiederholen',
retryOnFailure: 'Wiederholen bei Fehler',
maxRetries: 'Max. Wiederholungen',
retryInterval: 'Wiederholungsintervall',
retryTimes: 'Wiederholen Sie {{times}} mal bei einem Fehler',
retrying: 'Wiederholung...',
retrySuccessful: 'Wiederholen erfolgreich',
retryFailed: 'Wiederholung fehlgeschlagen',
retryFailedTimes: '{{times}} fehlgeschlagene Wiederholungen',
times: 'mal',
ms: 'ms',
retries: '{{num}} Wiederholungen',
},
},
start: {
required: 'erforderlich',

View File

@ -329,6 +329,20 @@ const translation = {
tip: 'There are {{num}} nodes in the process running abnormally, please go to tracing to check the logs.',
},
},
retry: {
retry: 'Retry',
retryOnFailure: 'retry on failure',
maxRetries: 'max retries',
retryInterval: 'retry interval',
retryTimes: 'Retry {{times}} times on failure',
retrying: 'Retrying...',
retrySuccessful: 'Retry successful',
retryFailed: 'Retry failed',
retryFailedTimes: '{{times}} retries failed',
times: 'times',
ms: 'ms',
retries: '{{num}} Retries',
},
},
start: {
required: 'required',

View File

@ -31,6 +31,8 @@ const translation = {
manageInTools: 'Administrar en Herramientas',
emptyTitle: 'No hay herramientas de flujo de trabajo disponibles',
emptyTip: 'Ir a "Flujo de Trabajo -> Publicar como Herramienta"',
emptyTitleCustom: 'No hay herramienta personalizada disponible',
emptyTipCustom: 'Crear una herramienta personalizada',
},
createTool: {
title: 'Crear Herramienta Personalizada',

View File

@ -322,6 +322,20 @@ const translation = {
title: 'Manejo de errores',
tip: 'Estrategia de control de excepciones, que se desencadena cuando un nodo encuentra una excepción.',
},
retry: {
retryOnFailure: 'Volver a intentarlo en caso de error',
maxRetries: 'Número máximo de reintentos',
retryInterval: 'Intervalo de reintento',
retryTimes: 'Reintentar {{times}} veces en caso de error',
retrying: 'Reintentando...',
retrySuccessful: 'Volver a intentarlo correctamente',
retryFailed: 'Error en el reintento',
retryFailedTimes: '{{times}} reintentos fallidos',
times: 'veces',
ms: 'ms',
retries: '{{num}} Reintentos',
retry: 'Reintentar',
},
},
start: {
required: 'requerido',

View File

@ -31,6 +31,8 @@ const translation = {
manageInTools: 'مدیریت در ابزارها',
emptyTitle: 'هیچ ابزار جریان کاری در دسترس نیست',
emptyTip: 'به "جریان کاری -> انتشار به عنوان ابزار" بروید',
emptyTipCustom: 'ایجاد یک ابزار سفارشی',
emptyTitleCustom: 'هیچ ابزار سفارشی در دسترس نیست',
},
createTool: {
title: 'ایجاد ابزار سفارشی',

View File

@ -322,6 +322,20 @@ const translation = {
title: 'مدیریت خطا',
tip: 'استراتژی مدیریت استثنا، زمانی که یک گره با یک استثنا مواجه می شود، فعال می شود.',
},
retry: {
times: 'بار',
retryInterval: 'فاصله تلاش مجدد',
retryOnFailure: 'در مورد شکست دوباره امتحان کنید',
ms: 'ms',
retry: 'دوباره',
retries: '{{num}} تلاش های مجدد',
maxRetries: 'حداکثر تلاش مجدد',
retrying: 'تلاش مجدد...',
retryFailed: 'تلاش مجدد ناموفق بود',
retryTimes: '{{times}} بار در صورت شکست دوباره امتحان کنید',
retrySuccessful: 'امتحان مجدد با موفقیت انجام دهید',
retryFailedTimes: '{{times}} تلاش های مجدد ناموفق بود',
},
},
start: {
required: 'الزامی',

View File

@ -144,6 +144,8 @@ const translation = {
category: 'catégorie',
manageInTools: 'Gérer dans Outils',
emptyTip: 'Allez dans « Flux de travail -> Publier en tant quoutil »',
emptyTitleCustom: 'Aucun outil personnalisé disponible',
emptyTipCustom: 'Créer un outil personnalisé',
},
openInStudio: 'Ouvrir dans Studio',
customToolTip: 'En savoir plus sur les outils personnalisés Dify',

View File

@ -322,6 +322,20 @@ const translation = {
title: 'Gestion des erreurs',
tip: 'Stratégie de gestion des exceptions, déclenchée lorsquun nœud rencontre une exception.',
},
retry: {
retry: 'Réessayer',
retryOnFailure: 'Réessai en cas déchec',
maxRetries: 'Nombre maximal de tentatives',
retryInterval: 'intervalle de nouvelle tentative',
retryTimes: 'Réessayez {{times}} fois en cas déchec',
retrying: 'Réessayer...',
retrySuccessful: 'Réessai réussi',
retryFailed: 'Échec de la nouvelle tentative',
retryFailedTimes: '{{times}} les tentatives ont échoué',
times: 'fois',
ms: 'ms',
retries: '{{num}} Tentatives',
},
},
start: {
required: 'requis',

View File

@ -32,6 +32,8 @@ const translation = {
manageInTools: 'उपकरणों में प्रबंधित करें',
emptyTitle: 'कोई कार्यप्रवाह उपकरण उपलब्ध नहीं',
emptyTip: 'कार्यप्रवाह -> उपकरण के रूप में प्रकाशित पर जाएं',
emptyTipCustom: 'एक कस्टम टूल बनाएं',
emptyTitleCustom: 'कोई कस्टम टूल उपलब्ध नहीं है',
},
createTool: {
title: 'कस्टम उपकरण बनाएं',

View File

@ -334,6 +334,20 @@ const translation = {
title: 'त्रुटि हैंडलिंग',
tip: 'अपवाद हैंडलिंग रणनीति, ट्रिगर जब एक नोड एक अपवाद का सामना करता है।',
},
retry: {
times: 'गुणा',
ms: 'ms',
retryInterval: 'अंतराल का पुनः प्रयास करें',
retrying: 'पुनर्प्रयास।।।',
retryFailed: 'पुनः प्रयास विफल रहा',
retryFailedTimes: '{{times}} पुनः प्रयास विफल रहे',
retryTimes: 'विफलता पर {{times}} बार पुनः प्रयास करें',
retries: '{{num}} पुनर्प्रयास',
maxRetries: 'अधिकतम पुनः प्रयास करता है',
retrySuccessful: 'पुनः प्रयास सफल',
retry: 'पुनर्प्रयास',
retryOnFailure: 'विफलता पर पुनः प्रयास करें',
},
},
start: {
required: 'आवश्यक',

View File

@ -32,6 +32,8 @@ const translation = {
manageInTools: 'Gestisci in Strumenti',
emptyTitle: 'Nessun strumento di flusso di lavoro disponibile',
emptyTip: 'Vai a `Flusso di lavoro -> Pubblica come Strumento`',
emptyTitleCustom: 'Nessun attrezzo personalizzato disponibile',
emptyTipCustom: 'Creare uno strumento personalizzato',
},
createTool: {
title: 'Crea Strumento Personalizzato',

View File

@ -337,6 +337,20 @@ const translation = {
title: 'Gestione degli errori',
tip: 'Strategia di gestione delle eccezioni, attivata quando un nodo rileva un\'eccezione.',
},
retry: {
retry: 'Ripetere',
retryOnFailure: 'Riprova in caso di errore',
maxRetries: 'Numero massimo di tentativi',
retryInterval: 'Intervallo tentativi',
retryTimes: 'Riprova {{times}} volte in caso di errore',
retrying: 'Riprovare...',
retryFailedTimes: '{{times}} tentativi falliti',
times: 'tempi',
retries: '{{num}} Tentativi',
retrySuccessful: 'Riprova riuscito',
retryFailed: 'Nuovo tentativo non riuscito',
ms: 'ms',
},
},
start: {
required: 'richiesto',

View File

@ -31,6 +31,8 @@ const translation = {
manageInTools: 'ツールリストに移動して管理する',
emptyTitle: '利用可能なワークフローツールはありません',
emptyTip: '追加するには、「ワークフロー -> ツールとして公開 」に移動する',
emptyTitleCustom: 'カスタムツールはありません',
emptyTipCustom: 'カスタムツールの作成',
},
createTool: {
title: 'カスタムツールを作成する',

View File

@ -322,6 +322,20 @@ const translation = {
title: 'エラー処理',
tip: 'ノードが例外を検出したときにトリガーされる例外処理戦略。',
},
retry: {
retry: 'リトライ',
retryOnFailure: '失敗時の再試行',
maxRetries: '最大再試行回数',
retryInterval: '再試行間隔',
retrying: '再試行。。。',
retryFailed: '再試行に失敗しました',
times: '倍',
ms: 'ms',
retryTimes: '失敗時に{{times}}回再試行',
retrySuccessful: '再試行に成功しました',
retries: '{{num}} 回の再試行',
retryFailedTimes: '{{times}}回のリトライが失敗しました',
},
},
start: {
required: '必須',

View File

@ -31,6 +31,8 @@ const translation = {
manageInTools: '도구에서 관리',
emptyTitle: '사용 가능한 워크플로우 도구 없음',
emptyTip: '"워크플로우 -> 도구로 등록하기"로 이동',
emptyTipCustom: '사용자 지정 도구 만들기',
emptyTitleCustom: '사용 가능한 사용자 지정 도구가 없습니다.',
},
createTool: {
title: '커스텀 도구 만들기',

View File

@ -322,6 +322,20 @@ const translation = {
title: '오류 처리',
tip: '노드에 예외가 발생할 때 트리거되는 예외 처리 전략입니다.',
},
retry: {
retry: '재시도',
retryOnFailure: '실패 시 재시도',
maxRetries: '최대 재시도 횟수',
retryInterval: '재시도 간격',
retryTimes: '실패 시 {{times}}번 재시도',
retrying: '재시도...',
retrySuccessful: '재시도 성공',
retryFailed: '재시도 실패',
retryFailedTimes: '{{times}} 재시도 실패',
times: '배',
ms: 'ms',
retries: '{{num}} 재시도',
},
},
start: {
required: '필수',

View File

@ -148,6 +148,8 @@ const translation = {
add: 'dodawać',
emptyTitle: 'Brak dostępnego narzędzia do przepływu pracy',
emptyTip: 'Przejdź do "Przepływ pracy -> Opublikuj jako narzędzie"',
emptyTitleCustom: 'Brak dostępnego narzędzia niestandardowego',
emptyTipCustom: 'Tworzenie narzędzia niestandardowego',
},
openInStudio: 'Otwieranie w Studio',
customToolTip: 'Dowiedz się więcej o niestandardowych narzędziach Dify',

View File

@ -322,6 +322,20 @@ const translation = {
tip: 'Strategia obsługi wyjątków, wyzwalana, gdy węzeł napotka wyjątek.',
title: 'Obsługa błędów',
},
retry: {
retry: 'Ponów próbę',
maxRetries: 'Maksymalna liczba ponownych prób',
retryInterval: 'Interwał ponawiania prób',
retryTimes: 'Ponów próbę {{times}} razy w przypadku niepowodzenia',
retrying: 'Ponawianie...',
retrySuccessful: 'Ponawianie próby powiodło się',
retryFailed: 'Ponawianie próby nie powiodło się',
times: 'razy',
retries: '{{num}} ponownych prób',
retryOnFailure: 'Ponawianie próby w przypadku niepowodzenia',
retryFailedTimes: '{{times}} ponawianie prób nie powiodło się',
ms: 'Ms',
},
},
start: {
required: 'wymagane',

Some files were not shown because too many files have changed in this diff Show More