Compare commits

..

37 Commits

Author SHA1 Message Date
459f017de5 add build task for release/e-3.0.2 2025-07-11 16:11:41 +08:00
1458368047 fix: allow update plugin install settings (#22111) 2025-07-11 16:07:39 +08:00
41f4eb044d Merge branch 'fix/explore-tabs-change-failed' into fix/e-300 2025-06-30 17:45:59 +08:00
e979768949 fix: user cannot select 'Customer Service & Operations' category 2025-06-30 17:45:23 +08:00
745d67989b fix: check user permission to show 'open in explore' menu item at app list popover 2025-06-30 17:34:13 +08:00
a4aed673c3 fix: update i18n 'Web application' to 'web app' 2025-06-30 15:31:02 +08:00
47e5e1bc69 add build task 2025-06-30 14:08:20 +08:00
c2ec9d7b9a Merge branch 'fix/marketplace-source-link' into fix/e-300 2025-06-30 14:06:41 +08:00
384570ebfa fix: Get marketplace URL with specific function, preserving all search params. 2025-06-30 14:05:41 +08:00
37e19de7ab feat(inner-api/workspace): include tenant details in CreateWorkspace response (#21636) 2025-06-27 18:28:03 +08:00
28f5c37211 Add Env 'CELERY_SENTINEL_PASSWORD' for celery connect redis sentinel. (#21198) 2025-06-27 17:37:11 +08:00
38a704743c chore: Add missing svg icon sources (#21627) 2025-06-27 16:56:22 +08:00
87efe45240 feat(plugin): Add API endpoint for invoking LLM with structured output (#21624) 2025-06-27 15:57:44 +08:00
7236c4895b fix: annotation remove functionality Fixes #21448 (#21616)
Co-authored-by: siqiuchen <siqiu.chen2@dentsplysirona.com>
2025-06-27 15:45:24 +08:00
0cb00d5fd2 refactor: move structured output support outside LLM Node (#21565)
Co-authored-by: Novice <novice12185727@gmail.com>
2025-06-27 14:55:31 +08:00
cdb9eecbaf fix: Resolving conflicts caused by tablestore dependency on enum34 (#21605)
Co-authored-by: xiaozhiqing.xzq <xiaozhiqing.xzq@alibaba-inc.com>
2025-06-27 14:17:52 +08:00
ae8653beb0 style: decrease navbar z-index value from 30 to 15, fix style error (#21612) 2025-06-27 13:22:29 +08:00
787ad5ab38 feat: Refactor panel component, add adaptive width observer to optimize panel width management (#21576) 2025-06-27 10:50:33 +08:00
81fc49d78c fix: value_selector will be empty string (#21598) 2025-06-27 10:38:22 +08:00
7d9d670f90 feat: to add tag when tag input is unfocus (#21555) 2025-06-27 10:36:01 +08:00
fd41645f95 feat: Add display control logic for the variable inspection panel (#21539) 2025-06-27 10:22:39 +08:00
a06af88b26 Feat/api validate model provider (#21582)
Co-authored-by: crazywoola <427733928@qq.com>
2025-06-27 09:59:44 +08:00
33f0457a23 fix: wrong token number when using qa_model and answer is updated. (#21574) 2025-06-27 09:11:41 +08:00
cea6522122 feat: add DYNAMIC_SELECT parameter type for dynamic options in parameter entities (#21425) 2025-06-26 17:44:14 +08:00
ae00ba44db fix: fix create custom modal overlay add tool (#21553) 2025-06-26 16:45:45 +08:00
79fa3c7519 fix(web): optimize the pop logic of the tool selector (#21558) (#21559) 2025-06-26 16:45:01 +08:00
d2814650e6 feat: prevent input of non-numeric values in number input (#21562) 2025-06-26 16:43:26 +08:00
17722f581b feat: add tooltip to workflow run node name (#21564) 2025-06-26 16:42:48 +08:00
ad9eebd02d fix: update retrieval method cache (#21409) 2025-06-26 10:58:21 +08:00
cefb8e4218 chore: Simplify code logic (#21496)
Co-authored-by: 刘江波 <jiangbo721@163.com>
2025-06-26 10:09:52 +08:00
90aba77471 chore: remove unused code (#21497)
Co-authored-by: 刘江波 <jiangbo721@163.com>
2025-06-26 10:08:17 +08:00
785d4b3de7 feat: refactor: test_dataset unit tests #21499 (#21502) 2025-06-26 10:07:54 +08:00
6bb82f8ee0 Fix minor comment missing (#21517) 2025-06-26 10:06:49 +08:00
45dc0a43d3 fix: prompt editor insert context (#21526) 2025-06-26 09:58:58 +08:00
1610f62a28 fix: var inspect doc link error (#21515) 2025-06-25 20:21:51 +08:00
d454f09e13 feat: add a magic field in the cancel invite api response (#21505) 2025-06-25 18:37:56 +08:00
00f0b569cc Feat/kb index (#20868)
Co-authored-by: twwu <twwu@dify.ai>
2025-06-25 17:52:59 +08:00
85 changed files with 4791 additions and 4506 deletions

View File

@ -6,6 +6,7 @@ on:
- "main"
- "deploy/dev"
- "deploy/enterprise"
- "release/e-*"
tags:
- "*"

View File

@ -223,6 +223,10 @@ class CeleryConfig(DatabaseConfig):
default=None,
)
CELERY_SENTINEL_PASSWORD: Optional[str] = Field(
description="Password of the Redis Sentinel master.",
default=None,
)
CELERY_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
description="Timeout for Redis Sentinel socket operations in seconds.",
default=0.1,

View File

@ -85,6 +85,7 @@ class MemberInviteEmailApi(Resource):
return {
"result": "success",
"invitation_results": invitation_results,
"tenant_id": str(current_user.current_tenant.id),
}, 201
@ -110,7 +111,7 @@ class MemberCancelInviteApi(Resource):
except Exception as e:
raise ValueError(str(e))
return {"result": "success"}, 204
return {"result": "success", "tenant_id": str(current_user.current_tenant.id)}, 200
class MemberUpdateRoleApi(Resource):

View File

@ -13,6 +13,7 @@ from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.impl.exc import PluginDaemonClientSideError
from libs.login import login_required
from models.account import TenantPluginPermission
from services.plugin.plugin_parameter_service import PluginParameterService
from services.plugin.plugin_permission_service import PluginPermissionService
from services.plugin.plugin_service import PluginService
@ -497,6 +498,42 @@ class PluginFetchPermissionApi(Resource):
)
class PluginFetchDynamicSelectOptionsApi(Resource):
    """Workspace endpoint that resolves the options of a plugin's dynamic-select parameter.

    Backs parameters of type ``dynamic-select``, whose choices are only known after
    authorization (e.g. listing Slack channels). Admin/owner only.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        # check if the user is admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        tenant_id = current_user.current_tenant_id
        user_id = current_user.id

        # All identifying arguments are required query-string parameters.
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_id", type=str, required=True, location="args")
        parser.add_argument("provider", type=str, required=True, location="args")
        parser.add_argument("action", type=str, required=True, location="args")
        parser.add_argument("parameter", type=str, required=True, location="args")
        parser.add_argument("provider_type", type=str, required=True, location="args")
        args = parser.parse_args()

        try:
            # Delegate to the plugin daemon via the parameter service.
            options = PluginParameterService.get_dynamic_select_options(
                tenant_id,
                user_id,
                args["plugin_id"],
                args["provider"],
                args["action"],
                args["parameter"],
                args["provider_type"],
            )
        except PluginDaemonClientSideError as e:
            # Surface daemon-side client errors as a plain ValueError for the API layer.
            raise ValueError(e)

        return jsonable_encoder({"options": options})
api.add_resource(PluginDebuggingKeyApi, "/workspaces/current/plugin/debugging-key")
api.add_resource(PluginListApi, "/workspaces/current/plugin/list")
api.add_resource(PluginListLatestVersionsApi, "/workspaces/current/plugin/list/latest-versions")
@ -521,3 +558,5 @@ api.add_resource(PluginFetchMarketplacePkgApi, "/workspaces/current/plugin/marke
api.add_resource(PluginChangePermissionApi, "/workspaces/current/plugin/permission/change")
api.add_resource(PluginFetchPermissionApi, "/workspaces/current/plugin/permission/fetch")
api.add_resource(PluginFetchDynamicSelectOptionsApi, "/workspaces/current/plugin/parameters/dynamic-options")

View File

@ -17,6 +17,7 @@ from core.plugin.entities.request import (
RequestInvokeApp,
RequestInvokeEncrypt,
RequestInvokeLLM,
RequestInvokeLLMWithStructuredOutput,
RequestInvokeModeration,
RequestInvokeParameterExtractorNode,
RequestInvokeQuestionClassifierNode,
@ -47,6 +48,21 @@ class PluginInvokeLLMApi(Resource):
return length_prefixed_response(0xF, generator())
class PluginInvokeLLMWithStructuredOutputApi(Resource):
    """Inner API endpoint allowing plugins to invoke an LLM with structured (JSON) output.

    The response is streamed back to the plugin daemon as a length-prefixed event stream.
    """

    @setup_required
    @plugin_inner_api_only
    @get_user_tenant
    @plugin_data(payload_type=RequestInvokeLLMWithStructuredOutput)
    def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeLLMWithStructuredOutput):
        def generator():
            # Invoke the LLM on behalf of the plugin and convert the result
            # (single response or chunk stream) into an event stream.
            response = PluginModelBackwardsInvocation.invoke_llm_with_structured_output(
                user_model.id, tenant_model, payload
            )
            return PluginModelBackwardsInvocation.convert_to_event_stream(response)

        # 0xF is the message-type tag used for length-prefixed plugin responses.
        return length_prefixed_response(0xF, generator())
class PluginInvokeTextEmbeddingApi(Resource):
@setup_required
@plugin_inner_api_only
@ -291,6 +307,7 @@ class PluginFetchAppInfoApi(Resource):
api.add_resource(PluginInvokeLLMApi, "/invoke/llm")
api.add_resource(PluginInvokeLLMWithStructuredOutputApi, "/invoke/llm/structured-output")
api.add_resource(PluginInvokeTextEmbeddingApi, "/invoke/text-embedding")
api.add_resource(PluginInvokeRerankApi, "/invoke/rerank")
api.add_resource(PluginInvokeTTSApi, "/invoke/tts")

View File

@ -29,7 +29,19 @@ class EnterpriseWorkspace(Resource):
tenant_was_created.send(tenant)
return {"message": "enterprise workspace created."}
resp = {
"id": tenant.id,
"name": tenant.name,
"plan": tenant.plan,
"status": tenant.status,
"created_at": tenant.created_at.isoformat() + "Z" if tenant.created_at else None,
"updated_at": tenant.updated_at.isoformat() + "Z" if tenant.updated_at else None,
}
return {
"message": "enterprise workspace created.",
"tenant": resp,
}
class EnterpriseWorkspaceNoOwnerEmail(Resource):

View File

@ -133,6 +133,22 @@ class DatasetListApi(DatasetApiResource):
parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
args = parser.parse_args()
if args.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
tenant_id, args.get("embedding_model_provider"), args.get("embedding_model")
)
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
try:
dataset = DatasetService.create_empty_dataset(
tenant_id=tenant_id,
@ -265,10 +281,20 @@ class DatasetApi(DatasetApiResource):
data = request.get_json()
# check embedding model setting
if data.get("indexing_technique") == "high_quality":
if data.get("indexing_technique") == "high_quality" or data.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
)
if (
data.get("retrieval_model")
and data.get("retrieval_model").get("reranking_model")
and data.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
dataset.tenant_id,
data.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
data.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
# The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
DatasetPermissionService.check_permission(

View File

@ -29,7 +29,7 @@ from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from libs.login import current_user
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
from services.file_service import FileService
@ -59,6 +59,7 @@ class DocumentAddByTextApi(DatasetApiResource):
parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
@ -74,6 +75,21 @@ class DocumentAddByTextApi(DatasetApiResource):
if text is None or name is None:
raise ValueError("Both 'text' and 'name' must be non-null values.")
if args.get("embedding_model_provider"):
DatasetService.check_embedding_model_setting(
tenant_id, args.get("embedding_model_provider"), args.get("embedding_model")
)
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
upload_file = FileService.upload_text(text=str(text), text_name=str(name))
data_source = {
"type": "upload_file",
@ -124,6 +140,17 @@ class DocumentUpdateByTextApi(DatasetApiResource):
if not dataset:
raise ValueError("Dataset does not exist.")
if (
args.get("retrieval_model")
and args.get("retrieval_model").get("reranking_model")
and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
)
# indexing_technique is already set in dataset since this is an update
args["indexing_technique"] = dataset.indexing_technique
@ -188,6 +215,21 @@ class DocumentAddByFileApi(DatasetApiResource):
raise ValueError("indexing_technique is required.")
args["indexing_technique"] = indexing_technique
if "embedding_model_provider" in args:
DatasetService.check_embedding_model_setting(
tenant_id, args["embedding_model_provider"], args["embedding_model"]
)
if (
"retrieval_model" in args
and args["retrieval_model"].get("reranking_model")
and args["retrieval_model"].get("reranking_model").get("reranking_provider_name")
):
DatasetService.check_reranking_model_setting(
tenant_id,
args["retrieval_model"].get("reranking_model").get("reranking_provider_name"),
args["retrieval_model"].get("reranking_model").get("reranking_model_name"),
)
# save file info
file = request.files["file"]
# check file

View File

@ -36,7 +36,6 @@ from libs.flask_utils import preserve_flask_contexts
from models import Account, App, Conversation, EndUser, Message, Workflow, WorkflowNodeExecutionTriggeredFrom
from models.enums import WorkflowRunTriggeredFrom
from services.conversation_service import ConversationService
from services.errors.message import MessageNotExistsError
from services.workflow_draft_variable_service import DraftVarLoader, WorkflowDraftVariableService
logger = logging.getLogger(__name__)
@ -480,8 +479,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
# get conversation and message
conversation = self._get_conversation(conversation_id)
message = self._get_message(message_id)
if message is None:
raise MessageNotExistsError("Message not exists")
# chatbot app
runner = AdvancedChatAppRunner(

View File

@ -26,7 +26,6 @@ from factories import file_factory
from libs.flask_utils import preserve_flask_contexts
from models import Account, App, EndUser
from services.conversation_service import ConversationService
from services.errors.message import MessageNotExistsError
logger = logging.getLogger(__name__)
@ -238,8 +237,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
# get conversation and message
conversation = self._get_conversation(conversation_id)
message = self._get_message(message_id)
if message is None:
raise MessageNotExistsError("Message not exists")
# chatbot app
runner = AgentChatAppRunner()

View File

@ -25,7 +25,6 @@ from factories import file_factory
from models.account import Account
from models.model import App, EndUser
from services.conversation_service import ConversationService
from services.errors.message import MessageNotExistsError
logger = logging.getLogger(__name__)
@ -224,8 +223,6 @@ class ChatAppGenerator(MessageBasedAppGenerator):
# get conversation and message
conversation = self._get_conversation(conversation_id)
message = self._get_message(message_id)
if message is None:
raise MessageNotExistsError("Message not exists")
# chatbot app
runner = ChatAppRunner()

View File

@ -201,8 +201,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
try:
# get message
message = self._get_message(message_id)
if message is None:
raise MessageNotExistsError()
# chatbot app
runner = CompletionAppRunner()

View File

@ -29,6 +29,7 @@ from models.enums import CreatorUserRole
from models.model import App, AppMode, AppModelConfig, Conversation, EndUser, Message, MessageFile
from services.errors.app_model_config import AppModelConfigBrokenError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
logger = logging.getLogger(__name__)
@ -251,7 +252,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
return introduction or ""
def _get_conversation(self, conversation_id: str):
def _get_conversation(self, conversation_id: str) -> Conversation:
"""
Get conversation by conversation id
:param conversation_id: conversation id
@ -260,11 +261,11 @@ class MessageBasedAppGenerator(BaseAppGenerator):
conversation = db.session.query(Conversation).filter(Conversation.id == conversation_id).first()
if not conversation:
raise ConversationNotExistsError()
raise ConversationNotExistsError("Conversation not exists")
return conversation
def _get_message(self, message_id: str) -> Optional[Message]:
def _get_message(self, message_id: str) -> Message:
"""
Get message by message id
:param message_id: message id
@ -272,4 +273,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
"""
message = db.session.query(Message).filter(Message.id == message_id).first()
if message is None:
raise MessageNotExistsError("Message not exists")
return message

View File

@ -15,6 +15,11 @@ class CommonParameterType(StrEnum):
MODEL_SELECTOR = "model-selector"
TOOLS_SELECTOR = "array[tools]"
# Dynamic select parameter
# Once you are not sure about the available options until authorization is done
# eg: Select a Slack channel from a Slack workspace
DYNAMIC_SELECT = "dynamic-select"
# TOOL_SELECTOR = "tool-selector"

View File

@ -534,7 +534,7 @@ class IndexingRunner:
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX:
if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX and dataset.indexing_technique == "economy":
# create keyword index
create_keyword_thread = threading.Thread(
target=self._process_keyword_index,
@ -572,7 +572,7 @@ class IndexingRunner:
for future in futures:
tokens += future.result()
if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX:
if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX and dataset.indexing_technique == "economy":
create_keyword_thread.join()
indexing_end_at = time.perf_counter()

View File

@ -0,0 +1,374 @@
import json
from collections.abc import Generator, Mapping, Sequence
from copy import deepcopy
from enum import StrEnum
from typing import Any, Literal, Optional, cast, overload
import json_repair
from pydantic import TypeAdapter, ValidationError
from core.llm_generator.output_parser.errors import OutputParserError
from core.llm_generator.prompts import STRUCTURED_OUTPUT_PROMPT
from core.model_manager import ModelInstance
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkDelta,
LLMResultChunkWithStructuredOutput,
LLMResultWithStructuredOutput,
)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
PromptMessageTool,
SystemPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, ParameterRule
class ResponseFormat(StrEnum):
    """Constants for model response formats"""

    # Values are matched against each model's `response_format` parameter-rule options.
    JSON_SCHEMA = "json_schema"  # model's structured output mode. some model like gemini, gpt-4o, support this mode.
    JSON = "JSON"  # model's json mode. some model like claude support this mode.
    JSON_OBJECT = "json_object"  # json mode's another alias. some model like deepseek-chat, qwen use this alias.
class SpecialModelType(StrEnum):
    """Constants for identifying model types"""

    # Substring markers checked against the model name / provider name to apply
    # provider-specific JSON-schema formatting quirks.
    GEMINI = "gemini"
    OLLAMA = "ollama"
@overload
def invoke_llm_with_structured_output(
    provider: str,
    model_schema: AIModelEntity,
    model_instance: ModelInstance,
    prompt_messages: Sequence[PromptMessage],
    json_schema: Mapping[str, Any],
    model_parameters: Optional[Mapping] = None,
    tools: Sequence[PromptMessageTool] | None = None,
    stop: Optional[list[str]] = None,
    stream: Literal[True] = True,
    user: Optional[str] = None,
    callbacks: Optional[list[Callback]] = None,
) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ...


@overload
def invoke_llm_with_structured_output(
    provider: str,
    model_schema: AIModelEntity,
    model_instance: ModelInstance,
    prompt_messages: Sequence[PromptMessage],
    json_schema: Mapping[str, Any],
    model_parameters: Optional[Mapping] = None,
    tools: Sequence[PromptMessageTool] | None = None,
    stop: Optional[list[str]] = None,
    stream: Literal[False] = False,
    user: Optional[str] = None,
    callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput: ...


@overload
def invoke_llm_with_structured_output(
    provider: str,
    model_schema: AIModelEntity,
    model_instance: ModelInstance,
    prompt_messages: Sequence[PromptMessage],
    json_schema: Mapping[str, Any],
    model_parameters: Optional[Mapping] = None,
    tools: Sequence[PromptMessageTool] | None = None,
    stop: Optional[list[str]] = None,
    stream: bool = True,
    user: Optional[str] = None,
    callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: ...


def invoke_llm_with_structured_output(
    provider: str,
    model_schema: AIModelEntity,
    model_instance: ModelInstance,
    prompt_messages: Sequence[PromptMessage],
    json_schema: Mapping[str, Any],
    model_parameters: Optional[Mapping] = None,
    tools: Sequence[PromptMessageTool] | None = None,
    stop: Optional[list[str]] = None,
    stream: bool = True,
    user: Optional[str] = None,
    callbacks: Optional[list[Callback]] = None,
) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]:
    """
    Invoke large language model with structured output

    1. This method invokes model_instance.invoke_llm with json_schema
    2. Try to parse the result as structured output

    :param provider: model provider name (used for provider-specific schema tweaks)
    :param model_schema: model schema entity describing the model's capabilities
    :param model_instance: model instance used to perform the actual invocation
    :param prompt_messages: prompt messages
    :param json_schema: json schema
    :param model_parameters: model parameters
    :param tools: tools for tool calling
    :param stop: stop words
    :param stream: is stream response
    :param user: unique user id
    :param callbacks: callbacks
    :return: full response or stream response chunk generator result
    :raises OutputParserError: if the model output cannot be parsed as JSON
    """
    # Work on a mutable copy so the caller's mapping is never modified.
    model_parameters_with_json_schema: dict[str, Any] = {
        **(model_parameters or {}),
    }
    if model_schema.support_structure_output:
        # Native JSON-schema support: pass the schema through model parameters.
        # NOTE: the helper mutates the dict in place AND returns it; keep the
        # returned reference (the original code discarded it into a dead name,
        # which only worked by relying on the in-place mutation).
        model_parameters_with_json_schema = _handle_native_json_schema(
            provider, model_schema, json_schema, model_parameters_with_json_schema, model_schema.parameter_rules
        )
    else:
        # Set appropriate response format based on model capabilities
        _set_response_format(model_parameters_with_json_schema, model_schema.parameter_rules)

        # No native support: inject the schema into the system prompt instead.
        prompt_messages = _handle_prompt_based_schema(
            prompt_messages=prompt_messages,
            structured_output_schema=json_schema,
        )

    llm_result = model_instance.invoke_llm(
        prompt_messages=list(prompt_messages),
        model_parameters=model_parameters_with_json_schema,
        tools=tools,
        stop=stop,
        stream=stream,
        user=user,
        callbacks=callbacks,
    )

    if isinstance(llm_result, LLMResult):
        # Non-streaming: parse the complete message content in one pass.
        if not isinstance(llm_result.message.content, str):
            raise OutputParserError(
                f"Failed to parse structured output, LLM result is not a string: {llm_result.message.content}"
            )
        return LLMResultWithStructuredOutput(
            structured_output=_parse_structured_output(llm_result.message.content),
            model=llm_result.model,
            message=llm_result.message,
            usage=llm_result.usage,
            system_fingerprint=llm_result.system_fingerprint,
            prompt_messages=llm_result.prompt_messages,
        )
    else:
        # Streaming: forward every chunk unchanged, accumulate the text, then
        # emit one final chunk carrying the parsed structured output.
        def generator() -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
            result_text: str = ""
            prompt_messages: Sequence[PromptMessage] = []
            system_fingerprint: Optional[str] = None
            for event in llm_result:
                if isinstance(event, LLMResultChunk):
                    if isinstance(event.delta.message.content, str):
                        result_text += event.delta.message.content
                    prompt_messages = event.prompt_messages
                    system_fingerprint = event.system_fingerprint
                    yield LLMResultChunkWithStructuredOutput(
                        model=model_schema.model,
                        prompt_messages=prompt_messages,
                        system_fingerprint=system_fingerprint,
                        delta=event.delta,
                    )
            # Trailing chunk: empty delta, structured output attached.
            yield LLMResultChunkWithStructuredOutput(
                structured_output=_parse_structured_output(result_text),
                model=model_schema.model,
                prompt_messages=prompt_messages,
                system_fingerprint=system_fingerprint,
                delta=LLMResultChunkDelta(
                    index=0,
                    message=AssistantPromptMessage(content=""),
                    usage=None,
                    finish_reason=None,
                ),
            )

        return generator()
def _handle_native_json_schema(
    provider: str,
    model_schema: AIModelEntity,
    structured_output_schema: Mapping,
    model_parameters: dict,
    rules: list[ParameterRule],
) -> dict:
    """
    Handle structured output for models with native JSON schema support.

    :param provider: model provider name
    :param model_schema: model schema entity
    :param structured_output_schema: the JSON schema requested by the caller
    :param model_parameters: Model parameters to update (mutated in place)
    :param rules: Model parameter rules
    :return: Updated model parameters with JSON schema configuration
    """
    # Normalize the schema for this provider/model before serializing it.
    prepared = _prepare_schema_for_model(provider, model_schema, structured_output_schema)
    model_parameters["json_schema"] = json.dumps(prepared, ensure_ascii=False)

    # Switch the model into json_schema mode when its parameter rules allow it.
    json_schema_mode = ResponseFormat.JSON_SCHEMA.value
    if any(rule.name == "response_format" and json_schema_mode in rule.options for rule in rules):
        model_parameters["response_format"] = json_schema_mode

    return model_parameters
def _set_response_format(model_parameters: dict, rules: list) -> None:
    """
    Set the appropriate response format parameter based on model rules.

    Prefers JSON mode over the json_object alias when both are offered.

    :param model_parameters: Model parameters to update (mutated in place)
    :param rules: Model parameter rules
    """
    # Candidate formats in priority order; the first one supported wins.
    preferred_formats = (ResponseFormat.JSON.value, ResponseFormat.JSON_OBJECT.value)
    for rule in rules:
        if rule.name != "response_format":
            continue
        for fmt in preferred_formats:
            if fmt in rule.options:
                model_parameters["response_format"] = fmt
                break
def _handle_prompt_based_schema(
    prompt_messages: Sequence[PromptMessage], structured_output_schema: Mapping
) -> list[PromptMessage]:
    """
    Handle structured output for models without native JSON schema support.

    This function modifies the prompt messages to include schema-based output requirements:
    the schema is rendered into the structured-output prompt, merged with any existing
    system message, and placed first in the returned message list.

    Args:
        prompt_messages: Original sequence of prompt messages
        structured_output_schema: JSON schema the model should conform to

    Returns:
        list[PromptMessage]: Updated prompt messages with structured output requirements
    """
    # Render the schema into the instruction template.
    schema_text = json.dumps(structured_output_schema, ensure_ascii=False)
    schema_prompt = STRUCTURED_OUTPUT_PROMPT.replace("{{schema}}", schema_text)

    # Locate an existing system message, if any, so its content can be preserved.
    existing_system = next((msg for msg in prompt_messages if isinstance(msg, SystemPromptMessage)), None)

    if existing_system is not None and isinstance(existing_system.content, str):
        merged_content = f"{schema_prompt}\n\n{existing_system.content}"
    else:
        merged_content = schema_prompt

    # Rebuild the message list: new system message first, everything else after.
    non_system = [msg for msg in prompt_messages if not isinstance(msg, SystemPromptMessage)]
    return [SystemPromptMessage(content=merged_content), *non_system]
def _parse_structured_output(result_text: str) -> Mapping[str, Any]:
    """Parse LLM output text into a JSON object, repairing malformed JSON if needed.

    First attempts strict JSON validation via pydantic; on failure falls back to
    json_repair, which tolerates common LLM artifacts (e.g. reasoning-model
    preambles). Raises OutputParserError if no dict can be recovered.
    """
    structured_output: Mapping[str, Any] = {}
    parsed: Mapping[str, Any] = {}
    try:
        # Strict path: valid JSON that must be an object.
        # NOTE: OutputParserError raised here is NOT caught by the except below
        # (which only handles ValidationError), so it propagates to the caller.
        parsed = TypeAdapter(Mapping).validate_json(result_text)
        if not isinstance(parsed, dict):
            raise OutputParserError(f"Failed to parse structured output: {result_text}")
        structured_output = parsed
    except ValidationError:
        # if the result_text is not a valid json, try to repair it
        temp_parsed = json_repair.loads(result_text)
        if not isinstance(temp_parsed, dict):
            # handle reasoning model like deepseek-r1 got '<think>\n\n</think>\n' prefix
            if isinstance(temp_parsed, list):
                # Take the first dict element; default to {} if none exists.
                temp_parsed = next((item for item in temp_parsed if isinstance(item, dict)), {})
            else:
                raise OutputParserError(f"Failed to parse structured output: {result_text}")
        structured_output = cast(dict, temp_parsed)
    return structured_output
def _prepare_schema_for_model(provider: str, model_schema: AIModelEntity, schema: Mapping) -> dict:
    """
    Prepare JSON schema based on model requirements.

    Different models have different requirements for JSON schema formatting.
    This function handles these differences.

    :param provider: model provider name
    :param model_schema: model schema entity
    :param schema: The original JSON schema
    :return: Processed schema compatible with the current model
    """
    # Deep copy so the caller's schema is never mutated.
    working_copy = dict(deepcopy(schema))

    # Boolean types are rewritten to string types (common model requirement).
    convert_boolean_to_string(working_copy)

    # Gemini rejects additionalProperties and takes the bare schema.
    if SpecialModelType.GEMINI in model_schema.model:
        remove_additional_properties(working_copy)
        return working_copy

    # Ollama also takes the bare schema, unmodified.
    if SpecialModelType.OLLAMA in provider:
        return working_copy

    # Default format: wrap with a name field.
    return {"schema": working_copy, "name": "llm_response"}
def remove_additional_properties(schema: dict) -> None:
    """
    Remove additionalProperties fields from JSON schema.

    Used for models like Gemini that don't support this property.
    Operates in place on the given schema and all nested dicts (including
    dicts inside lists).

    :param schema: JSON schema to modify in-place
    """
    if not isinstance(schema, dict):
        return

    # Iterative worklist traversal instead of recursion.
    pending: list[dict] = [schema]
    while pending:
        node = pending.pop()
        node.pop("additionalProperties", None)
        for value in node.values():
            if isinstance(value, dict):
                pending.append(value)
            elif isinstance(value, list):
                pending.extend(item for item in value if isinstance(item, dict))
def convert_boolean_to_string(schema: dict) -> None:
    """
    Convert boolean type specifications to string in JSON schema.

    Walks the whole schema in place, rewriting every `"type": "boolean"`
    to `"type": "string"`, including dicts nested inside lists.

    :param schema: JSON schema to modify in-place
    """
    if not isinstance(schema, dict):
        return

    # Iterative worklist traversal instead of recursion.
    queue: list[dict] = [schema]
    while queue:
        node = queue.pop()
        if node.get("type") == "boolean":
            node["type"] = "string"
        for value in node.values():
            if isinstance(value, dict):
                queue.append(value)
            elif isinstance(value, list):
                queue.extend(item for item in value if isinstance(item, dict))

View File

@ -291,3 +291,21 @@ Your task is to convert simple user descriptions into properly formatted JSON Sc
Now, generate a JSON Schema based on my description
""" # noqa: E501
STRUCTURED_OUTPUT_PROMPT = """Youre a helpful AI assistant. You could answer questions and output in JSON format.
constraints:
- You must output in JSON format.
- Do not output boolean value, use string type instead.
- Do not output integer or float value, use number type instead.
eg:
Here is the JSON schema:
{"additionalProperties": false, "properties": {"age": {"type": "number"}, "name": {"type": "string"}}, "required": ["name", "age"], "type": "object"}
Here is the user's question:
My name is John Doe and I am 30 years old.
output:
{"name": "John Doe", "age": 30}
Here is the JSON schema:
{{schema}}
""" # noqa: E501

View File

@ -1,7 +1,7 @@
from collections.abc import Sequence
from collections.abc import Mapping, Sequence
from decimal import Decimal
from enum import StrEnum
from typing import Optional
from typing import Any, Optional
from pydantic import BaseModel, Field
@ -101,6 +101,20 @@ class LLMResult(BaseModel):
system_fingerprint: Optional[str] = None
class LLMStructuredOutput(BaseModel):
"""
Model class for llm structured output.
"""
structured_output: Optional[Mapping[str, Any]] = None
class LLMResultWithStructuredOutput(LLMResult, LLMStructuredOutput):
"""
Model class for llm result with structured output.
"""
class LLMResultChunkDelta(BaseModel):
"""
Model class for llm result chunk delta.
@ -123,6 +137,12 @@ class LLMResultChunk(BaseModel):
delta: LLMResultChunkDelta
class LLMResultChunkWithStructuredOutput(LLMResultChunk, LLMStructuredOutput):
    """
    Model class for llm result chunk with structured output.

    Streaming-mode counterpart: a single result chunk extended with the
    structured-output mixin.
    """
class NumTokensResult(PriceInfo):
"""
Model class for number of tokens result.

View File

@ -2,8 +2,15 @@ import tempfile
from binascii import hexlify, unhexlify
from collections.abc import Generator
from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output
from core.model_manager import ModelManager
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkDelta,
LLMResultChunkWithStructuredOutput,
LLMResultWithStructuredOutput,
)
from core.model_runtime.entities.message_entities import (
PromptMessage,
SystemPromptMessage,
@ -12,6 +19,7 @@ from core.model_runtime.entities.message_entities import (
from core.plugin.backwards_invocation.base import BaseBackwardsInvocation
from core.plugin.entities.request import (
RequestInvokeLLM,
RequestInvokeLLMWithStructuredOutput,
RequestInvokeModeration,
RequestInvokeRerank,
RequestInvokeSpeech2Text,
@ -81,6 +89,72 @@ class PluginModelBackwardsInvocation(BaseBackwardsInvocation):
return handle_non_streaming(response)
@classmethod
def invoke_llm_with_structured_output(
    cls, user_id: str, tenant: Tenant, payload: RequestInvokeLLMWithStructuredOutput
):
    """
    invoke llm with structured output

    Resolves the requested model for the tenant, delegates to the
    ``invoke_llm_with_structured_output`` output-parser helper, and deducts
    LLM quota from the tenant as usage information becomes available.

    Returns a generator of LLMResultChunkWithStructuredOutput in both the
    streaming and non-streaming cases; a blocking result is wrapped in a
    single synthetic chunk so callers can treat both modes uniformly.

    :raises ValueError: if no model schema exists for the requested model
    """
    model_instance = ModelManager().get_model_instance(
        tenant_id=tenant.id,
        provider=payload.provider,
        model_type=payload.model_type,
        model=payload.model,
    )

    model_schema = model_instance.model_type_instance.get_model_schema(payload.model, model_instance.credentials)

    if not model_schema:
        raise ValueError(f"Model schema not found for {payload.model}")

    response = invoke_llm_with_structured_output(
        provider=payload.provider,
        model_schema=model_schema,
        model_instance=model_instance,
        prompt_messages=payload.prompt_messages,
        json_schema=payload.structured_output_schema,
        tools=payload.tools,
        stop=payload.stop,
        # Default to streaming when the caller did not specify.
        stream=True if payload.stream is None else payload.stream,
        user=user_id,
        model_parameters=payload.completion_params,
    )

    if isinstance(response, Generator):

        def handle() -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
            for chunk in response:
                if chunk.delta.usage:
                    # Deduct quota whenever a chunk reports usage
                    # (typically only the final chunk does — TODO confirm).
                    llm_utils.deduct_llm_quota(
                        tenant_id=tenant.id, model_instance=model_instance, usage=chunk.delta.usage
                    )
                # Strip echoed prompt messages to keep the streamed payload small;
                # the caller already knows what it sent.
                chunk.prompt_messages = []
                yield chunk

        return handle()
    else:
        if response.usage:
            llm_utils.deduct_llm_quota(tenant_id=tenant.id, model_instance=model_instance, usage=response.usage)

        def handle_non_streaming(
            response: LLMResultWithStructuredOutput,
        ) -> Generator[LLMResultChunkWithStructuredOutput, None, None]:
            # Wrap the blocking result in one synthetic chunk (index 0,
            # empty finish_reason) so downstream code has a single code path.
            yield LLMResultChunkWithStructuredOutput(
                model=response.model,
                prompt_messages=[],
                system_fingerprint=response.system_fingerprint,
                structured_output=response.structured_output,
                delta=LLMResultChunkDelta(
                    index=0,
                    message=response.message,
                    usage=response.usage,
                    finish_reason="",
                ),
            )

        return handle_non_streaming(response)
@classmethod
def invoke_text_embedding(cls, user_id: str, tenant: Tenant, payload: RequestInvokeTextEmbedding):
"""

View File

@ -10,6 +10,9 @@ from core.tools.entities.common_entities import I18nObject
class PluginParameterOption(BaseModel):
value: str = Field(..., description="The value of the option")
label: I18nObject = Field(..., description="The label of the option")
icon: Optional[str] = Field(
default=None, description="The icon of the option, can be a url or a base64 encoded image"
)
@field_validator("value", mode="before")
@classmethod
@ -35,6 +38,7 @@ class PluginParameterType(enum.StrEnum):
APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
DYNAMIC_SELECT = CommonParameterType.DYNAMIC_SELECT.value
# deprecated, should not use.
SYSTEM_FILES = CommonParameterType.SYSTEM_FILES.value

View File

@ -1,4 +1,4 @@
from collections.abc import Mapping
from collections.abc import Mapping, Sequence
from datetime import datetime
from enum import StrEnum
from typing import Any, Generic, Optional, TypeVar
@ -9,6 +9,7 @@ from core.agent.plugin_entities import AgentProviderEntityWithPlugin
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.plugin.entities.base import BasePluginEntity
from core.plugin.entities.parameters import PluginParameterOption
from core.plugin.entities.plugin import PluginDeclaration, PluginEntity
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import ToolProviderEntityWithPlugin
@ -186,3 +187,7 @@ class PluginOAuthCredentialsResponse(BaseModel):
class PluginListResponse(BaseModel):
list: list[PluginEntity]
total: int
class PluginDynamicSelectOptionsResponse(BaseModel):
    """Response payload returned by the plugin daemon for a dynamic-select parameter."""

    options: Sequence[PluginParameterOption] = Field(description="The options of the dynamic select.")

View File

@ -82,6 +82,16 @@ class RequestInvokeLLM(BaseRequestInvokeModel):
return v
class RequestInvokeLLMWithStructuredOutput(RequestInvokeLLM):
    """
    Request to invoke LLM with structured output

    Extends the plain LLM invocation request with a JSON Schema describing
    the structure the model output must conform to.
    """

    # JSON Schema the structured output must follow; an empty dict means
    # no schema constraint was provided by the caller.
    structured_output_schema: dict[str, Any] = Field(
        default_factory=dict, description="The schema of the structured output in JSON schema format"
    )
class RequestInvokeTextEmbedding(BaseRequestInvokeModel):
"""
Request to invoke text embedding

View File

@ -0,0 +1,45 @@
from collections.abc import Mapping
from typing import Any
from core.plugin.entities.plugin import GenericProviderID
from core.plugin.entities.plugin_daemon import PluginDynamicSelectOptionsResponse
from core.plugin.impl.base import BasePluginClient
class DynamicSelectClient(BasePluginClient):
    def fetch_dynamic_select_options(
        self,
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        action: str,
        credentials: Mapping[str, Any],
        parameter: str,
    ) -> PluginDynamicSelectOptionsResponse:
        """
        Fetch dynamic select options for a plugin parameter.

        :param tenant_id: tenant the plugin is installed in
        :param user_id: user on whose behalf the options are fetched
        :param plugin_id: plugin identifier, sent as the ``X-Plugin-ID`` header
        :param provider: provider reference; normalized through GenericProviderID
        :param action: provider action the parameter belongs to
        :param credentials: provider credentials forwarded to the plugin
        :param parameter: name of the dynamic-select parameter
        :return: the first options response produced by the plugin daemon
        :raises ValueError: if the daemon stream yields no response at all
        """
        response = self._request_with_plugin_daemon_response_stream(
            "POST",
            f"plugin/{tenant_id}/dispatch/dynamic_select/fetch_parameter_options",
            PluginDynamicSelectOptionsResponse,
            data={
                "user_id": user_id,
                "data": {
                    "provider": GenericProviderID(provider).provider_name,
                    "credentials": credentials,
                    "provider_action": action,
                    "parameter": parameter,
                },
            },
            headers={
                "X-Plugin-ID": plugin_id,
                "Content-Type": "application/json",
            },
        )
        # The daemon replies as a stream; only the first payload is used here.
        for options in response:
            return options
        raise ValueError("Plugin service returned no options")

View File

@ -76,6 +76,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor):
if dataset.indexing_technique == "high_quality":
vector = Vector(dataset)
vector.create(documents)
with_keywords = False
if with_keywords:
keywords_list = kwargs.get("keywords_list")
keyword = Keyword(dataset)
@ -91,6 +92,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor):
vector.delete_by_ids(node_ids)
else:
vector.delete()
with_keywords = False
if with_keywords:
keyword = Keyword(dataset)
if node_ids:

View File

@ -240,6 +240,7 @@ class ToolParameter(PluginParameter):
FILES = PluginParameterType.FILES.value
APP_SELECTOR = PluginParameterType.APP_SELECTOR.value
MODEL_SELECTOR = PluginParameterType.MODEL_SELECTOR.value
DYNAMIC_SELECT = PluginParameterType.DYNAMIC_SELECT.value
# deprecated, should not use.
SYSTEM_FILES = PluginParameterType.SYSTEM_FILES.value

View File

@ -86,6 +86,7 @@ class ProviderConfigEncrypter(BaseModel):
cached_credentials = cache.get()
if cached_credentials:
return cached_credentials
data = self._deep_copy(data)
# get fields need to be decrypted
fields = dict[str, BasicProviderConfig]()

View File

@ -5,11 +5,11 @@ import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast
import json_repair
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.llm_generator.output_parser.errors import OutputParserError
from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
@ -18,7 +18,13 @@ from core.model_runtime.entities import (
PromptMessageContentType,
TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMUsage
from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkWithStructuredOutput,
LLMStructuredOutput,
LLMUsage,
)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageContentUnionTypes,
@ -31,7 +37,6 @@ from core.model_runtime.entities.model_entities import (
ModelFeature,
ModelPropertyKey,
ModelType,
ParameterRule,
)
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
@ -62,11 +67,6 @@ from core.workflow.nodes.event import (
RunRetrieverResourceEvent,
RunStreamChunkEvent,
)
from core.workflow.utils.structured_output.entities import (
ResponseFormat,
SpecialModelType,
)
from core.workflow.utils.structured_output.prompt import STRUCTURED_OUTPUT_PROMPT
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from . import llm_utils
@ -143,12 +143,6 @@ class LLMNode(BaseNode[LLMNodeData]):
return "1"
def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
def process_structured_output(text: str) -> Optional[dict[str, Any]]:
"""Process structured output if enabled"""
if not self.node_data.structured_output_enabled or not self.node_data.structured_output:
return None
return self._parse_structured_output(text)
node_inputs: Optional[dict[str, Any]] = None
process_data = None
result_text = ""
@ -244,6 +238,8 @@ class LLMNode(BaseNode[LLMNodeData]):
stop=stop,
)
structured_output: LLMStructuredOutput | None = None
for event in generator:
if isinstance(event, RunStreamChunkEvent):
yield event
@ -254,10 +250,12 @@ class LLMNode(BaseNode[LLMNodeData]):
# deduct quota
llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
break
elif isinstance(event, LLMStructuredOutput):
structured_output = event
outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}
structured_output = process_structured_output(result_text)
if structured_output:
outputs["structured_output"] = structured_output
outputs["structured_output"] = structured_output.structured_output
if self._file_outputs is not None:
outputs["files"] = ArrayFileSegment(value=self._file_outputs)
@ -302,20 +300,40 @@ class LLMNode(BaseNode[LLMNodeData]):
model_instance: ModelInstance,
prompt_messages: Sequence[PromptMessage],
stop: Optional[Sequence[str]] = None,
) -> Generator[NodeEvent, None, None]:
invoke_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
) -> Generator[NodeEvent | LLMStructuredOutput, None, None]:
model_schema = model_instance.model_type_instance.get_model_schema(
node_data_model.name, model_instance.credentials
)
if not model_schema:
raise ValueError(f"Model schema not found for {node_data_model.name}")
if self.node_data.structured_output_enabled:
output_schema = self._fetch_structured_output_schema()
invoke_result = invoke_llm_with_structured_output(
provider=model_instance.provider,
model_schema=model_schema,
model_instance=model_instance,
prompt_messages=prompt_messages,
json_schema=output_schema,
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
)
else:
invoke_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=node_data_model.completion_params,
stop=list(stop or []),
stream=True,
user=self.user_id,
)
return self._handle_invoke_result(invoke_result=invoke_result)
def _handle_invoke_result(
self, invoke_result: LLMResult | Generator[LLMResultChunk, None, None]
) -> Generator[NodeEvent, None, None]:
self, invoke_result: LLMResult | Generator[LLMResultChunk | LLMStructuredOutput, None, None]
) -> Generator[NodeEvent | LLMStructuredOutput, None, None]:
# For blocking mode
if isinstance(invoke_result, LLMResult):
event = self._handle_blocking_result(invoke_result=invoke_result)
@ -329,23 +347,32 @@ class LLMNode(BaseNode[LLMNodeData]):
usage = LLMUsage.empty_usage()
finish_reason = None
full_text_buffer = io.StringIO()
for result in invoke_result:
contents = result.delta.message.content
for text_part in self._save_multimodal_output_and_convert_result_to_markdown(contents):
full_text_buffer.write(text_part)
yield RunStreamChunkEvent(chunk_content=text_part, from_variable_selector=[self.node_id, "text"])
# Consume the invoke result and handle generator exception
try:
for result in invoke_result:
if isinstance(result, LLMResultChunkWithStructuredOutput):
yield result
if isinstance(result, LLMResultChunk):
contents = result.delta.message.content
for text_part in self._save_multimodal_output_and_convert_result_to_markdown(contents):
full_text_buffer.write(text_part)
yield RunStreamChunkEvent(
chunk_content=text_part, from_variable_selector=[self.node_id, "text"]
)
# Update the whole metadata
if not model and result.model:
model = result.model
if len(prompt_messages) == 0:
# TODO(QuantumGhost): it seems that this update has no visable effect.
# What's the purpose of the line below?
prompt_messages = list(result.prompt_messages)
if usage.prompt_tokens == 0 and result.delta.usage:
usage = result.delta.usage
if finish_reason is None and result.delta.finish_reason:
finish_reason = result.delta.finish_reason
# Update the whole metadata
if not model and result.model:
model = result.model
if len(prompt_messages) == 0:
# TODO(QuantumGhost): it seems that this update has no visable effect.
# What's the purpose of the line below?
prompt_messages = list(result.prompt_messages)
if usage.prompt_tokens == 0 and result.delta.usage:
usage = result.delta.usage
if finish_reason is None and result.delta.finish_reason:
finish_reason = result.delta.finish_reason
except OutputParserError as e:
raise LLMNodeError(f"Failed to parse structured output: {e}")
yield ModelInvokeCompletedEvent(text=full_text_buffer.getvalue(), usage=usage, finish_reason=finish_reason)
@ -522,12 +549,6 @@ class LLMNode(BaseNode[LLMNodeData]):
if not model_schema:
raise ModelNotExistError(f"Model {node_data_model.name} not exist.")
if self.node_data.structured_output_enabled:
if model_schema.support_structure_output:
completion_params = self._handle_native_json_schema(completion_params, model_schema.parameter_rules)
else:
# Set appropriate response format based on model capabilities
self._set_response_format(completion_params, model_schema.parameter_rules)
model_config_with_cred.parameters = completion_params
# NOTE(-LAN-): This line modify the `self.node_data.model`, which is used in `_invoke_llm()`.
node_data_model.completion_params = completion_params
@ -719,32 +740,8 @@ class LLMNode(BaseNode[LLMNodeData]):
)
if not model_schema:
raise ModelNotExistError(f"Model {model_config.model} not exist.")
if self.node_data.structured_output_enabled:
if not model_schema.support_structure_output:
filtered_prompt_messages = self._handle_prompt_based_schema(
prompt_messages=filtered_prompt_messages,
)
return filtered_prompt_messages, model_config.stop
def _parse_structured_output(self, result_text: str) -> dict[str, Any]:
structured_output: dict[str, Any] = {}
try:
parsed = json.loads(result_text)
if not isinstance(parsed, dict):
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
except json.JSONDecodeError as e:
# if the result_text is not a valid json, try to repair it
parsed = json_repair.loads(result_text)
if not isinstance(parsed, dict):
# handle reasoning model like deepseek-r1 got '<think>\n\n</think>\n' prefix
if isinstance(parsed, list):
parsed = next((item for item in parsed if isinstance(item, dict)), {})
else:
raise LLMNodeError(f"Failed to parse structured output: {result_text}")
structured_output = parsed
return structured_output
@classmethod
def _extract_variable_selector_to_variable_mapping(
cls,
@ -934,104 +931,6 @@ class LLMNode(BaseNode[LLMNodeData]):
self._file_outputs.append(saved_file)
return saved_file
def _handle_native_json_schema(self, model_parameters: dict, rules: list[ParameterRule]) -> dict:
    """
    Handle structured output for models with native JSON schema support.

    NOTE: mutates ``model_parameters`` in place and also returns it.

    :param model_parameters: Model parameters to update
    :param rules: Model parameter rules
    :return: Updated model parameters with JSON schema configuration
    """
    # Process schema according to model requirements
    schema = self._fetch_structured_output_schema()
    schema_json = self._prepare_schema_for_model(schema)

    # Set JSON schema in parameters (serialized, non-ASCII preserved)
    model_parameters["json_schema"] = json.dumps(schema_json, ensure_ascii=False)

    # Set response_format only if the model advertises json_schema support
    # in its parameter rules.
    for rule in rules:
        if rule.name == "response_format" and ResponseFormat.JSON_SCHEMA.value in rule.options:
            model_parameters["response_format"] = ResponseFormat.JSON_SCHEMA.value

    return model_parameters
def _handle_prompt_based_schema(self, prompt_messages: Sequence[PromptMessage]) -> list[PromptMessage]:
    """
    Handle structured output for models without native JSON schema support.
    This function modifies the prompt messages to include schema-based output requirements.

    Args:
        prompt_messages: Original sequence of prompt messages

    Returns:
        list[PromptMessage]: Updated prompt messages with structured output requirements
    """
    # Convert schema to string format
    schema_str = json.dumps(self._fetch_structured_output_schema(), ensure_ascii=False)

    # Find the first existing system prompt (its content gets merged below)
    system_prompt = next(
        (prompt for prompt in prompt_messages if isinstance(prompt, SystemPromptMessage)),
        None,
    )
    structured_output_prompt = STRUCTURED_OUTPUT_PROMPT.replace("{{schema}}", schema_str)
    # Prepend the schema instructions to the first system prompt's text
    # (only when that content is a plain string); otherwise use the schema
    # instructions alone.
    system_prompt_content = (
        structured_output_prompt + "\n\n" + system_prompt.content
        if system_prompt and isinstance(system_prompt.content, str)
        else structured_output_prompt
    )
    system_prompt = SystemPromptMessage(content=system_prompt_content)

    # Drop ALL existing system prompts; the single combined one goes first.
    # (Content of any second/third system prompt is discarded.)
    filtered_prompts = [prompt for prompt in prompt_messages if not isinstance(prompt, SystemPromptMessage)]
    updated_prompt = [system_prompt] + filtered_prompts

    return updated_prompt
def _set_response_format(self, model_parameters: dict, rules: list) -> None:
    """
    Set the appropriate response format parameter based on model rules.

    Scans the rules for "response_format" and picks the first supported
    JSON-style option (JSON preferred over JSON_OBJECT), mutating
    ``model_parameters`` in place.

    :param model_parameters: Model parameters to update
    :param rules: Model parameter rules
    """
    for rule in rules:
        if rule.name != "response_format":
            continue
        # Preference order mirrors model capabilities: "JSON" first,
        # then the "json_object" alias.
        for candidate in (ResponseFormat.JSON.value, ResponseFormat.JSON_OBJECT.value):
            if candidate in rule.options:
                model_parameters["response_format"] = candidate
                break
def _prepare_schema_for_model(self, schema: dict) -> dict:
    """
    Prepare JSON schema based on model requirements.

    Different models have different requirements for JSON schema formatting.
    This function handles these differences.

    :param schema: The original JSON schema
    :return: Processed schema compatible with the current model
    """
    from copy import deepcopy

    # BUG FIX: the original used schema.copy(), which is SHALLOW — the
    # in-place recursive mutators below (convert_boolean_to_string,
    # remove_additional_properties) descend into nested dicts, so the
    # caller's original schema was being modified. Use a true deep copy.
    processed_schema = deepcopy(schema)

    # Convert boolean types to string types (common requirement)
    convert_boolean_to_string(processed_schema)

    # Apply model-specific transformations
    if SpecialModelType.GEMINI in self.node_data.model.name:
        # Gemini rejects additionalProperties
        remove_additional_properties(processed_schema)
        return processed_schema
    elif SpecialModelType.OLLAMA in self.node_data.model.provider:
        return processed_schema
    else:
        # Default format with name field
        return {"schema": processed_schema, "name": "llm_response"}
def _fetch_model_schema(self, provider: str) -> AIModelEntity | None:
"""
Fetch model schema
@ -1243,49 +1142,3 @@ def _handle_completion_template(
)
prompt_messages.append(prompt_message)
return prompt_messages
def remove_additional_properties(schema: dict) -> None:
    """
    Remove additionalProperties fields from JSON schema.
    Used for models like Gemini that don't support this property.

    :param schema: JSON schema to modify in-place
    """
    if not isinstance(schema, dict):
        return

    # Iterative depth-first walk: strip the key at each dict level, then
    # follow every nested dict — whether nested directly or inside a list.
    pending = [schema]
    while pending:
        current = pending.pop()
        current.pop("additionalProperties", None)
        for child in current.values():
            if isinstance(child, dict):
                pending.append(child)
            elif isinstance(child, list):
                pending.extend(element for element in child if isinstance(element, dict))
def convert_boolean_to_string(schema: dict) -> None:
    """
    Convert boolean type specifications to string in JSON schema.

    Some models cannot emit JSON booleans, so schemas requesting them are
    rewritten to ask for strings instead. Handles both the scalar form
    ``{"type": "boolean"}`` and (new) the JSON Schema array form
    ``{"type": ["boolean", "null"]}``, which the original silently skipped.

    :param schema: JSON schema to modify in-place
    """
    if not isinstance(schema, dict):
        return

    # Check for boolean type at current level
    type_spec = schema.get("type")
    if type_spec == "boolean":
        schema["type"] = "string"
    elif isinstance(type_spec, list):
        # JSON Schema allows "type" to be a list of simple type names.
        schema["type"] = ["string" if t == "boolean" else t for t in type_spec]

    # Process nested dictionaries and lists recursively
    for value in schema.values():
        if isinstance(value, dict):
            convert_boolean_to_string(value)
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    convert_boolean_to_string(item)

View File

@ -1,16 +0,0 @@
from enum import StrEnum
class ResponseFormat(StrEnum):
    """Constants for model response formats"""

    JSON_SCHEMA = "json_schema"  # model's structured output mode. some model like gemini, gpt-4o, support this mode.
    JSON = "JSON"  # model's json mode. some model like claude support this mode.
    JSON_OBJECT = "json_object"  # json mode's another alias. some model like deepseek-chat, qwen use this alias.


class SpecialModelType(StrEnum):
    """Constants for identifying model types that need special schema handling"""

    GEMINI = "gemini"  # matched against model name (substring check)
    OLLAMA = "ollama"  # matched against provider name (substring check)

View File

@ -1,17 +0,0 @@
STRUCTURED_OUTPUT_PROMPT = """Youre a helpful AI assistant. You could answer questions and output in JSON format.
constraints:
- You must output in JSON format.
- Do not output boolean value, use string type instead.
- Do not output integer or float value, use number type instead.
eg:
Here is the JSON schema:
{"additionalProperties": false, "properties": {"age": {"type": "number"}, "name": {"type": "string"}}, "required": ["name", "age"], "type": "object"}
Here is the user's question:
My name is John Doe and I am 30 years old.
output:
{"name": "John Doe", "age": 30}
Here is the JSON schema:
{{schema}}
""" # noqa: E501

View File

@ -7,6 +7,7 @@ def append_variables_recursively(
):
"""
Append variables recursively
:param pool: variable pool to append variables to
:param node_id: node id
:param variable_key_list: variable key list
:param variable_value: variable value

View File

@ -300,7 +300,7 @@ class WorkflowEntry:
return node_instance, generator
except Exception as e:
logger.exception(
"error while running node_instance, workflow_id=%s, node_id=%s, type=%s, version=%s",
"error while running node_instance, node_id=%s, type=%s, version=%s",
node_instance.id,
node_instance.node_type,
node_instance.version(),

View File

@ -21,6 +21,7 @@ def init_app(app: DifyApp) -> Celery:
"master_name": dify_config.CELERY_SENTINEL_MASTER_NAME,
"sentinel_kwargs": {
"socket_timeout": dify_config.CELERY_SENTINEL_SOCKET_TIMEOUT,
"password": dify_config.CELERY_SENTINEL_PASSWORD,
},
}

View File

@ -384,7 +384,7 @@ def get_file_type_by_mime_type(mime_type: str) -> FileType:
class StorageKeyLoader:
"""FileKeyLoader load the storage key from database for a list of files.
This loader is batched, the
This loader is batched, the database query count is constant regardless of the input size.
"""
def __init__(self, session: Session, tenant_id: str) -> None:
@ -445,10 +445,10 @@ class StorageKeyLoader:
if file.transfer_method in (FileTransferMethod.LOCAL_FILE, FileTransferMethod.REMOTE_URL):
upload_file_row = upload_files.get(model_id)
if upload_file_row is None:
raise ValueError(...)
raise ValueError(f"Upload file not found for id: {model_id}")
file._storage_key = upload_file_row.key
elif file.transfer_method == FileTransferMethod.TOOL_FILE:
tool_file_row = tool_files.get(model_id)
if tool_file_row is None:
raise ValueError(...)
raise ValueError(f"Tool file not found for id: {model_id}")
file._storage_key = tool_file_row.file_key

View File

@ -718,7 +718,6 @@ class Conversation(Base):
if "model" in override_model_configs:
app_model_config = AppModelConfig()
app_model_config = app_model_config.from_model_config_dict(override_model_configs)
assert app_model_config is not None, "app model config not found"
model_config = app_model_config.to_dict()
else:
model_config["configs"] = override_model_configs

View File

@ -198,7 +198,7 @@ vdb = [
"pymochow==1.3.1",
"pyobvector~=0.1.6",
"qdrant-client==1.9.0",
"tablestore==6.1.0",
"tablestore==6.2.0",
"tcvectordb~=1.6.4",
"tidb-vector==0.0.9",
"upstash-vector==0.6.0",

View File

@ -278,6 +278,23 @@ class DatasetService:
except ProviderTokenNotInitError as ex:
raise ValueError(ex.description)
@staticmethod
def check_reranking_model_setting(tenant_id: str, reranking_model_provider: str, reranking_model: str):
    """
    Validate that the given rerank model can be instantiated for the tenant.

    Translates provider-layer errors into user-facing ValueError messages.

    :param tenant_id: tenant whose model providers are checked
    :param reranking_model_provider: provider of the rerank model
    :param reranking_model: name of the rerank model
    :raises ValueError: when no rerank model is available or the provider
        token is not initialized
    """
    try:
        ModelManager().get_model_instance(
            tenant_id=tenant_id,
            provider=reranking_model_provider,
            model_type=ModelType.RERANK,
            model=reranking_model,
        )
    except LLMBadRequestError:
        raise ValueError(
            "No Rerank Model available. Please configure a valid provider in the Settings -> Model Provider."
        )
    except ProviderTokenNotInitError as ex:
        raise ValueError(ex.description)
@staticmethod
def update_dataset(dataset_id, data, user):
"""
@ -586,6 +603,10 @@ class DatasetService:
)
except ProviderTokenNotInitError:
# If we can't get the embedding model, preserve existing settings
logging.warning(
f"Failed to initialize embedding model {data['embedding_model_provider']}/{data['embedding_model']}, "
f"preserving existing settings"
)
if dataset.embedding_model_provider and dataset.embedding_model:
filtered_data["embedding_model_provider"] = dataset.embedding_model_provider
filtered_data["embedding_model"] = dataset.embedding_model
@ -2203,6 +2224,7 @@ class SegmentService:
# calc embedding use tokens
if document.doc_form == "qa_model":
segment.answer = args.answer
tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer])[0]
else:
tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]

View File

@ -1,23 +0,0 @@
from typing import Optional
from core.moderation.factory import ModerationFactory, ModerationOutputsResult
from extensions.ext_database import db
from models.model import App, AppModelConfig
class ModerationService:
    def moderation_for_outputs(self, app_id: str, app_model: App, text: str) -> ModerationOutputsResult:
        """
        Run output moderation for an app using its configured
        sensitive-word-avoidance settings.

        :param app_id: id of the app being moderated
        :param app_model: App row whose model config holds the moderation settings
        :param text: model output text to moderate
        :return: moderation verdict for the text
        :raises ValueError: if the app has no model config row
        """
        config_row: Optional[AppModelConfig] = (
            db.session.query(AppModelConfig).filter(AppModelConfig.id == app_model.app_model_config_id).first()
        )
        if not config_row:
            raise ValueError("app model config not found")

        avoidance = config_row.sensitive_word_avoidance_dict
        factory = ModerationFactory(avoidance["type"], app_id, app_model.tenant_id, avoidance["config"])
        return factory.moderation_for_outputs(text)

View File

@ -0,0 +1,74 @@
from collections.abc import Mapping, Sequence
from typing import Any, Literal
from sqlalchemy.orm import Session
from core.plugin.entities.parameters import PluginParameterOption
from core.plugin.impl.dynamic_select import DynamicSelectClient
from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ProviderConfigEncrypter
from extensions.ext_database import db
from models.tools import BuiltinToolProvider
class PluginParameterService:
    @staticmethod
    def get_dynamic_select_options(
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        action: str,
        parameter: str,
        provider_type: Literal["tool"],
    ) -> Sequence[PluginParameterOption]:
        """
        Get dynamic select options for a plugin parameter.

        Resolves provider credentials (decrypting the stored ones when the
        provider requires credentials) and asks the plugin daemon for the
        parameter's current option list.

        Args:
            tenant_id: The tenant ID.
            user_id: The user on whose behalf options are fetched.
            plugin_id: The plugin ID.
            provider: The provider name.
            action: The action name.
            parameter: The parameter name.
            provider_type: Kind of provider; only "tool" is supported.

        Raises:
            ValueError: if provider_type is unsupported, or a credentials
                record is required but missing.
        """
        credentials: Mapping[str, Any] = {}
        match provider_type:
            case "tool":
                provider_controller = ToolManager.get_builtin_provider(provider, tenant_id)

                # init tool configuration (encrypter used to decrypt stored credentials)
                tool_configuration = ProviderConfigEncrypter(
                    tenant_id=tenant_id,
                    config=[x.to_basic_provider_config() for x in provider_controller.get_credentials_schema()],
                    provider_type=provider_controller.provider_type.value,
                    provider_identity=provider_controller.entity.identity.name,
                )

                # check if credentials are required
                if not provider_controller.need_credentials:
                    credentials = {}
                else:
                    # fetch credentials from db
                    with Session(db.engine) as session:
                        db_record = (
                            session.query(BuiltinToolProvider)
                            .filter(
                                BuiltinToolProvider.tenant_id == tenant_id,
                                BuiltinToolProvider.provider == provider,
                            )
                            .first()
                        )
                        if db_record is None:
                            raise ValueError(f"Builtin provider {provider} not found when fetching credentials")
                        credentials = tool_configuration.decrypt(db_record.credentials)
            case _:
                raise ValueError(f"Invalid provider type: {provider_type}")

        return (
            DynamicSelectClient()
            .fetch_dynamic_select_options(tenant_id, user_id, plugin_id, provider, action, credentials, parameter)
            .options
        )

View File

@ -97,16 +97,16 @@ class VectorService:
vector = Vector(dataset=dataset)
vector.delete_by_ids([segment.index_node_id])
vector.add_texts([document], duplicate_check=True)
# update keyword index
keyword = Keyword(dataset)
keyword.delete_by_ids([segment.index_node_id])
# save keyword index
if keywords and len(keywords) > 0:
keyword.add_texts([document], keywords_list=[keywords])
else:
keyword.add_texts([document])
# update keyword index
keyword = Keyword(dataset)
keyword.delete_by_ids([segment.index_node_id])
# save keyword index
if keywords and len(keywords) > 0:
keyword.add_texts([document], keywords_list=[keywords])
else:
keyword.add_texts([document])
@classmethod
def generate_child_chunks(

View File

@ -9,6 +9,7 @@ from unittest.mock import MagicMock, patch
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from core.llm_generator.output_parser.structured_output import _parse_structured_output
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage
from core.workflow.entities.variable_pool import VariablePool
@ -277,29 +278,6 @@ def test_execute_llm_with_jinja2(flask_req_ctx, setup_code_executor_mock):
def test_extract_json():
node = init_llm_node(
config={
"id": "llm",
"data": {
"title": "123",
"type": "llm",
"model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
"prompt_config": {
"structured_output": {
"enabled": True,
"schema": {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "number"}},
},
}
},
"prompt_template": [{"role": "user", "text": "{{#sys.query#}}"}],
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": False},
},
},
)
llm_texts = [
'<think>\n\n</think>{"name": "test", "age": 123', # resoning model (deepseek-r1)
'{"name":"test","age":123}', # json schema model (gpt-4o)
@ -308,4 +286,4 @@ def test_extract_json():
'{"name":"test",age:123}', # without quotes (qwen-2.5-0.5b)
]
result = {"name": "test", "age": 123}
assert all(node._parse_structured_output(item) == result for item in llm_texts)
assert all(_parse_structured_output(item) == result for item in llm_texts)

View File

@ -8,151 +8,298 @@ from services.dataset_service import DatasetService
from services.errors.account import NoPermissionError
class DatasetPermissionTestDataFactory:
"""Factory class for creating test data and mock objects for dataset permission tests."""
@staticmethod
def create_dataset_mock(
dataset_id: str = "dataset-123",
tenant_id: str = "test-tenant-123",
created_by: str = "creator-456",
permission: DatasetPermissionEnum = DatasetPermissionEnum.ONLY_ME,
**kwargs,
) -> Mock:
"""Create a mock dataset with specified attributes."""
dataset = Mock(spec=Dataset)
dataset.id = dataset_id
dataset.tenant_id = tenant_id
dataset.created_by = created_by
dataset.permission = permission
for key, value in kwargs.items():
setattr(dataset, key, value)
return dataset
@staticmethod
def create_user_mock(
user_id: str = "user-789",
tenant_id: str = "test-tenant-123",
role: TenantAccountRole = TenantAccountRole.NORMAL,
**kwargs,
) -> Mock:
"""Create a mock user with specified attributes."""
user = Mock(spec=Account)
user.id = user_id
user.current_tenant_id = tenant_id
user.current_role = role
for key, value in kwargs.items():
setattr(user, key, value)
return user
@staticmethod
def create_dataset_permission_mock(
dataset_id: str = "dataset-123",
account_id: str = "user-789",
**kwargs,
) -> Mock:
"""Create a mock dataset permission record."""
permission = Mock(spec=DatasetPermission)
permission.dataset_id = dataset_id
permission.account_id = account_id
for key, value in kwargs.items():
setattr(permission, key, value)
return permission
class TestDatasetPermissionService:
"""Test cases for dataset permission checking functionality"""
"""
Comprehensive unit tests for DatasetService.check_dataset_permission method.
def setup_method(self):
"""Set up test fixtures"""
# Mock tenant and user
self.tenant_id = "test-tenant-123"
self.creator_id = "creator-456"
self.normal_user_id = "normal-789"
self.owner_user_id = "owner-999"
This test suite covers all permission scenarios including:
- Cross-tenant access restrictions
- Owner privilege checks
- Different permission levels (ONLY_ME, ALL_TEAM, PARTIAL_TEAM)
- Explicit permission checks for PARTIAL_TEAM
- Error conditions and logging
"""
# Mock dataset
self.dataset = Mock(spec=Dataset)
self.dataset.id = "dataset-123"
self.dataset.tenant_id = self.tenant_id
self.dataset.created_by = self.creator_id
@pytest.fixture
def mock_dataset_service_dependencies(self):
"""Common mock setup for dataset service dependencies."""
with patch("services.dataset_service.db.session") as mock_session:
yield {
"db_session": mock_session,
}
# Mock users
self.creator_user = Mock(spec=Account)
self.creator_user.id = self.creator_id
self.creator_user.current_tenant_id = self.tenant_id
self.creator_user.current_role = TenantAccountRole.EDITOR
self.normal_user = Mock(spec=Account)
self.normal_user.id = self.normal_user_id
self.normal_user.current_tenant_id = self.tenant_id
self.normal_user.current_role = TenantAccountRole.NORMAL
self.owner_user = Mock(spec=Account)
self.owner_user.id = self.owner_user_id
self.owner_user.current_tenant_id = self.tenant_id
self.owner_user.current_role = TenantAccountRole.OWNER
def test_permission_check_different_tenant_should_fail(self):
"""Test that users from different tenants cannot access dataset"""
self.normal_user.current_tenant_id = "different-tenant"
with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset."):
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
def test_owner_can_access_any_dataset(self):
"""Test that tenant owners can access any dataset regardless of permission"""
self.dataset.permission = DatasetPermissionEnum.ONLY_ME
@pytest.fixture
def mock_logging_dependencies(self):
"""Mock setup for logging tests."""
with patch("services.dataset_service.logging") as mock_logging:
yield {
"logging": mock_logging,
}
def _assert_permission_check_passes(self, dataset: Mock, user: Mock):
"""Helper method to verify that permission check passes without raising exceptions."""
# Should not raise any exception
DatasetService.check_dataset_permission(self.dataset, self.owner_user)
DatasetService.check_dataset_permission(dataset, user)
def test_only_me_permission_creator_can_access(self):
"""Test ONLY_ME permission allows only creator to access"""
self.dataset.permission = DatasetPermissionEnum.ONLY_ME
def _assert_permission_check_fails(
self, dataset: Mock, user: Mock, expected_message: str = "You do not have permission to access this dataset."
):
"""Helper method to verify that permission check fails with expected error."""
with pytest.raises(NoPermissionError, match=expected_message):
DatasetService.check_dataset_permission(dataset, user)
# Creator should be able to access
DatasetService.check_dataset_permission(self.dataset, self.creator_user)
def _assert_database_query_called(self, mock_session: Mock, dataset_id: str, account_id: str):
"""Helper method to verify database query calls for permission checks."""
mock_session.query().filter_by.assert_called_with(dataset_id=dataset_id, account_id=account_id)
def test_only_me_permission_others_cannot_access(self):
"""Test ONLY_ME permission denies access to non-creators"""
self.dataset.permission = DatasetPermissionEnum.ONLY_ME
with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset."):
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
def test_all_team_permission_allows_access(self):
"""Test ALL_TEAM permission allows any team member to access"""
self.dataset.permission = DatasetPermissionEnum.ALL_TEAM
# Should not raise any exception for team members
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
DatasetService.check_dataset_permission(self.dataset, self.creator_user)
@patch("services.dataset_service.db.session")
def test_partial_team_permission_creator_can_access(self, mock_session):
"""Test PARTIAL_TEAM permission allows creator to access"""
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
# Should not raise any exception for creator
DatasetService.check_dataset_permission(self.dataset, self.creator_user)
# Should not query database for creator
def _assert_database_query_not_called(self, mock_session: Mock):
"""Helper method to verify that database query was not called."""
mock_session.query.assert_not_called()
@patch("services.dataset_service.db.session")
def test_partial_team_permission_with_explicit_permission(self, mock_session):
"""Test PARTIAL_TEAM permission allows users with explicit permission"""
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
# ==================== Cross-Tenant Access Tests ====================
def test_permission_check_different_tenant_should_fail(self):
"""Test that users from different tenants cannot access dataset regardless of other permissions."""
# Create dataset and user from different tenants
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
tenant_id="tenant-123", permission=DatasetPermissionEnum.ALL_TEAM
)
user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="user-789", tenant_id="different-tenant-456", role=TenantAccountRole.EDITOR
)
# Should fail due to different tenant
self._assert_permission_check_fails(dataset, user)
# ==================== Owner Privilege Tests ====================
def test_owner_can_access_any_dataset(self):
"""Test that tenant owners can access any dataset regardless of permission level."""
# Create dataset with restrictive permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.ONLY_ME)
# Create owner user
owner_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="owner-999", role=TenantAccountRole.OWNER
)
# Owner should have access regardless of dataset permission
self._assert_permission_check_passes(dataset, owner_user)
# ==================== ONLY_ME Permission Tests ====================
def test_only_me_permission_creator_can_access(self):
"""Test ONLY_ME permission allows only the dataset creator to access."""
# Create dataset with ONLY_ME permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
created_by="creator-456", permission=DatasetPermissionEnum.ONLY_ME
)
# Create creator user
creator_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="creator-456", role=TenantAccountRole.EDITOR
)
# Creator should be able to access
self._assert_permission_check_passes(dataset, creator_user)
def test_only_me_permission_others_cannot_access(self):
"""Test ONLY_ME permission denies access to non-creators."""
# Create dataset with ONLY_ME permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
created_by="creator-456", permission=DatasetPermissionEnum.ONLY_ME
)
# Create normal user (not the creator)
normal_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="normal-789", role=TenantAccountRole.NORMAL
)
# Non-creator should be denied access
self._assert_permission_check_fails(dataset, normal_user)
# ==================== ALL_TEAM Permission Tests ====================
def test_all_team_permission_allows_access(self):
"""Test ALL_TEAM permission allows any team member to access the dataset."""
# Create dataset with ALL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.ALL_TEAM)
# Create different types of team members
normal_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="normal-789", role=TenantAccountRole.NORMAL
)
editor_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="editor-456", role=TenantAccountRole.EDITOR
)
# All team members should have access
self._assert_permission_check_passes(dataset, normal_user)
self._assert_permission_check_passes(dataset, editor_user)
# ==================== PARTIAL_TEAM Permission Tests ====================
def test_partial_team_permission_creator_can_access(self, mock_dataset_service_dependencies):
"""Test PARTIAL_TEAM permission allows creator to access without database query."""
# Create dataset with PARTIAL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
created_by="creator-456", permission=DatasetPermissionEnum.PARTIAL_TEAM
)
# Create creator user
creator_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="creator-456", role=TenantAccountRole.EDITOR
)
# Creator should have access without database query
self._assert_permission_check_passes(dataset, creator_user)
self._assert_database_query_not_called(mock_dataset_service_dependencies["db_session"])
def test_partial_team_permission_with_explicit_permission(self, mock_dataset_service_dependencies):
"""Test PARTIAL_TEAM permission allows users with explicit permission records."""
# Create dataset with PARTIAL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.PARTIAL_TEAM)
# Create normal user (not the creator)
normal_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="normal-789", role=TenantAccountRole.NORMAL
)
# Mock database query to return a permission record
mock_permission = Mock(spec=DatasetPermission)
mock_session.query().filter_by().first.return_value = mock_permission
mock_permission = DatasetPermissionTestDataFactory.create_dataset_permission_mock(
dataset_id=dataset.id, account_id=normal_user.id
)
mock_dataset_service_dependencies["db_session"].query().filter_by().first.return_value = mock_permission
# Should not raise any exception
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
# User with explicit permission should have access
self._assert_permission_check_passes(dataset, normal_user)
self._assert_database_query_called(mock_dataset_service_dependencies["db_session"], dataset.id, normal_user.id)
# Verify database was queried correctly
mock_session.query().filter_by.assert_called_with(dataset_id=self.dataset.id, account_id=self.normal_user.id)
def test_partial_team_permission_without_explicit_permission(self, mock_dataset_service_dependencies):
"""Test PARTIAL_TEAM permission denies users without explicit permission records."""
# Create dataset with PARTIAL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.PARTIAL_TEAM)
@patch("services.dataset_service.db.session")
def test_partial_team_permission_without_explicit_permission(self, mock_session):
"""Test PARTIAL_TEAM permission denies users without explicit permission"""
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
# Create normal user (not the creator)
normal_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="normal-789", role=TenantAccountRole.NORMAL
)
# Mock database query to return None (no permission record)
mock_session.query().filter_by().first.return_value = None
mock_dataset_service_dependencies["db_session"].query().filter_by().first.return_value = None
with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset."):
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
# User without explicit permission should be denied access
self._assert_permission_check_fails(dataset, normal_user)
self._assert_database_query_called(mock_dataset_service_dependencies["db_session"], dataset.id, normal_user.id)
# Verify database was queried correctly
mock_session.query().filter_by.assert_called_with(dataset_id=self.dataset.id, account_id=self.normal_user.id)
@patch("services.dataset_service.db.session")
def test_partial_team_permission_non_creator_without_permission_fails(self, mock_session):
"""Test that non-creators without explicit permission are denied access"""
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
def test_partial_team_permission_non_creator_without_permission_fails(self, mock_dataset_service_dependencies):
"""Test that non-creators without explicit permission are denied access to PARTIAL_TEAM datasets."""
# Create dataset with PARTIAL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
created_by="creator-456", permission=DatasetPermissionEnum.PARTIAL_TEAM
)
# Create a different user (not the creator)
other_user = Mock(spec=Account)
other_user.id = "other-user-123"
other_user.current_tenant_id = self.tenant_id
other_user.current_role = TenantAccountRole.NORMAL
other_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="other-user-123", role=TenantAccountRole.NORMAL
)
# Mock database query to return None (no permission record)
mock_session.query().filter_by().first.return_value = None
mock_dataset_service_dependencies["db_session"].query().filter_by().first.return_value = None
with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset."):
DatasetService.check_dataset_permission(self.dataset, other_user)
# Non-creator without explicit permission should be denied access
self._assert_permission_check_fails(dataset, other_user)
self._assert_database_query_called(mock_dataset_service_dependencies["db_session"], dataset.id, other_user.id)
# ==================== Enum Usage Tests ====================
def test_partial_team_permission_uses_correct_enum(self):
"""Test that the method correctly uses DatasetPermissionEnum.PARTIAL_TEAM"""
# This test ensures we're using the enum instead of string literals
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
# Creator should always have access
DatasetService.check_dataset_permission(self.dataset, self.creator_user)
@patch("services.dataset_service.logging")
@patch("services.dataset_service.db.session")
def test_permission_denied_logs_debug_message(self, mock_session, mock_logging):
"""Test that permission denied events are logged"""
self.dataset.permission = DatasetPermissionEnum.PARTIAL_TEAM
mock_session.query().filter_by().first.return_value = None
with pytest.raises(NoPermissionError):
DatasetService.check_dataset_permission(self.dataset, self.normal_user)
# Verify debug message was logged
mock_logging.debug.assert_called_with(
f"User {self.normal_user.id} does not have permission to access dataset {self.dataset.id}"
"""Test that the method correctly uses DatasetPermissionEnum.PARTIAL_TEAM instead of string literals."""
# Create dataset with PARTIAL_TEAM permission using enum
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(
created_by="creator-456", permission=DatasetPermissionEnum.PARTIAL_TEAM
)
# Create creator user
creator_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="creator-456", role=TenantAccountRole.EDITOR
)
# Creator should always have access regardless of permission level
self._assert_permission_check_passes(dataset, creator_user)
# ==================== Logging Tests ====================
def test_permission_denied_logs_debug_message(self, mock_dataset_service_dependencies, mock_logging_dependencies):
"""Test that permission denied events are properly logged for debugging purposes."""
# Create dataset with PARTIAL_TEAM permission
dataset = DatasetPermissionTestDataFactory.create_dataset_mock(permission=DatasetPermissionEnum.PARTIAL_TEAM)
# Create normal user (not the creator)
normal_user = DatasetPermissionTestDataFactory.create_user_mock(
user_id="normal-789", role=TenantAccountRole.NORMAL
)
# Mock database query to return None (no permission record)
mock_dataset_service_dependencies["db_session"].query().filter_by().first.return_value = None
# Attempt permission check (should fail)
with pytest.raises(NoPermissionError):
DatasetService.check_dataset_permission(dataset, normal_user)
# Verify debug message was logged with correct user and dataset information
mock_logging_dependencies["logging"].debug.assert_called_with(
f"User {normal_user.id} does not have permission to access dataset {dataset.id}"
)

4287
api/uv.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -285,6 +285,7 @@ BROKER_USE_SSL=false
# If you are using Redis Sentinel for high availability, configure the following settings.
CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_PASSWORD=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
# ------------------------------

View File

@ -79,6 +79,7 @@ x-shared-env: &shared-api-worker-env
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-}
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}

View File

@ -36,6 +36,7 @@ import AccessControl from '@/app/components/app/app-access-control'
import { AccessMode } from '@/models/access-control'
import { useGlobalPublicStore } from '@/context/global-public-context'
import { formatTime } from '@/utils/time'
import { useGetUserCanAccessApp } from '@/service/access-control'
export type AppCardProps = {
app: App
@ -190,6 +191,7 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
}, [onRefresh, mutateApps, setShowAccessControl])
const Operations = (props: HtmlContentProps) => {
const { data: userCanAccessApp, isLoading: isGettingUserCanAccessApp } = useGetUserCanAccessApp({ appId: app?.id, enabled: (!!props?.open && systemFeatures.webapp_auth.enabled) })
const onMouseLeave = async () => {
props.onClose?.()
}
@ -267,10 +269,14 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
</button>
</>
)}
<Divider className="my-1" />
<button className='mx-1 flex h-8 cursor-pointer items-center gap-2 rounded-lg px-3 hover:bg-state-base-hover' onClick={onClickInstalledApp}>
<span className='system-sm-regular text-text-secondary'>{t('app.openInExplore')}</span>
</button>
{
(isGettingUserCanAccessApp || !userCanAccessApp?.result) ? null : <>
<Divider className="my-1" />
<button className='mx-1 flex h-8 cursor-pointer items-center gap-2 rounded-lg px-3 hover:bg-state-base-hover' onClick={onClickInstalledApp}>
<span className='system-sm-regular text-text-secondary'>{t('app.openInExplore')}</span>
</button>
</>
}
<Divider className="my-1" />
{
systemFeatures.webapp_auth.enabled && isCurrentWorkspaceEditor && <>

View File

@ -191,6 +191,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
const { userProfile: { timezone } } = useAppContext()
const { formatTime } = useTimestamp()
const { onClose, appDetail } = useContext(DrawerContext)
const { notify } = useContext(ToastContext)
const { currentLogItem, setCurrentLogItem, showMessageLogModal, setShowMessageLogModal, showPromptLogModal, setShowPromptLogModal, currentLogModalActiveTab } = useAppStore(useShallow(state => ({
currentLogItem: state.currentLogItem,
setCurrentLogItem: state.setCurrentLogItem,
@ -312,18 +313,34 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
return item
}))
}, [allChatItems])
const handleAnnotationRemoved = useCallback((index: number) => {
setAllChatItems(allChatItems.map((item, i) => {
if (i === index) {
return {
...item,
content: item.content,
annotation: undefined,
}
const handleAnnotationRemoved = useCallback(async (index: number): Promise<boolean> => {
const annotation = allChatItems[index]?.annotation
try {
if (annotation?.id) {
const { delAnnotation } = await import('@/service/annotation')
await delAnnotation(appDetail?.id || '', annotation.id)
}
return item
}))
}, [allChatItems])
setAllChatItems(allChatItems.map((item, i) => {
if (i === index) {
return {
...item,
content: item.content,
annotation: undefined,
}
}
return item
}))
notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
return true
}
catch {
notify({ type: 'error', message: t('common.actionMsg.modifiedUnsuccessfully') })
return false
}
}, [allChatItems, appDetail?.id, t])
const fetchInitiated = useRef(false)

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 6.9 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 6.9 KiB

View File

@ -18,7 +18,7 @@ describe('InputNumber Component', () => {
it('renders input with default values', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
expect(input).toBeInTheDocument()
})
@ -56,7 +56,7 @@ describe('InputNumber Component', () => {
it('handles direct input changes', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: '42' } })
expect(defaultProps.onChange).toHaveBeenCalledWith(42)
@ -64,7 +64,7 @@ describe('InputNumber Component', () => {
it('handles empty input', () => {
render(<InputNumber {...defaultProps} value={0} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: '' } })
expect(defaultProps.onChange).toHaveBeenCalledWith(undefined)
@ -72,7 +72,7 @@ describe('InputNumber Component', () => {
it('handles invalid input', () => {
render(<InputNumber {...defaultProps} />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
fireEvent.change(input, { target: { value: 'abc' } })
expect(defaultProps.onChange).not.toHaveBeenCalled()
@ -86,7 +86,7 @@ describe('InputNumber Component', () => {
it('disables controls when disabled prop is true', () => {
render(<InputNumber {...defaultProps} disabled />)
const input = screen.getByRole('textbox')
const input = screen.getByRole('spinbutton')
const incrementBtn = screen.getByRole('button', { name: /increment/i })
const decrementBtn = screen.getByRole('button', { name: /decrement/i })

View File

@ -55,8 +55,8 @@ export const InputNumber: FC<InputNumberProps> = (props) => {
return <div className={classNames('flex', wrapClassName)}>
<Input {...rest}
// disable default controller
type='text'
className={classNames('rounded-r-none', className)}
type='number'
className={classNames('no-spinner rounded-r-none', className)}
value={value}
max={max}
min={min}
@ -77,8 +77,8 @@ export const InputNumber: FC<InputNumberProps> = (props) => {
size={size}
/>
<div className={classNames(
'flex flex-col bg-components-input-bg-normal rounded-r-md border-l border-divider-subtle text-text-tertiary focus:shadow-xs',
disabled && 'opacity-50 cursor-not-allowed',
'flex flex-col rounded-r-md border-l border-divider-subtle bg-components-input-bg-normal text-text-tertiary focus:shadow-xs',
disabled && 'cursor-not-allowed opacity-50',
controlWrapClassName)}
>
<button

View File

@ -3,6 +3,7 @@ import { Fragment, cloneElement, useRef } from 'react'
import cn from '@/utils/classnames'
export type HtmlContentProps = {
open?: boolean
onClose?: () => void
onClick?: () => void
}
@ -100,7 +101,8 @@ export default function CustomPopover({
}
>
{cloneElement(htmlContent as React.ReactElement, {
onClose: () => onMouseLeave(open),
open,
onClose: close,
...(manualClose
? {
onClick: close,

View File

@ -32,6 +32,10 @@ export const PromptMenuItem = memo(({
return
onMouseEnter()
}}
onMouseDown={(e) => {
e.preventDefault()
e.stopPropagation()
}}
onClick={() => {
if (disabled)
return

View File

@ -1,10 +1,10 @@
'use client'
import type { FC } from 'react'
import React, { useEffect, useState } from 'react'
import React, { useEffect, useRef, useState } from 'react'
import { Combobox, ComboboxButton, ComboboxInput, ComboboxOption, ComboboxOptions, Listbox, ListboxButton, ListboxOption, ListboxOptions } from '@headlessui/react'
import { ChevronDownIcon, ChevronUpIcon, XMarkIcon } from '@heroicons/react/20/solid'
import Badge from '../badge/index'
import { RiCheckLine } from '@remixicon/react'
import { RiCheckLine, RiLoader4Line } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import classNames from '@/utils/classnames'
import {
@ -51,6 +51,8 @@ export type ISelectProps = {
item: Item
selected: boolean
}) => React.ReactNode
isLoading?: boolean
onOpenChange?: (open: boolean) => void
}
const Select: FC<ISelectProps> = ({
className,
@ -178,17 +180,20 @@ const SimpleSelect: FC<ISelectProps> = ({
defaultValue = 1,
disabled = false,
onSelect,
onOpenChange,
placeholder,
optionWrapClassName,
optionClassName,
hideChecked,
notClearable,
renderOption,
isLoading = false,
}) => {
const { t } = useTranslation()
const localPlaceholder = placeholder || t('common.placeholder.select')
const [selectedItem, setSelectedItem] = useState<Item | null>(null)
useEffect(() => {
let defaultSelect = null
const existed = items.find((item: Item) => item.value === defaultValue)
@ -199,8 +204,10 @@ const SimpleSelect: FC<ISelectProps> = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [defaultValue])
const listboxRef = useRef<HTMLDivElement>(null)
return (
<Listbox
<Listbox ref={listboxRef}
value={selectedItem}
onChange={(value: Item) => {
if (!disabled) {
@ -212,10 +219,17 @@ const SimpleSelect: FC<ISelectProps> = ({
<div className={classNames('group/simple-select relative h-9', wrapperClassName)}>
{renderTrigger && <ListboxButton className='w-full'>{renderTrigger(selectedItem)}</ListboxButton>}
{!renderTrigger && (
<ListboxButton className={classNames(`flex items-center w-full h-full rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 sm:text-sm sm:leading-6 focus-visible:outline-none focus-visible:bg-state-base-hover-alt group-hover/simple-select:bg-state-base-hover-alt ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}>
<ListboxButton onClick={() => {
// get data-open, use setTimeout to ensure the attribute is set
setTimeout(() => {
if (listboxRef.current)
onOpenChange?.(listboxRef.current.getAttribute('data-open') !== null)
})
}} className={classNames(`flex items-center w-full h-full rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 sm:text-sm sm:leading-6 focus-visible:outline-none focus-visible:bg-state-base-hover-alt group-hover/simple-select:bg-state-base-hover-alt ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}>
<span className={classNames('block truncate text-left system-sm-regular text-components-input-text-filled', !selectedItem?.name && 'text-components-input-text-placeholder')}>{selectedItem?.name ?? localPlaceholder}</span>
<span className="absolute inset-y-0 right-0 flex items-center pr-2">
{(selectedItem && !notClearable)
{isLoading ? <RiLoader4Line className='h-3.5 w-3.5 animate-spin text-text-secondary' />
: (selectedItem && !notClearable)
? (
<XMarkIcon
onClick={(e) => {
@ -237,7 +251,7 @@ const SimpleSelect: FC<ISelectProps> = ({
</ListboxButton>
)}
{!disabled && (
{(!disabled) && (
<ListboxOptions className={classNames('absolute z-10 mt-1 px-1 max-h-60 w-full overflow-auto rounded-xl bg-components-panel-bg-blur backdrop-blur-sm py-1 text-base shadow-lg border-components-panel-border border-[0.5px] focus:outline-none sm:text-sm', optionWrapClassName)}>
{items.map((item: Item) => (
<ListboxOption

View File

@ -1,6 +1,5 @@
import { useState } from 'react'
import { useCallback, useState } from 'react'
import type { ChangeEvent, FC, KeyboardEvent } from 'react'
import { } from 'use-context-selector'
import { useTranslation } from 'react-i18next'
import AutosizeInput from 'react-18-input-autosize'
import { RiAddLine, RiCloseLine } from '@remixicon/react'
@ -40,6 +39,29 @@ const TagInput: FC<TagInputProps> = ({
onChange(copyItems)
}
const handleNewTag = useCallback((value: string) => {
const valueTrimmed = value.trim()
if (!valueTrimmed) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordEmpty') })
return
}
if ((items.find(item => item === valueTrimmed))) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordDuplicate') })
return
}
if (valueTrimmed.length > 20) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordError') })
return
}
onChange([...items, valueTrimmed])
setTimeout(() => {
setValue('')
})
}, [items, onChange, notify, t])
const handleKeyDown = (e: KeyboardEvent) => {
if (isSpecialMode && e.key === 'Enter')
setValue(`${value}`)
@ -48,24 +70,12 @@ const TagInput: FC<TagInputProps> = ({
if (isSpecialMode)
e.preventDefault()
const valueTrimmed = value.trim()
if (!valueTrimmed || (items.find(item => item === valueTrimmed)))
return
if (valueTrimmed.length > 20) {
notify({ type: 'error', message: t('datasetDocuments.segment.keywordError') })
return
}
onChange([...items, valueTrimmed])
setTimeout(() => {
setValue('')
})
handleNewTag(value)
}
}
const handleBlur = () => {
setValue('')
handleNewTag(value)
setFocused(false)
}

View File

@ -52,8 +52,8 @@ const StepThree = ({ datasetId, datasetName, indexingType, creationCache, retrie
datasetId={datasetId || creationCache?.dataset?.id || ''}
batchId={creationCache?.batch || ''}
documents={creationCache?.documents as FullDocumentDetail[]}
indexingType={indexingType || creationCache?.dataset?.indexing_technique}
retrievalMethod={retrievalMethod || creationCache?.dataset?.retrieval_model?.search_method}
indexingType={creationCache?.dataset?.indexing_technique || indexingType}
retrievalMethod={creationCache?.dataset?.retrieval_model_dict?.search_method || retrievalMethod}
/>
</div>
</div>

View File

@ -575,6 +575,7 @@ const StepTwo = ({
onSuccess(data) {
updateIndexingTypeCache && updateIndexingTypeCache(indexType as string)
updateResultCache && updateResultCache(data)
updateRetrievalMethodCache && updateRetrievalMethodCache(retrievalConfig.search_method as string)
},
})
}

View File

@ -1,4 +1,4 @@
import React, { type FC, useMemo, useState } from 'react'
import React, { type FC, useCallback, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import {
RiCloseLine,
@ -16,8 +16,10 @@ import { useSegmentListContext } from './index'
import { ChunkingMode, type SegmentDetailModel } from '@/models/datasets'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { formatNumber } from '@/utils/format'
import classNames from '@/utils/classnames'
import cn from '@/utils/classnames'
import Divider from '@/app/components/base/divider'
import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail'
import { IndexingType } from '../../../create/step-two'
type ISegmentDetailProps = {
segInfo?: Partial<SegmentDetailModel> & { id: string }
@ -48,6 +50,7 @@ const SegmentDetail: FC<ISegmentDetailProps> = ({
const toggleFullScreen = useSegmentListContext(s => s.toggleFullScreen)
const mode = useDocumentContext(s => s.mode)
const parentMode = useDocumentContext(s => s.parentMode)
const indexingTechnique = useDatasetDetailContextWithSelector(s => s.dataset?.indexing_technique)
eventEmitter?.useSubscription((v) => {
if (v === 'update-segment')
@ -56,56 +59,41 @@ const SegmentDetail: FC<ISegmentDetailProps> = ({
setLoading(false)
})
const handleCancel = () => {
const handleCancel = useCallback(() => {
onCancel()
}
}, [onCancel])
const handleSave = () => {
const handleSave = useCallback(() => {
onUpdate(segInfo?.id || '', question, answer, keywords)
}
}, [onUpdate, segInfo?.id, question, answer, keywords])
const handleRegeneration = () => {
const handleRegeneration = useCallback(() => {
setShowRegenerationModal(true)
}
}, [])
const onCancelRegeneration = () => {
const onCancelRegeneration = useCallback(() => {
setShowRegenerationModal(false)
}
}, [])
const onConfirmRegeneration = () => {
const onConfirmRegeneration = useCallback(() => {
onUpdate(segInfo?.id || '', question, answer, keywords, true)
}
const isParentChildMode = useMemo(() => {
return mode === 'hierarchical'
}, [mode])
const isFullDocMode = useMemo(() => {
return mode === 'hierarchical' && parentMode === 'full-doc'
}, [mode, parentMode])
const titleText = useMemo(() => {
return isEditMode ? t('datasetDocuments.segment.editChunk') : t('datasetDocuments.segment.chunkDetail')
}, [isEditMode, t])
const isQAModel = useMemo(() => {
return docForm === ChunkingMode.qa
}, [docForm])
}, [onUpdate, segInfo?.id, question, answer, keywords])
const wordCountText = useMemo(() => {
const contentLength = isQAModel ? (question.length + answer.length) : question.length
const contentLength = docForm === ChunkingMode.qa ? (question.length + answer.length) : question.length
const total = formatNumber(isEditMode ? contentLength : segInfo!.word_count as number)
const count = isEditMode ? contentLength : segInfo!.word_count as number
return `${total} ${t('datasetDocuments.segment.characters', { count })}`
}, [isEditMode, question.length, answer.length, isQAModel, segInfo, t])
}, [isEditMode, question.length, answer.length, docForm, segInfo, t])
const labelPrefix = useMemo(() => {
return isParentChildMode ? t('datasetDocuments.segment.parentChunk') : t('datasetDocuments.segment.chunk')
}, [isParentChildMode, t])
const isFullDocMode = mode === 'hierarchical' && parentMode === 'full-doc'
const titleText = isEditMode ? t('datasetDocuments.segment.editChunk') : t('datasetDocuments.segment.chunkDetail')
const labelPrefix = mode === 'hierarchical' ? t('datasetDocuments.segment.parentChunk') : t('datasetDocuments.segment.chunk')
const isECOIndexing = indexingTechnique === IndexingType.ECONOMICAL
return (
<div className={'flex h-full flex-col'}>
<div className={classNames('flex items-center justify-between', fullScreen ? 'py-3 pr-4 pl-6 border border-divider-subtle' : 'pt-3 pr-3 pl-4')}>
<div className={cn('flex items-center justify-between', fullScreen ? 'border border-divider-subtle py-3 pl-6 pr-4' : 'pl-4 pr-3 pt-3')}>
<div className='flex flex-col'>
<div className='system-xl-semibold text-text-primary'>{titleText}</div>
<div className='flex items-center gap-x-2'>
@ -134,12 +122,12 @@ const SegmentDetail: FC<ISegmentDetailProps> = ({
</div>
</div>
</div>
<div className={classNames(
<div className={cn(
'flex grow',
fullScreen ? 'w-full flex-row justify-center px-6 pt-6 gap-x-8' : 'flex-col gap-y-1 py-3 px-4',
!isEditMode && 'pb-0 overflow-hidden',
fullScreen ? 'w-full flex-row justify-center gap-x-8 px-6 pt-6' : 'flex-col gap-y-1 px-4 py-3',
!isEditMode && 'overflow-hidden pb-0',
)}>
<div className={classNames(isEditMode ? 'break-all whitespace-pre-line overflow-hidden' : 'overflow-y-auto', fullScreen ? 'w-1/2' : 'grow')}>
<div className={cn(isEditMode ? 'overflow-hidden whitespace-pre-line break-all' : 'overflow-y-auto', fullScreen ? 'w-1/2' : 'grow')}>
<ChunkContent
docForm={docForm}
question={question}
@ -149,7 +137,7 @@ const SegmentDetail: FC<ISegmentDetailProps> = ({
isEditMode={isEditMode}
/>
</div>
{mode === 'custom' && <Keywords
{isECOIndexing && <Keywords
className={fullScreen ? 'w-1/5' : ''}
actionType={isEditMode ? 'edit' : 'view'}
segInfo={segInfo}

View File

@ -1,4 +1,4 @@
import { memo, useMemo, useRef, useState } from 'react'
import { memo, useCallback, useMemo, useRef, useState } from 'react'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
@ -12,7 +12,6 @@ import Keywords from './completed/common/keywords'
import ChunkContent from './completed/common/chunk-content'
import AddAnother from './completed/common/add-another'
import Dot from './completed/common/dot'
import { useDocumentContext } from './index'
import { useStore as useAppStore } from '@/app/components/app/store'
import { ToastContext } from '@/app/components/base/toast'
import { ChunkingMode, type SegmentUpdater } from '@/models/datasets'
@ -20,6 +19,8 @@ import classNames from '@/utils/classnames'
import { formatNumber } from '@/utils/format'
import Divider from '@/app/components/base/divider'
import { useAddSegment } from '@/service/knowledge/use-segment'
import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail'
import { IndexingType } from '../../create/step-two'
type NewSegmentModalProps = {
onCancel: () => void
@ -44,39 +45,37 @@ const NewSegmentModal: FC<NewSegmentModalProps> = ({
const [addAnother, setAddAnother] = useState(true)
const fullScreen = useSegmentListContext(s => s.fullScreen)
const toggleFullScreen = useSegmentListContext(s => s.toggleFullScreen)
const mode = useDocumentContext(s => s.mode)
const indexingTechnique = useDatasetDetailContextWithSelector(s => s.dataset?.indexing_technique)
const { appSidebarExpand } = useAppStore(useShallow(state => ({
appSidebarExpand: state.appSidebarExpand,
})))
const refreshTimer = useRef<any>(null)
const CustomButton = <>
<Divider type='vertical' className='mx-1 h-3 bg-divider-regular' />
<button
type='button'
className='system-xs-semibold text-text-accent'
onClick={() => {
clearTimeout(refreshTimer.current)
viewNewlyAddedChunk()
}}>
{t('common.operation.view')}
</button>
</>
const CustomButton = useMemo(() => (
<>
<Divider type='vertical' className='mx-1 h-3 bg-divider-regular' />
<button
type='button'
className='system-xs-semibold text-text-accent'
onClick={() => {
clearTimeout(refreshTimer.current)
viewNewlyAddedChunk()
}}>
{t('common.operation.view')}
</button>
</>
), [viewNewlyAddedChunk, t])
const isQAModel = useMemo(() => {
return docForm === ChunkingMode.qa
}, [docForm])
const handleCancel = (actionType: 'esc' | 'add' = 'esc') => {
const handleCancel = useCallback((actionType: 'esc' | 'add' = 'esc') => {
if (actionType === 'esc' || !addAnother)
onCancel()
}
}, [onCancel, addAnother])
const { mutateAsync: addSegment } = useAddSegment()
const handleSave = async () => {
const handleSave = useCallback(async () => {
const params: SegmentUpdater = { content: '' }
if (isQAModel) {
if (docForm === ChunkingMode.qa) {
if (!question.trim()) {
return notify({
type: 'error',
@ -129,21 +128,27 @@ const NewSegmentModal: FC<NewSegmentModalProps> = ({
setLoading(false)
},
})
}
}, [docForm, keywords, addSegment, datasetId, documentId, question, answer, notify, t, appSidebarExpand, CustomButton, handleCancel, onSave])
const wordCountText = useMemo(() => {
const count = isQAModel ? (question.length + answer.length) : question.length
const count = docForm === ChunkingMode.qa ? (question.length + answer.length) : question.length
return `${formatNumber(count)} ${t('datasetDocuments.segment.characters', { count })}`
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [question.length, answer.length, isQAModel])
}, [question.length, answer.length, docForm, t])
const isECOIndexing = indexingTechnique === IndexingType.ECONOMICAL
return (
<div className={'flex h-full flex-col'}>
<div className={classNames('flex items-center justify-between', fullScreen ? 'py-3 pr-4 pl-6 border border-divider-subtle' : 'pt-3 pr-3 pl-4')}>
<div
className={classNames(
'flex items-center justify-between',
fullScreen ? 'border border-divider-subtle py-3 pl-6 pr-4' : 'pl-4 pr-3 pt-3',
)}
>
<div className='flex flex-col'>
<div className='system-xl-semibold text-text-primary'>{
t('datasetDocuments.segment.addChunk')
}</div>
<div className='system-xl-semibold text-text-primary'>
{t('datasetDocuments.segment.addChunk')}
</div>
<div className='flex items-center gap-x-2'>
<SegmentIndexTag label={t('datasetDocuments.segment.newChunk')!} />
<Dot />
@ -171,8 +176,8 @@ const NewSegmentModal: FC<NewSegmentModalProps> = ({
</div>
</div>
</div>
<div className={classNames('flex grow', fullScreen ? 'w-full flex-row justify-center px-6 pt-6 gap-x-8' : 'flex-col gap-y-1 py-3 px-4')}>
<div className={classNames('break-all overflow-hidden whitespace-pre-line', fullScreen ? 'w-1/2' : 'grow')}>
<div className={classNames('flex grow', fullScreen ? 'w-full flex-row justify-center gap-x-8 px-6 pt-6' : 'flex-col gap-y-1 px-4 py-3')}>
<div className={classNames('overflow-hidden whitespace-pre-line break-all', fullScreen ? 'w-1/2' : 'grow')}>
<ChunkContent
docForm={docForm}
question={question}
@ -182,7 +187,7 @@ const NewSegmentModal: FC<NewSegmentModalProps> = ({
isEditMode={true}
/>
</div>
{mode === 'custom' && <Keywords
{isECOIndexing && <Keywords
className={fullScreen ? 'w-1/5' : ''}
actionType='add'
keywords={keywords}

View File

@ -19,12 +19,14 @@ export enum FormTypeEnum {
toolSelector = 'tool-selector',
multiToolSelector = 'array[tools]',
appSelector = 'app-selector',
dynamicSelect = 'dynamic-select',
}
export type FormOption = {
label: TypeWithI18N
value: string
show_on: FormShowOnObject[]
icon?: string
}
export enum ModelTypeEnum {

View File

@ -30,7 +30,7 @@ const HeaderWrapper = ({
return (
<div className={classNames(
'sticky top-0 left-0 right-0 z-30 flex flex-col grow-0 shrink-0 basis-auto min-h-[56px]',
'sticky left-0 right-0 top-0 z-[15] flex min-h-[56px] shrink-0 grow-0 basis-auto flex-col',
s.header,
isBordered ? 'border-b border-divider-regular' : '',
)}

View File

@ -117,6 +117,7 @@ const MultipleToolSelector = ({
)}
{!disabled && (
<ActionButton className='mx-1' onClick={() => {
setCollapse(false)
setOpen(!open)
setPanelShowState(true)
}}>
@ -126,23 +127,6 @@ const MultipleToolSelector = ({
</div>
{!collapse && (
<>
<ToolSelector
nodeId={nodeId}
nodeOutputVars={nodeOutputVars}
availableNodes={availableNodes}
scope={scope}
value={undefined}
selectedTools={value}
onSelect={handleAdd}
controlledState={open}
onControlledStateChange={setOpen}
trigger={
<div className=''></div>
}
panelShowState={panelShowState}
onPanelShowStateChange={setPanelShowState}
isEdit={false}
/>
{value.length === 0 && (
<div className='system-xs-regular flex justify-center rounded-[10px] bg-background-section p-3 text-text-tertiary'>{t('plugin.detailPanel.toolSelector.empty')}</div>
)}
@ -164,6 +148,23 @@ const MultipleToolSelector = ({
))}
</>
)}
<ToolSelector
nodeId={nodeId}
nodeOutputVars={nodeOutputVars}
availableNodes={availableNodes}
scope={scope}
value={undefined}
selectedTools={value}
onSelect={handleAdd}
controlledState={open}
onControlledStateChange={setOpen}
trigger={
<div className=''></div>
}
panelShowState={panelShowState}
onPanelShowStateChange={setPanelShowState}
isEdit={false}
/>
</>
)
}

View File

@ -275,7 +275,7 @@ const ToolSelector: FC<Props> = ({
/>
)}
</PortalToFollowElemTrigger>
<PortalToFollowElemContent className='z-[1000]'>
<PortalToFollowElemContent>
<div className={cn('relative max-h-[642px] min-h-20 w-[361px] rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-bg-blur pb-4 shadow-lg backdrop-blur-sm', !isShowSettingAuth && 'overflow-y-auto pb-2')}>
{!isShowSettingAuth && (
<>

View File

@ -136,7 +136,7 @@ const PluginPage = ({
const options = usePluginPageContext(v => v.options)
const activeTab = usePluginPageContext(v => v.activeTab)
const setActiveTab = usePluginPageContext(v => v.setActiveTab)
const { enable_marketplace, branding } = useGlobalPublicStore(s => s.systemFeatures)
const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures)
const isPluginsTab = useMemo(() => activeTab === PLUGIN_PAGE_TABS_MAP.plugins, [activeTab])
const isExploringMarketplace = useMemo(() => {
@ -225,7 +225,7 @@ const PluginPage = ({
)
}
{
canSetPermissions && !branding.enabled && (
canSetPermissions && (
<Tooltip
popupContent={t('plugin.privilege.title')}
>

View File

@ -6,9 +6,9 @@ import Item from './item'
import type { Plugin } from '@/app/components/plugins/types.ts'
import cn from '@/utils/classnames'
import Link from 'next/link'
import { MARKETPLACE_URL_PREFIX } from '@/config'
import { RiArrowRightUpLine, RiSearchLine } from '@remixicon/react'
import { noop } from 'lodash-es'
import { getMarketplaceUrl } from '@/utils/var'
export type ListProps = {
wrapElemRef: React.RefObject<HTMLElement>
@ -32,7 +32,7 @@ const List = forwardRef<ListRef, ListProps>(({
const { t } = useTranslation()
const hasFilter = !searchText
const hasRes = list.length > 0
const urlWithSearchText = `${MARKETPLACE_URL_PREFIX}/?q=${searchText}&tags=${tags.join(',')}`
const urlWithSearchText = getMarketplaceUrl('', { q: searchText, tags: tags.join(',') })
const nextToStickyELemRef = useRef<HTMLDivElement>(null)
const { handleScroll, scrollPosition } = useStickyScroll({
@ -71,7 +71,7 @@ const List = forwardRef<ListRef, ListProps>(({
return (
<Link
className='system-sm-medium sticky bottom-0 z-10 flex h-8 cursor-pointer items-center rounded-b-lg border-[0.5px] border-t border-components-panel-border bg-components-panel-bg-blur px-4 py-1 text-text-accent-light-mode-only shadow-lg'
href={`${MARKETPLACE_URL_PREFIX}/`}
href={getMarketplaceUrl('')}
target='_blank'
>
<span>{t('plugin.findMoreInMarketplace')}</span>

View File

@ -36,7 +36,7 @@ export type ToolValue = {
provider_name: string
tool_name: string
tool_label: string
tool_description: string
tool_description?: string
settings?: Record<string, any>
parameters?: Record<string, any>
enabled?: boolean

View File

@ -33,6 +33,8 @@ const HeaderInNormal = ({
const setShowWorkflowVersionHistoryPanel = useStore(s => s.setShowWorkflowVersionHistoryPanel)
const setShowEnvPanel = useStore(s => s.setShowEnvPanel)
const setShowDebugAndPreviewPanel = useStore(s => s.setShowDebugAndPreviewPanel)
const setShowVariableInspectPanel = useStore(s => s.setShowVariableInspectPanel)
const setShowChatVariablePanel = useStore(s => s.setShowChatVariablePanel)
const nodes = useNodes<StartNodeType>()
const selectedNode = nodes.find(node => node.data.selected)
const { handleBackupDraft } = useWorkflowRun()
@ -46,8 +48,10 @@ const HeaderInNormal = ({
setShowWorkflowVersionHistoryPanel(true)
setShowEnvPanel(false)
setShowDebugAndPreviewPanel(false)
setShowVariableInspectPanel(false)
setShowChatVariablePanel(false)
}, [handleBackupDraft, workflowStore, handleNodeSelect, selectedNode,
setShowWorkflowVersionHistoryPanel, setShowEnvPanel, setShowDebugAndPreviewPanel])
setShowWorkflowVersionHistoryPanel, setShowEnvPanel, setShowDebugAndPreviewPanel, setShowVariableInspectPanel])
return (
<>

View File

@ -15,7 +15,7 @@ import { pluginManifestToCardPluginProps } from '@/app/components/plugins/instal
import { Badge as Badge2, BadgeState } from '@/app/components/base/badge/index'
import Link from 'next/link'
import { useTranslation } from 'react-i18next'
import { MARKETPLACE_URL_PREFIX } from '@/config'
import { getMarketplaceUrl } from '@/utils/var'
export type SwitchPluginVersionProps = {
uniqueIdentifier: string
@ -82,7 +82,7 @@ export const SwitchPluginVersion: FC<SwitchPluginVersionProps> = (props) => {
modalBottomLeft={
<Link
className='flex items-center justify-center gap-1'
href={`${MARKETPLACE_URL_PREFIX}/plugins/${pluginDetail.declaration.author}/${pluginDetail.declaration.name}`}
href={getMarketplaceUrl(`/plugins/${pluginDetail.declaration.author}/${pluginDetail.declaration.name}`)}
target='_blank'
>
<span className='system-xs-regular text-xs text-text-accent'>

View File

@ -13,6 +13,8 @@ type Props = {
readonly: boolean
value: string
onChange: (value: string | number, varKindType: VarKindType, varInfo?: Var) => void
onOpenChange?: (open: boolean) => void
isLoading?: boolean
}
const DEFAULT_SCHEMA = {} as CredentialFormSchema
@ -22,6 +24,8 @@ const ConstantField: FC<Props> = ({
readonly,
value,
onChange,
onOpenChange,
isLoading,
}) => {
const language = useLanguage()
const placeholder = (schema as CredentialFormSchemaSelect).placeholder
@ -36,7 +40,7 @@ const ConstantField: FC<Props> = ({
return (
<>
{schema.type === FormTypeEnum.select && (
{(schema.type === FormTypeEnum.select || schema.type === FormTypeEnum.dynamicSelect) && (
<SimpleSelect
wrapperClassName='w-full !h-8'
className='flex items-center'
@ -45,6 +49,8 @@ const ConstantField: FC<Props> = ({
items={(schema as CredentialFormSchemaSelect).options.map(option => ({ value: option.value, name: option.label[language] || option.label.en_US }))}
onSelect={item => handleSelectChange(item.value)}
placeholder={placeholder?.[language] || placeholder?.en_US}
onOpenChange={onOpenChange}
isLoading={isLoading}
/>
)}
{schema.type === FormTypeEnum.textNumber && (

View File

@ -1090,13 +1090,13 @@ export const getNodeUsedVarPassToServerKey = (node: Node, valueSelector: ValueSe
break
}
case BlockEnum.Code: {
const targetVar = (data as CodeNodeType).variables?.find(v => v.value_selector.join('.') === valueSelector.join('.'))
const targetVar = (data as CodeNodeType).variables?.find(v => Array.isArray(v.value_selector) && v.value_selector && v.value_selector.join('.') === valueSelector.join('.'))
if (targetVar)
res = targetVar.variable
break
}
case BlockEnum.TemplateTransform: {
const targetVar = (data as TemplateTransformNodeType).variables?.find(v => v.value_selector.join('.') === valueSelector.join('.'))
const targetVar = (data as TemplateTransformNodeType).variables?.find(v => Array.isArray(v.value_selector) && v.value_selector && v.value_selector.join('.') === valueSelector.join('.'))
if (targetVar)
res = targetVar.variable
break

View File

@ -6,6 +6,7 @@ import {
RiArrowDownSLine,
RiCloseLine,
RiErrorWarningFill,
RiLoader4Line,
RiMoreLine,
} from '@remixicon/react'
import produce from 'immer'
@ -16,8 +17,9 @@ import VarReferencePopup from './var-reference-popup'
import { getNodeInfoById, isConversationVar, isENV, isSystemVar, varTypeToStructType } from './utils'
import ConstantField from './constant-field'
import cn from '@/utils/classnames'
import type { Node, NodeOutPutVar, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchema } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type { Node, NodeOutPutVar, ToolWithProvider, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchemaSelect } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { type CredentialFormSchema, type FormOption, FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { BlockEnum } from '@/app/components/workflow/types'
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
@ -40,6 +42,8 @@ import Tooltip from '@/app/components/base/tooltip'
import { isExceptionVariable } from '@/app/components/workflow/utils'
import VarFullPathPanel from './var-full-path-panel'
import { noop } from 'lodash-es'
import { useFetchDynamicOptions } from '@/service/use-plugins'
import type { Tool } from '@/app/components/tools/types'
const TRIGGER_DEFAULT_WIDTH = 227
@ -68,6 +72,8 @@ type Props = {
minWidth?: number
popupFor?: 'assigned' | 'toAssigned'
zIndex?: number
currentTool?: Tool
currentProvider?: ToolWithProvider
}
const DEFAULT_VALUE_SELECTOR: Props['value'] = []
@ -97,6 +103,8 @@ const VarReferencePicker: FC<Props> = ({
minWidth,
popupFor,
zIndex,
currentTool,
currentProvider,
}) => {
const { t } = useTranslation()
const store = useStoreApi()
@ -316,6 +324,42 @@ const VarReferencePicker: FC<Props> = ({
return null
}, [isValidVar, isShowAPart, hasValue, t, outputVarNode?.title, outputVarNode?.type, value, type])
const [dynamicOptions, setDynamicOptions] = useState<FormOption[] | null>(null)
const [isLoading, setIsLoading] = useState(false)
const { mutateAsync: fetchDynamicOptions } = useFetchDynamicOptions(
currentProvider?.plugin_id || '', currentProvider?.name || '', currentTool?.name || '', (schema as CredentialFormSchemaSelect)?.variable || '',
'tool',
)
const handleFetchDynamicOptions = async () => {
if (schema?.type !== FormTypeEnum.dynamicSelect || !currentTool || !currentProvider)
return
setIsLoading(true)
try {
const data = await fetchDynamicOptions()
setDynamicOptions(data?.options || [])
}
finally {
setIsLoading(false)
}
}
useEffect(() => {
handleFetchDynamicOptions()
}, [currentTool, currentProvider, schema])
const schemaWithDynamicSelect = useMemo(() => {
if (schema?.type !== FormTypeEnum.dynamicSelect)
return schema
// rewrite schema.options with dynamicOptions
if (dynamicOptions) {
return {
...schema,
options: dynamicOptions,
}
}
return schema
}, [dynamicOptions])
return (
<div className={cn(className, !readonly && 'cursor-pointer')}>
<PortalToFollowElem
@ -366,8 +410,9 @@ const VarReferencePicker: FC<Props> = ({
<ConstantField
value={value as string}
onChange={onChange as ((value: string | number, varKindType: VarKindType, varInfo?: Var) => void)}
schema={schema as CredentialFormSchema}
schema={schemaWithDynamicSelect as CredentialFormSchema}
readonly={readonly}
isLoading={isLoading}
/>
)
: (
@ -412,6 +457,7 @@ const VarReferencePicker: FC<Props> = ({
)}
<div className='flex items-center text-text-accent'>
{!hasValue && <Variable02 className='h-3.5 w-3.5' />}
{isLoading && <RiLoader4Line className='h-3.5 w-3.5 animate-spin text-text-secondary' />}
{isEnv && <Env className='h-3.5 w-3.5 text-util-colors-violet-violet-600' />}
{isChatVar && <BubbleX className='h-3.5 w-3.5 text-util-colors-teal-teal-700' />}
<div className={cn('ml-0.5 truncate text-xs font-medium', isEnv && '!text-text-secondary', isChatVar && 'text-util-colors-teal-teal-700', isException && 'text-text-warning')} title={varName} style={{
@ -424,7 +470,16 @@ const VarReferencePicker: FC<Props> = ({
{!isValidVar && <RiErrorWarningFill className='ml-0.5 h-3 w-3 text-text-destructive' />}
</>
)
: <div className={`overflow-hidden ${readonly ? 'text-components-input-text-disabled' : 'text-components-input-text-placeholder'} system-sm-regular text-ellipsis`}>{placeholder ?? t('workflow.common.setVarValuePlaceholder')}</div>}
: <div className={`overflow-hidden ${readonly ? 'text-components-input-text-disabled' : 'text-components-input-text-placeholder'} system-sm-regular text-ellipsis`}>
{isLoading ? (
<div className='flex items-center'>
<RiLoader4Line className='mr-1 h-3.5 w-3.5 animate-spin text-text-secondary' />
<span>{placeholder ?? t('workflow.common.setVarValuePlaceholder')}</span>
</div>
) : (
placeholder ?? t('workflow.common.setVarValuePlaceholder')
)}
</div>}
</div>
</Tooltip>
</div>

View File

@ -6,7 +6,7 @@ import { useTranslation } from 'react-i18next'
import type { ToolVarInputs } from '../types'
import { VarType as VarKindType } from '../types'
import cn from '@/utils/classnames'
import type { ValueSelector, Var } from '@/app/components/workflow/types'
import type { ToolWithProvider, ValueSelector, Var } from '@/app/components/workflow/types'
import type { CredentialFormSchema } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks'
@ -17,6 +17,7 @@ import { VarType } from '@/app/components/workflow/types'
import AppSelector from '@/app/components/plugins/plugin-detail-panel/app-selector'
import ModelParameterModal from '@/app/components/plugins/plugin-detail-panel/model-selector'
import { noop } from 'lodash-es'
import type { Tool } from '@/app/components/tools/types'
type Props = {
readOnly: boolean
@ -27,6 +28,8 @@ type Props = {
onOpen?: (index: number) => void
isSupportConstantValue?: boolean
filterVar?: (payload: Var, valueSelector: ValueSelector) => boolean
currentTool?: Tool
currentProvider?: ToolWithProvider
}
const InputVarList: FC<Props> = ({
@ -38,6 +41,8 @@ const InputVarList: FC<Props> = ({
onOpen = noop,
isSupportConstantValue,
filterVar,
currentTool,
currentProvider,
}) => {
const language = useLanguage()
const { t } = useTranslation()
@ -58,6 +63,8 @@ const InputVarList: FC<Props> = ({
return 'ModelSelector'
else if (type === FormTypeEnum.toolSelector)
return 'ToolSelector'
else if (type === FormTypeEnum.dynamicSelect || type === FormTypeEnum.select)
return 'Select'
else
return 'String'
}
@ -149,6 +156,7 @@ const InputVarList: FC<Props> = ({
const handleOpen = useCallback((index: number) => {
return () => onOpen(index)
}, [onOpen])
return (
<div className='space-y-3'>
{
@ -163,7 +171,8 @@ const InputVarList: FC<Props> = ({
} = schema
const varInput = value[variable]
const isNumber = type === FormTypeEnum.textNumber
const isSelect = type === FormTypeEnum.select
const isDynamicSelect = type === FormTypeEnum.dynamicSelect
const isSelect = type === FormTypeEnum.select || type === FormTypeEnum.dynamicSelect
const isFile = type === FormTypeEnum.file || type === FormTypeEnum.files
const isAppSelector = type === FormTypeEnum.appSelector
const isModelSelector = type === FormTypeEnum.modelSelector
@ -198,11 +207,13 @@ const InputVarList: FC<Props> = ({
value={varInput?.type === VarKindType.constant ? (varInput?.value ?? '') : (varInput?.value ?? [])}
onChange={handleNotMixedTypeChange(variable)}
onOpen={handleOpen(index)}
defaultVarKindType={varInput?.type || (isNumber ? VarKindType.constant : VarKindType.variable)}
defaultVarKindType={varInput?.type || ((isNumber || isDynamicSelect) ? VarKindType.constant : VarKindType.variable)}
isSupportConstantValue={isSupportConstantValue}
filterVar={isNumber ? filterVar : undefined}
availableVars={isSelect ? availableVars : undefined}
schema={schema}
currentTool={currentTool}
currentProvider={currentProvider}
/>
)}
{isFile && (

View File

@ -42,6 +42,7 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
isLoading,
outputSchema,
hasObjectOutput,
currTool,
} = useConfig(id, data)
if (isLoading) {
@ -80,6 +81,8 @@ const Panel: FC<NodePanelProps<ToolNodeType>> = ({
filterVar={filterVar}
isSupportConstantValue
onOpen={handleOnVarOpen}
currentProvider={currCollection}
currentTool={currTool}
/>
</Field>
)}

View File

@ -1,5 +1,5 @@
import type { FC } from 'react'
import { memo, useEffect, useRef } from 'react'
import { memo, useCallback, useEffect, useRef } from 'react'
import { useNodes } from 'reactflow'
import type { CommonNodeType } from '../types'
import { Panel as NodePanel } from '../nodes'
@ -13,6 +13,51 @@ export type PanelProps = {
right?: React.ReactNode
}
}
/**
* Reference MDN standard implementationhttps://developer.mozilla.org/zh-CN/docs/Web/API/ResizeObserverEntry/borderBoxSize
*/
const getEntryWidth = (entry: ResizeObserverEntry, element: HTMLElement): number => {
if (entry.borderBoxSize?.length > 0)
return entry.borderBoxSize[0].inlineSize
if (entry.contentRect.width > 0)
return entry.contentRect.width
return element.getBoundingClientRect().width
}
const useResizeObserver = (
callback: (width: number) => void,
dependencies: React.DependencyList = [],
) => {
const elementRef = useRef<HTMLDivElement>(null)
const stableCallback = useCallback(callback, [callback])
useEffect(() => {
const element = elementRef.current
if (!element) return
const resizeObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const width = getEntryWidth(entry, element)
stableCallback(width)
}
})
resizeObserver.observe(element)
const initialWidth = element.getBoundingClientRect().width
stableCallback(initialWidth)
return () => {
resizeObserver.disconnect()
}
}, [stableCallback, ...dependencies])
return elementRef
}
const Panel: FC<PanelProps> = ({
components,
}) => {
@ -20,44 +65,21 @@ const Panel: FC<PanelProps> = ({
const selectedNode = nodes.find(node => node.data.selected)
const showEnvPanel = useStore(s => s.showEnvPanel)
const isRestoring = useStore(s => s.isRestoring)
const showWorkflowVersionHistoryPanel = useStore(s => s.showWorkflowVersionHistoryPanel)
const rightPanelRef = useRef<HTMLDivElement>(null)
const setRightPanelWidth = useStore(s => s.setRightPanelWidth)
// get right panel width
useEffect(() => {
if (rightPanelRef.current) {
const resizeRightPanelObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const { inlineSize } = entry.borderBoxSize[0]
setRightPanelWidth(inlineSize)
}
})
resizeRightPanelObserver.observe(rightPanelRef.current)
return () => {
resizeRightPanelObserver.disconnect()
}
}
}, [setRightPanelWidth])
const otherPanelRef = useRef<HTMLDivElement>(null)
const setOtherPanelWidth = useStore(s => s.setOtherPanelWidth)
// get other panel width
useEffect(() => {
if (otherPanelRef.current) {
const resizeOtherPanelObserver = new ResizeObserver((entries) => {
for (const entry of entries) {
const { inlineSize } = entry.borderBoxSize[0]
setOtherPanelWidth(inlineSize)
}
})
resizeOtherPanelObserver.observe(otherPanelRef.current)
return () => {
resizeOtherPanelObserver.disconnect()
}
}
}, [setOtherPanelWidth])
const rightPanelRef = useResizeObserver(
setRightPanelWidth,
[setRightPanelWidth, selectedNode, showEnvPanel, showWorkflowVersionHistoryPanel],
)
const otherPanelRef = useResizeObserver(
setOtherPanelWidth,
[setOtherPanelWidth, showEnvPanel, showWorkflowVersionHistoryPanel],
)
return (
<div
ref={rightPanelRef}
@ -65,26 +87,14 @@ const Panel: FC<PanelProps> = ({
className={cn('absolute bottom-1 right-0 top-14 z-10 flex outline-none')}
key={`${isRestoring}`}
>
{
components?.left
}
{
!!selectedNode && (
<NodePanel {...selectedNode!} />
)
}
{components?.left}
{!!selectedNode && <NodePanel {...selectedNode} />}
<div
className='relative'
className="relative"
ref={otherPanelRef}
>
{
components?.right
}
{
showEnvPanel && (
<EnvPanel />
)
}
{components?.right}
{showEnvPanel && <EnvPanel />}
</div>
</div>
)

View File

@ -29,6 +29,7 @@ import type {
import ErrorHandleTip from '@/app/components/workflow/nodes/_base/components/error-handle/error-handle-tip'
import { hasRetryNode } from '@/app/components/workflow/utils'
import { useDocLink } from '@/context/i18n'
import Tooltip from '@/app/components/base/tooltip'
type Props = {
className?: string
@ -129,10 +130,16 @@ const NodePanel: FC<Props> = ({
/>
)}
<BlockIcon size={inMessage ? 'xs' : 'sm'} className={cn('mr-2 shrink-0', inMessage && '!mr-1')} type={nodeInfo.node_type} toolIcon={nodeInfo.extras?.icon || nodeInfo.extras} />
<div className={cn(
'system-xs-semibold-uppercase grow truncate text-text-secondary',
hideInfo && '!text-xs',
)} title={nodeInfo.title}>{nodeInfo.title}</div>
<Tooltip
popupContent={
<div className='max-w-xs'>{nodeInfo.title}</div>
}
>
<div className={cn(
'system-xs-semibold-uppercase grow truncate text-text-secondary',
hideInfo && '!text-xs',
)}>{nodeInfo.title}</div>
</Tooltip>
{nodeInfo.status !== 'running' && !hideInfo && (
<div className='system-xs-regular shrink-0 text-text-tertiary'>{nodeInfo.execution_metadata?.total_tokens ? `${getTokenCount(nodeInfo.execution_metadata?.total_tokens || 0)} tokens · ` : ''}{`${getTime(nodeInfo.elapsed_time || 0)}`}</div>
)}

View File

@ -15,7 +15,7 @@ const Empty: FC = () => {
<div className='system-xs-regular text-text-tertiary'>{t('workflow.debug.variableInspect.emptyTip')}</div>
<a
className='system-xs-regular cursor-pointer text-text-accent'
href='https://docs.dify.ai/guides/workflow/debug-and-preview/variable-inspect'
href='https://docs.dify.ai/en/guides/workflow/debug-and-preview/variable-inspect'
target='_blank'
rel='noopener noreferrer'>
{t('workflow.debug.variableInspect.emptyLink')}

View File

@ -12,6 +12,7 @@ import type { CommonNodeType } from '@/app/components/workflow/types'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { EVENT_WORKFLOW_STOP } from '@/app/components/workflow/variable-inspect/types'
import cn from '@/utils/classnames'
import { useNodesReadOnly } from '../hooks/use-workflow'
const VariableInspectTrigger: FC = () => {
const { t } = useTranslation()
@ -32,7 +33,10 @@ const VariableInspectTrigger: FC = () => {
const allVars = [...environmentVariables, ...conversationVars, ...systemVars, ...nodesWithInspectVars]
return allVars
}, [environmentVariables, conversationVars, systemVars, nodesWithInspectVars])
const {
nodesReadOnly,
getNodesReadOnly,
} = useNodesReadOnly()
const workflowRunningData = useStore(s => s.workflowRunningData)
const nodes = useNodes<CommonNodeType>()
const isStepRunning = useMemo(() => nodes.some(node => node.data._singleRunningStatus === NodeRunningStatus.Running), [nodes])
@ -61,8 +65,14 @@ const VariableInspectTrigger: FC = () => {
<div className={cn('flex items-center gap-1')}>
{!isRunning && !currentVars.length && (
<div
className='system-2xs-semibold-uppercase flex h-5 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-background-default-hover'
onClick={() => setShowVariableInspectPanel(true)}
className={cn('system-2xs-semibold-uppercase flex h-5 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-background-default-hover',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={() => {
if (getNodesReadOnly())
return
setShowVariableInspectPanel(true)
}}
>
{t('workflow.debug.variableInspect.trigger.normal')}
</div>
@ -70,13 +80,21 @@ const VariableInspectTrigger: FC = () => {
{!isRunning && currentVars.length > 0 && (
<>
<div
className='system-xs-medium flex h-6 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-accent shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent'
onClick={() => setShowVariableInspectPanel(true)}
className={cn('system-xs-medium flex h-6 cursor-pointer items-center gap-1 rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-2 text-text-accent shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={() => {
if (getNodesReadOnly())
return
setShowVariableInspectPanel(true)
}}
>
{t('workflow.debug.variableInspect.trigger.cached')}
</div>
<div
className='system-xs-medium flex h-6 cursor-pointer items-center rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-1 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent hover:text-text-accent'
className={cn('system-xs-medium flex h-6 cursor-pointer items-center rounded-md border-[0.5px] border-effects-highlight bg-components-actionbar-bg px-1 text-text-tertiary shadow-lg backdrop-blur-sm hover:bg-components-actionbar-bg-accent hover:text-text-accent',
nodesReadOnly && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
)}
onClick={handleClearAll}
>
{t('workflow.debug.variableInspect.trigger.clear')}

View File

@ -697,4 +697,15 @@ button:focus-within {
-ms-overflow-style: none;
scrollbar-width: none;
}
/* Hide arrows from number input */
.no-spinner::-webkit-outer-spin-button,
.no-spinner::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
.no-spinner {
-moz-appearance: textfield;
}
}

View File

@ -213,7 +213,7 @@ export default combine(
settings: {
tailwindcss: {
// These are the default values but feel free to customize
callees: ['classnames', 'clsx', 'ctl', 'cn'],
callees: ['classnames', 'clsx', 'ctl', 'cn', 'classNames'],
config: 'tailwind.config.js', // returned from `loadConfig()` utility if not provided
cssFiles: [
'**/*.css',

View File

@ -29,9 +29,10 @@ export const useTabSearchParams = ({
const router = useRouter()
const pathName = pathnameFromHook || window?.location?.pathname
const searchParams = useSearchParams()
const searchParamValue = searchParams.has(searchParamName) ? decodeURIComponent(searchParams.get(searchParamName)!) : defaultTab
const [activeTab, setTab] = useState<string>(
!disableSearchParams
? (searchParams.get(searchParamName) || defaultTab)
? searchParamValue
: defaultTab,
)
@ -39,7 +40,7 @@ export const useTabSearchParams = ({
setTab(newActiveTab)
if (disableSearchParams)
return
router[`${routingBehavior}`](`${pathName}?${searchParamName}=${newActiveTab}`)
router[`${routingBehavior}`](`${pathName}?${searchParamName}=${encodeURIComponent(newActiveTab)}`)
}
return [activeTab, setActiveTab] as const

View File

@ -199,9 +199,9 @@ const translation = {
accessControl: 'Web App Access Control',
accessItemsDescription: {
anyone: 'Anyone can access the web app (no login required)',
specific: 'Only specific members within the platform can access the Web application',
organization: 'All members within the platform can access the Web application',
external: 'Only authenticated external users can access the Web application',
specific: 'Only specific members within the platform can access the web app',
organization: 'All members within the platform can access the web app',
external: 'Only authenticated external users can access the web app',
},
accessControlDialog: {
title: 'Web App Access Control',
@ -218,7 +218,7 @@ const translation = {
members_one: '{{count}} MEMBER',
members_other: '{{count}} MEMBERS',
noGroupsOrMembers: 'No groups or members selected',
webAppSSONotEnabledTip: 'Please contact your organization administrator to configure external authentication for the Web application.',
webAppSSONotEnabledTip: 'Please contact your organization administrator to configure external authentication for the web app.',
operateGroupAndMember: {
searchPlaceholder: 'Search groups and members',
allMembers: 'All members',

View File

@ -355,7 +355,9 @@ const translation = {
newChildChunk: 'New Child Chunk',
keywords: 'KEYWORDS',
addKeyWord: 'Add keyword',
keywordEmpty: 'The keyword cannot be empty',
keywordError: 'The maximum length of keyword is 20',
keywordDuplicate: 'The keyword already exists',
characters_one: 'character',
characters_other: 'characters',
hitCount: 'Retrieval count',

View File

@ -86,5 +86,8 @@ export const useGetUserCanAccessApp = ({ appId, isInstalledApp = true, enabled }
enabled: !!appId && enabled,
staleTime: 0,
gcTime: 0,
initialData: {
result: !enabled,
},
})
}

View File

@ -1,5 +1,6 @@
import { useCallback, useEffect } from 'react'
import type {
FormOption,
ModelProvider,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { fetchModelProviderModelList } from '@/service/common'
@ -477,7 +478,7 @@ export const usePluginTaskList = (category?: PluginType) => {
refreshPluginList(category ? { category } as any : undefined, !category)
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isRefetching])
const handleRefetch = useCallback(() => {
@ -571,3 +572,17 @@ export const usePluginInfo = (providerName?: string) => {
enabled: !!providerName,
})
}
export const useFetchDynamicOptions = (plugin_id: string, provider: string, action: string, parameter: string, provider_type: 'tool') => {
return useMutation({
mutationFn: () => get<{ options: FormOption[] }>('/workspaces/current/plugin/parameters/dynamic-options', {
params: {
plugin_id,
provider,
action,
parameter,
provider_type,
},
}),
})
}