Mirror of https://github.com/langgenius/dify.git (synced 2026-02-02 09:56:36 +08:00)
Compare commits
59 Commits
| SHA1 |
|---|
| 0ff8bd2aa9 |
| 2866383228 |
| 00ac7edeb3 |
| 537068cfde |
| c3c6a48059 |
| 5c166b3f40 |
| 230fa3286b |
| 061c0b10fd |
| 32f8a98cf8 |
| 6c60ecb237 |
| c3fae5e801 |
| a594e256ae |
| 41d90c2408 |
| 7ff42b1b7a |
| 4d7cfd0de5 |
| 266d32bd77 |
| 7e1184c071 |
| 1ce51e57ab |
| 142b4fd699 |
| cc8feaa483 |
| d9d5d35a77 |
| 9277156b6c |
| 1490a19fa1 |
| 9b7adcd4d9 |
| a8d32f9964 |
| 5093337de1 |
| f54225568c |
| 255ff446ba |
| 9a0dc4bfdc |
| 9d975750bc |
| 7c979e6490 |
| d60ca1661c |
| bb62391a4c |
| 0b25c0b677 |
| a5d6082418 |
| 631cbcd781 |
| 20c4633d2a |
| 5669cac16d |
| 6180762160 |
| 376726cf90 |
| 284bb7ac71 |
| eca466bdaa |
| d56abec195 |
| 961e25f608 |
| 138bf698b0 |
| e5bb4cca12 |
| 5e2cb0e3a8 |
| 16a65cb367 |
| 1bae9b8ff7 |
| d7c1f43b49 |
| f933af9f57 |
| 91e1ff5e30 |
| 5908e10549 |
| 464e6354c5 |
| d470e55f8c |
| 98a1b01b0c |
| e240424be5 |
| 1cb5a12abb |
| ff2a4a6fcd |
13 .github/pull_request_template.md (vendored)
@@ -8,16 +8,9 @@ Please include a summary of the change and which issue is fixed. Please also inc

# Screenshots

<table>
<tr>
<td>Before: </td>
<td>After: </td>
</tr>
<tr>
<td>...</td>
<td>...</td>
</tr>
</table>
| Before | After |
|--------|-------|
| ... | ... |

# Checklist
@@ -259,7 +259,7 @@ def migrate_knowledge_vector_database():
    skipped_count = 0
    total_count = 0
    vector_type = dify_config.VECTOR_STORE
    upper_colletion_vector_types = {
    upper_collection_vector_types = {
        VectorType.MILVUS,
        VectorType.PGVECTOR,
        VectorType.RELYT,
@@ -267,7 +267,7 @@ def migrate_knowledge_vector_database():
        VectorType.ORACLE,
        VectorType.ELASTICSEARCH,
    }
    lower_colletion_vector_types = {
    lower_collection_vector_types = {
        VectorType.ANALYTICDB,
        VectorType.CHROMA,
        VectorType.MYSCALE,
@@ -307,7 +307,7 @@ def migrate_knowledge_vector_database():
            continue
        collection_name = ""
        dataset_id = dataset.id
        if vector_type in upper_colletion_vector_types:
        if vector_type in upper_collection_vector_types:
            collection_name = Dataset.gen_collection_name_by_id(dataset_id)
        elif vector_type == VectorType.QDRANT:
            if dataset.collection_binding_id:
@@ -323,7 +323,7 @@ def migrate_knowledge_vector_database():
            else:
                collection_name = Dataset.gen_collection_name_by_id(dataset_id)

        elif vector_type in lower_colletion_vector_types:
        elif vector_type in lower_collection_vector_types:
            collection_name = Dataset.gen_collection_name_by_id(dataset_id).lower()
        else:
            raise ValueError(f"Vector store {vector_type} is not supported.")
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

    CURRENT_VERSION: str = Field(
        description="Dify version",
        default="0.13.0",
        default="0.13.2",
    )

    COMMIT_SHA: str = Field(
@@ -1,5 +1,6 @@
from datetime import UTC, datetime

from flask import request
from flask_login import current_user
from flask_restful import Resource, inputs, marshal_with, reqparse
from sqlalchemy import and_
@@ -20,8 +21,17 @@ class InstalledAppsListApi(Resource):
    @account_initialization_required
    @marshal_with(installed_app_list_fields)
    def get(self):
        app_id = request.args.get("app_id", default=None, type=str)
        current_tenant_id = current_user.current_tenant_id
        installed_apps = db.session.query(InstalledApp).filter(InstalledApp.tenant_id == current_tenant_id).all()

        if app_id:
            installed_apps = (
                db.session.query(InstalledApp)
                .filter(and_(InstalledApp.tenant_id == current_tenant_id, InstalledApp.app_id == app_id))
                .all()
            )
        else:
            installed_apps = db.session.query(InstalledApp).filter(InstalledApp.tenant_id == current_tenant_id).all()

        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        installed_apps = [
@@ -368,6 +368,7 @@ class ToolWorkflowProviderCreateApi(Resource):
            description=args["description"],
            parameters=args["parameters"],
            privacy_policy=args["privacy_policy"],
            labels=args["labels"],
        )
@@ -2,7 +2,7 @@

Due to the presence of tasks in App Runner that require long execution times, such as LLM generation and external requests, Flask-Sqlalchemy's strategy for database connection pooling is to allocate one connection (transaction) per request. This approach keeps a connection occupied even during non-DB tasks, leading to the inability to acquire new connections during high concurrency requests due to multiple long-running tasks.

Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid deattach errors.
Therefore, the database operations in App Runner and Task Pipeline must ensure connections are closed immediately after use, and it's better to pass IDs rather than Model objects to avoid detach errors.

Examples:
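To make the guideline concrete, here is a minimal sketch of the pattern. The `App` model import and the `call_llm` task are hypothetical stand-ins, not the exact dify code:

```python
from extensions.ext_database import db  # assumption: dify's shared SQLAlchemy handle
from models.model import App  # hypothetical model import


def call_llm(app_name: str) -> None:
    """Stand-in for a long-running task (LLM generation, external request)."""


def run_app_task(app_id: str) -> None:
    # Read what is needed, then return the connection to the pool right away.
    app_name = db.session.query(App.name).filter(App.id == app_id).scalar()
    db.session.close()

    call_llm(app_name)  # the slow part holds no DB connection

    # Afterwards, re-query by ID instead of reusing a now-detached model object.
    app = db.session.query(App).filter(App.id == app_id).first()
```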
@@ -82,7 +82,7 @@ class AppGenerateResponseConverter(ABC):
            for resource in metadata["retriever_resources"]:
                updated_resources.append(
                    {
                        "segment_id": resource["segment_id"],
                        "segment_id": resource.get("segment_id", ""),
                        "position": resource["position"],
                        "document_name": resource["document_name"],
                        "score": resource["score"],
@@ -2,7 +2,7 @@ from datetime import datetime
from enum import Enum, StrEnum
from typing import Any, Optional

from pydantic import BaseModel, field_validator
from pydantic import BaseModel

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.workflow.entities.node_entities import NodeRunMetadataKey
@@ -113,18 +113,6 @@ class QueueIterationNextEvent(AppQueueEvent):
    output: Optional[Any] = None  # output for the current iteration
    duration: Optional[float] = None

    @field_validator("output", mode="before")
    @classmethod
    def set_output(cls, v):
        """
        Set output
        """
        if v is None:
            return None
        if isinstance(v, int | float | str | bool | dict | list):
            return v
        raise ValueError("output must be a valid type")


class QueueIterationCompletedEvent(AppQueueEvent):
    """
@@ -91,7 +91,7 @@ class XinferenceProvider(Provider):
    """
```

也可以直接抛出对应Erros,并做如下定义,这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。
也可以直接抛出对应 Errors,并做如下定义,这样在之后的调用中可以直接抛出`InvokeConnectionError`等异常。

```python
@property
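The definition that doc refers to is truncated by the diff context above. It follows the shape used elsewhere in these commits (see the new Bedrock rerank model below); a minimal sketch, with the provider-specific exception lists left empty as placeholders:

```python
@property
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
    # Map provider-specific exceptions onto the unified invoke errors, so
    # callers can raise and catch InvokeConnectionError etc. directly.
    return {
        InvokeConnectionError: [],          # placeholder exception lists
        InvokeServerUnavailableError: [],
        InvokeRateLimitError: [],
        InvokeAuthorizationError: [],
        InvokeBadRequestError: [],
    }
```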
@@ -16,6 +16,7 @@ help:
supported_model_types:
  - llm
  - text-embedding
  - rerank
configurate_methods:
  - predefined-model
provider_credential_schema:
@@ -0,0 +1,53 @@
model: amazon.nova-lite-v1:0
label:
  en_US: Nova Lite V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00006'
  output: '0.00024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: amazon.nova-micro-v1:0
label:
  en_US: Nova Micro V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.000035'
  output: '0.00014'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,53 @@
model: amazon.nova-pro-v1:0
label:
  en_US: Nova Pro V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.0008'
  output: '0.0032'
  unit: '0.001'
  currency: USD
@@ -70,6 +70,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
        {"prefix": "cohere.command-r", "support_system_prompts": True, "support_tool_use": True},
        {"prefix": "amazon.titan", "support_system_prompts": False, "support_tool_use": False},
        {"prefix": "ai21.jamba-1-5", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "amazon.nova", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "us.amazon.nova", "support_system_prompts": True, "support_tool_use": False},
    ]

    @staticmethod
@@ -194,6 +196,13 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
        if model_info["support_tool_use"] and tools:
            parameters["toolConfig"] = self._convert_converse_tool_config(tools=tools)
        try:
            # for issue #10976
            conversations_list = parameters["messages"]
            # if two consecutive user messages found, combine them into one message
            for i in range(len(conversations_list) - 2, -1, -1):
                if conversations_list[i]["role"] == conversations_list[i + 1]["role"]:
                    conversations_list[i]["content"].extend(conversations_list.pop(i + 1)["content"])

        if stream:
            response = bedrock_client.converse_stream(**parameters)
            return self._handle_converse_stream_response(
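The backward traversal above merges any run of same-role messages in place. A standalone sketch of the same loop on plain dicts (hypothetical data, not dify code) shows the effect:

```python
messages = [
    {"role": "user", "content": [{"text": "hello"}]},
    {"role": "user", "content": [{"text": "are you there?"}]},
    {"role": "assistant", "content": [{"text": "yes"}]},
]

# Walk backwards so that pops don't shift indices we have not visited yet.
for i in range(len(messages) - 2, -1, -1):
    if messages[i]["role"] == messages[i + 1]["role"]:
        messages[i]["content"].extend(messages.pop(i + 1)["content"])

print(messages)
# [{'role': 'user', 'content': [{'text': 'hello'}, {'text': 'are you there?'}]},
#  {'role': 'assistant', 'content': [{'text': 'yes'}]}]
```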
@@ -0,0 +1,53 @@
model: us.amazon.nova-lite-v1:0
label:
  en_US: Nova Lite V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00006'
  output: '0.00024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: us.amazon.nova-micro-v1:0
label:
  en_US: Nova Micro V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.000035'
  output: '0.00014'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,53 @@
model: us.amazon.nova-pro-v1:0
label:
  en_US: Nova Pro V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.0008'
  output: '0.0032'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,2 @@
- amazon.rerank-v1
- cohere.rerank-v3-5
@@ -0,0 +1,4 @@
model: amazon.rerank-v1:0
model_type: rerank
model_properties:
  context_size: 5120
@@ -0,0 +1,4 @@
model: cohere.rerank-v3-5:0
model_type: rerank
model_properties:
  context_size: 5120
147 api/core/model_runtime/model_providers/bedrock/rerank/rerank.py (normal file)
@@ -0,0 +1,147 @@
from typing import Optional

import boto3
from botocore.config import Config

from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeBadRequestError,
    InvokeConnectionError,
    InvokeError,
    InvokeRateLimitError,
    InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel


class BedrockRerankModel(RerankModel):
    """
    Model class for Cohere rerank model.
    """

    def _invoke(
        self,
        model: str,
        credentials: dict,
        query: str,
        docs: list[str],
        score_threshold: Optional[float] = None,
        top_n: Optional[int] = None,
        user: Optional[str] = None,
    ) -> RerankResult:
        """
        Invoke rerank model

        :param model: model name
        :param credentials: model credentials
        :param query: search query
        :param docs: docs for reranking
        :param score_threshold: score threshold
        :param top_n: top n
        :param user: unique user id
        :return: rerank result
        """

        if len(docs) == 0:
            return RerankResult(model=model, docs=docs)

        # initialize client
        client_config = Config(region_name=credentials["aws_region"])
        bedrock_runtime = boto3.client(
            service_name="bedrock-agent-runtime",
            config=client_config,
            aws_access_key_id=credentials.get("aws_access_key_id", ""),
            aws_secret_access_key=credentials.get("aws_secret_access_key"),
        )
        queries = [{"type": "TEXT", "textQuery": {"text": query}}]
        text_sources = []
        for text in docs:
            text_sources.append(
                {
                    "type": "INLINE",
                    "inlineDocumentSource": {
                        "type": "TEXT",
                        "textDocument": {
                            "text": text,
                        },
                    },
                }
            )
        modelId = model
        region = credentials["aws_region"]
        model_package_arn = f"arn:aws:bedrock:{region}::foundation-model/{modelId}"
        rerankingConfiguration = {
            "type": "BEDROCK_RERANKING_MODEL",
            "bedrockRerankingConfiguration": {
                "numberOfResults": top_n,
                "modelConfiguration": {
                    "modelArn": model_package_arn,
                },
            },
        }
        response = bedrock_runtime.rerank(
            queries=queries, sources=text_sources, rerankingConfiguration=rerankingConfiguration
        )

        rerank_documents = []
        for idx, result in enumerate(response["results"]):
            # format document
            index = result["index"]
            rerank_document = RerankDocument(
                index=index,
                text=docs[index],
                score=result["relevanceScore"],
            )

            # score threshold check
            if score_threshold is not None:
                if rerank_document.score >= score_threshold:
                    rerank_documents.append(rerank_document)
            else:
                rerank_documents.append(rerank_document)

        return RerankResult(model=model, docs=rerank_documents)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            self.invoke(
                model=model,
                credentials=credentials,
                query="What is the capital of the United States?",
                docs=[
                    "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
                    "Census, Carson City had a population of 55,274.",
                    "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
                    "are a political division controlled by the United States. Its capital is Saipan.",
                ],
                score_threshold=0.8,
            )
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        The key is the error type thrown to the caller
        The value is the error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [],
            InvokeServerUnavailableError: [],
            InvokeRateLimitError: [],
            InvokeAuthorizationError: [],
            InvokeBadRequestError: [],
        }
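For reference, a usage sketch that mirrors what `validate_credentials` above does; the region and keys are placeholders, and the model name is one of the two listed in the new `_position.yaml`:

```python
model = BedrockRerankModel()
result = model.invoke(
    model="cohere.rerank-v3-5:0",
    credentials={
        "aws_region": "us-west-2",       # placeholder region
        "aws_access_key_id": "...",      # placeholder credentials
        "aws_secret_access_key": "...",
    },
    query="What is the capital of the United States?",
    docs=[
        "Carson City is the capital city of the American state of Nevada.",
        "The capital of the United States is Washington, D.C.",
    ],
    top_n=2,
)
for doc in result.docs:
    print(doc.index, doc.score, doc.text[:40])
```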
@@ -2,3 +2,4 @@
- rerank-english-v3.0
- rerank-multilingual-v2.0
- rerank-multilingual-v3.0
- rerank-v3.5
@@ -0,0 +1,4 @@
model: rerank-v3.5
model_type: rerank
model_properties:
  context_size: 5120
@@ -0,0 +1,38 @@
model: gemini-exp-1206
label:
  en_US: Gemini exp 1206
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 2097152
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_output_tokens
    use_template: max_tokens
    default: 8192
    min: 1
    max: 8192
  - name: json_schema
    use_template: json_schema
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
@@ -181,9 +181,11 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
        # prepare the payload for a simple ping to the model
        data = {"model": model, "stream": stream}

        if "format" in model_parameters:
            data["format"] = model_parameters["format"]
            del model_parameters["format"]
        if format_schema := model_parameters.pop("format", None):
            try:
                data["format"] = format_schema if format_schema == "json" else json.loads(format_schema)
            except json.JSONDecodeError as e:
                raise InvokeBadRequestError(f"Invalid format schema: {str(e)}")

        if "keep_alive" in model_parameters:
            data["keep_alive"] = model_parameters["keep_alive"]
@@ -733,12 +735,12 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
            ParameterRule(
                name="format",
                label=I18nObject(en_US="Format", zh_Hans="返回格式"),
                type=ParameterType.STRING,
                type=ParameterType.TEXT,
                default="json",
                help=I18nObject(
                    en_US="the format to return a response in. Currently the only accepted value is json.",
                    zh_Hans="返回响应的格式。目前唯一接受的值是json。",
                    en_US="the format to return a response in. Format can be `json` or a JSON schema.",
                    zh_Hans="返回响应的格式。目前接受的值是字符串`json`或JSON schema.",
                ),
                options=["json"],
            ),
        ],
        pricing=PriceConfig(
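With the new branch, the `format` parameter accepts either the literal string `json` or a serialized JSON schema. A standalone sketch of the same parsing logic, with a hypothetical schema string:

```python
import json


def build_format_payload(model_parameters: dict) -> dict:
    # Same idea as the Ollama change: "json" passes through unchanged,
    # anything else must parse as a JSON schema document.
    data = {}
    if format_schema := model_parameters.pop("format", None):
        data["format"] = format_schema if format_schema == "json" else json.loads(format_schema)
    return data


print(build_format_payload({"format": "json"}))
# {'format': 'json'}
print(build_format_payload({"format": '{"type": "object", "properties": {"answer": {"type": "string"}}}'}))
# {'format': {'type': 'object', 'properties': {'answer': {'type': 'string'}}}}
```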
@@ -104,13 +104,14 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
        """
        # use Anthropic official SDK references
        # - https://github.com/anthropics/anthropic-sdk-python
        service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
        service_account_key = credentials.get("vertex_service_account_key", "")
        project_id = credentials["vertex_project_id"]
        SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
        token = ""

        # get access token from service account credential
        if service_account_info:
        if service_account_key:
            service_account_info = json.loads(base64.b64decode(service_account_key))
            credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
            request = google.auth.transport.requests.Request()
            credentials.refresh(request)
@@ -478,10 +479,11 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
        if stop:
            config_kwargs["stop_sequences"] = stop

        service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
        service_account_key = credentials.get("vertex_service_account_key", "")
        project_id = credentials["vertex_project_id"]
        location = credentials["vertex_location"]
        if service_account_info:
        if service_account_key:
            service_account_info = json.loads(base64.b64decode(service_account_key))
            service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
            aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
        else:
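As the decode step shows, the `vertex_service_account_key` credential is expected to hold the base64-encoded service account JSON. A minimal sketch of producing that value (the file path is hypothetical):

```python
import base64

# Encode a downloaded service-account JSON file the way the credential expects.
with open("service-account.json", "rb") as f:  # hypothetical path
    encoded = base64.b64encode(f.read()).decode("ascii")

# Paste the full string into the vertex_service_account_key field.
print(encoded[:40], "...")
```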
@@ -48,10 +48,11 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
        :param input_type: input type
        :return: embeddings result
        """
        service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
        service_account_key = credentials.get("vertex_service_account_key", "")
        project_id = credentials["vertex_project_id"]
        location = credentials["vertex_location"]
        if service_account_info:
        if service_account_key:
            service_account_info = json.loads(base64.b64decode(service_account_key))
            service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
            aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
        else:
@@ -100,10 +101,11 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
        :return:
        """
        try:
            service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
            service_account_key = credentials.get("vertex_service_account_key", "")
            project_id = credentials["vertex_project_id"]
            location = credentials["vertex_location"]
            if service_account_info:
            if service_account_key:
                service_account_info = json.loads(base64.b64decode(service_account_key))
                service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
                aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
            else:
@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,7 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 10240
  context_size: 1048576
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -8,6 +8,7 @@ features:
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature

@@ -4,6 +4,7 @@ label:
model_type: llm
model_properties:
  mode: chat
  context_size: 2048
features:
  - vision
parameter_rules:
@@ -0,0 +1,52 @@
model: glm-4v-flash
label:
  en_US: glm-4v-flash
model_type: llm
model_properties:
  mode: chat
  context_size: 2048
features:
  - vision
parameter_rules:
  - name: temperature
    use_template: temperature
    default: 0.95
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: top_p
    use_template: top_p
    default: 0.6
    help:
      zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
      en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
  - name: do_sample
    label:
      zh_Hans: 采样策略
      en_US: Sampling strategy
    type: boolean
    help:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。
      en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
    default: true
  - name: max_tokens
    use_template: max_tokens
    default: 1024
    min: 1
    max: 1024
  - name: web_search
    type: boolean
    label:
      zh_Hans: 联网搜索
      en_US: Web Search
    default: false
    help:
      zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
      en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: RMB
@@ -4,6 +4,7 @@ label:
model_type: llm
model_properties:
  mode: chat
  context_size: 8192
features:
  - vision
  - video
@@ -22,18 +22,6 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
from core.model_runtime.model_providers.zhipuai._common import _CommonZhipuaiAI
from core.model_runtime.utils import helper

GLM_JSON_MODE_PROMPT = """You should always follow the instructions and output a valid JSON object.
The structure of the JSON object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
if you are not sure about the structure.

And you should always end the block with a "```" to indicate the end of the JSON object.

<instructions>
{{instructions}}
</instructions>

```JSON"""  # noqa: E501


class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
    def _invoke(
@@ -64,42 +52,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        credentials_kwargs = self._to_credential_kwargs(credentials)

        # invoke model
        # stop = stop or []
        # self._transform_json_prompts(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
        return self._generate(model, credentials_kwargs, prompt_messages, model_parameters, tools, stop, stream, user)

    # def _transform_json_prompts(self, model: str, credentials: dict,
    #                             prompt_messages: list[PromptMessage], model_parameters: dict,
    #                             tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
    #                             stream: bool = True, user: str | None = None) \
    #         -> None:
    #     """
    #     Transform json prompts to model prompts
    #     """
    #     if "}\n\n" not in stop:
    #         stop.append("}\n\n")

    #     # check if there is a system message
    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
    #         # override the system message
    #         prompt_messages[0] = SystemPromptMessage(
    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content)
    #         )
    #     else:
    #         # insert the system message
    #         prompt_messages.insert(0, SystemPromptMessage(
    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", "Please output a valid JSON object.")
    #         ))
    #     # check if the last message is a user message
    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
    #         # add ```JSON\n to the last message
    #         prompt_messages[-1].content += "\n```JSON\n"
    #     else:
    #         # append a user message
    #         prompt_messages.append(UserPromptMessage(
    #             content="```JSON\n"
    #         ))

    def get_num_tokens(
        self,
        model: str,
@@ -170,7 +124,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        :return: full response or stream response chunk generator result
        """
        extra_model_kwargs = {}
        # request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
        # request to glm-4v-plus with stop words will always respond "finish_reason":"network_error"
        if stop and model != "glm-4v-plus":
            extra_model_kwargs["stop"] = stop

@@ -186,11 +140,11 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        # resolve zhipuai model not support system message and user message, assistant message must be in sequence
        new_prompt_messages: list[PromptMessage] = []
        for prompt_message in prompt_messages:
            copy_prompt_message = prompt_message.copy()
            copy_prompt_message = prompt_message.model_copy()
            if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}:
                if isinstance(copy_prompt_message.content, list):
                    # check if model is 'glm-4v'
                    if model not in {"glm-4v", "glm-4v-plus"}:
                    if not model.startswith("glm-4v"):
                        # not support list message
                        continue
                    # get image and
@@ -234,63 +188,42 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
            else:
                model_parameters["tools"] = [web_search_params]

        if model in {"glm-4v", "glm-4v-plus"}:
        if model.startswith("glm-4v"):
            params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
        else:
            params = {"model": model, "messages": [], **model_parameters}
            # glm model
            if not model.startswith("chatglm"):
                for prompt_message in new_prompt_messages:
                    if prompt_message.role == PromptMessageRole.TOOL:
            for prompt_message in new_prompt_messages:
                if prompt_message.role == PromptMessageRole.TOOL:
                    params["messages"].append(
                        {
                            "role": "tool",
                            "content": prompt_message.content,
                            "tool_call_id": prompt_message.tool_call_id,
                        }
                    )
                elif isinstance(prompt_message, AssistantPromptMessage):
                    if prompt_message.tool_calls:
                        params["messages"].append(
                            {
                                "role": "tool",
                                "role": "assistant",
                                "content": prompt_message.content,
                                "tool_call_id": prompt_message.tool_call_id,
                                "tool_calls": [
                                    {
                                        "id": tool_call.id,
                                        "type": tool_call.type,
                                        "function": {
                                            "name": tool_call.function.name,
                                            "arguments": tool_call.function.arguments,
                                        },
                                    }
                                    for tool_call in prompt_message.tool_calls
                                ],
                            }
                        )
                    elif isinstance(prompt_message, AssistantPromptMessage):
                        if prompt_message.tool_calls:
                            params["messages"].append(
                                {
                                    "role": "assistant",
                                    "content": prompt_message.content,
                                    "tool_calls": [
                                        {
                                            "id": tool_call.id,
                                            "type": tool_call.type,
                                            "function": {
                                                "name": tool_call.function.name,
                                                "arguments": tool_call.function.arguments,
                                            },
                                        }
                                        for tool_call in prompt_message.tool_calls
                                    ],
                                }
                            )
                        else:
                            params["messages"].append({"role": "assistant", "content": prompt_message.content})
                    else:
                        params["messages"].append(
                            {"role": prompt_message.role.value, "content": prompt_message.content}
                        )
            else:
                # chatglm model
                for prompt_message in new_prompt_messages:
                    # merge system message to user message
                    if prompt_message.role in {
                        PromptMessageRole.SYSTEM,
                        PromptMessageRole.TOOL,
                        PromptMessageRole.USER,
                    }:
                        if len(params["messages"]) > 0 and params["messages"][-1]["role"] == "user":
                            params["messages"][-1]["content"] += "\n\n" + prompt_message.content
                        else:
                            params["messages"].append({"role": "user", "content": prompt_message.content})
                    else:
                        params["messages"].append(
                            {"role": prompt_message.role.value, "content": prompt_message.content}
                        )
                    params["messages"].append({"role": "assistant", "content": prompt_message.content})
                else:
                    params["messages"].append({"role": prompt_message.role.value, "content": prompt_message.content})

        if tools and len(tools) > 0:
            params["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools]
@@ -406,7 +339,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        Handle llm stream response

        :param model: model name
        :param response: response
        :param responses: response
        :param prompt_messages: prompt messages
        :return: llm response chunk generator result
        """
@@ -479,6 +412,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        human_prompt = "\n\nHuman:"
        ai_prompt = "\n\nAssistant:"
        content = message.content
        if isinstance(content, list):
            content = "".join(c.data for c in content if c.type == PromptMessageContentType.TEXT)

        if isinstance(message, UserPromptMessage):
            message_text = f"{human_prompt} {content}"
@@ -505,7 +440,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
        if tools and len(tools) > 0:
            text += "\n\nTools:"
            for tool in tools:
                text += f"\n{tool.json()}"
                text += f"\n{tool.model_dump_json()}"

        # trim off the trailing ' ' that might come from the "Assistant: "
        return text.rstrip()
@@ -5,7 +5,7 @@ BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找
CHAT_APP_COMPLETION_PROMPT_CONFIG = {
    "completion_prompt_config": {
        "prompt": {
            "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
            "text": "{{#pre_prompt#}}\nHere are the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
        },
        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
    },
@@ -375,7 +375,6 @@ class TidbOnQdrantVector(BaseVector):
        for result in results:
            if result:
                document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value)
                document.metadata["vector"] = result.vector
                documents.append(document)

        return documents
@@ -394,6 +393,7 @@ class TidbOnQdrantVector(BaseVector):
    ) -> Document:
        return Document(
            page_content=scored_point.payload.get(content_payload_key),
            vector=scored_point.vector,
            metadata=scored_point.payload.get(metadata_payload_key) or {},
        )
@@ -162,7 +162,7 @@ class TidbService:
        clusters = []
        tidb_serverless_list_map = {item.cluster_id: item for item in tidb_serverless_list}
        cluster_ids = [item.cluster_id for item in tidb_serverless_list]
        params = {"clusterIds": cluster_ids, "view": "FULL"}
        params = {"clusterIds": cluster_ids, "view": "BASIC"}
        response = requests.get(
            f"{api_url}/clusters:batchGet", params=params, auth=HTTPDigestAuth(public_key, private_key)
        )
@@ -6,9 +6,9 @@ identity:
    zh_Hans: GitLab 合并请求查询
description:
  human:
    en_US: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
    en_US: A tool for query GitLab merge requests, Input should be a exists repository or branch.
    zh_Hans: 一个用于查询 GitLab 代码合并请求的工具,输入的内容应该是一个已存在的仓库名或者分支。
  llm: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
  llm: A tool for query GitLab merge requests, Input should be a exists repository or branch.
parameters:
  - name: repository
    type: string
@@ -61,7 +61,7 @@ class WolframAlphaTool(BuiltinTool):
            params["input"] = query
        else:
            finished = True
        if "souces" in response_data["queryresult"]:
        if "sources" in response_data["queryresult"]:
            return self.create_link_message(response_data["queryresult"]["sources"]["url"])
        elif "pods" in response_data["queryresult"]:
            result = response_data["queryresult"]["pods"][0]["subpods"][0]["plaintext"]
@@ -1,3 +1,4 @@
from collections.abc import Mapping
from datetime import datetime
from typing import Any, Optional

@@ -140,8 +141,8 @@ class BaseIterationEvent(GraphEngineEvent):

class IterationRunStartedEvent(BaseIterationEvent):
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    inputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    predecessor_node_id: Optional[str] = None


@@ -153,18 +154,18 @@ class IterationRunNextEvent(BaseIterationEvent):

class IterationRunSucceededEvent(BaseIterationEvent):
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[dict[str, Any]] = None
    outputs: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    steps: int = 0
    iteration_duration_map: Optional[dict[str, float]] = None


class IterationRunFailedEvent(BaseIterationEvent):
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[dict[str, Any]] = None
    outputs: Optional[dict[str, Any]] = None
    metadata: Optional[dict[str, Any]] = None
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    steps: int = 0
    error: str = Field(..., description="failed reason")
@@ -1,6 +1,8 @@
import csv
import io
import json
import os
import tempfile

import docx
import pandas as pd
@@ -264,14 +266,20 @@ def _extract_text_from_ppt(file_content: bytes) -> str:

def _extract_text_from_pptx(file_content: bytes) -> str:
    try:
        with io.BytesIO(file_content) as file:
            if dify_config.UNSTRUCTURED_API_URL and dify_config.UNSTRUCTURED_API_KEY:
                elements = partition_via_api(
                    file=file,
                    api_url=dify_config.UNSTRUCTURED_API_URL,
                    api_key=dify_config.UNSTRUCTURED_API_KEY,
                )
            else:
        if dify_config.UNSTRUCTURED_API_URL and dify_config.UNSTRUCTURED_API_KEY:
            with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as temp_file:
                temp_file.write(file_content)
                temp_file.flush()
                with open(temp_file.name, "rb") as file:
                    elements = partition_via_api(
                        file=file,
                        metadata_filename=temp_file.name,
                        api_url=dify_config.UNSTRUCTURED_API_URL,
                        api_key=dify_config.UNSTRUCTURED_API_KEY,
                    )
            os.unlink(temp_file.name)
        else:
            with io.BytesIO(file_content) as file:
                elements = partition_pptx(file=file)
        return "\n".join([getattr(element, "text", "") for element in elements])
    except Exception as e:
@@ -1,11 +1,9 @@
import logging
from collections.abc import Mapping, Sequence
from mimetypes import guess_extension
from os import path
from typing import Any

from configs import dify_config
from core.file import File, FileTransferMethod, FileType
from core.file import File, FileTransferMethod
from core.tools.tool_file_manager import ToolFileManager
from core.workflow.entities.node_entities import NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
@@ -150,11 +148,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
        content = response.content

        if is_file and content_type:
            # extract filename from url
            filename = path.basename(url)
            # extract extension if possible
            extension = guess_extension(content_type) or ".bin"

            tool_file = ToolFileManager.create_file_by_raw(
                user_id=self.user_id,
                tenant_id=self.tenant_id,
@@ -165,7 +158,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):

            mapping = {
                "tool_file_id": tool_file.id,
                "type": FileType.IMAGE.value,
                "transfer_method": FileTransferMethod.TOOL_FILE.value,
            }
            file = file_factory.build_from_mapping(
@@ -24,7 +24,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
        """
        node_inputs: dict[str, list] = {"conditions": []}

        process_datas: dict[str, list] = {"condition_results": []}
        process_data: dict[str, list] = {"condition_results": []}

        input_conditions = []
        final_result = False
@@ -40,7 +40,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
                operator=case.logical_operator,
            )

            process_datas["condition_results"].append(
            process_data["condition_results"].append(
                {
                    "group": case.model_dump(),
                    "results": group_result,
@@ -65,7 +65,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):

                selected_case_id = "true" if final_result else "false"

                process_datas["condition_results"].append(
                process_data["condition_results"].append(
                    {"group": "default", "results": group_result, "final_result": final_result}
                )

@@ -73,7 +73,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):

        except Exception as e:
            return NodeRunResult(
                status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_datas, error=str(e)
                status=WorkflowNodeExecutionStatus.FAILED, inputs=node_inputs, process_data=process_data, error=str(e)
            )

        outputs = {"result": final_result, "selected_case_id": selected_case_id}
@@ -81,7 +81,7 @@ class IfElseNode(BaseNode[IfElseNodeData]):
        data = NodeRunResult(
            status=WorkflowNodeExecutionStatus.SUCCEEDED,
            inputs=node_inputs,
            process_data=process_datas,
            process_data=process_data,
            edge_source_handle=selected_case_id or "false",  # Use case ID or 'default'
            outputs=outputs,
        )
@ -9,7 +9,7 @@ from typing import TYPE_CHECKING, Any, Optional, cast
from flask import Flask, current_app

from configs import dify_config
from core.model_runtime.utils.encoders import jsonable_encoder
from core.variables import IntegerVariable
from core.workflow.entities.node_entities import (
    NodeRunMetadataKey,
    NodeRunResult,
@ -116,7 +116,7 @@ class IterationNode(BaseNode[IterationNodeData]):
        variable_pool.add([self.node_id, "item"], iterator_list_value[0])

        # init graph engine
        from core.workflow.graph_engine.graph_engine import GraphEngine
        from core.workflow.graph_engine.graph_engine import GraphEngine, GraphEngineThreadPool

        graph_engine = GraphEngine(
            tenant_id=self.tenant_id,
@ -155,33 +155,34 @@ class IterationNode(BaseNode[IterationNodeData]):
            iteration_node_data=self.node_data,
            index=0,
            pre_iteration_output=None,
            duration=None,
        )
        iter_run_map: dict[str, float] = {}
        outputs: list[Any] = [None] * len(iterator_list_value)
        try:
            if self.node_data.is_parallel:
                futures: list[Future] = []
                q = Queue()
                thread_pool = graph_engine.workflow_thread_pool_mapping[graph_engine.thread_pool_id]
                thread_pool._max_workers = self.node_data.parallel_nums
                q: Queue = Queue()
                thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
                for index, item in enumerate(iterator_list_value):
                    future: Future = thread_pool.submit(
                        self._run_single_iter_parallel,
                        current_app._get_current_object(),
                        q,
                        iterator_list_value,
                        inputs,
                        outputs,
                        start_at,
                        graph_engine,
                        iteration_graph,
                        index,
                        item,
                        iter_run_map,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        q=q,
                        iterator_list_value=iterator_list_value,
                        inputs=inputs,
                        outputs=outputs,
                        start_at=start_at,
                        graph_engine=graph_engine,
                        iteration_graph=iteration_graph,
                        index=index,
                        item=item,
                        iter_run_map=iter_run_map,
                    )
                    future.add_done_callback(thread_pool.task_done_callback)
                    futures.append(future)
                succeeded_count = 0
                empty_count = 0
                while True:
                    try:
                        event = q.get(timeout=1)
@ -209,17 +210,22 @@ class IterationNode(BaseNode[IterationNodeData]):
            else:
                for _ in range(len(iterator_list_value)):
                    yield from self._run_single_iter(
                        iterator_list_value,
                        variable_pool,
                        inputs,
                        outputs,
                        start_at,
                        graph_engine,
                        iteration_graph,
                        iter_run_map,
                        iterator_list_value=iterator_list_value,
                        variable_pool=variable_pool,
                        inputs=inputs,
                        outputs=outputs,
                        start_at=start_at,
                        graph_engine=graph_engine,
                        iteration_graph=iteration_graph,
                        iter_run_map=iter_run_map,
                    )
                if self.node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
                    outputs = [output for output in outputs if output is not None]

            # Flatten the list of lists
            if isinstance(outputs, list) and all(isinstance(output, list) for output in outputs):
                outputs = [item for sublist in outputs for item in sublist]

            yield IterationRunSucceededEvent(
                iteration_id=self.id,
                iteration_node_id=self.node_id,
@ -227,7 +233,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                iteration_node_data=self.node_data,
                start_at=start_at,
                inputs=inputs,
                outputs={"output": jsonable_encoder(outputs)},
                outputs={"output": outputs},
                steps=len(iterator_list_value),
                metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
            )
@ -235,8 +241,11 @@ class IterationNode(BaseNode[IterationNodeData]):
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
                    outputs={"output": jsonable_encoder(outputs)},
                    metadata={NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map},
                    outputs={"output": outputs},
                    metadata={
                        NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
                        NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens,
                    },
                )
            )
        except IterationNodeError as e:
@ -249,7 +258,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                iteration_node_data=self.node_data,
                start_at=start_at,
                inputs=inputs,
                outputs={"output": jsonable_encoder(outputs)},
                outputs={"output": outputs},
                steps=len(iterator_list_value),
                metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
                error=str(e),
@ -281,7 +290,7 @@ class IterationNode(BaseNode[IterationNodeData]):
        :param node_data: node data
        :return:
        """
        variable_mapping = {
        variable_mapping: dict[str, Sequence[str]] = {
            f"{node_id}.input_selector": node_data.iterator_selector,
        }

@ -309,7 +318,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                sub_node_variable_mapping = node_cls.extract_variable_selector_to_variable_mapping(
                    graph_config=graph_config, config=sub_node_config
                )
                sub_node_variable_mapping = cast(dict[str, list[str]], sub_node_variable_mapping)
                sub_node_variable_mapping = cast(dict[str, Sequence[str]], sub_node_variable_mapping)
            except NotImplementedError:
                sub_node_variable_mapping = {}

@ -330,8 +339,12 @@ class IterationNode(BaseNode[IterationNodeData]):
        return variable_mapping

    def _handle_event_metadata(
        self, event: BaseNodeEvent, iter_run_index: str, parallel_mode_run_id: str
    ) -> NodeRunStartedEvent | BaseNodeEvent:
        self,
        *,
        event: BaseNodeEvent | InNodeEvent,
        iter_run_index: int,
        parallel_mode_run_id: str | None,
    ) -> NodeRunStartedEvent | BaseNodeEvent | InNodeEvent:
        """
        add iteration metadata to event.
        """
@ -356,9 +369,10 @@ class IterationNode(BaseNode[IterationNodeData]):

    def _run_single_iter(
        self,
        iterator_list_value: list[str],
        *,
        iterator_list_value: Sequence[str],
        variable_pool: VariablePool,
        inputs: dict[str, list],
        inputs: Mapping[str, list],
        outputs: list,
        start_at: datetime,
        graph_engine: "GraphEngine",
@ -374,12 +388,12 @@ class IterationNode(BaseNode[IterationNodeData]):
        try:
            rst = graph_engine.run()
            # get current iteration index
            current_index = variable_pool.get([self.node_id, "index"]).value
            index_variable = variable_pool.get([self.node_id, "index"])
            if not isinstance(index_variable, IntegerVariable):
                raise IterationIndexNotFoundError(f"iteration {self.node_id} current index not found")
            current_index = index_variable.value
            iteration_run_id = parallel_mode_run_id if parallel_mode_run_id is not None else f"{current_index}"
            next_index = int(current_index) + 1

            if current_index is None:
                raise IterationIndexNotFoundError(f"iteration {self.node_id} current index not found")
            for event in rst:
                if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id:
                    event.in_iteration_id = self.node_id
@ -392,7 +406,9 @@ class IterationNode(BaseNode[IterationNodeData]):
                    continue

                if isinstance(event, NodeRunSucceededEvent):
                    yield self._handle_event_metadata(event, current_index, parallel_mode_run_id)
                    yield self._handle_event_metadata(
                        event=event, iter_run_index=current_index, parallel_mode_run_id=parallel_mode_run_id
                    )
                elif isinstance(event, BaseGraphEvent):
                    if isinstance(event, GraphRunFailedEvent):
                        # iteration run failed
@ -405,7 +421,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                                parallel_mode_run_id=parallel_mode_run_id,
                                start_at=start_at,
                                inputs=inputs,
                                outputs={"output": jsonable_encoder(outputs)},
                                outputs={"output": outputs},
                                steps=len(iterator_list_value),
                                metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
                                error=event.error,
@ -418,7 +434,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                                iteration_node_data=self.node_data,
                                start_at=start_at,
                                inputs=inputs,
                                outputs={"output": jsonable_encoder(outputs)},
                                outputs={"output": outputs},
                                steps=len(iterator_list_value),
                                metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
                                error=event.error,
@ -430,9 +446,11 @@ class IterationNode(BaseNode[IterationNodeData]):
                            )
                        )
                    return
                else:
                    event = cast(InNodeEvent, event)
                    metadata_event = self._handle_event_metadata(event, current_index, parallel_mode_run_id)
                elif isinstance(event, InNodeEvent):
                    # event = cast(InNodeEvent, event)
                    metadata_event = self._handle_event_metadata(
                        event=event, iter_run_index=current_index, parallel_mode_run_id=parallel_mode_run_id
                    )
                    if isinstance(event, NodeRunFailedEvent):
                        if self.node_data.error_handle_mode == ErrorHandleMode.CONTINUE_ON_ERROR:
                            yield NodeInIterationFailedEvent(
@ -514,7 +532,7 @@ class IterationNode(BaseNode[IterationNodeData]):
                iteration_node_data=self.node_data,
                index=next_index,
                parallel_mode_run_id=parallel_mode_run_id,
                pre_iteration_output=jsonable_encoder(current_iteration_output) if current_iteration_output else None,
                pre_iteration_output=current_iteration_output or None,
                duration=duration,
            )

@ -541,10 +559,11 @@ class IterationNode(BaseNode[IterationNodeData]):

    def _run_single_iter_parallel(
        self,
        *,
        flask_app: Flask,
        q: Queue,
        iterator_list_value: list[str],
        inputs: dict[str, list],
        iterator_list_value: Sequence[str],
        inputs: Mapping[str, list],
        outputs: list,
        start_at: datetime,
        graph_engine: "GraphEngine",
@ -552,7 +571,7 @@ class IterationNode(BaseNode[IterationNodeData]):
        index: int,
        item: Any,
        iter_run_map: dict[str, float],
    ) -> Generator[NodeEvent | InNodeEvent, None, None]:
    ):
        """
        run single iteration in parallel mode
        """

@ -815,7 +815,7 @@ class LLMNode(BaseNode[LLMNodeData]):
            "completion_model": {
                "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                "prompt": {
                    "text": "Here is the chat histories between human and assistant, inside "
                    "text": "Here are the chat histories between human and assistant, inside "
                    "<histories></histories> XML tags.\n\n<histories>\n{{"
                    "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                    "edition_type": "basic",

@ -98,7 +98,7 @@ Step 3: Structure the extracted parameters to JSON object as specified in <struc
Step 4: Ensure that the JSON object is properly formatted and valid. The output should not contain any XML tags. Only the JSON object should be outputted.

### Memory
Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>
@ -125,7 +125,7 @@ CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and out
The structure of the JSON object you can found in the instructions.

### Memory
Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>

@ -8,7 +8,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """
### Constraint
DO NOT include anything other than the JSON array in your response.
### Memory
Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>
@ -66,7 +66,7 @@ User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"
Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Experience"}}
</example>
### Memory
Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
<histories>
{histories}
</histories>

@ -7,8 +7,8 @@ from .enums import InputType, Operation


class OperationNotSupportedError(VariableOperatorNodeError):
    def __init__(self, *, operation: Operation, varialbe_type: str):
        super().__init__(f"Operation {operation} is not supported for type {varialbe_type}")
    def __init__(self, *, operation: Operation, variable_type: str):
        super().__init__(f"Operation {operation} is not supported for type {variable_type}")


class InputTypeNotSupportedError(VariableOperatorNodeError):

@ -45,7 +45,7 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):

        # Check if operation is supported
        if not helpers.is_operation_supported(variable_type=variable.value_type, operation=item.operation):
            raise OperationNotSupportedError(operation=item.operation, varialbe_type=variable.value_type)
            raise OperationNotSupportedError(operation=item.operation, variable_type=variable.value_type)

        # Check if variable input is supported
        if item.input_type == InputType.VARIABLE and not helpers.is_variable_input_supported(
@ -156,4 +156,4 @@ class VariableAssignerNode(BaseNode[VariableAssignerNodeData]):
            case Operation.DIVIDE:
                return variable.value / value
            case _:
                raise OperationNotSupportedError(operation=operation, varialbe_type=variable.value_type)
                raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type)

@ -253,6 +253,8 @@ class NotionOAuth(OAuthDataSource):
        }
        response = requests.get(url=f"{self._NOTION_BLOCK_SEARCH}/{block_id}", headers=headers)
        response_json = response.json()
        if response.status_code != 200:
            raise ValueError(f"Error fetching block parent page ID: {response_json.message}")
        parent = response_json["parent"]
        parent_type = parent["type"]
        if parent_type == "block_id":

api/poetry.lock (generated, 1867 changes): file diff suppressed because it is too large.
@ -20,7 +20,7 @@ azure-ai-inference = "~1.0.0b3"
azure-ai-ml = "~1.20.0"
azure-identity = "1.16.1"
beautifulsoup4 = "4.12.2"
boto3 = "1.35.17"
boto3 = "1.35.74"
bs4 = "~0.0.1"
cachetools = "~5.3.0"
celery = "~5.4.0"

@ -36,14 +36,16 @@ def clean_messages():
                db.session.query(Message)
                .filter(Message.created_at < plan_sandbox_clean_message_day)
                .order_by(Message.created_at.desc())
                .paginate(page=page, per_page=100)
                .limit(100)
                .all()
            )

        except NotFound:
            break
        if messages.items is None or len(messages.items) == 0:
        if not messages:
            break
        for message in messages.items:
        for message in messages:
            plan_sandbox_clean_message_day = message.created_at
            app = App.query.filter_by(id=message.app_id).first()
            features_cache_key = f"features:{app.tenant_id}"
            plan_cache = redis_client.get(features_cache_key)

@ -81,6 +81,10 @@ class WorkflowToolManageService:
        db.session.add(workflow_tool_provider)
        db.session.commit()

        if labels is not None:
            ToolLabelManager.update_tool_labels(
                ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels
            )
        return {"result": "success"}

    @classmethod

@ -5,7 +5,7 @@ import time
import click
from celery import shared_task

from core.indexing_runner import DocumentIsPausedException
from core.indexing_runner import DocumentIsPausedError
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.dataset import Dataset, ExternalKnowledgeApis
@ -86,7 +86,7 @@ def external_document_indexing_task(
                fg="green",
            )
        )
    except DocumentIsPausedException as ex:
    except DocumentIsPausedError as ex:
        logging.info(click.style(str(ex), fg="yellow"))

    except Exception:

@ -37,7 +37,11 @@ def test_dify_config_undefined_entry(example_env_file):
    assert config["LOG_LEVEL"] == "INFO"


# NOTE: If there is a `.env` file in your Workspace, this test might not succeed as expected.
# This is due to `pymilvus` loading all the variables from the `.env` file into `os.environ`.
def test_dify_config(example_env_file):
    # clear system environment variables
    os.environ.clear()
    # load dotenv file with pydantic-settings
    config = DifyConfig(_env_file=example_env_file)

@ -2,7 +2,7 @@ version: '3'
services:
  # API service
  api:
    image: langgenius/dify-api:0.13.0
    image: langgenius/dify-api:0.13.2
    restart: always
    environment:
      # Startup mode, 'api' starts the API server.
@ -227,7 +227,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.13.0
    image: langgenius/dify-api:0.13.2
    restart: always
    environment:
      CONSOLE_WEB_URL: ''
@ -397,7 +397,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.13.0
    image: langgenius/dify-web:0.13.2
    restart: always
    environment:
      # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is

@ -292,7 +292,7 @@ x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:0.13.0
    image: langgenius/dify-api:0.13.2
    restart: always
    environment:
      # Use the shared environment variables.
@ -312,7 +312,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.13.0
    image: langgenius/dify-api:0.13.2
    restart: always
    environment:
      # Use the shared environment variables.
@ -331,7 +331,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.13.0
    image: langgenius/dify-web:0.13.2
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@ -9,7 +9,7 @@ import s from './style.module.css'
import cn from '@/utils/classnames'
import type { App } from '@/types/app'
import Confirm from '@/app/components/base/confirm'
import { ToastContext } from '@/app/components/base/toast'
import Toast, { ToastContext } from '@/app/components/base/toast'
import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/apps'
import DuplicateAppModal from '@/app/components/app/duplicate-modal'
import type { DuplicateAppModalProps } from '@/app/components/app/duplicate-modal'
@ -31,6 +31,7 @@ import TagSelector from '@/app/components/base/tag-management/selector'
import type { EnvironmentVariable } from '@/app/components/workflow/types'
import DSLExportConfirmModal from '@/app/components/workflow/dsl-export-confirm-modal'
import { fetchWorkflowDraft } from '@/service/workflow'
import { fetchInstalledAppList } from '@/service/explore'

export type AppCardProps = {
  app: App
@ -209,6 +210,21 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
    e.preventDefault()
    setShowConfirmDelete(true)
  }
  const onClickInstalledApp = async (e: React.MouseEvent<HTMLButtonElement>) => {
    e.stopPropagation()
    props.onClick?.()
    e.preventDefault()
    try {
      const { installed_apps }: any = await fetchInstalledAppList(app.id) || {}
      if (installed_apps?.length > 0)
        window.open(`/explore/installed/${installed_apps[0].id}`, '_blank')
      else
        throw new Error('No app found in Explore')
    }
    catch (e: any) {
      Toast.notify({ type: 'error', message: `${e.message || e}` })
    }
  }
  return (
    <div className="relative w-full py-1" onMouseLeave={onMouseLeave}>
      <button className={s.actionItem} onClick={onClickSettings}>
@ -233,6 +249,10 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
          </>
        )}
        <Divider className="!my-1" />
        <button className={s.actionItem} onClick={onClickInstalledApp}>
          <span className={s.actionName}>{t('app.openInExplore')}</span>
        </button>
        <Divider className="!my-1" />
        <div
          className={cn(s.actionItem, s.deleteActionItem, 'group')}
          onClick={onClickDelete}
@ -353,10 +373,10 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
          }
          popupClassName={
            (app.mode === 'completion' || app.mode === 'chat')
              ? '!w-[238px] translate-x-[-110px]'
              : ''
              ? '!w-[256px] translate-x-[-224px]'
              : '!w-[160px] translate-x-[-128px]'
          }
          className={'!w-[128px] h-fit !z-20'}
          className={'h-fit !z-20'}
        />
      </div>
    </>

@ -166,7 +166,7 @@ const ExtraInfo = ({ isMobile, relatedApps }: IExtraInfoProps) => {
        className='inline-flex items-center text-xs text-primary-600 mt-2 cursor-pointer'
        href={
          locale === LanguagesSupported[1]
            ? 'https://docs.dify.ai/v/zh-hans/guides/knowledge-base/integrate_knowledge_within_application'
            ? 'https://docs.dify.ai/v/zh-hans/guides/knowledge-base/integrate-knowledge-within-application'
            : 'https://docs.dify.ai/guides/knowledge-base/integrate-knowledge-within-application'
        }
        target='_blank' rel='noopener noreferrer'

@ -5,7 +5,8 @@ import {
} from 'react'
import { useTranslation } from 'react-i18next'
import dayjs from 'dayjs'
import { RiArrowDownSLine } from '@remixicon/react'
import { RiArrowDownSLine, RiPlanetLine } from '@remixicon/react'
import Toast from '../../base/toast'
import type { ModelAndParameter } from '../configuration/debug/types'
import SuggestedAction from './suggested-action'
import PublishWithMultipleModel from './publish-with-multiple-model'
@ -15,6 +16,7 @@ import {
  PortalToFollowElemContent,
  PortalToFollowElemTrigger,
} from '@/app/components/base/portal-to-follow-elem'
import { fetchInstalledAppList } from '@/service/explore'
import EmbeddedModal from '@/app/components/app/overview/embedded'
import { useStore as useAppStore } from '@/app/components/app/store'
import { useGetLanguage } from '@/context/i18n'
@ -105,6 +107,19 @@ const AppPublisher = ({
    setPublished(false)
  }, [disabled, onToggle, open])

  const handleOpenInExplore = useCallback(async () => {
    try {
      const { installed_apps }: any = await fetchInstalledAppList(appDetail?.id) || {}
      if (installed_apps?.length > 0)
        window.open(`/explore/installed/${installed_apps[0].id}`, '_blank')
      else
        throw new Error('No app found in Explore')
    }
    catch (e: any) {
      Toast.notify({ type: 'error', message: `${e.message || e}` })
    }
  }, [appDetail?.id])

  const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false)

  return (
@ -205,6 +220,15 @@ const AppPublisher = ({
                {t('workflow.common.embedIntoSite')}
              </SuggestedAction>
            )}
            <SuggestedAction
              onClick={() => {
                handleOpenInExplore()
              }}
              disabled={!publishedAt}
              icon={<RiPlanetLine className='w-4 h-4' />}
            >
              {t('workflow.common.openInExplore')}
            </SuggestedAction>
            <SuggestedAction disabled={!publishedAt} link='./develop' icon={<FileText className='w-4 h-4' />}>{t('workflow.common.accessAPIReference')}</SuggestedAction>
            {appDetail?.mode === 'workflow' && (
              <WorkflowToolConfigureButton

@ -29,6 +29,7 @@ import { useAppContext } from '@/context/app-context'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useFeatures } from '@/app/components/base/features/hooks'
import type { InputForm } from '@/app/components/base/chat/chat/type'
import { getLastAnswer } from '@/app/components/base/chat/utils'

type ChatItemProps = {
  modelAndParameter: ModelAndParameter
@ -101,7 +102,7 @@ const ChatItem: FC<ChatItemProps> = ({
      query: message,
      inputs,
      model_config: configData,
      parent_message_id: chatListRef.current.at(-1)?.id || null,
      parent_message_id: getLastAnswer(chatListRef.current)?.id || null,
    }

    if ((config.file_upload as any).enabled && files?.length && supportVision)

@ -318,7 +318,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
  const targetTone = TONE_LIST.find((item: any) => {
    let res = true
    validatedParams.forEach((param) => {
      res = item.config?.[param] === detail.model_config?.configs?.completion_params?.[param]
      res = item.config?.[param] === detail?.model_config.model?.completion_params?.[param]
    })
    return res
  })?.name ?? 'custom'

@ -334,7 +334,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
          </SimpleBtn>
        )
      }
      {(currentTab === 'RESULT' || !isWorkflow) && (
      {((currentTab === 'RESULT' && workflowProcessData?.resultText) || !isWorkflow) && (
        <SimpleBtn
          isDisabled={isError || !messageId}
          className={cn(isMobile && '!px-1.5', 'space-x-1')}

@ -27,15 +27,15 @@ const ResultTab = ({
    onCurrentTabChange(tab)
  }
  useEffect(() => {
    if (data?.resultText)
    if (data?.resultText || !!data?.files?.length)
      switchTab('RESULT')
    else
      switchTab('DETAIL')
  }, [data?.resultText])
  }, [data?.files?.length, data?.resultText])

  return (
    <div className='grow relative flex flex-col'>
      {data?.resultText && (
      {(data?.resultText || !!data?.files?.length) && (
        <div className='shrink-0 flex items-center mb-2 border-b-[0.5px] border-[rgba(0,0,0,0.05)]'>
          <div
            className={cn(
@ -56,14 +56,21 @@ const ResultTab = ({
      <div className={cn('grow bg-white')}>
        {currentTab === 'RESULT' && (
          <>
            <Markdown content={data?.resultText || ''} />
            {data?.resultText && <Markdown content={data?.resultText || ''} />}
            {!!data?.files?.length && (
              <FileList
                files={data?.files}
                showDeleteAction={false}
                showDownloadAction
                canPreview
              />
              <div className='flex flex-col gap-2'>
                {data?.files.map((item: any) => (
                  <div key={item.varName} className='flex flex-col gap-1 system-xs-regular'>
                    <div className='py-1 text-text-tertiary '>{item.varName}</div>
                    <FileList
                      files={item.list}
                      showDeleteAction={false}
                      showDownloadAction
                      canPreview
                    />
                  </div>
                ))}
              </div>
            )}
          </>
        )}

@ -76,7 +76,7 @@ const Logs: FC<ILogsProps> = ({ appDetail }) => {
    <div className='flex flex-col h-full'>
      <h1 className='text-text-primary system-xl-semibold'>{t('appLog.workflowTitle')}</h1>
      <p className='text-text-tertiary system-sm-regular'>{t('appLog.workflowSubtitle')}</p>
      <div className='flex flex-col py-4 flex-1'>
      <div className='flex flex-col py-4 flex-1 max-h-[calc(100%-16px)]'>
        <Filter queryParams={queryParams} setQueryParams={setQueryParams} />
        {/* workflow log */}
        {total === undefined

@ -11,16 +11,19 @@ import { useDraggableUploader } from './hooks'
import { checkIsAnimatedImage } from './utils'
import { ALLOW_FILE_EXTENSIONS } from '@/types/app'

type UploaderProps = {
  className?: string
  onImageCropped?: (tempUrl: string, croppedAreaPixels: Area, fileName: string) => void
  onUpload?: (file?: File) => void
export type OnImageInput = {
  (isCropped: true, tempUrl: string, croppedAreaPixels: Area, fileName: string): void
  (isCropped: false, file: File): void
}

const Uploader: FC<UploaderProps> = ({
type UploaderProps = {
  className?: string
  onImageInput?: OnImageInput
}

const ImageInput: FC<UploaderProps> = ({
  className,
  onImageCropped,
  onUpload,
  onImageInput,
}) => {
  const [inputImage, setInputImage] = useState<{ file: File; url: string }>()
  const [isAnimatedImage, setIsAnimatedImage] = useState<boolean>(false)
@ -37,8 +40,7 @@ const Uploader: FC<UploaderProps> = ({
  const onCropComplete = async (_: Area, croppedAreaPixels: Area) => {
    if (!inputImage)
      return
    onImageCropped?.(inputImage.url, croppedAreaPixels, inputImage.file.name)
    onUpload?.(undefined)
    onImageInput?.(true, inputImage.url, croppedAreaPixels, inputImage.file.name)
  }

  const handleLocalFileInput = (e: ChangeEvent<HTMLInputElement>) => {
@ -48,7 +50,7 @@ const Uploader: FC<UploaderProps> = ({
      checkIsAnimatedImage(file).then((isAnimatedImage) => {
        setIsAnimatedImage(!!isAnimatedImage)
        if (isAnimatedImage)
          onUpload?.(file)
          onImageInput?.(false, file)
      })
    }
  }
@ -117,4 +119,4 @@ const Uploader: FC<UploaderProps> = ({
  )
}

export default Uploader
export default ImageInput
@ -8,12 +8,14 @@ import Button from '../button'
import { ImagePlus } from '../icons/src/vender/line/images'
import { useLocalFileUploader } from '../image-uploader/hooks'
import EmojiPickerInner from '../emoji-picker/Inner'
import Uploader from './Uploader'
import type { OnImageInput } from './ImageInput'
import ImageInput from './ImageInput'
import s from './style.module.css'
import getCroppedImg from './utils'
import type { AppIconType, ImageFile } from '@/types/app'
import cn from '@/utils/classnames'
import { DISABLE_UPLOAD_IMAGE_AS_ICON } from '@/config'

export type AppIconEmojiSelection = {
  type: 'emoji'
  icon: string
@ -69,14 +71,15 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
    },
  })

  const [imageCropInfo, setImageCropInfo] = useState<{ tempUrl: string; croppedAreaPixels: Area; fileName: string }>()
  const handleImageCropped = async (tempUrl: string, croppedAreaPixels: Area, fileName: string) => {
    setImageCropInfo({ tempUrl, croppedAreaPixels, fileName })
  }
  type InputImageInfo = { file: File } | { tempUrl: string; croppedAreaPixels: Area; fileName: string }
  const [inputImageInfo, setInputImageInfo] = useState<InputImageInfo>()

  const [uploadImageInfo, setUploadImageInfo] = useState<{ file?: File }>()
  const handleUpload = async (file?: File) => {
    setUploadImageInfo({ file })
  const handleImageInput: OnImageInput = async (isCropped: boolean, fileOrTempUrl: string | File, croppedAreaPixels?: Area, fileName?: string) => {
    setInputImageInfo(
      isCropped
        ? { tempUrl: fileOrTempUrl as string, croppedAreaPixels: croppedAreaPixels!, fileName: fileName! }
        : { file: fileOrTempUrl as File },
    )
  }

  const handleSelect = async () => {
@ -90,15 +93,15 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
      }
    }
    else {
      if (!imageCropInfo && !uploadImageInfo)
      if (!inputImageInfo)
        return
      setUploading(true)
      if (imageCropInfo.file) {
        handleLocalFileUpload(imageCropInfo.file)
      if ('file' in inputImageInfo) {
        handleLocalFileUpload(inputImageInfo.file)
        return
      }
      const blob = await getCroppedImg(imageCropInfo.tempUrl, imageCropInfo.croppedAreaPixels, imageCropInfo.fileName)
      const file = new File([blob], imageCropInfo.fileName, { type: blob.type })
      const blob = await getCroppedImg(inputImageInfo.tempUrl, inputImageInfo.croppedAreaPixels, inputImageInfo.fileName)
      const file = new File([blob], inputImageInfo.fileName, { type: blob.type })
      handleLocalFileUpload(file)
    }
  }
@ -127,10 +130,8 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
        </div>
      </div>}

      <Divider className='m-0' />

      <EmojiPickerInner className={activeTab === 'emoji' ? 'block' : 'hidden'} onSelect={handleSelectEmoji} />
      <Uploader className={activeTab === 'image' ? 'block' : 'hidden'} onImageCropped={handleImageCropped} onUpload={handleUpload}/>
      <EmojiPickerInner className={cn(activeTab === 'emoji' ? 'block' : 'hidden', 'pt-2')} onSelect={handleSelectEmoji} />
      <ImageInput className={activeTab === 'image' ? 'block' : 'hidden'} onImageInput={handleImageInput} />

      <Divider className='m-0' />
      <div className='w-full flex items-center justify-center p-3 gap-2'>

@ -116,12 +116,12 @@ export default async function getCroppedImg(
  })
}

export function checkIsAnimatedImage(file) {
export function checkIsAnimatedImage(file: File): Promise<boolean> {
  return new Promise((resolve, reject) => {
    const fileReader = new FileReader()

    fileReader.onload = function (e) {
      const arr = new Uint8Array(e.target.result)
      const arr = new Uint8Array(e.target?.result as ArrayBuffer)

      // Check file extension
      const fileName = file.name.toLowerCase()
@ -148,7 +148,7 @@ export function checkIsAnimatedImage(file) {
}

// Function to check for WebP signature
function isWebP(arr) {
function isWebP(arr: Uint8Array) {
  return (
    arr[0] === 0x52 && arr[1] === 0x49 && arr[2] === 0x46 && arr[3] === 0x46
    && arr[8] === 0x57 && arr[9] === 0x45 && arr[10] === 0x42 && arr[11] === 0x50
@ -156,7 +156,7 @@ function isWebP(arr) {
}

// Function to check if the WebP is animated (contains ANIM chunk)
function checkWebPAnimation(arr) {
function checkWebPAnimation(arr: Uint8Array) {
  // Search for the ANIM chunk in WebP to determine if it's animated
  for (let i = 12; i < arr.length - 4; i++) {
    if (arr[i] === 0x41 && arr[i + 1] === 0x4E && arr[i + 2] === 0x49 && arr[i + 3] === 0x4D)

@ -68,7 +68,7 @@ const EmojiPickerInner: FC<IEmojiPickerInnerProps> = ({
  }, [onSelect, selectedEmoji, selectedBackground])

  return <div className={cn(className)}>
    <div className='flex flex-col items-center w-full px-3'>
    <div className='flex flex-col items-center w-full px-3 pb-2'>
      <div className="relative w-full">
        <div className="absolute inset-y-0 left-0 flex items-center pl-3 pointer-events-none">
          <MagnifyingGlassIcon className="w-5 h-5 text-gray-400" aria-hidden="true" />

@ -1,4 +1,5 @@
import React, { useState } from 'react'
import React, { useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { RiArrowRightSLine } from '@remixicon/react'
import FileImageRender from './file-image-render'
import FileTypeIcon from './file-type-icon'
@ -12,23 +13,36 @@ import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import cn from '@/utils/classnames'

type Props = {
  fileList: FileEntity[]
  fileList: {
    varName: string
    list: FileEntity[]
  }[]
  isExpanded?: boolean
  noBorder?: boolean
  noPadding?: boolean
}

const FileListInLog = ({ fileList }: Props) => {
  const [expanded, setExpanded] = useState(false)
const FileListInLog = ({ fileList, isExpanded = false, noBorder = false, noPadding = false }: Props) => {
  const { t } = useTranslation()
  const [expanded, setExpanded] = useState(isExpanded)
  const fullList = useMemo(() => {
    return fileList.reduce((acc: FileEntity[], { list }) => {
      return [...acc, ...list]
    }, [])
  }, [fileList])

  if (!fileList.length)
    return null

  return (
    <div className={cn('border-t border-divider-subtle px-3 py-2', expanded && 'py-3')}>
    <div className={cn('px-3 py-2', expanded && 'py-3', !noBorder && 'border-t border-divider-subtle', noPadding && '!p-0')}>
      <div className='flex justify-between gap-1'>
        {expanded && (
          <div></div>
          <div className='grow py-1 text-text-secondary system-xs-semibold-uppercase cursor-pointer' onClick={() => setExpanded(!expanded)}>{t('appLog.runDetail.fileListLabel')}</div>
        )}
        {!expanded && (
          <div className='flex'>
            {fileList.map((file) => {
          <div className='flex gap-1'>
            {fullList.map((file) => {
              const { id, name, type, supportFileType, base64Url, url } = file
              const isImageFile = supportFileType === SupportUploadFileTypes.image
              return (
@ -63,19 +77,25 @@ const FileListInLog = ({ fileList }: Props) => {
          </div>
        )}
        <div className='flex items-center gap-1 cursor-pointer' onClick={() => setExpanded(!expanded)}>
          {!expanded && <div className='text-text-tertiary system-xs-medium-uppercase'>DETAIL</div>}
          {!expanded && <div className='text-text-tertiary system-xs-medium-uppercase'>{t('appLog.runDetail.fileListDetail')}</div>}
          <RiArrowRightSLine className={cn('w-4 h-4 text-text-tertiary', expanded && 'rotate-90')} />
        </div>
      </div>
      {expanded && (
        <div className='flex flex-col gap-1'>
          {fileList.map(file => (
            <FileItem
              key={file.id}
              file={file}
              showDeleteAction={false}
              showDownloadAction
            />
        <div className='flex flex-col gap-3'>
          {fileList.map(item => (
            <div key={item.varName} className='flex flex-col gap-1 system-xs-regular'>
              <div className='py-1 text-text-tertiary '>{item.varName}</div>
              {item.list.map(file => (
                <FileItem
                  key={file.id}
                  file={file}
                  showDeleteAction={false}
                  showDownloadAction
                  canPreview
                />
              ))}
            </div>
          ))}
        </div>
      )}

@ -1,12 +1,15 @@
import {
  memo,
  useState,
} from 'react'
import {
  RiDeleteBinLine,
  RiDownloadLine,
  RiEyeLine,
} from '@remixicon/react'
import FileTypeIcon from '../file-type-icon'
import {
  downloadFile,
  fileIsUploaded,
  getFileAppearanceType,
  getFileExtension,
@ -19,6 +22,7 @@ import { formatFileSize } from '@/utils/format'
import cn from '@/utils/classnames'
import { ReplayLine } from '@/app/components/base/icons/src/vender/other'
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import ImagePreview from '@/app/components/base/image-uploader/image-preview'

type FileInAttachmentItemProps = {
  file: FileEntity
@ -26,6 +30,7 @@ type FileInAttachmentItemProps = {
  showDownloadAction?: boolean
  onRemove?: (fileId: string) => void
  onReUpload?: (fileId: string) => void
  canPreview?: boolean
}
const FileInAttachmentItem = ({
  file,
@ -33,96 +38,116 @@ const FileInAttachmentItem = ({
  showDownloadAction = true,
  onRemove,
  onReUpload,
  canPreview,
}: FileInAttachmentItemProps) => {
  const { id, name, type, progress, supportFileType, base64Url, url, isRemote } = file
  const ext = getFileExtension(name, type, isRemote)
  const isImageFile = supportFileType === SupportUploadFileTypes.image

  const [imagePreviewUrl, setImagePreviewUrl] = useState('')
  return (
    <div className={cn(
      'flex items-center pr-3 h-12 rounded-lg border-[0.5px] border-components-panel-border bg-components-panel-on-panel-item-bg shadow-xs',
      progress === -1 && 'bg-state-destructive-hover border-state-destructive-border',
    )}>
      <div className='flex items-center justify-center w-12 h-12'>
        {
          isImageFile && (
            <FileImageRender
              className='w-8 h-8'
              imageUrl={base64Url || url || ''}
            />
          )
        }
        {
          !isImageFile && (
            <FileTypeIcon
              type={getFileAppearanceType(name, type)}
              size='lg'
            />
          )
        }
      </div>
      <div className='grow w-0 mr-1'>
        <div
          className='flex items-center mb-0.5 system-xs-medium text-text-secondary truncate'
          title={file.name}
        >
          <div className='truncate'>{name}</div>
    <>
      <div className={cn(
        'flex items-center pr-3 h-12 rounded-lg border-[0.5px] border-components-panel-border bg-components-panel-on-panel-item-bg shadow-xs',
        progress === -1 && 'bg-state-destructive-hover border-state-destructive-border',
      )}>
        <div className='flex items-center justify-center w-12 h-12'>
          {
            isImageFile && (
              <FileImageRender
                className='w-8 h-8'
                imageUrl={base64Url || url || ''}
              />
            )
          }
          {
            !isImageFile && (
              <FileTypeIcon
                type={getFileAppearanceType(name, type)}
                size='lg'
              />
            )
          }
        </div>
        <div className='flex items-center system-2xs-medium-uppercase text-text-tertiary'>
        <div className='grow w-0 mr-1'>
          <div
            className='flex items-center mb-0.5 system-xs-medium text-text-secondary truncate'
            title={file.name}
          >
            <div className='truncate'>{name}</div>
          </div>
          <div className='flex items-center system-2xs-medium-uppercase text-text-tertiary'>
            {
              ext && (
                <span>{ext.toLowerCase()}</span>
              )
            }
            {
              ext && (
                <span className='mx-1 system-2xs-medium'>•</span>
              )
            }
            {
              !!file.size && (
                <span>{formatFileSize(file.size)}</span>
              )
            }
          </div>
        </div>
        <div className='shrink-0 flex items-center'>
          {
            ext && (
              <span>{ext.toLowerCase()}</span>
            progress >= 0 && !fileIsUploaded(file) && (
              <ProgressCircle
                className='mr-2.5'
                percentage={progress}
              />
            )
          }
          {
            ext && (
              <span className='mx-1 system-2xs-medium'>•</span>
            progress === -1 && (
              <ActionButton
                className='mr-1'
                onClick={() => onReUpload?.(id)}
              >
                <ReplayLine className='w-4 h-4 text-text-tertiary' />
              </ActionButton>
            )
          }
          {
            !!file.size && (
              <span>{formatFileSize(file.size)}</span>
            showDeleteAction && (
              <ActionButton onClick={() => onRemove?.(id)}>
                <RiDeleteBinLine className='w-4 h-4' />
              </ActionButton>
            )
          }
          {
            canPreview && isImageFile && (
              <ActionButton className='mr-1' onClick={() => setImagePreviewUrl(url || '')}>
                <RiEyeLine className='w-4 h-4' />
              </ActionButton>
            )
          }
          {
            showDownloadAction && (
              <ActionButton onClick={(e) => {
                e.stopPropagation()
                downloadFile(url || base64Url || '', name)
              }}>
                <RiDownloadLine className='w-4 h-4' />
              </ActionButton>
            )
          }
        </div>
      </div>
      <div className='shrink-0 flex items-center'>
        {
          progress >= 0 && !fileIsUploaded(file) && (
            <ProgressCircle
              className='mr-2.5'
              percentage={progress}
            />
          )
        }
        {
          progress === -1 && (
            <ActionButton
              className='mr-1'
              onClick={() => onReUpload?.(id)}
            >
              <ReplayLine className='w-4 h-4 text-text-tertiary' />
            </ActionButton>
          )
        }
        {
          showDeleteAction && (
            <ActionButton onClick={() => onRemove?.(id)}>
              <RiDeleteBinLine className='w-4 h-4' />
            </ActionButton>
          )
        }
        {
          showDownloadAction && (
            <ActionButton
              size='xs'
            >
              <RiDownloadLine className='w-3.5 h-3.5 text-text-tertiary' />
            </ActionButton>
          )
        }
      </div>
    </div>
      {
        imagePreviewUrl && canPreview && (
          <ImagePreview
            title={name}
            url={imagePreviewUrl}
            onCancel={() => setImagePreviewUrl('')}
          />
        )
      }
    </>
  )
}

@ -84,7 +84,7 @@ const FileImageItem = ({
          className='absolute bottom-0.5 right-0.5 flex items-center justify-center w-6 h-6 rounded-lg bg-components-actionbar-bg shadow-md'
          onClick={(e) => {
            e.stopPropagation()
            downloadFile(url || '', name)
            downloadFile(url || base64Url || '', name)
          }}
        >
          <RiDownloadLine className='w-4 h-4 text-text-tertiary' />

@ -31,7 +31,7 @@ const FileItem = ({
  onRemove,
  onReUpload,
}: FileItemProps) => {
  const { id, name, type, progress, url, isRemote } = file
  const { id, name, type, progress, url, base64Url, isRemote } = file
  const ext = getFileExtension(name, type, isRemote)
  const uploadError = progress === -1

@ -80,13 +80,13 @@ const FileItem = ({
      }
      </div>
      {
        showDownloadAction && (
        showDownloadAction && url && (
          <ActionButton
            size='m'
            className='hidden group-hover/file-item:flex absolute -right-1 -top-1'
            onClick={(e) => {
              e.stopPropagation()
              downloadFile(url || '', name)
              downloadFile(url || base64Url || '', name)
            }}
          >
            <RiDownloadLine className='w-3.5 h-3.5 text-text-tertiary' />

@ -1,5 +1,4 @@
import mime from 'mime'
import { flatten } from 'lodash-es'
import { FileAppearanceTypeEnum } from './types'
import type { FileEntity } from './types'
import { upload } from '@/service/base'
@ -158,12 +157,22 @@ export const isAllowedFileExtension = (fileName: string, fileMimetype: string, a
}

export const getFilesInLogs = (rawData: any) => {
  const originalFiles = flatten(Object.keys(rawData || {}).map((key) => {
    if (typeof rawData[key] === 'object' || Array.isArray(rawData[key]))
      return rawData[key]
  const result = Object.keys(rawData || {}).map((key) => {
    if (typeof rawData[key] === 'object' && rawData[key]?.dify_model_identity === '__dify__file__') {
      return {
        varName: key,
        list: getProcessedFilesFromResponse([rawData[key]]),
      }
    }
    if (Array.isArray(rawData[key]) && rawData[key].some(item => item?.dify_model_identity === '__dify__file__')) {
      return {
        varName: key,
        list: getProcessedFilesFromResponse(rawData[key]),
      }
    }
    return undefined
  }).filter(Boolean)).filter(item => item?.model_identity === '__dify__file__')
  return getProcessedFilesFromResponse(originalFiles)
  }).filter(Boolean)
  return result
}

export const fileIsUploaded = (file: FileEntity) => {

@ -53,8 +53,7 @@ const ImageGallery: FC<Props> = ({
        imagePreviewUrl && (
          <ImagePreview
            url={imagePreviewUrl}
            onCancel={() => setImagePreviewUrl('')}
          />
            onCancel={() => setImagePreviewUrl('')} title={''} />
        )
      }
    </div>

@ -8,8 +8,7 @@ import RemarkGfm from 'remark-gfm'
import RehypeRaw from 'rehype-raw'
import SyntaxHighlighter from 'react-syntax-highlighter'
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
import type { RefObject } from 'react'
import { Component, memo, useEffect, useMemo, useRef, useState } from 'react'
import { Component, memo, useMemo, useRef, useState } from 'react'
import type { CodeComponent } from 'react-markdown/lib/ast-to-react'
import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
@ -77,29 +76,6 @@ export function PreCode(props: { children: any }) {
  )
}

// eslint-disable-next-line unused-imports/no-unused-vars
const useLazyLoad = (ref: RefObject<Element>): boolean => {
  const [isIntersecting, setIntersecting] = useState<boolean>(false)

  useEffect(() => {
    const observer = new IntersectionObserver(([entry]) => {
      if (entry.isIntersecting) {
        setIntersecting(true)
        observer.disconnect()
      }
    })

    if (ref.current)
      observer.observe(ref.current)

    return () => {
      observer.disconnect()
    }
  }, [ref])

  return isIntersecting
}

// **Add code block
// Avoid error #185 (Maximum update depth exceeded.
// This can happen when a component repeatedly calls setState inside componentWillUpdate or componentDidUpdate.
@ -123,7 +99,7 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
      try {
        return JSON.parse(String(children).replace(/\n$/, ''))
      }
      catch (error) {}
      catch (error) { }
    }
    return JSON.parse('{"title":{"text":"ECharts error - Wrong JSON format."}}')
  }, [language, children])
@ -181,7 +157,7 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
        >
          <div className='text-[13px] text-gray-500 font-normal'>{languageShowName}</div>
          <div style={{ display: 'flex' }}>
            {(['mermaid', 'svg']).includes(language!) && <SVGBtn isSVG={isSVG} setIsSVG={setIsSVG}/>}
            {(['mermaid', 'svg']).includes(language!) && <SVGBtn isSVG={isSVG} setIsSVG={setIsSVG} />}
            <CopyBtn
              className='mr-1'
              value={String(children).replace(/\n$/, '')}
@ -261,7 +237,7 @@ export function Markdown(props: { content: string; className?: string }) {
    () => {
      return (tree) => {
        const iterate = (node: any) => {
          if (node.type === 'element' && !node.properties?.src && node.properties?.ref && node.properties.ref.startsWith('{') && node.properties.ref.endsWith('}'))
          if (node.type === 'element' && node.properties?.ref)
            delete node.properties.ref

          if (node.children)

@ -1,30 +1,17 @@
import React, { useEffect, useRef, useState } from 'react'
import React, { useCallback, useEffect, useRef, useState } from 'react'
import mermaid from 'mermaid'
import { usePrevious } from 'ahooks'
import CryptoJS from 'crypto-js'
import { useTranslation } from 'react-i18next'
import { ExclamationTriangleIcon } from '@heroicons/react/24/outline'
import LoadingAnim from '@/app/components/base/chat/chat/loading-anim'
import cn from '@/utils/classnames'
import ImagePreview from '@/app/components/base/image-uploader/image-preview'

let mermaidAPI: any
mermaidAPI = null

if (typeof window !== 'undefined') {
  mermaid.initialize({
    startOnLoad: true,
    theme: 'default',
    flowchart: {
      htmlLabels: true,
      useMaxWidth: true,
    },
  })
if (typeof window !== 'undefined')
  mermaidAPI = mermaid.mermaidAPI
}

const style = {
  minWidth: '480px',
  height: 'auto',
  overflow: 'auto',
}

const svgToBase64 = (svgGraph: string) => {
  const svgBytes = new TextEncoder().encode(svgGraph)
@ -40,22 +27,26 @@ const svgToBase64 = (svgGraph: string) => {
const Flowchart = React.forwardRef((props: {
  PrimitiveCode: string
}, ref) => {
  const { t } = useTranslation()
  const [svgCode, setSvgCode] = useState(null)
  const chartId = useRef(`flowchart_${CryptoJS.MD5(props.PrimitiveCode).toString()}`)
  const [look, setLook] = useState<'classic' | 'handDrawn'>('classic')

  const prevPrimitiveCode = usePrevious(props.PrimitiveCode)
  const [isLoading, setIsLoading] = useState(true)
  const timeRef = useRef<NodeJS.Timeout>()
  const [errMsg, setErrMsg] = useState('')
  const [imagePreviewUrl, setImagePreviewUrl] = useState('')

  const renderFlowchart = useCallback(async (PrimitiveCode: string) => {
    setSvgCode(null)
    setIsLoading(true)

  const renderFlowchart = async (PrimitiveCode: string) => {
    try {
      if (typeof window !== 'undefined' && mermaidAPI) {
        const svgGraph = await mermaidAPI.render(chartId.current, PrimitiveCode)
        const svgGraph = await mermaidAPI.render('flowchart', PrimitiveCode)
        const base64Svg: any = await svgToBase64(svgGraph.svg)
        setSvgCode(base64Svg)
        setIsLoading(false)
        if (chartId.current && base64Svg)
          localStorage.setItem(chartId.current, base64Svg)
      }
    }
    catch (error) {
@ -64,15 +55,25 @@ const Flowchart = React.forwardRef((props: {
      setErrMsg((error as Error).message)
    }
  }
}
  }, [props.PrimitiveCode])

  useEffect(() => {
    const cachedSvg: any = localStorage.getItem(chartId.current)
    if (cachedSvg) {
      setSvgCode(cachedSvg)
      setIsLoading(false)
      return
    if (typeof window !== 'undefined') {
      mermaid.initialize({
        startOnLoad: true,
        theme: 'neutral',
        look,
        flowchart: {
          htmlLabels: true,
          useMaxWidth: true,
        },
      })

      renderFlowchart(props.PrimitiveCode)
    }
  }, [look])

  useEffect(() => {
    if (timeRef.current)
      clearTimeout(timeRef.current)

@ -85,24 +86,51 @@ const Flowchart = React.forwardRef((props: {
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-expect-error
    <div ref={ref}>
      <div className="msh-segmented msh-segmented-sm css-23bs09 css-var-r1">
        <div className="msh-segmented-group">
          <label className="msh-segmented-item flex items-center space-x-1 m-2 w-[200px]">
            <div key='classic'
              className={cn('flex items-center justify-center mb-4 w-[calc((100%-8px)/2)] h-8 rounded-lg border border-components-option-card-option-border bg-components-option-card-option-bg cursor-pointer system-sm-medium text-text-secondary',
                look === 'classic' && 'border-[1.5px] border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg text-text-primary',
              )}

              onClick={() => setLook('classic')}
            >
              <div className="msh-segmented-item-label">{t('app.mermaid.classic')}</div>
            </div>
            <div key='handDrawn'
              className={cn(
                'flex items-center justify-center mb-4 w-[calc((100%-8px)/2)] h-8 rounded-lg border border-components-option-card-option-border bg-components-option-card-option-bg cursor-pointer system-sm-medium text-text-secondary',
                look === 'handDrawn' && 'border-[1.5px] border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg text-text-primary',
              )}
              onClick={() => setLook('handDrawn')}
            >
              <div className="msh-segmented-item-label">{t('app.mermaid.handDrawn')}</div>
            </div>
          </label>
        </div>
      </div>
      {
        svgCode
        && <div className="mermaid" style={style}>
          {svgCode && <img src={svgCode} style={{ width: '100%', height: 'auto' }} alt="Mermaid chart" />}
        </div>
        && <div className="mermaid cursor-pointer h-auto w-full object-fit: cover" onClick={() => setImagePreviewUrl(svgCode)}>
          {svgCode && <img src={svgCode} alt="mermaid_chart" />}
        </div>
      }
      {isLoading
        && <div className='py-4 px-[26px]'>
          <LoadingAnim type='text' />
        </div>
        && <div className='py-4 px-[26px]'>
          <LoadingAnim type='text'/>
        </div>
      }
      {
        errMsg
        && <div className='py-4 px-[26px]'>
          <ExclamationTriangleIcon className='w-6 h-6 text-red-500' />

          {errMsg}
        </div>
        && <div className='py-4 px-[26px]'>
          <ExclamationTriangleIcon className='w-6 h-6 text-red-500'/>

          {errMsg}
        </div>
      }
      {
        imagePreviewUrl && (<ImagePreview title='mermaid_chart' url={imagePreviewUrl} onCancel={() => setImagePreviewUrl('')} />)
      }
    </div>
  )

web/app/components/base/skeleton/index.tsx (new file, 47 lines)
@@ -0,0 +1,47 @@
+import type { ComponentProps, FC } from 'react'
+import classNames from '@/utils/classnames'
+
+type SkeletonProps = ComponentProps<'div'>
+
+export const SkeletonContanier: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('flex flex-col gap-1', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonRow: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('flex items-center gap-2', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonRectangle: FC<SkeletonProps> = (props) => {
+  const { className, children, ...rest } = props
+  return (
+    <div className={classNames('h-2 rounded-sm opacity-20 bg-text-tertiary my-1', className)} {...rest}>
+      {children}
+    </div>
+  )
+}
+
+export const SkeletonPoint: FC = () =>
+  <div className='text-text-quaternary text-xs font-medium'>·</div>
+
+/** Usage
+ * <SkeletonContanier>
+ *   <SkeletonRow>
+ *     <SkeletonRectangle className="w-96" />
+ *     <SkeletonPoint />
+ *     <SkeletonRectangle className="w-96" />
+ *   </SkeletonRow>
+ *   <SkeletonRow>
+ *     <SkeletonRectangle className="w-96" />
+ *   </SkeletonRow>
+ *   <SkeletonRow>
+ */
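The Usage comment sketches the intended composition; as a concrete, hypothetical example, a two-line list-item placeholder built from these exports (note the container really is exported as SkeletonContanier):

import { SkeletonContanier, SkeletonPoint, SkeletonRectangle, SkeletonRow } from '@/app/components/base/skeleton'

// Hypothetical placeholder shown while a list item loads.
const ListItemSkeleton = () => (
  <SkeletonContanier>
    <SkeletonRow>
      <SkeletonRectangle className='w-24' />
      <SkeletonPoint />
      <SkeletonRectangle className='w-96' />
    </SkeletonRow>
    <SkeletonRow>
      <SkeletonRectangle className='w-48' />
    </SkeletonRow>
  </SkeletonContanier>
)

export default ListItemSkeleton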
@@ -123,11 +123,25 @@ Toast.notify = ({
     const holder = document.createElement('div')
     const root = createRoot(holder)

-    root.render(<Toast type={type} size={size} message={message} duration={duration} className={className} />)
+    root.render(
+      <ToastContext.Provider value={{
+        notify: () => {},
+        close: () => {
+          if (holder) {
+            root.unmount()
+            holder.remove()
+          }
+        },
+      }}>
+        <Toast type={type} size={size} message={message} duration={duration} className={className} />
+      </ToastContext.Provider>,
+    )
     document.body.appendChild(holder)
     setTimeout(() => {
-      if (holder)
+      if (holder) {
        root.unmount()
        holder.remove()
+      }
    }, duration || defaultDuring)
  }
}
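Rendering the toast inside its own ToastContext.Provider gives the imperative Toast.notify path a working close(): the toast's component tree can now dismiss itself before the timeout fires, and the brace fix below makes the timeout path actually remove the holder <div> as well. A hedged consumer sketch, assuming ToastContext is exported from the toast module:

import { useContext } from 'react'
import { ToastContext } from '@/app/components/base/toast'

const ToastCloseButton = () => {
  // close() unmounts the toast's dedicated React root and removes its holder <div>.
  const { close } = useContext(ToastContext)
  return <button onClick={close}>Dismiss</button>
}

export default ToastCloseButton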
@@ -21,7 +21,7 @@ import { sleep } from '@/utils'
 import type { SiteInfo } from '@/models/share'
 import { TEXT_GENERATION_TIMEOUT_MS } from '@/config'
 import {
-  getProcessedFilesFromResponse,
+  getFilesInLogs,
 } from '@/app/components/base/file-uploader/utils'

 export type IResultProps = {

@@ -288,7 +288,7 @@ const Result: FC<IResultProps> = ({
   }
   setWorkflowProcessData(produce(getWorkflowProcessData()!, (draft) => {
     draft.status = WorkflowRunningStatus.Succeeded
-    draft.files = getProcessedFilesFromResponse(data.files || [])
+    draft.files = getFilesInLogs(data.outputs || []) as any[]
   }))
   if (!data.outputs) {
     setCompletionRes('')
@@ -26,7 +26,7 @@ import {
 import { useFeaturesStore } from '@/app/components/base/features/hooks'
 import { AudioPlayerManager } from '@/app/components/base/audio-btn/audio.player.manager'
 import {
-  getProcessedFilesFromResponse,
+  getFilesInLogs,
 } from '@/app/components/base/file-uploader/utils'

 export const useWorkflowRun = () => {

@@ -213,7 +213,7 @@ export const useWorkflowRun = () => {
   draft.result = {
     ...draft.result,
     ...data,
-    files: getProcessedFilesFromResponse(data.files || []),
+    files: getFilesInLogs(data.outputs),
   } as any
   if (isStringOutput) {
     draft.resultTabActive = true
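Both this hook and the Result component above now derive the displayed file list from the run's outputs rather than from the response's top-level files field. The real logic lives in getFilesInLogs in '@/app/components/base/file-uploader/utils'; purely to illustrate the idea (this is not that function's actual implementation), a collector over outputs could look like:

// Illustrative only: recursively gather values that look like Dify file
// descriptors from a run's outputs. The marker field is an assumption.
const collectFileLikeValues = (outputs: Record<string, any> | undefined): any[] => {
  const files: any[] = []
  const visit = (value: any): void => {
    if (Array.isArray(value)) {
      value.forEach(visit)
    }
    else if (value && typeof value === 'object') {
      if (value.dify_model_identity === '__dify__file__') // assumed marker
        files.push(value)
      else
        Object.values(value).forEach(visit)
    }
  }
  visit(outputs || {})
  return files
}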
@@ -27,7 +27,10 @@ type Props = {
   isInNode?: boolean
   onGenerated?: (prompt: string) => void
   codeLanguages?: CodeLanguage
-  fileList?: FileEntity[]
+  fileList?: {
+    varName: string
+    list: FileEntity[]
+  }[]
   showFileList?: boolean
   showCodeGenerator?: boolean
 }

@@ -208,7 +208,7 @@ const CodeEditor: FC<Props> = ({
   isInNode={isInNode}
   onGenerated={onGenerated}
   codeLanguages={language}
-  fileList={fileList}
+  fileList={fileList as any}
   showFileList={showFileList}
   showCodeGenerator={showCodeGenerator}
 >
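The code generator's fileList prop goes from a flat array to files grouped by the workflow variable they came from, which is why the CodeEditor call site keeps its flat value alive with an as any cast. The two shapes, spelled out (the FileEntity import path is assumed):

import type { FileEntity } from '@/app/components/base/file-uploader/types'

// Before: one flat list of files.
type OldFileListProp = FileEntity[]

// After: each group records which variable the files belong to.
type NewFileListProp = {
  varName: string
  list: FileEntity[]
}[]

export type { OldFileListProp, NewFileListProp }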
@@ -72,7 +72,7 @@ const VariableTag = ({
   {isEnv && <Env className='shrink-0 mr-0.5 w-3.5 h-3.5 text-util-colors-violet-violet-600' />}
   {isChatVar && <BubbleX className='w-3.5 h-3.5 text-util-colors-teal-teal-700' />}
   <div
-    className={cn('truncate text-text-accent font-medium', (isEnv || isChatVar) && 'text-text-secondary')}
+    className={cn('truncate ml-0.5 text-text-accent font-medium', (isEnv || isChatVar) && 'text-text-secondary')}
     title={variableName}
   >
     {variableName}
@@ -274,7 +274,7 @@ const VarReferenceVars: FC<Props> = ({
   {
     !hideSearch && (
       <>
-        <div className={cn('mb-2 mx-1', searchBoxClassName)} onClick={e => e.stopPropagation()}>
+        <div className={cn('mb-1 mx-2 mt-2', searchBoxClassName)} onClick={e => e.stopPropagation()}>
           <Input
             showLeftIcon
             showClearIcon
@@ -30,7 +30,9 @@ const nodeDefault: NodeDefault<AssignerNodeType> = {
     errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.assignedVariable') })

   if (!errorMessages && value.operation !== WriteMode.clear) {
-    if (value.operation === WriteMode.set) {
+    if (value.operation === WriteMode.set || value.operation === WriteMode.increment
+      || value.operation === WriteMode.decrement || value.operation === WriteMode.multiply
+      || value.operation === WriteMode.divide) {
       if (!value.value && typeof value.value !== 'number')
         errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.variable') })
     }
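In words: the validator previously demanded an operand only for set; now every operand-consuming write mode (set plus the four arithmetic modes) fails validation when no value is supplied, while a value of 0 still passes thanks to the explicit number check. The same rule as a small standalone predicate (WriteMode import path assumed, as in the assigner node):

import { WriteMode } from './types'

const OPERAND_MODES = new Set([
  WriteMode.set,
  WriteMode.increment,
  WriteMode.decrement,
  WriteMode.multiply,
  WriteMode.divide,
])

// Mirrors the hunk: a falsy value is missing, unless it is the number 0.
const isMissingOperand = (operation: WriteMode, value: unknown): boolean =>
  OPERAND_MODES.has(operation) && !value && typeof value !== 'number'

export { isMissingOperand }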
@@ -25,10 +25,12 @@ import { FILE_TYPE_OPTIONS, SUB_VARIABLES, TRANSFER_METHOD } from '../../default
 import ConditionWrap from '../condition-wrap'
 import ConditionOperator from './condition-operator'
 import ConditionInput from './condition-input'
 import VariableTag from '@/app/components/workflow/nodes/_base/components/variable-tag'

+import ConditionVarSelector from './condition-var-selector'
 import type {
   Node,
   NodeOutPutVar,
+  ValueSelector,
   Var,
 } from '@/app/components/workflow/types'
 import { VarType } from '@/app/components/workflow/types'

@@ -82,6 +84,7 @@ const ConditionItem = ({
   const { t } = useTranslation()

   const [isHovered, setIsHovered] = useState(false)
+  const [open, setOpen] = useState(false)

   const doUpdateCondition = useCallback((newCondition: Condition) => {
     if (isSubVariableKey)

@@ -190,6 +193,17 @@ const ConditionItem = ({
     onRemoveCondition?.(caseId, condition.id)
   }, [caseId, condition, conditionId, isSubVariableKey, onRemoveCondition, onRemoveSubVariableCondition])

+  const handleVarChange = useCallback((valueSelector: ValueSelector, varItem: Var) => {
+    const newCondition = produce(condition, (draft) => {
+      draft.variable_selector = valueSelector
+      draft.varType = varItem.type
+      draft.value = ''
+      draft.comparison_operator = getOperators(varItem.type)[0]
+    })
+    doUpdateCondition(newCondition)
+    setOpen(false)
+  }, [condition, doUpdateCondition])
+
   return (
     <div className={cn('flex mb-1 last-of-type:mb-0', className)}>
       <div className={cn(

@@ -221,11 +235,14 @@ const ConditionItem = ({
             />
           )
           : (
-            <VariableTag
+            <ConditionVarSelector
+              open={open}
+              onOpenChange={setOpen}
               valueSelector={condition.variable_selector || []}
               varType={condition.varType}
               availableNodes={availableNodes}
-              isShort
+              nodesOutputVars={nodesOutputVars}
+              onChange={handleVarChange}
             />
           )}
@@ -0,0 +1,58 @@
+import { PortalToFollowElem, PortalToFollowElemContent, PortalToFollowElemTrigger } from '@/app/components/base/portal-to-follow-elem'
+import VariableTag from '@/app/components/workflow/nodes/_base/components/variable-tag'
+import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
+import type { Node, NodeOutPutVar, ValueSelector, Var, VarType } from '@/app/components/workflow/types'
+
+type ConditionVarSelectorProps = {
+  open: boolean
+  onOpenChange: (open: boolean) => void
+  valueSelector: ValueSelector
+  varType: VarType
+  availableNodes: Node[]
+  nodesOutputVars: NodeOutPutVar[]
+  onChange: (valueSelector: ValueSelector, varItem: Var) => void
+}
+
+const ConditionVarSelector = ({
+  open,
+  onOpenChange,
+  valueSelector,
+  varType,
+  availableNodes,
+  nodesOutputVars,
+  onChange,
+}: ConditionVarSelectorProps) => {
+  return (
+    <PortalToFollowElem
+      open={open}
+      onOpenChange={onOpenChange}
+      placement='bottom-start'
+      offset={{
+        mainAxis: 4,
+        crossAxis: 0,
+      }}
+    >
+      <PortalToFollowElemTrigger onClick={() => onOpenChange(!open)}>
+        <div className="cursor-pointer">
+          <VariableTag
+            valueSelector={valueSelector}
+            varType={varType}
+            availableNodes={availableNodes}
+            isShort
+          />
+        </div>
+      </PortalToFollowElemTrigger>
+      <PortalToFollowElemContent className='z-[1000]'>
+        <div className='w-[296px] bg-components-panel-bg-blur rounded-lg border-[0.5px] border-components-panel-border shadow-lg'>
+          <VarReferenceVars
+            vars={nodesOutputVars}
+            isSupportFileVar
+            onChange={onChange}
+          />
+        </div>
+      </PortalToFollowElemContent>
+    </PortalToFollowElem>
+  )
+}
+
+export default ConditionVarSelector
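Read together with the condition-item hunk above: the bare VariableTag becomes the trigger of this popover, and picking a variable flows back through onChange into handleVarChange. A stubbed usage sketch (placeholder data; a real caller passes the surrounding workflow's nodes and output vars):

import { useState } from 'react'
import ConditionVarSelector from './condition-var-selector'
import type { ValueSelector, Var } from '@/app/components/workflow/types'
import { VarType } from '@/app/components/workflow/types'

const Demo = () => {
  const [open, setOpen] = useState(false)
  const [selector, setSelector] = useState<ValueSelector>([])
  const handleChange = (valueSelector: ValueSelector, _varItem: Var) => {
    setSelector(valueSelector)
    setOpen(false) // close the popover once a variable is picked
  }
  return (
    <ConditionVarSelector
      open={open}
      onOpenChange={setOpen}
      valueSelector={selector}
      varType={VarType.string}
      availableNodes={[]}
      nodesOutputVars={[]}
      onChange={handleChange}
    />
  )
}

export default Demo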
@@ -73,7 +73,7 @@ const ConditionValue = ({

       <div
         className={cn(
-          'shrink-0 truncate text-xs font-medium text-text-accent',
+          'shrink-0 ml-0.5 truncate text-xs font-medium text-text-accent',
           !notHasValue && 'max-w-[70px]',
         )}
         title={variableName}
@@ -173,7 +173,8 @@ const InputVarList: FC<Props> = ({
   value={varInput?.type === VarKindType.constant ? (varInput?.value || '') : (varInput?.value || [])}
   onChange={handleNotMixedTypeChange(variable)}
   onOpen={handleOpen(index)}
-  defaultVarKindType={VarKindType.variable}
+  defaultVarKindType={isNumber ? VarKindType.constant : VarKindType.variable}
   isSupportConstantValue={isSupportConstantValue}
+  filterVar={isNumber ? filterVar : undefined}
   availableVars={isSelect ? availableVars : undefined}
   schema={schema}
@@ -33,7 +33,7 @@ const NodeVariableItem = ({
   const { t } = useTranslation()
   return (
     <div className={cn(
-      'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-param-bg',
+      'relative flex items-center p-[3px] pl-[5px] gap-1 self-stretch rounded-md bg-workflow-block-parma-bg',
       showBorder && '!bg-black/[0.02]',
       className,
     )}>
@@ -97,8 +97,9 @@ const ChatVariableModal = ({
     return objectPlaceholder
   }, [type])
   const getObjectValue = useCallback(() => {
-    if (!chatVar)
+    if (!chatVar || Object.keys(chatVar.value).length === 0)
       return [DEFAULT_OBJECT_VALUE]

     return Object.keys(chatVar.value).map((key) => {
       return {
         key,
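The widened guard matters because a chat variable whose value is an empty object previously slipped past !chatVar and produced zero editor rows from Object.keys({}).map(...); now both the missing and the empty case fall back to the default placeholder row. The edge case, isolated:

// An empty object value now counts as 'no value yet'.
const shouldUseDefaultObjectValue = (value: Record<string, unknown> | undefined): boolean =>
  !value || Object.keys(value).length === 0

export { shouldUseDefaultObjectValue }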
Some files were not shown because too many files have changed in this diff.