Mirror of https://github.com/langgenius/dify.git (synced 2026-02-23 03:17:57 +08:00)
Merge remote-tracking branch 'origin/main' into feat/trigger
@@ -45,7 +45,7 @@ APP_WEB_URL=
 # Recommendation: use a dedicated domain (e.g., https://upload.example.com).
 # Alternatively, use http://<your-ip>:5001 or http://api:5001,
 # ensuring port 5001 is externally accessible (see docker-compose.yaml).
-FILES_URL=
+FILES_URL=http://api:5001
 
 # INTERNAL_FILES_URL is used for plugin daemon communication within Docker network.
 # Set this to the internal Docker service URL for proper plugin file access.
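
For illustration, a minimal sketch of the split these two settings imply: callers inside the Docker network prefer INTERNAL_FILES_URL and fall back to the public FILES_URL. The helper name and the fallback order are assumptions for this sketch, not dify's actual implementation.

    import os

    def file_base_url(internal: bool = False) -> str:
        """Pick the base URL for file access (illustrative helper, not dify's API).

        internal=True is meant for in-network callers such as the plugin daemon.
        """
        if internal:
            # Assumed fallback: use the Docker-internal URL when set,
            # otherwise reuse the public FILES_URL.
            return os.getenv("INTERNAL_FILES_URL") or os.getenv("FILES_URL", "")
        return os.getenv("FILES_URL", "")
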
@@ -225,6 +225,9 @@ SQLALCHEMY_ECHO=false
 SQLALCHEMY_POOL_PRE_PING=false
+# Whether to use a last-in-first-out (LIFO) queue for pooled connections; if false, the default FIFO queue is used
+SQLALCHEMY_POOL_USE_LIFO=false
 # Number of seconds to wait for a connection from the pool before raising a timeout error.
 # Default is 30
 SQLALCHEMY_POOL_TIMEOUT=30
+
 # Maximum number of connections to the database
 # Default is 100
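
These variables map one-to-one onto SQLAlchemy engine/pool arguments. A minimal sketch of how such settings are typically fed into create_engine; the env-reading glue and the SQLALCHEMY_DATABASE_URI / SQLALCHEMY_POOL_SIZE variable names are assumptions here, not lines quoted from this diff:

    import os

    from sqlalchemy import create_engine

    engine = create_engine(
        os.environ["SQLALCHEMY_DATABASE_URI"],  # assumed variable name for the DSN
        echo=os.getenv("SQLALCHEMY_ECHO", "false") == "true",
        pool_pre_ping=os.getenv("SQLALCHEMY_POOL_PRE_PING", "false") == "true",
        pool_use_lifo=os.getenv("SQLALCHEMY_POOL_USE_LIFO", "false") == "true",
        pool_timeout=int(os.getenv("SQLALCHEMY_POOL_TIMEOUT", "30")),  # seconds
        pool_size=int(os.getenv("SQLALCHEMY_POOL_SIZE", "100")),       # max pooled connections
    )
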
@@ -632,6 +635,8 @@ BAIDU_VECTOR_DB_API_KEY=dify
 BAIDU_VECTOR_DB_DATABASE=dify
 BAIDU_VECTOR_DB_SHARD=1
 BAIDU_VECTOR_DB_REPLICAS=3
+BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
+BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE
 
 # VikingDB configurations, only available when VECTOR_STORE is `vikingdb`
 VIKINGDB_ACCESS_KEY=your-ak
@@ -643,9 +648,10 @@ VIKINGDB_CONNECTION_TIMEOUT=30
 VIKINGDB_SOCKET_TIMEOUT=30
 
 # Lindorm configuration, only available when VECTOR_STORE is `lindorm`
-LINDORM_URL=http://lindorm:30070
-LINDORM_USERNAME=lindorm
-LINDORM_PASSWORD=lindorm
+LINDORM_URL=http://localhost:30070
+LINDORM_USERNAME=admin
+LINDORM_PASSWORD=admin
 LINDORM_USING_UGC=True
+LINDORM_QUERY_TIMEOUT=1
 
 # OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
@@ -657,6 +663,7 @@ OCEANBASE_VECTOR_DATABASE=test
 OCEANBASE_CLUSTER_NAME=difyai
 OCEANBASE_MEMORY_LIMIT=6G
 OCEANBASE_ENABLE_HYBRID_SEARCH=false
+OCEANBASE_FULLTEXT_PARSER=ik
 
 # opengauss configurations, only available when VECTOR_STORE is `opengauss`
 OPENGAUSS_HOST=opengauss
@@ -843,6 +850,7 @@ INVITE_EXPIRY_HOURS=72
 
 # Reset password token valid time (minutes)
 RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
+EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5
 CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5
 OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5
 
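
All four values are plain minute counts: a token's expiry is its issue time plus the configured offset. A tiny sketch of that arithmetic (the helper name and default are illustrative, matching the defaults in the file above):

    import os
    from datetime import datetime, timedelta, timezone

    def token_expires_at(env_name: str = "RESET_PASSWORD_TOKEN_EXPIRY_MINUTES") -> datetime:
        # Read the configured lifetime in minutes (default 5, as in the file above).
        minutes = int(os.getenv(env_name, "5"))
        return datetime.now(timezone.utc) + timedelta(minutes=minutes)
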
@@ -870,6 +878,16 @@ MAX_VARIABLE_SIZE=204800
 WORKFLOW_PARALLEL_DEPTH_LIMIT=3
 WORKFLOW_FILE_UPLOAD_LIMIT=10
+
+# GraphEngine Worker Pool Configuration
+# Minimum number of workers per GraphEngine instance (default: 1)
+GRAPH_ENGINE_MIN_WORKERS=1
+# Maximum number of workers per GraphEngine instance (default: 10)
+GRAPH_ENGINE_MAX_WORKERS=10
+# Queue depth threshold that triggers worker scale-up (default: 3)
+GRAPH_ENGINE_SCALE_UP_THRESHOLD=3
+# Seconds of idle time before scaling down workers (default: 5.0)
+GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0
 
 # Workflow storage configuration
 # Options: rdbms, hybrid
 # rdbms: Use only the relational database (default)
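
Taken together, the four GRAPH_ENGINE_* values describe a simple autoscaling policy for the per-instance worker pool: start at the minimum, add a worker when the queue backs up past the threshold (up to the maximum), and retire workers after an idle grace period (down to the minimum). A minimal sketch of that policy as the comments describe it; the class and method names are illustrative, not dify's GraphEngine internals:

    import os
    import time

    MIN_WORKERS = int(os.getenv("GRAPH_ENGINE_MIN_WORKERS", "1"))
    MAX_WORKERS = int(os.getenv("GRAPH_ENGINE_MAX_WORKERS", "10"))
    SCALE_UP_THRESHOLD = int(os.getenv("GRAPH_ENGINE_SCALE_UP_THRESHOLD", "3"))
    SCALE_DOWN_IDLE_TIME = float(os.getenv("GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME", "5.0"))

    class PoolScaler:
        """Illustrative scaling decisions for a worker pool (not dify's implementation)."""

        def __init__(self) -> None:
            self.workers = MIN_WORKERS
            self.idle_since: float | None = None  # when the pool last went idle

        def tick(self, queue_depth: int) -> None:
            if queue_depth > SCALE_UP_THRESHOLD and self.workers < MAX_WORKERS:
                self.workers += 1            # queue is backing up: add a worker
                self.idle_since = None
            elif queue_depth == 0:
                if self.idle_since is None:
                    self.idle_since = time.monotonic()
                elif (time.monotonic() - self.idle_since) >= SCALE_DOWN_IDLE_TIME \
                        and self.workers > MIN_WORKERS:
                    self.workers -= 1        # idle long enough: retire a worker
                    self.idle_since = time.monotonic()
            else:
                self.idle_since = None       # work in flight: reset the idle clock
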
@@ -907,6 +925,12 @@ WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100
 HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
 HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
 HTTP_REQUEST_NODE_SSL_VERIFY=True
+# Base64 encoded CA certificate data for custom certificate verification (PEM format, optional)
+# HTTP_REQUEST_NODE_SSL_CERT_DATA=LS0tLS1CRUdJTi...
+# Base64 encoded client certificate data for mutual TLS authentication (PEM format, optional)
+# HTTP_REQUEST_NODE_SSL_CLIENT_CERT_DATA=LS0tLS1CRUdJTi...
+# Base64 encoded client private key data for mutual TLS authentication (PEM format, optional)
+# HTTP_REQUEST_NODE_SSL_CLIENT_KEY_DATA=LS0tLS1CRUdJTi...
 
 # Webhook request configuration
 WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760
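
Each of the three commented-out *_SSL_*_DATA values holds an entire PEM file, base64 encoded into a single line (hence the LS0tLS1CRUdJTi... prefix, which is "-----BEGIN" encoded). One way to produce such a value; the file name ca.pem is a placeholder:

    import base64
    from pathlib import Path

    # Encode a PEM certificate into the single-line base64 form these variables expect.
    pem_bytes = Path("ca.pem").read_bytes()
    print(base64.b64encode(pem_bytes).decode("ascii"))
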
@@ -1263,6 +1287,10 @@ QUEUE_MONITOR_INTERVAL=30
 SWAGGER_UI_ENABLED=true
 SWAGGER_UI_PATH=/swagger-ui.html
 
+# Whether to encrypt dataset IDs when exporting DSL files (default: true)
+# Set to false to export dataset IDs as plain text for easier cross-environment import
+DSL_EXPORT_ENCRYPT_DATASET_ID=true
+
 # Celery schedule tasks configuration
 ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false
 ENABLE_CLEAN_UNUSED_DATASETS_TASK=false