Merge remote-tracking branch 'myori/main' into feat/collaboration2
@@ -58,8 +58,8 @@ FILES_URL=
INTERNAL_FILES_URL=

# Ensure UTF-8 encoding
LANG=en_US.UTF-8
LC_ALL=en_US.UTF-8
LANG=C.UTF-8
LC_ALL=C.UTF-8
PYTHONIOENCODING=utf-8

# ------------------------------
@@ -69,6 +69,8 @@ PYTHONIOENCODING=utf-8
# The log level for the application.
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL=INFO
# Log output format: text or json
LOG_OUTPUT_FORMAT=text
# Log file path
LOG_FILE=/app/logs/server.log
# Log file max size, the unit is MB
@@ -137,6 +139,8 @@ ACCESS_TOKEN_EXPIRE_MINUTES=60
# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30

# The default number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
APP_DEFAULT_ACTIVE_REQUESTS=0
# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0
APP_MAX_EXECUTION_TIME=1200
@@ -229,15 +233,20 @@ NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false

# ------------------------------
# Database Configuration
# The database uses PostgreSQL. Please use the public schema.
# It is consistent with the configuration in the 'db' service below.
# The database uses PostgreSQL or MySQL. OceanBase and seekdb are also supported. Please use the public schema.
# It is consistent with the configuration in the database service below.
# You can adjust the database configuration according to your needs.
# ------------------------------

# Database type, supported values are `postgresql`, `mysql`, `oceanbase`, `seekdb`
DB_TYPE=postgresql
# For MySQL, only the `root` user is supported for now
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db
DB_HOST=db_postgres
DB_PORT=5432
DB_DATABASE=dify

# The size of the database connection pool.
# The default is 30 connections, which can be increased as needed.
SQLALCHEMY_POOL_SIZE=30
@@ -299,6 +308,29 @@ POSTGRES_STATEMENT_TIMEOUT=0
# A value of 0 prevents the server from terminating idle sessions.
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0

# MySQL Performance Configuration
# Maximum number of connections to MySQL
#
# Default is 1000
MYSQL_MAX_CONNECTIONS=1000

# InnoDB buffer pool size
# Default is 512M
# Recommended value: 70-80% of available memory for a dedicated MySQL server
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size
MYSQL_INNODB_BUFFER_POOL_SIZE=512M

# InnoDB log file size
# Default is 128M
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size
MYSQL_INNODB_LOG_FILE_SIZE=128M

# InnoDB flush log at transaction commit
# Default is 2 (flush to OS cache, sync every second)
# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache)
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2

# ------------------------------
# Redis Configuration
# This Redis configuration is used for caching and for pub/sub during conversation.
@@ -370,11 +402,11 @@ WEB_API_CORS_ALLOW_ORIGINS=*
# Specifies the allowed origins for cross-origin requests to the console API,
# e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS=*
# Set COOKIE_DOMAIN when the console frontend and API are on different subdomains.
# Provide the registrable domain (e.g. example.com); leading dots are optional.
# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site's registrable domain (e.g., `example.com`). Leading dots are optional.
COOKIE_DOMAIN=
# The frontend reads NEXT_PUBLIC_COOKIE_DOMAIN to align cookie handling with the API.
# When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1.
NEXT_PUBLIC_COOKIE_DOMAIN=
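As an illustration (hypothetical values, not shipped defaults), a console at `console.example.com` calling an API at `api.example.com` would share cookies via the registrable domain:

```bash
# Hypothetical cross-subdomain deployment
COOKIE_DOMAIN=example.com
```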
NEXT_PUBLIC_BATCH_CONCURRENCY=5

# ------------------------------
# File Storage Configuration
@@ -422,6 +454,15 @@ S3_SECRET_KEY=
# If set to false, the access key and secret key must be provided.
S3_USE_AWS_MANAGED_IAM=false

# Workflow run and Conversation archive storage (S3-compatible)
ARCHIVE_STORAGE_ENABLED=false
ARCHIVE_STORAGE_ENDPOINT=
ARCHIVE_STORAGE_ARCHIVE_BUCKET=
ARCHIVE_STORAGE_EXPORT_BUCKET=
ARCHIVE_STORAGE_ACCESS_KEY=
ARCHIVE_STORAGE_SECRET_KEY=
ARCHIVE_STORAGE_REGION=auto

# Azure Blob Configuration
#
AZURE_BLOB_ACCOUNT_NAME=difyai
@@ -444,6 +485,7 @@ ALIYUN_OSS_REGION=ap-southeast-1
ALIYUN_OSS_AUTH_VERSION=v4
# Don't start with '/'. OSS doesn't support a leading slash in object names.
ALIYUN_OSS_PATH=your-path
ALIYUN_CLOUDBOX_ID=your-cloudbox-id

# Tencent COS Configuration
#
@@ -452,6 +494,7 @@ TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain

# Oracle Storage Configuration
#
@@ -467,6 +510,7 @@ HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
HUAWEI_OBS_PATH_STYLE=false

# Volcengine TOS Configuration
#
@@ -494,7 +538,7 @@ SUPABASE_URL=your-server-url
# ------------------------------

# The type of vector store to use.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`.
# Supported values are `weaviate`, `oceanbase`, `seekdb`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`, `iris`.
VECTOR_STORE=weaviate
# Prefix used to create collection name in vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index
@@ -503,6 +547,24 @@ VECTOR_INDEX_NAME_PREFIX=Vector_index
WEAVIATE_ENDPOINT=http://weaviate:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051
WEAVIATE_TOKENIZATION=word

# For OceanBase metadata database configuration, available when `DB_TYPE` is `oceanbase`.
# For OceanBase vector database configuration, available when `VECTOR_STORE` is `oceanbase`.
# To use OceanBase as both the vector database and the metadata database, set both `DB_TYPE` and `VECTOR_STORE` to `oceanbase`, and make the Database Configuration above match the vector database settings below (see the example after this block).
# seekdb is the lite version of OceanBase and shares its connection configuration with OceanBase.
OCEANBASE_VECTOR_HOST=oceanbase
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_CLUSTER_NAME=difyai
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
# For the OceanBase vector database, built-in fulltext parsers are `ngram`, `beng`, `space`, `ngram2`, `ik`
# For the OceanBase vector database, external fulltext parsers (require plugin installation) are `japanese_ftparser`, `thai_ftparser`
OCEANBASE_FULLTEXT_PARSER=ik
SEEKDB_MEMORY_LIMIT=2G
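A minimal sketch of the dual-role setup described above, using hypothetical values that mirror the defaults (the exact `DB_DATABASE` value depends on your tenant layout):

```bash
# OceanBase as both metadata and vector store (illustrative)
DB_TYPE=oceanbase
VECTOR_STORE=oceanbase
# Point the metadata connection at the same instance as the vector settings:
DB_HOST=oceanbase
DB_PORT=2881
DB_USERNAME=root@test
DB_PASSWORD=difyai123456
DB_DATABASE=test
```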

# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL=http://qdrant:6333
@@ -709,19 +771,6 @@ LINDORM_PASSWORD=admin
LINDORM_USING_UGC=True
LINDORM_QUERY_TIMEOUT=1

# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
# Built-in fulltext parsers are `ngram`, `beng`, `space`, `ngram2`, `ik`
# External fulltext parsers (require plugin installation) are `japanese_ftparser`, `thai_ftparser`
OCEANBASE_VECTOR_HOST=oceanbase
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_CLUSTER_NAME=difyai
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
OCEANBASE_FULLTEXT_PARSER=ik

# opengauss configurations, only available when VECTOR_STORE is `opengauss`
OPENGAUSS_HOST=opengauss
OPENGAUSS_PORT=6600
@@ -763,6 +812,21 @@ CLICKZETTA_ANALYZER_TYPE=chinese
CLICKZETTA_ANALYZER_MODE=smart
CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance

# InterSystems IRIS configuration, only available when VECTOR_STORE is `iris`
IRIS_HOST=iris
IRIS_SUPER_SERVER_PORT=1972
IRIS_WEB_SERVER_PORT=52773
IRIS_USER=_SYSTEM
IRIS_PASSWORD=Dify@1234
IRIS_DATABASE=USER
IRIS_SCHEMA=dify
IRIS_CONNECTION_URL=
IRIS_MIN_CONNECTION=1
IRIS_MAX_CONNECTION=3
IRIS_TEXT_INDEX=true
IRIS_TEXT_INDEX_LANGUAGE=en
IRIS_TIMEZONE=UTC

# ------------------------------
# Knowledge Configuration
# ------------------------------
@@ -779,6 +843,19 @@ UPLOAD_FILE_BATCH_LIMIT=5
# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
UPLOAD_FILE_EXTENSION_BLACKLIST=

# Maximum number of files allowed in a single chunk attachment, default 10.
SINGLE_CHUNK_ATTACHMENT_LIMIT=10

# Maximum number of files allowed in an image batch upload operation
IMAGE_FILE_BATCH_LIMIT=10

# Maximum allowed image file size for attachments in megabytes, default 2.
ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2

# Timeout for downloading image attachments in seconds, default 60.
ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60


# ETL type, support: `dify`, `Unstructured`
# `dify` Dify's proprietary file extraction scheme
# `Unstructured` Unstructured.io file extraction scheme
@@ -896,6 +973,8 @@ SMTP_USERNAME=
SMTP_PASSWORD=
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
# Optional: override the local hostname used for SMTP HELO/EHLO
SMTP_LOCAL_HOSTNAME=

# SendGrid configuration
SENDGRID_API_KEY=
@@ -965,18 +1044,26 @@ WORKFLOW_NODE_EXECUTION_STORAGE=rdbms
# Options:
# - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default)
# - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository
# - extensions.logstore.repositories.logstore_workflow_execution_repository.LogstoreWorkflowExecutionRepository
CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository

# Core workflow node execution repository implementation
# Options:
# - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default)
# - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository
# - extensions.logstore.repositories.logstore_workflow_node_execution_repository.LogstoreWorkflowNodeExecutionRepository
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository

# API workflow run repository implementation
# Options:
# - repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository (default)
# - extensions.logstore.repositories.logstore_api_workflow_run_repository.LogstoreAPIWorkflowRunRepository
API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository

# API workflow node execution repository implementation
# Options:
# - repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository (default)
# - extensions.logstore.repositories.logstore_api_workflow_node_execution_repository.LogstoreAPIWorkflowNodeExecutionRepository
API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository
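If the Aliyun SLS Logstore below is configured, all four repositories can be pointed at the logstore-backed implementations from the option lists above. A sketch of the overrides (not the shipped defaults):

```bash
CORE_WORKFLOW_EXECUTION_REPOSITORY=extensions.logstore.repositories.logstore_workflow_execution_repository.LogstoreWorkflowExecutionRepository
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=extensions.logstore.repositories.logstore_workflow_node_execution_repository.LogstoreWorkflowNodeExecutionRepository
API_WORKFLOW_RUN_REPOSITORY=extensions.logstore.repositories.logstore_api_workflow_run_repository.LogstoreAPIWorkflowRunRepository
API_WORKFLOW_NODE_EXECUTION_REPOSITORY=extensions.logstore.repositories.logstore_api_workflow_node_execution_repository.LogstoreAPIWorkflowNodeExecutionRepository
```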

# Workflow log cleanup configuration
@@ -987,6 +1074,29 @@ WORKFLOW_LOG_RETENTION_DAYS=30
# Batch size for workflow log cleanup operations (default: 100)
WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100

# Aliyun SLS Logstore Configuration
# Aliyun Access Key ID
ALIYUN_SLS_ACCESS_KEY_ID=
# Aliyun Access Key Secret
ALIYUN_SLS_ACCESS_KEY_SECRET=
# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com)
ALIYUN_SLS_ENDPOINT=
# Aliyun SLS Region (e.g., cn-hangzhou)
ALIYUN_SLS_REGION=
# Aliyun SLS Project Name
ALIYUN_SLS_PROJECT_NAME=
# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage)
ALIYUN_SLS_LOGSTORE_TTL=365
# Enable dual-write to both SLS LogStore and SQL database (default: false)
LOGSTORE_DUAL_WRITE_ENABLED=false
# Enable dual-read fallback to SQL database when LogStore returns no results (default: true)
# Useful for migration scenarios where historical data exists only in SQL database
LOGSTORE_DUAL_READ_ENABLED=true
# Control flag for whether to write the `graph` field to LogStore.
# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", write the full `graph` field;
# otherwise write an empty {} instead. Defaults to writing the `graph` field.
LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true
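A plausible migration posture (a sketch, not the shipped defaults) is to dual-write while leaving the SQL read fallback on until history has aged out of the database:

```bash
LOGSTORE_DUAL_WRITE_ENABLED=true
LOGSTORE_DUAL_READ_ENABLED=true
```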

# HTTP request node in workflow configuration
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
@@ -1045,18 +1155,14 @@ ALLOW_UNSAFE_DATA_SCHEME=false
MAX_TREE_DEPTH=50

# ------------------------------
# Environment Variables for db Service
# Environment Variables for database Service
# ------------------------------

# The name of the default postgres user.
POSTGRES_USER=${DB_USERNAME}
# The password for the default postgres user.
POSTGRES_PASSWORD=${DB_PASSWORD}
# The name of the default postgres database.
POSTGRES_DB=${DB_DATABASE}
# postgres data directory
# Postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata

# MySQL Default Configuration
MYSQL_HOST_VOLUME=./volumes/mysql/data

# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
@@ -1090,6 +1196,10 @@ WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
WEAVIATE_DISABLE_TELEMETRY=false
WEAVIATE_ENABLE_TOKENIZER_GSE=false
WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false
WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false

# ------------------------------
# Environment Variables for Chroma
@@ -1172,7 +1282,7 @@ NGINX_SSL_PORT=443
# and modify the env vars below accordingly.
NGINX_SSL_CERT_FILENAME=dify.crt
NGINX_SSL_CERT_KEY_FILENAME=dify.key
NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3

# Nginx performance tuning
NGINX_WORKER_PROCESSES=auto
@@ -1216,12 +1326,12 @@ SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20
SSRF_POOL_KEEPALIVE_EXPIRY=5.0

# ------------------------------
# docker env var for specifying vector db type at startup
# (based on the vector db type, the corresponding docker
# docker env var for specifying vector db and metadata db type at startup
# (based on the vector db and metadata db type, the corresponding docker
# compose profile will be used)
# if you want to use unstructured, add ',unstructured' to the end
# ------------------------------
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql}
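Since Compose expands these variables at startup, setting (hypothetically) `VECTOR_STORE=milvus` and `DB_TYPE=mysql` in `.env` yields `COMPOSE_PROFILES=milvus,mysql`, which is equivalent to:

```bash
docker compose --profile milvus --profile mysql up -d
```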

# ------------------------------
# Docker Compose Service Expose Host Port Configurations
@@ -1293,7 +1403,10 @@ PLUGIN_STDIO_BUFFER_SIZE=1024
PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880

PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
# Plugin Daemon side timeout (configure to match the API side below)
PLUGIN_MAX_EXECUTION_TIMEOUT=600
# API side timeout (configure to match the Plugin Daemon side above)
PLUGIN_DAEMON_TIMEOUT=600.0
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_MIRROR_URL=

@@ -1364,7 +1477,7 @@ QUEUE_MONITOR_ALERT_EMAILS=
QUEUE_MONITOR_INTERVAL=30

# Swagger UI configuration
SWAGGER_UI_ENABLED=true
SWAGGER_UI_ENABLED=false
SWAGGER_UI_PATH=/swagger-ui.html

# Whether to encrypt dataset IDs when exporting DSL files (default: true)
@@ -1380,6 +1493,7 @@ ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
ENABLE_CREATE_TIDB_SERVERLESS_TASK=false
ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false
ENABLE_CLEAN_MESSAGES=false
ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
ENABLE_DATASETS_QUEUE_MONITOR=false
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
@@ -1390,3 +1504,22 @@ WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0

# Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1

# Maximum allowed CSV file size for annotation import in megabytes
ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2
# Maximum number of annotation records allowed in a single import
ANNOTATION_IMPORT_MAX_RECORDS=10000
# Minimum number of annotation records required in a single import
ANNOTATION_IMPORT_MIN_RECORDS=1
ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5
ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20
# Maximum number of concurrent annotation import tasks per tenant
ANNOTATION_IMPORT_MAX_CONCURRENT=5

# The API key of Amplitude
AMPLITUDE_API_KEY=

# Sandbox expired records clean configuration
SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000
SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30

@@ -23,6 +23,10 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
   - Navigate to the `docker` directory.
   - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
   - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
   - **Optional (Recommended for upgrades)**:
     You may use the environment synchronization tool to help keep your `.env` file aligned with the latest `.env.example` updates, while preserving your custom settings.
     This is especially useful when upgrading Dify or managing a large, customized `.env` file.
     See the [Environment Variables Synchronization](#environment-variables-synchronization) section below.
1. **Running the Services**:
   - Execute `docker compose up` from the `docker` directory to start the services.
   - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch` (see the sketch below).
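A minimal sketch of that flow (illustrative values):

```bash
# In .env:
#   VECTOR_STORE=milvus
cd docker
docker compose up -d
```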
@@ -40,7 +44,9 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
   - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file).
1. **Running Middleware Services**:
   - Navigate to the `docker` directory.
   - Execute `docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d` to start the middleware services. (Change the profile to another vector database if you are not using Weaviate.)
   - Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance.

> Compose automatically loads `COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate` from `middleware.env`, so no extra `--profile` flags are needed. Adjust variables in `middleware.env` if you want a different combination of services.
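For instance (a sketch assuming the bundled profiles), switching the middleware database to MySQL is just an edit before starting:

```bash
# In middleware.env (hypothetical edit): DB_TYPE=mysql
# Compose then resolves COMPOSE_PROFILES=mysql,weaviate and starts db_mysql instead of db_postgres.
docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d
```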

### Migration for Existing Users

@@ -109,6 +115,47 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w

- Each service, such as `nginx`, `redis`, `db`, and the vector databases, has specific environment variables that are directly referenced in the `docker-compose.yaml`.

### Environment Variables Synchronization

When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example`.

To help keep your existing `.env` file up to date **without losing your custom values**, an optional environment variables synchronization tool is provided.

> This tool performs a **one-way synchronization** from `.env.example` to `.env`.
> Existing values in `.env` are never overwritten automatically.

#### `dify-env-sync.sh` (Optional)

This script compares your current `.env` file with the latest `.env.example` template and helps safely apply new or updated environment variables.

**What it does**

- Creates a backup of the current `.env` file before making any changes
- Synchronizes newly added environment variables from `.env.example`
- Preserves all existing custom values in `.env`
- Displays differences and variables removed from `.env.example` for review

**Backup behavior**

Before synchronization, the current `.env` file is saved to the `env-backup/` directory with a timestamped filename
(e.g. `env-backup/.env.backup_20231218_143022`).

**When to use**

- After upgrading Dify to a newer version
- When `.env.example` has been updated with new environment variables
- When managing a large or heavily customized `.env` file

**Usage**

```bash
# Grant execution permission (first time only)
chmod +x dify-env-sync.sh

# Run the synchronization
./dify-env-sync.sh
```
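If a sync produces an unwanted result, rolling back is a single copy from the backup directory (timestamp illustrative, matching the example above):

```bash
cp env-backup/.env.backup_20231218_143022 .env
```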

### Additional Information

- **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions.

docker/dify-env-sync.sh (new executable file, 465 lines)
@@ -0,0 +1,465 @@
#!/bin/bash

# ================================================================
# Dify Environment Variables Synchronization Script
#
# Features:
# - Synchronize latest settings from .env.example to .env
# - Preserve custom settings in existing .env
# - Add new environment variables
# - Detect removed environment variables
# - Create backup files
# ================================================================

set -eo pipefail # Exit on error and pipe failures (safer for complex variable handling)

# Error handling function
# Arguments:
#   $1 - Line number where error occurred
#   $2 - Error code
handle_error() {
    local line_no=$1
    local error_code=$2
    echo -e "\033[0;31m[ERROR]\033[0m Script error: line $line_no with error code $error_code" >&2
    echo -e "\033[0;31m[ERROR]\033[0m Debug info: current working directory $(pwd)" >&2
    exit $error_code
}

# Set error trap
trap 'handle_error ${LINENO} $?' ERR

# Color settings for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Logging functions
# Print informational message in blue
# Arguments: $1 - Message to print
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

# Print success message in green
# Arguments: $1 - Message to print
log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Print warning message in yellow
# Arguments: $1 - Message to print
log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}

# Print error message in red to stderr
# Arguments: $1 - Message to print
log_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Check for required files and create .env if missing
# Verifies that .env.example exists and creates .env from template if needed
check_files() {
    log_info "Checking required files..."

    if [[ ! -f ".env.example" ]]; then
        log_error ".env.example file not found"
        exit 1
    fi

    if [[ ! -f ".env" ]]; then
        log_warning ".env file does not exist. Creating from .env.example."
        cp ".env.example" ".env"
        log_success ".env file created"
    fi

    log_success "Required files verified"
}

# Create timestamped backup of .env file
# Creates env-backup directory if needed and backs up current .env file
create_backup() {
    local timestamp=$(date +"%Y%m%d_%H%M%S")
    local backup_dir="env-backup"

    # Create backup directory if it doesn't exist
    if [[ ! -d "$backup_dir" ]]; then
        mkdir -p "$backup_dir"
        log_info "Created backup directory: $backup_dir"
    fi

    if [[ -f ".env" ]]; then
        local backup_file="${backup_dir}/.env.backup_${timestamp}"
        cp ".env" "$backup_file"
        log_success "Backed up existing .env to $backup_file"
    fi
}

# Detect differences between .env and .env.example (optimized for large files)
detect_differences() {
    log_info "Detecting differences between .env and .env.example..."

    # Create secure temporary directory
    local temp_dir=$(mktemp -d)
    local temp_diff="$temp_dir/env_diff"

    # Store diff file path as global variables
    declare -g DIFF_FILE="$temp_diff"
    declare -g TEMP_DIR="$temp_dir"

    # Initialize difference file
    > "$temp_diff"

    # Use awk for efficient comparison (much faster for large files)
    local diff_count=$(awk -F= '
        BEGIN { OFS="\x01" }
        FNR==NR {
            if (!/^[[:space:]]*#/ && !/^[[:space:]]*$/ && /=/) {
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1)
                key = $1
                value = substr($0, index($0,"=")+1)
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", value)
                env_values[key] = value
            }
            next
        }
        {
            if (!/^[[:space:]]*#/ && !/^[[:space:]]*$/ && /=/) {
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1)
                key = $1
                example_value = substr($0, index($0,"=")+1)
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", example_value)

                if (key in env_values && env_values[key] != example_value) {
                    print key, env_values[key], example_value > "'$temp_diff'"
                    diff_count++
                }
            }
        }
        # "+ 0" forces a numeric 0 rather than an empty string when nothing differs
        END { print diff_count + 0 }
    ' .env .env.example)

    if [[ $diff_count -gt 0 ]]; then
        log_success "Detected differences in $diff_count environment variables"
        # Show detailed differences
        show_differences_detail
    else
        log_info "No differences detected"
    fi
}

# Parse environment variable line
# Extracts key-value pairs from .env file format lines
# Arguments:
#   $1 - Line to parse
# Returns:
#   0 - Success, outputs "key|value" format
#   1 - Skip (empty line, comment, or invalid format)
parse_env_line() {
    local line="$1"
    local key=""
    local value=""

    # Skip empty lines or comment lines
    [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && return 1

    # Split by =
    if [[ "$line" =~ ^([^=]+)=(.*)$ ]]; then
        key="${BASH_REMATCH[1]}"
        value="${BASH_REMATCH[2]}"

        # Remove leading and trailing whitespace
        key=$(echo "$key" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//')
        value=$(echo "$value" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//')

        if [[ -n "$key" ]]; then
            echo "$key|$value"
            return 0
        fi
    fi

    return 1
}

# Show detailed differences
show_differences_detail() {
    log_info ""
    log_info "=== Environment Variable Differences ==="

    # Read differences from the already created diff file
    if [[ ! -s "$DIFF_FILE" ]]; then
        log_info "No differences to display"
        return
    fi

    # Display differences
    local count=1
    while IFS=$'\x01' read -r key env_value example_value; do
        echo ""
        echo -e "${YELLOW}[$count] $key${NC}"
        echo -e "   ${GREEN}.env (current)${NC} : ${env_value}"
        echo -e "   ${BLUE}.env.example (recommended)${NC}: ${example_value}"

        # Analyze value changes
        analyze_value_change "$env_value" "$example_value"
        ((count++))
    done < "$DIFF_FILE"

    echo ""
    log_info "=== Difference Analysis Complete ==="
    log_info "Note: Consider changing to the recommended values above."
    log_info "Current implementation preserves .env values."
    echo ""
}

# Analyze value changes
analyze_value_change() {
    local current_value="$1"
    local recommended_value="$2"

    # Analyze value characteristics
    local analysis=""

    # Empty value check
    if [[ -z "$current_value" && -n "$recommended_value" ]]; then
        analysis="   ${RED}→ Setting from empty to recommended value${NC}"
    elif [[ -n "$current_value" && -z "$recommended_value" ]]; then
        analysis="   ${RED}→ Recommended value changed to empty${NC}"
    # Numeric check - using arithmetic evaluation for robust comparison
    elif [[ "$current_value" =~ ^[0-9]+$ && "$recommended_value" =~ ^[0-9]+$ ]]; then
        # Use arithmetic evaluation to handle leading zeros correctly
        if (( 10#$current_value < 10#$recommended_value )); then
            analysis="   ${BLUE}→ Numeric increase (${current_value} < ${recommended_value})${NC}"
        elif (( 10#$current_value > 10#$recommended_value )); then
            analysis="   ${YELLOW}→ Numeric decrease (${current_value} > ${recommended_value})${NC}"
        fi
    # Boolean check
    elif [[ "$current_value" =~ ^(true|false)$ && "$recommended_value" =~ ^(true|false)$ ]]; then
        if [[ "$current_value" != "$recommended_value" ]]; then
            analysis="   ${BLUE}→ Boolean value change (${current_value} → ${recommended_value})${NC}"
        fi
    # URL/endpoint check
    elif [[ "$current_value" =~ ^https?:// || "$recommended_value" =~ ^https?:// ]]; then
        analysis="   ${BLUE}→ URL/endpoint change${NC}"
    # File path check
    elif [[ "$current_value" =~ ^/ || "$recommended_value" =~ ^/ ]]; then
        analysis="   ${BLUE}→ File path change${NC}"
    else
        # Length comparison
        local current_len=${#current_value}
        local recommended_len=${#recommended_value}
        if [[ $current_len -ne $recommended_len ]]; then
            analysis="   ${YELLOW}→ String length change (${current_len} → ${recommended_len} characters)${NC}"
        fi
    fi

    if [[ -n "$analysis" ]]; then
        echo -e "$analysis"
    fi
}

# Synchronize .env file with .env.example while preserving custom values
# Creates a new .env file based on .env.example structure, preserving existing custom values
# Global variables used: DIFF_FILE, TEMP_DIR
sync_env_file() {
    log_info "Starting partial synchronization of .env file..."

    local new_env_file=".env.new"
    local preserved_count=0
    local updated_count=0

    # Pre-process diff file for efficient lookup
    local lookup_file=""
    if [[ -f "$DIFF_FILE" && -s "$DIFF_FILE" ]]; then
        lookup_file="${DIFF_FILE}.lookup"
        # Create sorted lookup file for fast search
        sort "$DIFF_FILE" > "$lookup_file"
        log_info "Created lookup file for $(wc -l < "$DIFF_FILE") preserved values"
    fi

    # Use AWK for efficient processing (much faster than a bash loop for large files)
    log_info "Processing $(wc -l < .env.example) lines with AWK..."

    local preserved_keys_file="${TEMP_DIR}/preserved_keys"
    local awk_preserved_count_file="${TEMP_DIR}/awk_preserved_count"
    local awk_updated_count_file="${TEMP_DIR}/awk_updated_count"

    awk -F'=' -v lookup_file="$lookup_file" -v preserved_file="$preserved_keys_file" \
        -v preserved_count_file="$awk_preserved_count_file" -v updated_count_file="$awk_updated_count_file" '
    BEGIN {
        preserved_count = 0
        updated_count = 0

        # Load preserved values if lookup file exists
        if (lookup_file != "") {
            while ((getline line < lookup_file) > 0) {
                split(line, parts, "\x01")
                key = parts[1]
                value = parts[2]
                preserved_values[key] = value
            }
            close(lookup_file)
        }
    }

    # Process each line
    {
        # Check if this is an environment variable line
        if (/^[[:space:]]*[A-Za-z_][A-Za-z0-9_]*[[:space:]]*=/) {
            # Extract key
            key = $1
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)

            # Check if key should be preserved
            if (key in preserved_values) {
                print key "=" preserved_values[key]
                print key > preserved_file
                preserved_count++
            } else {
                print $0
                updated_count++
            }
        } else {
            # Not an env var line, preserve as-is
            print $0
        }
    }

    END {
        print preserved_count > preserved_count_file
        print updated_count > updated_count_file
    }
    ' .env.example > "$new_env_file"

    # Read counters and preserved keys
    if [[ -f "$awk_preserved_count_file" ]]; then
        preserved_count=$(cat "$awk_preserved_count_file")
    fi
    if [[ -f "$awk_updated_count_file" ]]; then
        updated_count=$(cat "$awk_updated_count_file")
    fi

    # Show what was preserved
    if [[ -f "$preserved_keys_file" ]]; then
        while read -r key; do
            [[ -n "$key" ]] && log_info "  Preserved: $key (.env value)"
        done < "$preserved_keys_file"
    fi

    # Clean up lookup file
    [[ -n "$lookup_file" ]] && rm -f "$lookup_file"

    # Replace the original .env file
    if mv "$new_env_file" ".env"; then
        log_success "Successfully created new .env file"
    else
        log_error "Failed to replace .env file"
        rm -f "$new_env_file"
        return 1
    fi

    # Clean up difference file and temporary directory
    if [[ -n "${TEMP_DIR:-}" ]]; then
        rm -rf "${TEMP_DIR}"
        unset TEMP_DIR
    fi
    if [[ -n "${DIFF_FILE:-}" ]]; then
        unset DIFF_FILE
    fi

    log_success "Partial synchronization of .env file completed"
    log_info "  Preserved .env values: $preserved_count"
    log_info "  Updated to .env.example values: $updated_count"
}

# Detect removed environment variables
detect_removed_variables() {
    log_info "Detecting removed environment variables..."

    if [[ ! -f ".env" ]]; then
        return
    fi

    # Use temporary files for efficient lookup
    local temp_dir="${TEMP_DIR:-$(mktemp -d)}"
    local temp_example_keys="$temp_dir/example_keys"
    local temp_current_keys="$temp_dir/current_keys"
    local cleanup_temp_dir=""

    # Set flag if we created a new temp directory
    if [[ -z "${TEMP_DIR:-}" ]]; then
        cleanup_temp_dir="$temp_dir"
    fi

    # Get keys from .env.example and .env, sorted for comm
    awk -F= '!/^[[:space:]]*#/ && /=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1); print $1}' .env.example | sort > "$temp_example_keys"
    awk -F= '!/^[[:space:]]*#/ && /=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1); print $1}' .env | sort > "$temp_current_keys"

    # Get keys from existing .env and check for removals
    local removed_vars=()
    while IFS= read -r var; do
        removed_vars+=("$var")
    done < <(comm -13 "$temp_example_keys" "$temp_current_keys")

    # Clean up temporary files if we created a new temp directory
    if [[ -n "$cleanup_temp_dir" ]]; then
        rm -rf "$cleanup_temp_dir"
    fi

    if [[ ${#removed_vars[@]} -gt 0 ]]; then
        log_warning "The following environment variables have been removed from .env.example:"
        for var in "${removed_vars[@]}"; do
            log_warning "  - $var"
        done
        log_warning "Consider manually removing these variables from .env"
    else
        log_success "No removed environment variables found"
    fi
}

# Show statistics
show_statistics() {
    log_info "Synchronization statistics:"

    # grep -c prints 0 itself when nothing matches but exits non-zero;
    # "|| true" swallows only the exit status without printing a duplicate count.
    local total_example=$(grep -c "^[^#]*=" .env.example 2>/dev/null || true)
    local total_env=$(grep -c "^[^#]*=" .env 2>/dev/null || true)

    log_info "  .env.example environment variables: $total_example"
    log_info "  .env environment variables: $total_env"
}

# Main execution function
# Orchestrates the complete synchronization process in the correct order
main() {
    log_info "=== Dify Environment Variables Synchronization Script ==="
    log_info "Execution started: $(date)"

    # Check prerequisites
    check_files

    # Create backup
    create_backup

    # Detect differences
    detect_differences

    # Detect removed variables (before sync)
    detect_removed_variables

    # Synchronize environment file
    sync_env_file

    # Show statistics
    show_statistics

    log_success "=== Synchronization process completed successfully ==="
    log_info "Execution finished: $(date)"
}

# Execute main function only when script is run directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi
@@ -1,8 +1,27 @@
x-shared-env: &shared-api-worker-env
services:
  # Init container to fix permissions
  init_permissions:
    image: busybox:latest
    command:
      - sh
      - -c
      - |
        FLAG_FILE="/app/api/storage/.init_permissions"
        if [ -f "$${FLAG_FILE}" ]; then
          echo "Permissions already initialized. Exiting."
          exit 0
        fi
        echo "Initializing permissions for /app/api/storage"
        chown -R 1001:1001 /app/api/storage && touch "$${FLAG_FILE}"
        echo "Permissions initialized. Exiting."
    volumes:
      - ./volumes/app/storage:/app/api/storage
    restart: "no"
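(The doubled `$$` in the script above is Docker Compose's escape for a literal `$`, so the container shell receives `${FLAG_FILE}` rather than Compose attempting to interpolate it.)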

  # API service
  api:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@@ -15,10 +34,23 @@ services:
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
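(The `required: false` entries use the Compose spec's optional-dependency flag, so `api` waits only on whichever database service the active `COMPOSE_PROFILES` actually starts.)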
    volumes:
@@ -31,7 +63,7 @@ services:
  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@@ -44,8 +76,20 @@ services:
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
@@ -58,7 +102,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@@ -66,8 +110,20 @@ services:
      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    networks:
@@ -76,11 +132,12 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.10.0-rc1
    image: langgenius/dify-web:1.11.4
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
      NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
@@ -101,16 +158,17 @@ services:
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
      NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}

  # The postgres database.
  db:
  # The PostgreSQL database.
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - postgresql
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      POSTGRES_USER: ${DB_USERNAME:-postgres}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
      POSTGRES_DB: ${DB_DATABASE:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
@@ -128,16 +186,46 @@ services:
          "CMD",
          "pg_isready",
          "-h",
          "db",
          "db_postgres",
          "-U",
          "${PGUSER:-postgres}",
          "${DB_USERNAME:-postgres}",
          "-d",
          "${POSTGRES_DB:-dify}",
          "${DB_DATABASE:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 60

  # The mysql database.
  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${DB_DATABASE:-dify}
    command: >
      --max_connections=1000
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${DB_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
@@ -182,7 +270,7 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.4.0-local
    image: langgenius/dify-plugin-daemon:0.5.2-local
    restart: always
    environment:
      # Use the shared environment variables.
@@ -238,8 +326,18 @@ services:
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false

  # ssrf_proxy server
  # for more information, please refer to
@@ -317,7 +415,7 @@ services:
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
@@ -354,11 +452,73 @@ services:
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
      DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
      ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
      ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
      ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: C.UTF-8
      LC_ALL: C.UTF-8
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s

  # seekdb vector database
  seekdb:
    image: oceanbase/seekdb:latest
    container_name: seekdb
    profiles:
      - seekdb
    restart: always
    volumes:
      - ./volumes/seekdb:/var/lib/oceanbase
    environment:
      ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
      REPORTER: dify-ai-seekdb
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 5s
      retries: 60
      timeout: 5s

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    image: langgenius/qdrant:v1.8.3
    profiles:
      - qdrant
    restart: always
@@ -490,37 +650,25 @@ services:
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
  # InterSystems IRIS vector database
  iris:
    image: containers.intersystems.com/intersystems/iris-community:2025.3
    profiles:
      - oceanbase
      - iris
    container_name: iris
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: en_US.UTF-8
    init: true
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s
      - "${IRIS_SUPER_SERVER_PORT:-1972}:1972"
      - "${IRIS_WEB_SERVER_PORT:-52773}:52773"
    volumes:
      - ./volumes/iris:/opt/iris
      - ./iris/iris-init.script:/iris-init.script
      - ./iris/docker-entrypoint.sh:/custom-entrypoint.sh
    entrypoint: ["/custom-entrypoint.sh"]
    tty: true
    environment:
      TZ: ${IRIS_TIMEZONE:-UTC}

  # Oracle vector database
  oracle:
@@ -580,7 +728,7 @@ services:

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.15
    image: milvusdb/milvus:v2.6.3
    profiles:
      - milvus
    command: ["milvus", "run", "standalone"]

@@ -1,13 +1,16 @@
services:
  # The postgres database.
  db:
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - ""
      - postgresql
    restart: always
    env_file:
      - ./middleware.env
    environment:
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
      POSTGRES_DB: ${DB_DATABASE:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
@@ -27,11 +30,44 @@ services:
          "CMD",
          "pg_isready",
          "-h",
          "db",
          "db_postgres",
          "-U",
          "${PGUSER:-postgres}",
          "${DB_USERNAME:-postgres}",
          "-d",
          "${POSTGRES_DB:-dify}",
          "${DB_DATABASE:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    env_file:
      - ./middleware.env
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${DB_DATABASE:-dify}
    command: >
      --max_connections=1000
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    ports:
      - "${EXPOSE_MYSQL_PORT:-3306}:3306"
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${DB_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
@@ -87,16 +123,13 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.4.0-local
    image: langgenius/dify-plugin-daemon:0.5.2-local
    restart: always
    env_file:
      - ./middleware.env
    environment:
      # Use the shared environment variables.
      DB_HOST: ${DB_HOST:-db}
      DB_PORT: ${DB_PORT:-5432}
      DB_USERNAME: ${DB_USER:-postgres}
      DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
      LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text}
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      REDIS_HOST: ${REDIS_HOST:-redis}
      REDIS_PORT: ${REDIS_PORT:-6379}
@@ -206,6 +239,7 @@ services:
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
      DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
    ports:
      - "${EXPOSE_WEAVIATE_PORT:-8080}:8080"
      - "${EXPOSE_WEAVIATE_GRPC_PORT:-50051}:50051"

@ -13,10 +13,11 @@ x-shared-env: &shared-api-worker-env
  APP_WEB_URL: ${APP_WEB_URL:-}
  FILES_URL: ${FILES_URL:-}
  INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-}
  LANG: ${LANG:-en_US.UTF-8}
  LC_ALL: ${LC_ALL:-en_US.UTF-8}
  LANG: ${LANG:-C.UTF-8}
  LC_ALL: ${LC_ALL:-C.UTF-8}
  PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8}
  LOG_LEVEL: ${LOG_LEVEL:-INFO}
  LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text}
  LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
  LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
  LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
@ -34,6 +35,7 @@ x-shared-env: &shared-api-worker-env
  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
  REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
  APP_DEFAULT_ACTIVE_REQUESTS: ${APP_DEFAULT_ACTIVE_REQUESTS:-0}
  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
  APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
  DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
@ -53,9 +55,10 @@ x-shared-env: &shared-api-worker-env
  ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
  ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
  NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}
  DB_TYPE: ${DB_TYPE:-postgresql}
  DB_USERNAME: ${DB_USERNAME:-postgres}
  DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
  DB_HOST: ${DB_HOST:-db}
  DB_HOST: ${DB_HOST:-db_postgres}
  DB_PORT: ${DB_PORT:-5432}
  DB_DATABASE: ${DB_DATABASE:-dify}
  SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
@ -72,6 +75,10 @@ x-shared-env: &shared-api-worker-env
  POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
  POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0}
  POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}
  MYSQL_MAX_CONNECTIONS: ${MYSQL_MAX_CONNECTIONS:-1000}
  MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
  MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
  MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT: ${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
  REDIS_HOST: ${REDIS_HOST:-redis}
  REDIS_PORT: ${REDIS_PORT:-6379}
  REDIS_USERNAME: ${REDIS_USERNAME:-}
@ -102,6 +109,7 @@ x-shared-env: &shared-api-worker-env
  CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
  COOKIE_DOMAIN: ${COOKIE_DOMAIN:-}
  NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
  NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5}
  STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
  OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
  OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
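Every entry in this shared block uses Compose's ${VAR:-default} interpolation: a value set in .env or the host environment wins, otherwise the default after `:-` applies. To inspect what actually gets passed to the containers, a quick sketch run next to the compose file:

    # Print the fully resolved configuration and check a couple of variables
    docker compose config | grep -E 'DB_HOST|LOG_OUTPUT_FORMAT'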
@ -115,6 +123,13 @@ x-shared-env: &shared-api-worker-env
  S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
  S3_SECRET_KEY: ${S3_SECRET_KEY:-}
  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
  ARCHIVE_STORAGE_ENABLED: ${ARCHIVE_STORAGE_ENABLED:-false}
  ARCHIVE_STORAGE_ENDPOINT: ${ARCHIVE_STORAGE_ENDPOINT:-}
  ARCHIVE_STORAGE_ARCHIVE_BUCKET: ${ARCHIVE_STORAGE_ARCHIVE_BUCKET:-}
  ARCHIVE_STORAGE_EXPORT_BUCKET: ${ARCHIVE_STORAGE_EXPORT_BUCKET:-}
  ARCHIVE_STORAGE_ACCESS_KEY: ${ARCHIVE_STORAGE_ACCESS_KEY:-}
  ARCHIVE_STORAGE_SECRET_KEY: ${ARCHIVE_STORAGE_SECRET_KEY:-}
  ARCHIVE_STORAGE_REGION: ${ARCHIVE_STORAGE_REGION:-auto}
  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
@ -128,11 +143,13 @@ x-shared-env: &shared-api-worker-env
  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
  ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
  ALIYUN_CLOUDBOX_ID: ${ALIYUN_CLOUDBOX_ID:-your-cloudbox-id}
  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
  TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain}
  OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com}
  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
@ -142,6 +159,7 @@ x-shared-env: &shared-api-worker-env
  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
  HUAWEI_OBS_PATH_STYLE: ${HUAWEI_OBS_PATH_STYLE:-false}
  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
@ -159,6 +177,17 @@ x-shared-env: &shared-api-worker-env
  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
  WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
  WEAVIATE_GRPC_ENDPOINT: ${WEAVIATE_GRPC_ENDPOINT:-grpc://weaviate:50051}
  WEAVIATE_TOKENIZATION: ${WEAVIATE_TOKENIZATION:-word}
  OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
  OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
  OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
  OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
  OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
  OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
  OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false}
  OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik}
  SEEKDB_MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
  QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
  QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
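With the defaults above, a running Qdrant can be probed from the host; a sketch assuming the standard port mapping defined elsewhere in this file:

    curl -H "api-key: difyai123456" http://localhost:6333/healthz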
@ -314,15 +343,6 @@ x-shared-env: &shared-api-worker-env
  LINDORM_PASSWORD: ${LINDORM_PASSWORD:-admin}
  LINDORM_USING_UGC: ${LINDORM_USING_UGC:-True}
  LINDORM_QUERY_TIMEOUT: ${LINDORM_QUERY_TIMEOUT:-1}
  OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
  OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
  OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
  OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
  OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
  OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
  OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false}
  OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik}
  OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss}
  OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600}
  OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres}
@ -353,9 +373,26 @@ x-shared-env: &shared-api-worker-env
  CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese}
  CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart}
  CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance}
  IRIS_HOST: ${IRIS_HOST:-iris}
  IRIS_SUPER_SERVER_PORT: ${IRIS_SUPER_SERVER_PORT:-1972}
  IRIS_WEB_SERVER_PORT: ${IRIS_WEB_SERVER_PORT:-52773}
  IRIS_USER: ${IRIS_USER:-_SYSTEM}
  IRIS_PASSWORD: ${IRIS_PASSWORD:-Dify@1234}
  IRIS_DATABASE: ${IRIS_DATABASE:-USER}
  IRIS_SCHEMA: ${IRIS_SCHEMA:-dify}
  IRIS_CONNECTION_URL: ${IRIS_CONNECTION_URL:-}
  IRIS_MIN_CONNECTION: ${IRIS_MIN_CONNECTION:-1}
  IRIS_MAX_CONNECTION: ${IRIS_MAX_CONNECTION:-3}
  IRIS_TEXT_INDEX: ${IRIS_TEXT_INDEX:-true}
  IRIS_TEXT_INDEX_LANGUAGE: ${IRIS_TEXT_INDEX_LANGUAGE:-en}
  IRIS_TIMEZONE: ${IRIS_TIMEZONE:-UTC}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-}
  SINGLE_CHUNK_ATTACHMENT_LIMIT: ${SINGLE_CHUNK_ATTACHMENT_LIMIT:-10}
  IMAGE_FILE_BATCH_LIMIT: ${IMAGE_FILE_BATCH_LIMIT:-10}
  ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: ${ATTACHMENT_IMAGE_FILE_SIZE_LIMIT:-2}
  ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: ${ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT:-60}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
@ -388,6 +425,7 @@ x-shared-env: &shared-api-worker-env
  SMTP_PASSWORD: ${SMTP_PASSWORD:-}
  SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
  SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
  SMTP_LOCAL_HOSTNAME: ${SMTP_LOCAL_HOSTNAME:-}
  SENDGRID_API_KEY: ${SENDGRID_API_KEY:-}
  INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
  INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
@ -430,6 +468,15 @@ x-shared-env: &shared-api-worker-env
  WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false}
  WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30}
  WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100}
  ALIYUN_SLS_ACCESS_KEY_ID: ${ALIYUN_SLS_ACCESS_KEY_ID:-}
  ALIYUN_SLS_ACCESS_KEY_SECRET: ${ALIYUN_SLS_ACCESS_KEY_SECRET:-}
  ALIYUN_SLS_ENDPOINT: ${ALIYUN_SLS_ENDPOINT:-}
  ALIYUN_SLS_REGION: ${ALIYUN_SLS_REGION:-}
  ALIYUN_SLS_PROJECT_NAME: ${ALIYUN_SLS_PROJECT_NAME:-}
  ALIYUN_SLS_LOGSTORE_TTL: ${ALIYUN_SLS_LOGSTORE_TTL:-365}
  LOGSTORE_DUAL_WRITE_ENABLED: ${LOGSTORE_DUAL_WRITE_ENABLED:-false}
  LOGSTORE_DUAL_READ_ENABLED: ${LOGSTORE_DUAL_READ_ENABLED:-true}
  LOGSTORE_ENABLE_PUT_GRAPH_FIELD: ${LOGSTORE_ENABLE_PUT_GRAPH_FIELD:-true}
  HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
  HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
  HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True}
@ -447,10 +494,8 @@ x-shared-env: &shared-api-worker-env
  TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
  ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
  MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
  POSTGRES_USER: ${POSTGRES_USER:-${DB_USERNAME}}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
  POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
  PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
  MYSQL_HOST_VOLUME: ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}
  SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
  SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
  SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
@ -468,6 +513,10 @@ x-shared-env: &shared-api-worker-env
  WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
  WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
  WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
  WEAVIATE_DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
  WEAVIATE_ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
  WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
  WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}
  CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
  CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
  CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
@ -501,7 +550,7 @@ x-shared-env: &shared-api-worker-env
  NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
  NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
  NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
  NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
  NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3}
  NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
  NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
  NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
@ -556,6 +605,7 @@ x-shared-env: &shared-api-worker-env
  PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
  PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
  PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
  PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0}
  PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
  PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
  PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
@ -604,7 +654,7 @@ x-shared-env: &shared-api-worker-env
  QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200}
  QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-}
  QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30}
  SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-true}
  SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-false}
  SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html}
  DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true}
  DATASET_MAX_SEGMENTS_PER_REQUEST: ${DATASET_MAX_SEGMENTS_PER_REQUEST:-0}
@ -613,6 +663,7 @@ x-shared-env: &shared-api-worker-env
  ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false}
  ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false}
  ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false}
  ENABLE_WORKFLOW_RUN_CLEANUP_TASK: ${ENABLE_WORKFLOW_RUN_CLEANUP_TASK:-false}
  ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: ${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false}
  ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false}
  ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true}
@ -621,11 +672,40 @@ x-shared-env: &shared-api-worker-env
  WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: ${WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE:-100}
  WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: ${WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK:-0}
  TENANT_ISOLATED_TASK_CONCURRENCY: ${TENANT_ISOLATED_TASK_CONCURRENCY:-1}
  ANNOTATION_IMPORT_FILE_SIZE_LIMIT: ${ANNOTATION_IMPORT_FILE_SIZE_LIMIT:-2}
  ANNOTATION_IMPORT_MAX_RECORDS: ${ANNOTATION_IMPORT_MAX_RECORDS:-10000}
  ANNOTATION_IMPORT_MIN_RECORDS: ${ANNOTATION_IMPORT_MIN_RECORDS:-1}
  ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE:-5}
  ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR:-20}
  ANNOTATION_IMPORT_MAX_CONCURRENT: ${ANNOTATION_IMPORT_MAX_CONCURRENT:-5}
  AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
  SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21}
  SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000}
  SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30}

services:
  # Init container to fix permissions
  init_permissions:
    image: busybox:latest
    command:
      - sh
      - -c
      - |
        FLAG_FILE="/app/api/storage/.init_permissions"
        if [ -f "$${FLAG_FILE}" ]; then
          echo "Permissions already initialized. Exiting."
          exit 0
        fi
        echo "Initializing permissions for /app/api/storage"
        chown -R 1001:1001 /app/api/storage && touch "$${FLAG_FILE}"
        echo "Permissions initialized. Exiting."
    volumes:
      - ./volumes/app/storage:/app/api/storage
    restart: "no"

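The init_permissions one-shot container simply fixes ownership of the shared storage volume before the API starts; the manual equivalent, as a sketch with the default host path and the API container's uid/gid:

    docker run --rm -v ./volumes/app/storage:/app/api/storage \
      busybox:latest chown -R 1001:1001 /app/api/storage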
  # API service
  api:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@ -638,10 +718,23 @@ services:
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
@ -654,7 +747,7 @@ services:
  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@ -667,8 +760,20 @@ services:
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
@ -681,7 +786,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.10.0-rc1
    image: langgenius/dify-api:1.11.4
    restart: always
    environment:
      # Use the shared environment variables.
@ -689,8 +794,20 @@ services:
      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
      db:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    networks:
@ -699,11 +816,12 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.10.0-rc1
    image: langgenius/dify-web:1.11.4
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
      NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
@ -724,16 +842,17 @@ services:
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
      NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}

  # The postgres database.
  db:
  # The PostgreSQL database.
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - postgresql
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      POSTGRES_USER: ${DB_USERNAME:-postgres}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
      POSTGRES_DB: ${DB_DATABASE:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
@ -751,16 +870,46 @@ services:
          "CMD",
          "pg_isready",
          "-h",
          "db",
          "db_postgres",
          "-U",
          "${PGUSER:-postgres}",
          "${DB_USERNAME:-postgres}",
          "-d",
          "${POSTGRES_DB:-dify}",
          "${DB_DATABASE:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 60

  # The mysql database.
  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${DB_DATABASE:-dify}
    command: >
      --max_connections=${MYSQL_MAX_CONNECTIONS:-1000}
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${DB_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
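If db_mysql never turns healthy, the healthcheck above can be replayed by hand; a sketch with the default root password:

    docker compose exec db_mysql mysqladmin ping -uroot -pdifyai123456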
@ -805,7 +954,7 @@ services:

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.4.0-local
    image: langgenius/dify-plugin-daemon:0.5.2-local
    restart: always
    environment:
      # Use the shared environment variables.
@ -861,8 +1010,18 @@ services:
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false

  # ssrf_proxy server
  # for more information, please refer to
@ -940,7 +1099,7 @@ services:
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
@ -977,11 +1136,73 @@ services:
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
      DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
      ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
      ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
      ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: C.UTF-8
      LC_ALL: C.UTF-8
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s

  # seekdb vector database
  seekdb:
    image: oceanbase/seekdb:latest
    container_name: seekdb
    profiles:
      - seekdb
    restart: always
    volumes:
      - ./volumes/seekdb:/var/lib/oceanbase
    environment:
      ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
      REPORTER: dify-ai-seekdb
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 5s
      retries: 60
      timeout: 5s

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    image: langgenius/qdrant:v1.8.3
    profiles:
      - qdrant
    restart: always
@ -1113,37 +1334,25 @@ services:
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
  # InterSystems IRIS vector database
  iris:
    image: containers.intersystems.com/intersystems/iris-community:2025.3
    profiles:
      - oceanbase
      - iris
    container_name: iris
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: en_US.UTF-8
    init: true
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s
      - "${IRIS_SUPER_SERVER_PORT:-1972}:1972"
      - "${IRIS_WEB_SERVER_PORT:-52773}:52773"
    volumes:
      - ./volumes/iris:/opt/iris
      - ./iris/iris-init.script:/iris-init.script
      - ./iris/docker-entrypoint.sh:/custom-entrypoint.sh
    entrypoint: ["/custom-entrypoint.sh"]
    tty: true
    environment:
      TZ: ${IRIS_TIMEZONE:-UTC}

  # Oracle vector database
  oracle:
@ -1203,7 +1412,7 @@ services:

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.15
    image: milvusdb/milvus:v2.6.3
    profiles:
      - milvus
    command: ["milvus", "run", "standalone"]

38  docker/iris/docker-entrypoint.sh  Executable file
@ -0,0 +1,38 @@
#!/bin/bash
set -e

# IRIS configuration flag file
IRIS_CONFIG_DONE="/opt/iris/.iris-configured"

# Function to configure IRIS
configure_iris() {
    echo "Configuring IRIS for first-time setup..."

    # Wait for IRIS to be fully started
    sleep 5

    # Execute the initialization script
    iris session IRIS < /iris-init.script

    # Mark configuration as done
    touch "$IRIS_CONFIG_DONE"

    echo "IRIS configuration completed."
}

# Start IRIS in background for initial configuration if not already configured
if [ ! -f "$IRIS_CONFIG_DONE" ]; then
    echo "First-time IRIS setup detected. Starting IRIS for configuration..."

    # Start IRIS
    iris start IRIS

    # Configure IRIS
    configure_iris

    # Stop IRIS
    iris stop IRIS quietly
fi

# Run the original IRIS entrypoint
exec /iris-main "$@"
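The flag file makes the init sequence run exactly once per data volume. To force re-configuration (a sketch, assuming the iris service name from the compose file above):

    docker compose exec iris rm -f /opt/iris/.iris-configured
    docker compose restart iris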
11  docker/iris/iris-init.script  Normal file
@ -0,0 +1,11 @@
// Switch to the %SYS namespace to modify system settings
set $namespace="%SYS"

// Set predefined user passwords to never expire (default password: SYS)
Do ##class(Security.Users).UnExpireUserPasswords("*")

// Change the default password
Do $SYSTEM.Security.ChangePassword("_SYSTEM","Dify@1234")

// Install the Japanese locale (default is English since the container is Ubuntu-based)
// Do ##class(Config.NLS.Locales).Install("jpuw")
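Once the init script has run, the instance should be reachable with the changed credentials; a quick smoke test against the management portal (a sketch, using the default web port exposed above):

    curl -I http://localhost:52773/csp/sys/UtilHome.csp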
@ -1,11 +1,17 @@
# ------------------------------
# Environment Variables for db Service
# ------------------------------
POSTGRES_USER=postgres
# The password for the default postgres user.
POSTGRES_PASSWORD=difyai123456
# The name of the default postgres database.
POSTGRES_DB=dify
# Database Configuration
# Database type, supported values are `postgresql` and `mysql`
DB_TYPE=postgresql
# For MySQL, only the `root` user is supported for now
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db_postgres
DB_PORT=5432
DB_DATABASE=dify

# PostgreSQL Configuration
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
PGDATA_HOST_VOLUME=./volumes/db/data
@ -54,6 +60,32 @@ POSTGRES_STATEMENT_TIMEOUT=0
# A value of 0 prevents the server from terminating idle sessions.
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0

# MySQL Configuration
# MySQL data directory host volume
MYSQL_HOST_VOLUME=./volumes/mysql/data

# MySQL Performance Configuration
# Maximum number of connections to MySQL
# Default is 1000
MYSQL_MAX_CONNECTIONS=1000

# InnoDB buffer pool size
# Default is 512M
# Recommended value: 70-80% of available memory for a dedicated MySQL server
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size
MYSQL_INNODB_BUFFER_POOL_SIZE=512M

# InnoDB log file size
# Default is 128M
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size
MYSQL_INNODB_LOG_FILE_SIZE=128M

# InnoDB flush log at transaction commit
# Default is 2 (flush to OS cache, sync every second)
# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache)
# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2

# -----------------------------
# Environment Variables for redis Service
# -----------------------------
@ -91,12 +123,21 @@ WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
WEAVIATE_DISABLE_TELEMETRY=false
WEAVIATE_HOST_VOLUME=./volumes/weaviate

# ------------------------------
# Docker Compose profile configuration
# ------------------------------
# Loaded automatically when running `docker compose --env-file middleware.env ...`.
# Controls which DB/vector services start, so no extra `--profile` flag is needed.
COMPOSE_PROFILES=${DB_TYPE:-postgresql},weaviate

# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_POSTGRES_PORT=5432
EXPOSE_MYSQL_PORT=3306
EXPOSE_REDIS_PORT=6379
EXPOSE_SANDBOX_PORT=8194
EXPOSE_SSRF_PROXY_PORT=3128
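Because COMPOSE_PROFILES is derived from DB_TYPE, switching databases for the middleware stack is a one-line edit; a usage sketch, assuming the usual middleware compose file name:

    # After setting DB_TYPE=mysql in middleware.env:
    docker compose -f docker-compose.middleware.yaml --env-file middleware.env up -d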
@ -172,3 +213,28 @@ PLUGIN_VOLCENGINE_TOS_ENDPOINT=
PLUGIN_VOLCENGINE_TOS_ACCESS_KEY=
PLUGIN_VOLCENGINE_TOS_SECRET_KEY=
PLUGIN_VOLCENGINE_TOS_REGION=

# ------------------------------
# Environment Variables for Aliyun SLS (Simple Log Service)
# ------------------------------
# Aliyun SLS Access Key ID
ALIYUN_SLS_ACCESS_KEY_ID=
# Aliyun SLS Access Key Secret
ALIYUN_SLS_ACCESS_KEY_SECRET=
# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com)
ALIYUN_SLS_ENDPOINT=
# Aliyun SLS Region (e.g., cn-hangzhou)
ALIYUN_SLS_REGION=
# Aliyun SLS Project Name
ALIYUN_SLS_PROJECT_NAME=
# Aliyun SLS Logstore TTL (default: 365 days, 3650 for permanent storage)
ALIYUN_SLS_LOGSTORE_TTL=365
# Enable dual-write to both LogStore and SQL database (default: true)
LOGSTORE_DUAL_WRITE_ENABLED=true
# Enable dual-read fallback to SQL database when LogStore returns no results (default: true)
# Useful for migration scenarios where historical data exists only in SQL database
LOGSTORE_DUAL_READ_ENABLED=true
# Control flag for whether to write the `graph` field to LogStore.
# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", write the full `graph` field;
# otherwise write an empty {} instead. Defaults to writing the `graph` field.
LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true
@ -54,3 +54,52 @@ http_access allow src_all

# Unless the option's size is increased, an error will occur when uploading more than two files.
client_request_buffer_max_size 100 MB

################################## Performance & Concurrency ###############################
# Increase file descriptor limit for high concurrency
max_filedescriptors 65536

# Timeout configurations for image requests
connect_timeout 30 seconds
request_timeout 2 minutes
read_timeout 2 minutes
client_lifetime 5 minutes
shutdown_lifetime 30 seconds

# Persistent connections - improve performance for multiple requests
server_persistent_connections on
client_persistent_connections on
persistent_request_timeout 30 seconds
pconn_timeout 1 minute

# Connection pool and concurrency limits
client_db on
server_idle_pconn_timeout 2 minutes
client_idle_pconn_timeout 2 minutes

# Quick abort settings - don't abort requests that are mostly done
quick_abort_min 16 KB
quick_abort_max 16 MB
quick_abort_pct 95

# Memory and cache optimization
memory_cache_mode disk
cache_mem 256 MB
maximum_object_size_in_memory 512 KB

# DNS resolver settings for better performance
dns_timeout 30 seconds
dns_retransmit_interval 5 seconds
# By default, Squid uses the system's configured DNS resolvers.
# If you need to override them, set dns_nameservers to appropriate servers
# for your environment (for example, internal/corporate DNS). The following
# is an example using public DNS and SHOULD be customized before use:
# dns_nameservers 8.8.8.8 8.8.4.4

# Logging format for better debugging
logformat dify_log %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt
access_log daemon:/var/log/squid/access.log dify_log

# Access log to track concurrent requests and timeouts
logfile_rotate 10

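After reloading Squid, these settings can be sanity-checked by pushing a request through the proxy (a sketch, assuming the default exposed port from middleware.env):

    curl -I -x http://localhost:3128 http://example.com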
@ -55,7 +55,8 @@ services:
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --config=/tiflash.toml
      - server
      - --config-file=/tiflash.toml
    depends_on:
      - "tikv"
      - "tidb"
