mirror of
https://github.com/langgenius/dify.git
synced 2026-05-03 17:08:03 +08:00
Chore/improve deployment flow (#4299)
Co-authored-by: 天魂 <365125264@qq.com>
This commit is contained in:
598
docker/.env.example
Normal file
598
docker/.env.example
Normal file
@ -0,0 +1,598 @@
|
||||
# ------------------------------
|
||||
# Environment Variables for API service & worker
|
||||
# ------------------------------
|
||||
|
||||
# ------------------------------
|
||||
# Common Variables
|
||||
# ------------------------------
|
||||
|
||||
# The backend URL of the console API,
|
||||
# used to concatenate the authorization callback.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.console.dify.ai
|
||||
CONSOLE_API_URL=
|
||||
|
||||
# The front-end URL of the console web,
|
||||
# used to concatenate some front-end addresses and for CORS configuration use.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://console.dify.ai
|
||||
CONSOLE_WEB_URL=
|
||||
|
||||
# Service API Url,
|
||||
# used to display Service API Base Url to the front-end.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.dify.ai
|
||||
SERVICE_API_URL=
|
||||
|
||||
# WebApp API backend Url,
|
||||
# used to declare the back-end URL for the front-end API.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.app.dify.ai
|
||||
APP_API_URL=
|
||||
|
||||
# WebApp Url,
|
||||
# used to display WebAPP API Base Url to the front-end.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://app.dify.ai
|
||||
APP_WEB_URL=
|
||||
|
||||
# File preview or download Url prefix.
|
||||
# used to display File preview or download Url to the front-end or as Multi-model inputs;
|
||||
# Url is signed and has expiration time.
|
||||
FILES_URL=
|
||||
|
||||
# ------------------------------
|
||||
# Server Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The log level for the application.
|
||||
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
|
||||
LOG_LEVEL=INFO
|
||||
|
||||
# Debug mode, default is false.
|
||||
# It is recommended to turn on this configuration for local development
|
||||
# to prevent some problems caused by monkey patch.
|
||||
DEBUG=false
|
||||
|
||||
# Flask debug mode, it can output trace information at the interface when turned on,
|
||||
# which is convenient for debugging.
|
||||
FLASK_DEBUG=false
|
||||
|
||||
# A secretkey that is used for securely signing the session cookie
|
||||
# and encrypting sensitive information on the database.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
|
||||
|
||||
# Password for admin user initialization.
|
||||
# If left unset, admin user will not be prompted for a password
|
||||
# when creating the initial admin account.
|
||||
INIT_PASSWORD=
|
||||
|
||||
# Deployment environment.
|
||||
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
|
||||
# Testing environment. There will be a distinct color label on the front-end page,
|
||||
# indicating that this environment is a testing environment.
|
||||
DEPLOY_ENV=PRODUCTION
|
||||
|
||||
# Whether to enable the version check policy.
|
||||
# If set to false, https://updates.dify.ai will not be called for version check.
|
||||
CHECK_UPDATE_URL=false
|
||||
|
||||
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
|
||||
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
|
||||
# or when a local model provides OpenAI compatible API, it can be replaced.
|
||||
OPENAI_API_BASE=https://api.openai.com/v1
|
||||
|
||||
# When enabled, migrations will be executed prior to application startup
|
||||
# and the application will start after the migrations have completed.
|
||||
MIGRATION_ENABLED=true
|
||||
|
||||
# File Access Time specifies a time interval in seconds for the file to be accessed.
|
||||
# The default value is 300 seconds.
|
||||
FILES_ACCESS_TIMEOUT=300
|
||||
|
||||
# ------------------------------
|
||||
# Container Startup Related Configuration
|
||||
# Only effective when starting with docker image or docker-compose.
|
||||
# ------------------------------
|
||||
|
||||
# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
|
||||
DIFY_BIND_ADDRESS=
|
||||
|
||||
# API service binding port number, default 5001.
|
||||
DIFY_PORT=
|
||||
|
||||
# The number of API server workers, i.e., the number of gevent workers.
|
||||
# Formula: number of cpu cores x 2 + 1
|
||||
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
|
||||
SERVER_WORKER_AMOUNT=
|
||||
|
||||
# Defaults to gevent. If using windows, it can be switched to sync or solo.
|
||||
SERVER_WORKER_CLASS=
|
||||
|
||||
# Similar to SERVER_WORKER_CLASS. Default is gevent.
|
||||
# If using windows, it can be switched to sync or solo.
|
||||
CELERY_WORKER_CLASS=
|
||||
|
||||
# Request handling timeout. The default is 200,
|
||||
# it is recommended to set it to 360 to support a longer sse connection time.
|
||||
GUNICORN_TIMEOUT=360
|
||||
|
||||
# The number of Celery workers. The default is 1, and can be set as needed.
|
||||
CELERY_WORKER_AMOUNT=
|
||||
|
||||
# ------------------------------
|
||||
# Database Configuration
|
||||
# The database uses PostgreSQL. Please use the public schema.
|
||||
# It is consistent with the configuration in the 'db' service below.
|
||||
# ------------------------------
|
||||
|
||||
DB_USERNAME=postgres
|
||||
DB_PASSWORD=difyai123456
|
||||
DB_HOST=db
|
||||
DB_PORT=5432
|
||||
DB_DATABASE=dify
|
||||
# The size of the database connection pool.
|
||||
# The default is 30 connections, which can be appropriately increased.
|
||||
SQLALCHEMY_POOL_SIZE=30
|
||||
# Database connection pool recycling time, the default is 3600 seconds.
|
||||
SQLALCHEMY_POOL_RECYCLE=3600
|
||||
# Whether to print SQL, default is false.
|
||||
SQLALCHEMY_ECHO=false
|
||||
|
||||
# ------------------------------
|
||||
# Redis Configuration
|
||||
# This Redis configuration is used for caching and for pub/sub during conversation.
|
||||
# ------------------------------
|
||||
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
REDIS_USERNAME=
|
||||
REDIS_PASSWORD=difyai123456
|
||||
REDIS_USE_SSL=false
|
||||
|
||||
# ------------------------------
|
||||
# Celery Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Use redis as the broker, and redis db 1 for celery broker.
|
||||
# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
|
||||
# Example: redis://:difyai123456@redis:6379/1
|
||||
CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
|
||||
BROKER_USE_SSL=false
|
||||
|
||||
# ------------------------------
|
||||
# CORS Configuration
|
||||
# Used to set the front-end cross-domain access policy.
|
||||
# ------------------------------
|
||||
|
||||
# Specifies the allowed origins for cross-origin requests to the Web API,
|
||||
# e.g. https://dify.app or * for all origins.
|
||||
WEB_API_CORS_ALLOW_ORIGINS=*
|
||||
|
||||
# Specifies the allowed origins for cross-origin requests to the console API,
|
||||
# e.g. https://cloud.dify.ai or * for all origins.
|
||||
CONSOLE_CORS_ALLOW_ORIGINS=*
|
||||
|
||||
# ------------------------------
|
||||
# File Storage Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The type of storage to use for storing user files.
|
||||
# Supported values are `local` and `s3` and `azure-blob` and `google-storage` and `tencent-cos`,
|
||||
# Default: `local`
|
||||
STORAGE_TYPE=local
|
||||
|
||||
# S3 Configuration
|
||||
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
|
||||
# If set to false, the access key and secret key must be provided.
|
||||
S3_USE_AWS_MANAGED_IAM=false
|
||||
# The endpoint of the S3 service.
|
||||
S3_ENDPOINT=
|
||||
# The region of the S3 service.
|
||||
S3_REGION=us-east-1
|
||||
# The name of the S3 bucket to use for storing files.
|
||||
S3_BUCKET_NAME=difyai
|
||||
# The access key to use for authenticating with the S3 service.
|
||||
S3_ACCESS_KEY=
|
||||
# The secret key to use for authenticating with the S3 service.
|
||||
S3_SECRET_KEY=
|
||||
|
||||
# Azure Blob Configuration
|
||||
# The name of the Azure Blob Storage account to use for storing files.
|
||||
AZURE_BLOB_ACCOUNT_NAME=difyai
|
||||
# The access key to use for authenticating with the Azure Blob Storage account.
|
||||
AZURE_BLOB_ACCOUNT_KEY=difyai
|
||||
# The name of the Azure Blob Storage container to use for storing files.
|
||||
AZURE_BLOB_CONTAINER_NAME=difyai-container
|
||||
# The URL of the Azure Blob Storage account.
|
||||
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
|
||||
|
||||
# Google Storage Configuration
|
||||
# The name of the Google Storage bucket to use for storing files.
|
||||
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
|
||||
# The service account JSON key to use for authenticating with the Google Storage service.
|
||||
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string
|
||||
|
||||
# The Alibaba Cloud OSS configurations,
|
||||
# only available when STORAGE_TYPE is `aliyun-oss`
|
||||
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
|
||||
ALIYUN_OSS_ACCESS_KEY=your-access-key
|
||||
ALIYUN_OSS_SECRET_KEY=your-secret-key
|
||||
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
|
||||
ALIYUN_OSS_REGION=ap-southeast-1
|
||||
ALIYUN_OSS_AUTH_VERSION=v4
|
||||
|
||||
# Tencent COS Configuration
|
||||
# The name of the Tencent COS bucket to use for storing files.
|
||||
TENCENT_COS_BUCKET_NAME=your-bucket-name
|
||||
# The secret key to use for authenticating with the Tencent COS service.
|
||||
TENCENT_COS_SECRET_KEY=your-secret-key
|
||||
# The secret id to use for authenticating with the Tencent COS service.
|
||||
TENCENT_COS_SECRET_ID=your-secret-id
|
||||
# The region of the Tencent COS service.
|
||||
TENCENT_COS_REGION=your-region
|
||||
# The scheme of the Tencent COS service.
|
||||
TENCENT_COS_SCHEME=your-scheme
|
||||
|
||||
# ------------------------------
|
||||
# Vector Database Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The type of vector store to use.
|
||||
# Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`.
|
||||
VECTOR_STORE=weaviate
|
||||
|
||||
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
|
||||
WEAVIATE_ENDPOINT=http://weaviate:8080
|
||||
# The Weaviate API key.
|
||||
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
|
||||
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
|
||||
QDRANT_URL=http://qdrant:6333
|
||||
# The Qdrant API key.
|
||||
QDRANT_API_KEY=difyai123456
|
||||
# The Qdrant client timeout setting.
|
||||
QDRANT_CLIENT_TIMEOUT=20
|
||||
# The Qdrant client enable gRPC mode.
|
||||
QDRANT_GRPC_ENABLED=false
|
||||
# The Qdrant server gRPC mode PORT.
|
||||
QDRANT_GRPC_PORT=6334
|
||||
|
||||
# Milvus configuration. Only available when VECTOR_STORE is `milvus`.
|
||||
# The milvus host.
|
||||
MILVUS_HOST=127.0.0.1
|
||||
# The milvus port.
|
||||
MILVUS_PORT=19530
|
||||
# The milvus username.
|
||||
MILVUS_USER=root
|
||||
# The milvus password.
|
||||
MILVUS_PASSWORD=Milvus
|
||||
# The milvus tls switch.
|
||||
MILVUS_SECURE=false
|
||||
|
||||
# pgvector configurations, only available when VECTOR_STORE is `pgvecto-rs or pgvector`
|
||||
PGVECTOR_HOST=pgvector
|
||||
PGVECTOR_PORT=5432
|
||||
PGVECTOR_USER=postgres
|
||||
PGVECTOR_PASSWORD=difyai123456
|
||||
PGVECTOR_DATABASE=dify
|
||||
|
||||
# TiDB vector configurations, only available when VECTOR_STORE is `tidb`
|
||||
TIDB_VECTOR_HOST=tidb
|
||||
TIDB_VECTOR_PORT=4000
|
||||
TIDB_VECTOR_USER=xxx.root
|
||||
TIDB_VECTOR_PASSWORD=xxxxxx
|
||||
TIDB_VECTOR_DATABASE=dify
|
||||
|
||||
# Chroma configuration, only available when VECTOR_STORE is `chroma`
|
||||
CHROMA_HOST=127.0.0.1
|
||||
CHROMA_PORT=8000
|
||||
CHROMA_TENANT=default_tenant
|
||||
CHROMA_DATABASE=default_database
|
||||
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
|
||||
CHROMA_AUTH_CREDENTIALS=xxxxxx
|
||||
|
||||
# Oracle configuration, only available when VECTOR_STORE is `oracle`
|
||||
ORACLE_HOST=oracle
|
||||
ORACLE_PORT=1521
|
||||
ORACLE_USER=dify
|
||||
ORACLE_PASSWORD=dify
|
||||
ORACLE_DATABASE=FREEPDB1
|
||||
|
||||
# relyt configurations, only available when VECTOR_STORE is `relyt`
|
||||
RELYT_HOST=db
|
||||
RELYT_PORT=5432
|
||||
RELYT_USER=postgres
|
||||
RELYT_PASSWORD=difyai123456
|
||||
RELYT_DATABASE=postgres
|
||||
|
||||
# open search configuration, only available when VECTOR_STORE is `opensearch`
|
||||
OPENSEARCH_HOST=127.0.0.1
|
||||
OPENSEARCH_PORT=9200
|
||||
OPENSEARCH_USER=admin
|
||||
OPENSEARCH_PASSWORD=admin
|
||||
OPENSEARCH_SECURE=true
|
||||
|
||||
# tencent vector configurations, only available when VECTOR_STORE is `tencent`
|
||||
TENCENT_VECTOR_DB_URL=http://127.0.0.1
|
||||
TENCENT_VECTOR_DB_API_KEY=dify
|
||||
TENCENT_VECTOR_DB_TIMEOUT=30
|
||||
TENCENT_VECTOR_DB_USERNAME=dify
|
||||
TENCENT_VECTOR_DB_DATABASE=dify
|
||||
TENCENT_VECTOR_DB_SHARD=1
|
||||
TENCENT_VECTOR_DB_REPLICAS=2
|
||||
|
||||
# ------------------------------
|
||||
# Knowledge Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Upload file size limit, default 15M.
|
||||
UPLOAD_FILE_SIZE_LIMIT=15
|
||||
|
||||
# The maximum number of files that can be uploaded at a time, default 5.
|
||||
UPLOAD_FILE_BATCH_LIMIT=5
|
||||
|
||||
# ETL type, support: `dify`, `Unstructured`
|
||||
# `dify` Dify's proprietary file extraction scheme
|
||||
# `Unstructured` Unstructured.io file extraction scheme
|
||||
ETL_TYPE=dify
|
||||
|
||||
# Unstructured API path, needs to be configured when ETL_TYPE is Unstructured.
|
||||
# For example: http://unstructured:8000/general/v0/general
|
||||
UNSTRUCTURED_API_URL=
|
||||
|
||||
# ------------------------------
|
||||
# Multi-modal Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The format of the image sent when the multi-modal model is input,
|
||||
# the default is base64, optional url.
|
||||
# The delay of the call in url mode will be lower than that in base64 mode.
|
||||
# It is generally recommended to use the more compatible base64 mode.
|
||||
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image.
|
||||
MULTIMODAL_SEND_IMAGE_FORMAT=base64
|
||||
|
||||
# Upload image file size limit, default 10M.
|
||||
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
|
||||
|
||||
# ------------------------------
|
||||
# Sentry Configuration
|
||||
# Used for application monitoring and error log tracking.
|
||||
# ------------------------------
|
||||
|
||||
# Sentry DSN address, default is empty, when empty,
|
||||
# all monitoring information is not reported to Sentry.
|
||||
# If not set, Sentry error reporting will be disabled.
|
||||
SENTRY_DSN=
|
||||
|
||||
# The reporting ratio of Sentry events, if it is 0.01, it is 1%.
|
||||
SENTRY_TRACES_SAMPLE_RATE=1.0
|
||||
|
||||
# The reporting ratio of Sentry profiles, if it is 0.01, it is 1%.
|
||||
SENTRY_PROFILES_SAMPLE_RATE=1.0
|
||||
|
||||
# ------------------------------
|
||||
# Notion Integration Configuration
|
||||
# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
|
||||
# ------------------------------
|
||||
|
||||
# Configure as "public" or "internal".
|
||||
# Since Notion's OAuth redirect URL only supports HTTPS,
|
||||
# if deploying locally, please use Notion's internal integration.
|
||||
NOTION_INTEGRATION_TYPE=public
|
||||
# Notion OAuth client secret (used for public integration type)
|
||||
NOTION_CLIENT_SECRET=
|
||||
# Notion OAuth client id (used for public integration type)
|
||||
NOTION_CLIENT_ID=
|
||||
# Notion internal integration secret.
|
||||
# If the value of NOTION_INTEGRATION_TYPE is "internal",
|
||||
# you need to configure this variable.
|
||||
NOTION_INTERNAL_SECRET=
|
||||
|
||||
# ------------------------------
|
||||
# Mail related configuration
|
||||
# ------------------------------
|
||||
|
||||
# Mail type, support: resend, smtp
|
||||
MAIL_TYPE=resend
|
||||
|
||||
# Default send from email address, if not specified
|
||||
MAIL_DEFAULT_SEND_FROM=
|
||||
|
||||
# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
|
||||
RESEND_API_KEY=your-resend-api-key
|
||||
|
||||
# SMTP server configuration, used when MAIL_TYPE is `smtp`
|
||||
SMTP_SERVER=
|
||||
SMTP_PORT=
|
||||
SMTP_USERNAME=
|
||||
SMTP_PASSWORD=
|
||||
SMTP_USE_TLS=true
|
||||
SMTP_OPPORTUNISTIC_TLS=false
|
||||
|
||||
# ------------------------------
|
||||
# Others Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Maximum length of segmentation tokens for indexing
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
|
||||
|
||||
# Member invitation link valid time (hours),
|
||||
# Default: 72.
|
||||
INVITE_EXPIRY_HOURS=72
|
||||
|
||||
# The sandbox service endpoint.
|
||||
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
|
||||
CODE_EXECUTION_API_KEY=dify-sandbox
|
||||
CODE_MAX_NUMBER=9223372036854775807
|
||||
CODE_MIN_NUMBER=-9223372036854775808
|
||||
CODE_MAX_STRING_LENGTH=80000
|
||||
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
|
||||
CODE_MAX_STRING_ARRAY_LENGTH=30
|
||||
CODE_MAX_OBJECT_ARRAY_LENGTH=30
|
||||
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
|
||||
|
||||
# SSRF Proxy server HTTP URL
|
||||
SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
|
||||
# SSRF Proxy server HTTPS URL
|
||||
SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for db Service
|
||||
# ------------------------------
|
||||
|
||||
PGUSER=${DB_USERNAME}
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD=${DB_PASSWORD}
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB=${DB_DATABASE}
|
||||
# postgres data directory
|
||||
PGDATA=/var/lib/postgresql/data/pgdata
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for sandbox Service
|
||||
# ------------------------------
|
||||
|
||||
# The API key for the sandbox service
|
||||
API_KEY=dify-sandbox
|
||||
# The mode in which the Gin framework runs
|
||||
GIN_MODE=release
|
||||
# The timeout for the worker in seconds
|
||||
WORKER_TIMEOUT=15
|
||||
# Enable network for the sandbox service
|
||||
ENABLE_NETWORK=true
|
||||
# HTTP proxy URL for SSRF protection
|
||||
HTTP_PROXY=http://ssrf_proxy:3128
|
||||
# HTTPS proxy URL for SSRF protection
|
||||
HTTPS_PROXY=http://ssrf_proxy:3128
|
||||
# The port on which the sandbox service runs
|
||||
SANDBOX_PORT=8194
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for qdrant Service
|
||||
# (only used when VECTOR_STORE is qdrant)
|
||||
# ------------------------------
|
||||
QDRANT_API_KEY=difyai123456
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for weaviate Service
|
||||
# (only used when VECTOR_STORE is weaviate)
|
||||
# ------------------------------
|
||||
PERSISTENCE_DATA_PATH='/var/lib/weaviate'
|
||||
QUERY_DEFAULTS_LIMIT=25
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
|
||||
DEFAULT_VECTORIZER_MODULE=none
|
||||
CLUSTER_HOSTNAME=node1
|
||||
AUTHENTICATION_APIKEY_ENABLED=true
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
AUTHENTICATION_APIKEY_USERS=hello@dify.ai
|
||||
AUTHORIZATION_ADMINLIST_ENABLED=true
|
||||
AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Chroma
|
||||
# (only used when VECTOR_STORE is chroma)
|
||||
# ------------------------------
|
||||
|
||||
# Authentication credentials for Chroma server
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456
|
||||
# Authentication provider for Chroma server
|
||||
CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
|
||||
# Persistence setting for Chroma server
|
||||
IS_PERSISTENT=TRUE
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Oracle Service
|
||||
# (only used when VECTOR_STORE is Oracle)
|
||||
# ------------------------------
|
||||
ORACLE_PWD=Dify123456
|
||||
ORACLE_CHARACTERSET=AL32UTF8
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for milvus Service
|
||||
# (only used when VECTOR_STORE is milvus)
|
||||
# ------------------------------
|
||||
# ETCD configuration for auto compaction mode
|
||||
ETCD_AUTO_COMPACTION_MODE=revision
|
||||
# ETCD configuration for auto compaction retention in terms of number of revisions
|
||||
ETCD_AUTO_COMPACTION_RETENTION=1000
|
||||
# ETCD configuration for backend quota in bytes
|
||||
ETCD_QUOTA_BACKEND_BYTES=4294967296
|
||||
# ETCD configuration for the number of changes before triggering a snapshot
|
||||
ETCD_SNAPSHOT_COUNT=50000
|
||||
# MinIO access key for authentication
|
||||
MINIO_ACCESS_KEY=minioadmin
|
||||
# MinIO secret key for authentication
|
||||
MINIO_SECRET_KEY=minioadmin
|
||||
# ETCD service endpoints
|
||||
ETCD_ENDPOINTS=etcd:2379
|
||||
# MinIO service address
|
||||
MINIO_ADDRESS=minio:9000
|
||||
# Enable or disable security authorization
|
||||
MILVUS_AUTHORIZATION_ENABLED=true
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for pgvector / pgvector-rs Service
|
||||
# (only used when VECTOR_STORE is pgvector / pgvector-rs)
|
||||
# ------------------------------
|
||||
PGVECTOR_PGUSER=postgres
|
||||
# The password for the default postgres user.
|
||||
PGVECTOR_POSTGRES_PASSWORD=difyai123456
|
||||
# The name of the default postgres database.
|
||||
PGVECTOR_POSTGRES_DB=dify
|
||||
# postgres data directory
|
||||
PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for opensearch
|
||||
# (only used when VECTOR_STORE is opensearch)
|
||||
# ------------------------------
|
||||
OPENSEARCH_DISCOVERY_TYPE=single-node
|
||||
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true
|
||||
OPENSEARCH_JAVA_OPTS_MIN=512m
|
||||
OPENSEARCH_JAVA_OPTS_MAX=1024m
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123
|
||||
OPENSEARCH_MEMLOCK_SOFT=-1
|
||||
OPENSEARCH_MEMLOCK_HARD=-1
|
||||
OPENSEARCH_NOFILE_SOFT=65536
|
||||
OPENSEARCH_NOFILE_HARD=65536
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Nginx reverse proxy
|
||||
# ------------------------------
|
||||
NGINX_SERVER_NAME=_
|
||||
HTTPS_ENABLED=false
|
||||
# HTTP port
|
||||
NGINX_PORT=80
|
||||
# SSL settings are only applied when HTTPS_ENABLED is true
|
||||
NGINX_SSL_PORT=443
|
||||
# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
|
||||
# and modify the env vars below accordingly.
|
||||
NGINX_SSL_CERT_FILENAME=dify.crt
|
||||
NGINX_SSL_CERT_KEY_FILENAME=dify.key
|
||||
NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
|
||||
|
||||
# Nginx performance tuning
|
||||
NGINX_WORKER_PROCESSES=auto
|
||||
NGINX_CLIENT_MAX_BODY_SIZE=15M
|
||||
NGINX_KEEPALIVE_TIMEOUT=65
|
||||
|
||||
# Proxy settings
|
||||
NGINX_PROXY_READ_TIMEOUT=3600s
|
||||
NGINX_PROXY_SEND_TIMEOUT=3600s
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for SSRF Proxy
|
||||
# ------------------------------
|
||||
HTTP_PORT=3128
|
||||
COREDUMP_DIR=/var/spool/squid
|
||||
REVERSE_PROXY_PORT=8194
|
||||
SANDBOX_HOST=sandbox
|
||||
|
||||
# ------------------------------
|
||||
# docker env var for specifying vector db type at startup
|
||||
# (based on the vector db type, the corresponding docker
|
||||
# compose profile will be used)
|
||||
# ------------------------------
|
||||
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
|
||||
1
docker/.gitignore
vendored
Normal file
1
docker/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
nginx/conf.d/default.conf
|
||||
@ -1,13 +0,0 @@
|
||||
services:
|
||||
# Chroma vector store.
|
||||
chroma:
|
||||
image: ghcr.io/chroma-core/chroma:0.5.1
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/chroma:/chroma/chroma
|
||||
environment:
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS: difyai123456
|
||||
CHROMA_SERVER_AUTHN_PROVIDER: chromadb.auth.token_authn.TokenAuthenticationServerProvider
|
||||
IS_PERSISTENT: TRUE
|
||||
ports:
|
||||
- "8000:8000"
|
||||
@ -3,13 +3,12 @@ services:
|
||||
db:
|
||||
image: postgres:15-alpine
|
||||
restart: always
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD: difyai123456
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB: dify
|
||||
# postgres data directory
|
||||
PGDATA: /var/lib/postgresql/data/pgdata
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${POSTGRES_DB:-dify}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
volumes:
|
||||
- ./volumes/db/data:/var/lib/postgresql/data
|
||||
ports:
|
||||
@ -34,19 +33,21 @@ services:
|
||||
volumes:
|
||||
# Mount the Weaviate data directory to the container.
|
||||
- ./volumes/weaviate:/var/lib/weaviate
|
||||
env_file:
|
||||
- ./middleware.env
|
||||
environment:
|
||||
# The Weaviate configurations
|
||||
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
|
||||
QUERY_DEFAULTS_LIMIT: 25
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'false'
|
||||
PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
|
||||
DEFAULT_VECTORIZER_MODULE: 'none'
|
||||
CLUSTER_HOSTNAME: 'node1'
|
||||
AUTHENTICATION_APIKEY_ENABLED: 'true'
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: 'WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih'
|
||||
AUTHENTICATION_APIKEY_USERS: 'hello@dify.ai'
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: 'true'
|
||||
AUTHORIZATION_ADMINLIST_USERS: 'hello@dify.ai'
|
||||
PERSISTENCE_DATA_PATH: ${PERSISTENCE_DATA_PATH:-'/var/lib/weaviate'}
|
||||
QUERY_DEFAULTS_LIMIT: ${QUERY_DEFAULTS_LIMIT:-25}
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
|
||||
DEFAULT_VECTORIZER_MODULE: ${DEFAULT_VECTORIZER_MODULE:-none}
|
||||
CLUSTER_HOSTNAME: ${CLUSTER_HOSTNAME:-node1}
|
||||
AUTHENTICATION_APIKEY_ENABLED: ${AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
AUTHENTICATION_APIKEY_USERS: ${AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: ${AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
AUTHORIZATION_ADMINLIST_USERS: ${AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
@ -58,13 +59,13 @@ services:
|
||||
# The DifySandbox configurations
|
||||
# Make sure you are changing this key for your deployment with a strong key.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
API_KEY: dify-sandbox
|
||||
GIN_MODE: 'release'
|
||||
WORKER_TIMEOUT: 15
|
||||
ENABLE_NETWORK: 'true'
|
||||
HTTP_PROXY: 'http://ssrf_proxy:3128'
|
||||
HTTPS_PROXY: 'http://ssrf_proxy:3128'
|
||||
SANDBOX_PORT: 8194
|
||||
API_KEY: ${API_KEY:-dify-sandbox}
|
||||
GIN_MODE: ${GIN_MODE:-release}
|
||||
WORKER_TIMEOUT: ${WORKER_TIMEOUT:-15}
|
||||
ENABLE_NETWORK: ${ENABLE_NETWORK:-true}
|
||||
HTTP_PROXY: ${HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
HTTPS_PROXY: ${HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
volumes:
|
||||
- ./volumes/sandbox/dependencies:/dependencies
|
||||
networks:
|
||||
@ -76,30 +77,23 @@ services:
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint.sh
|
||||
entrypoint: /docker-entrypoint.sh
|
||||
ports:
|
||||
- "3128:3128"
|
||||
- "8194:8194"
|
||||
volumes:
|
||||
# pls clearly modify the squid.conf file to fit your network environment.
|
||||
- ./volumes/ssrf_proxy/squid.conf:/etc/squid/squid.conf
|
||||
environment:
|
||||
# pls clearly modify the squid env vars to fit your network environment.
|
||||
HTTP_PORT: ${HTTP_PORT:-3128}
|
||||
COREDUMP_DIR: ${COREDUMP_DIR:-/var/spool/squid}
|
||||
REVERSE_PROXY_PORT: ${REVERSE_PROXY_PORT:-8194}
|
||||
SANDBOX_HOST: ${SANDBOX_HOST:-sandbox}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
# Qdrant vector store.
|
||||
# uncomment to use qdrant as vector store.
|
||||
# (if uncommented, you need to comment out the weaviate service above,
|
||||
# and set VECTOR_STORE to qdrant in the api & worker service.)
|
||||
# qdrant:
|
||||
# image: qdrant/qdrant:1.7.3
|
||||
# restart: always
|
||||
# volumes:
|
||||
# - ./volumes/qdrant:/qdrant/storage
|
||||
# environment:
|
||||
# QDRANT_API_KEY: 'difyai123456'
|
||||
# ports:
|
||||
# - "6333:6333"
|
||||
# - "6334:6334"
|
||||
|
||||
|
||||
networks:
|
||||
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
|
||||
|
||||
@ -1,64 +0,0 @@
|
||||
version: '3.5'
|
||||
|
||||
services:
|
||||
etcd:
|
||||
container_name: milvus-etcd
|
||||
image: quay.io/coreos/etcd:v3.5.5
|
||||
environment:
|
||||
- ETCD_AUTO_COMPACTION_MODE=revision
|
||||
- ETCD_AUTO_COMPACTION_RETENTION=1000
|
||||
- ETCD_QUOTA_BACKEND_BYTES=4294967296
|
||||
- ETCD_SNAPSHOT_COUNT=50000
|
||||
volumes:
|
||||
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
|
||||
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
|
||||
healthcheck:
|
||||
test: ["CMD", "etcdctl", "endpoint", "health"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
|
||||
minio:
|
||||
container_name: milvus-minio
|
||||
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
environment:
|
||||
MINIO_ACCESS_KEY: minioadmin
|
||||
MINIO_SECRET_KEY: minioadmin
|
||||
ports:
|
||||
- "9001:9001"
|
||||
- "9000:9000"
|
||||
volumes:
|
||||
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
|
||||
command: minio server /minio_data --console-address ":9001"
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
|
||||
milvus-standalone:
|
||||
container_name: milvus-standalone
|
||||
image: milvusdb/milvus:v2.3.1
|
||||
command: ["milvus", "run", "standalone"]
|
||||
environment:
|
||||
ETCD_ENDPOINTS: etcd:2379
|
||||
MINIO_ADDRESS: minio:9000
|
||||
common.security.authorizationEnabled: true
|
||||
volumes:
|
||||
- ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
|
||||
interval: 30s
|
||||
start_period: 90s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
ports:
|
||||
- "19530:19530"
|
||||
- "9091:9091"
|
||||
depends_on:
|
||||
- "etcd"
|
||||
- "minio"
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: milvus
|
||||
@ -1,40 +0,0 @@
|
||||
services:
|
||||
opensearch: # This is also the hostname of the container within the Docker network (i.e. https://opensearch/)
|
||||
image: opensearchproject/opensearch:latest # Specifying the latest available image - modify if you want a specific version
|
||||
container_name: opensearch
|
||||
environment:
|
||||
- discovery.type=single-node
|
||||
- bootstrap.memory_lock=true # Disable JVM heap memory swapping
|
||||
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx1024m" # Set min and max JVM heap sizes to at least 50% of system RAM
|
||||
- OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 # Sets the demo admin user password when using demo configuration, required for OpenSearch 2.12 and later
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1 # Set memlock to unlimited (no soft or hard limit)
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
|
||||
hard: 65536
|
||||
volumes:
|
||||
- ./volumes/opensearch/data:/usr/share/opensearch/data # Creates volume called opensearch-data1 and mounts it to the container
|
||||
ports:
|
||||
- 9200:9200 # REST API
|
||||
- 9600:9600 # Performance Analyzer
|
||||
networks:
|
||||
- opensearch-net # All of the containers will join the same Docker bridge network
|
||||
opensearch-dashboards:
|
||||
image: opensearchproject/opensearch-dashboards:latest # Make sure the version of opensearch-dashboards matches the version of opensearch installed on other nodes
|
||||
container_name: opensearch-dashboards
|
||||
ports:
|
||||
- 5601:5601 # Map host port 5601 to container port 5601
|
||||
expose:
|
||||
- "5601" # Expose port 5601 for web access to OpenSearch Dashboards
|
||||
environment:
|
||||
OPENSEARCH_HOSTS: '["https://opensearch:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query
|
||||
volumes:
|
||||
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
|
||||
networks:
|
||||
- opensearch-net
|
||||
|
||||
networks:
|
||||
opensearch-net:
|
||||
driver: bridge
|
||||
@ -1,17 +0,0 @@
|
||||
services:
|
||||
# oracle 23 ai vector store.
|
||||
oracle:
|
||||
image: container-registry.oracle.com/database/free:latest
|
||||
restart: always
|
||||
ports:
|
||||
- 1521:1521
|
||||
volumes:
|
||||
- type: volume
|
||||
source: oradata_vector
|
||||
target: /opt/oracle/oradata
|
||||
- ./startupscripts:/opt/oracle/scripts/startup
|
||||
environment:
|
||||
- ORACLE_PWD=Dify123456
|
||||
- ORACLE_CHARACTERSET=AL32UTF8
|
||||
volumes:
|
||||
oradata_vector:
|
||||
@ -1,23 +0,0 @@
|
||||
services:
|
||||
# The pgvecto—rs database.
|
||||
pgvecto-rs:
|
||||
image: tensorchord/pgvecto-rs:pg16-v0.2.0
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: postgres
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD: difyai123456
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB: dify
|
||||
# postgres data directory
|
||||
PGDATA: /var/lib/postgresql/data/pgdata
|
||||
volumes:
|
||||
- ./volumes/pgvectors/data:/var/lib/postgresql/data
|
||||
# uncomment to expose db(postgresql) port to host
|
||||
ports:
|
||||
- "5431:5432"
|
||||
healthcheck:
|
||||
test: [ "CMD", "pg_isready" ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
@ -1,23 +0,0 @@
|
||||
services:
|
||||
# Qdrant vector store.
|
||||
pgvector:
|
||||
image: pgvector/pgvector:pg16
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: postgres
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD: difyai123456
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB: dify
|
||||
# postgres data directory
|
||||
PGDATA: /var/lib/postgresql/data/pgdata
|
||||
volumes:
|
||||
- ./volumes/pgvector/data:/var/lib/postgresql/data
|
||||
# uncomment to expose db(postgresql) port to host
|
||||
ports:
|
||||
- "5433:5432"
|
||||
healthcheck:
|
||||
test: [ "CMD", "pg_isready" ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
@ -1,12 +0,0 @@
|
||||
services:
|
||||
# Qdrant vector store.
|
||||
qdrant:
|
||||
image: langgenius/qdrant:v1.7.3
|
||||
restart: always
|
||||
volumes:
|
||||
- ./volumes/qdrant:/qdrant/storage
|
||||
environment:
|
||||
QDRANT_API_KEY: 'difyai123456'
|
||||
ports:
|
||||
- "6333:6333"
|
||||
- "6334:6334"
|
||||
File diff suppressed because it is too large
Load Diff
42
docker/middleware.env.example
Normal file
42
docker/middleware.env.example
Normal file
@ -0,0 +1,42 @@
|
||||
# ------------------------------
|
||||
# Environment Variables for db Service
|
||||
# ------------------------------
|
||||
PGUSER=postgres
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD=difyai123456
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB=dify
|
||||
# postgres data directory
|
||||
PGDATA=/var/lib/postgresql/data/pgdata
|
||||
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for qdrant Service
|
||||
# (only used when VECTOR_STORE is qdrant)
|
||||
# ------------------------------
|
||||
QDRANT_API_KEY=difyai123456
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for sandbox Service
|
||||
API_KEY=dify-sandbox
|
||||
GIN_MODE=release
|
||||
WORKER_TIMEOUT=15
|
||||
ENABLE_NETWORK=true
|
||||
HTTP_PROXY=http://ssrf_proxy:3128
|
||||
HTTPS_PROXY=http://ssrf_proxy:3128
|
||||
SANDBOX_PORT=8194
|
||||
# ------------------------------
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for weaviate Service
|
||||
# (only used when VECTOR_STORE is weaviate)
|
||||
# ------------------------------
|
||||
QUERY_DEFAULTS_LIMIT=25
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
|
||||
DEFAULT_VECTORIZER_MODULE=none
|
||||
CLUSTER_HOSTNAME=node1
|
||||
AUTHENTICATION_APIKEY_ENABLED=true
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
AUTHENTICATION_APIKEY_USERS=hello@dify.ai
|
||||
AUTHORIZATION_ADMINLIST_ENABLED=true
|
||||
AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
|
||||
@ -1,6 +1,8 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
server_name ${NGINX_SERVER_NAME};
|
||||
|
||||
location /console/api {
|
||||
proxy_pass http://api:5001;
|
||||
@ -27,12 +29,6 @@ server {
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
# If you want to support HTTPS, please uncomment the code snippet below
|
||||
#listen 443 ssl;
|
||||
#ssl_certificate ./../ssl/your_cert_file.cer;
|
||||
#ssl_certificate_key ./../ssl/your_cert_key.key;
|
||||
#ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
|
||||
#ssl_prefer_server_ciphers on;
|
||||
#ssl_session_cache shared:SSL:10m;
|
||||
#ssl_session_timeout 10m;
|
||||
# placeholder for https config defined in https.conf.template
|
||||
${HTTPS_CONFIG}
|
||||
}
|
||||
19
docker/nginx/docker-entrypoint.sh
Executable file
19
docker/nginx/docker-entrypoint.sh
Executable file
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ "${HTTPS_ENABLED}" = "true" ]; then
|
||||
# set the HTTPS_CONFIG environment variable to the content of the https.conf.template
|
||||
HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
|
||||
export HTTPS_CONFIG
|
||||
# Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
|
||||
envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
fi
|
||||
|
||||
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
|
||||
|
||||
envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
|
||||
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
|
||||
|
||||
envsubst < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
|
||||
# Start Nginx using the default entrypoint
|
||||
exec nginx -g 'daemon off;'
|
||||
9
docker/nginx/https.conf.template
Normal file
9
docker/nginx/https.conf.template
Normal file
@ -0,0 +1,9 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
listen ${NGINX_SSL_PORT} ssl;
|
||||
ssl_certificate ./../ssl/${NGINX_SSL_CERT_FILENAME};
|
||||
ssl_certificate_key ./../ssl/${NGINX_SSL_CERT_KEY_FILENAME};
|
||||
ssl_protocols ${NGINX_SSL_PROTOCOLS};
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
ssl_session_timeout 10m;
|
||||
@ -1,5 +1,7 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
worker_processes ${NGINX_WORKER_PROCESSES};
|
||||
|
||||
error_log /var/log/nginx/error.log notice;
|
||||
pid /var/run/nginx.pid;
|
||||
@ -23,10 +25,10 @@ http {
|
||||
sendfile on;
|
||||
#tcp_nopush on;
|
||||
|
||||
keepalive_timeout 65;
|
||||
keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
|
||||
|
||||
#gzip on;
|
||||
client_max_body_size 15M;
|
||||
client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
}
|
||||
@ -1,8 +1,10 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_buffering off;
|
||||
proxy_read_timeout 3600s;
|
||||
proxy_send_timeout 3600s;
|
||||
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
|
||||
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
|
||||
@ -1 +0,0 @@
|
||||
|
||||
|
||||
42
docker/ssrf_proxy/docker-entrypoint.sh
Executable file
42
docker/ssrf_proxy/docker-entrypoint.sh
Executable file
@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Modified based on Squid OCI image entrypoint
|
||||
|
||||
# This entrypoint aims to forward the squid logs to stdout to assist users of
|
||||
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
|
||||
# access the service logs.
|
||||
|
||||
# Moreover, it invokes the squid binary, leaving all the desired parameters to
|
||||
# be provided by the "command" passed to the spawned container. If no command
|
||||
# is provided by the user, the default behavior (as per the CMD statement in
|
||||
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
|
||||
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
|
||||
# systemd unit.
|
||||
|
||||
# [1] The default configuration is changed in the Dockerfile to allow local
|
||||
# network connections. See the Dockerfile for further information.
|
||||
|
||||
echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
|
||||
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
|
||||
/usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
tail -F /var/log/squid/access.log 2>/dev/null &
|
||||
tail -F /var/log/squid/error.log 2>/dev/null &
|
||||
tail -F /var/log/squid/store.log 2>/dev/null &
|
||||
tail -F /var/log/squid/cache.log 2>/dev/null &
|
||||
|
||||
# Replace environment variables in the template and output to the squid.conf
|
||||
echo "[ENTRYPOINT] replacing environment variables in the template"
|
||||
awk '{
|
||||
while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
|
||||
var = substr($0, RSTART+2, RLENGTH-3)
|
||||
val = ENVIRON[var]
|
||||
$0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
|
||||
}
|
||||
print
|
||||
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
|
||||
|
||||
/usr/sbin/squid -Nz
|
||||
echo "[ENTRYPOINT] starting squid"
|
||||
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
|
||||
@ -27,8 +27,8 @@ include /etc/squid/conf.d/*.conf
|
||||
http_access deny all
|
||||
|
||||
################################## Proxy Server ################################
|
||||
http_port 3128
|
||||
coredump_dir /var/spool/squid
|
||||
http_port ${HTTP_PORT}
|
||||
coredump_dir ${COREDUMP_DIR}
|
||||
refresh_pattern ^ftp: 1440 20% 10080
|
||||
refresh_pattern ^gopher: 1440 0% 1440
|
||||
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
|
||||
@ -38,12 +38,13 @@ refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern . 0 20% 4320
|
||||
|
||||
|
||||
# cache_dir ufs /var/spool/squid 100 16 256
|
||||
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
|
||||
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
|
||||
|
||||
|
||||
################################## Reverse Proxy To Sandbox ################################
|
||||
http_port 8194 accel vhost
|
||||
cache_peer sandbox parent 8194 0 no-query originserver
|
||||
http_port ${REVERSE_PROXY_PORT} accel vhost
|
||||
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
|
||||
acl src_all src all
|
||||
http_access allow src_all
|
||||
Reference in New Issue
Block a user