Mirror of https://github.com/langgenius/dify.git, synced 2026-05-11 04:37:17 +08:00.

Compare commits: chore/simp...build/samp (2 commits: 1ec4f688ff, d70acb217a)

README.md (10 lines changed)
@@ -74,11 +74,13 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
 
 The easiest way to start the Dify server is through [Docker Compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
 
 ```bash
-cd dify/docker
-cp .env.example .env
-docker compose up -d
+cd dify
+cd docker
+./dify-compose up -d
 ```
+
+On Windows PowerShell, run `.\dify-compose.ps1 up -d` from the `docker` directory.
 
 After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.
 
 #### Seeking help
@@ -136,7 +138,7 @@ Star Dify on GitHub and be instantly notified of new releases.
 
 ### Custom configurations
 
-If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+If you need to customize the configuration, add only the values you want to override to `docker/.env`. The default values live in [`docker/.env.default`](docker/.env.default), and the full reference remains in [`docker/.env.example`](docker/.env.example). After making any changes, re-run `./dify-compose up -d` or `.\dify-compose.ps1 up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
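As a concrete illustration of the override workflow introduced above — the port value below is hypothetical, not part of this change:

```bash
cd docker
# Override only what differs from .env.default; e.g. expose Dify on host port 8080.
echo "EXPOSE_NGINX_PORT=8080" >> .env
./dify-compose up -d
```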
 
 ### Metrics Monitoring with Grafana
@@ -33,6 +33,7 @@ from .vector import (
     old_metadata_migration,
     vdb_migrate,
 )
+from .vector_space import sample_vector_space_usage
 
 __all__ = [
     "add_qdrant_index",
@@ -62,6 +63,7 @@ __all__ = [
     "reset_encrypt_key_pair",
     "reset_password",
     "restore_workflow_runs",
+    "sample_vector_space_usage",
     "setup_datasource_oauth_client",
     "setup_system_tool_oauth_client",
     "setup_system_trigger_oauth_client",
api/commands/vector_space.py (new file, 698 lines)
@@ -0,0 +1,698 @@
import csv
import json
import secrets
from dataclasses import dataclass
from decimal import Decimal
from pathlib import Path
from typing import Any

import click
import httpx
import sqlalchemy as sa
from sqlalchemy import func, select

from configs import dify_config
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType
from extensions.ext_database import db
from models.dataset import (
    ChildChunk,
    Dataset,
    DatasetCollectionBinding,
    DocumentSegment,
    DocumentSegmentSummary,
    SegmentAttachmentBinding,
    TidbAuthBinding,
)
from models.dataset import Document as DatasetDocument
from models.enums import IndexingStatus, SegmentStatus, SummaryStatus, TidbAuthBindingStatus
from models.model import App, AppAnnotationSetting, MessageAnnotation

COMMON_EMBEDDING_MODEL_DIMS = {
    # OpenAI
    "text-embedding-ada-002": 1536,
    "text-embedding-3-small": 1536,
    "text-embedding-3-large": 3072,
    # Cohere
    "embed-english-v3.0": 1024,
    "embed-multilingual-v3.0": 1024,
    "embed-english-light-v3.0": 384,
    "embed-multilingual-light-v3.0": 384,
    # Google
    "embedding-001": 768,
    "text-embedding-004": 768,
    # Voyage
    "voyage-2": 1024,
    "voyage-3": 1024,
    "voyage-3-lite": 512,
    "voyage-large-2": 1536,
    "voyage-code-2": 1536,
    # BAAI BGE
    "bge-small-en": 384,
    "bge-small-en-v1.5": 384,
    "bge-small-zh": 512,
    "bge-small-zh-v1.5": 512,
    "bge-base-en": 768,
    "bge-base-en-v1.5": 768,
    "bge-base-zh": 768,
    "bge-base-zh-v1.5": 768,
    "bge-large-en": 1024,
    "bge-large-en-v1.5": 1024,
    "bge-large-zh": 1024,
    "bge-large-zh-v1.5": 1024,
    "bge-m3": 1024,
    # E5
    "multilingual-e5-small": 384,
    "multilingual-e5-base": 768,
    "multilingual-e5-large": 1024,
    "e5-small-v2": 384,
    "e5-base-v2": 768,
    "e5-large-v2": 1024,
    # M3E
    "m3e-small": 512,
    "m3e-base": 768,
    "m3e-large": 1024,
    # Jina
    "jina-embeddings-v2-small-en": 512,
    "jina-embeddings-v2-base-en": 768,
    "jina-embeddings-v2-base-zh": 768,
    "jina-embeddings-v3": 1024,
}


@dataclass(frozen=True)
class CollectionPointStats:
    collection_name: str
    source_type: str
    source_id: str
    model_provider: str | None
    model_name: str | None
    segment_points: int = 0
    child_chunk_points: int = 0
    summary_points: int = 0
    attachment_points: int = 0
    annotation_points: int = 0

    @property
    def total_points(self) -> int:
        return (
            self.segment_points
            + self.child_chunk_points
            + self.summary_points
            + self.attachment_points
            + self.annotation_points
        )


def _parse_overheads(value: str) -> list[int]:
    overheads = []
    for item in value.split(","):
        item = item.strip()
        if not item:
            continue
        overheads.append(int(item))
    if not overheads:
        raise click.BadParameter("At least one overhead is required.")
    return overheads


def _normalize_model_name(model_name: str) -> str:
    return model_name.strip().split("/")[-1]


def _tidb_storage_usage_bytes(binding: TidbAuthBinding, timeout: float) -> int:
    endpoint = _binding_qdrant_endpoint(binding, timeout)
    if not endpoint:
        raise ValueError("qdrant_endpoint is empty")

    endpoint = endpoint.rstrip("/")
    with httpx.Client(timeout=timeout, verify=False) as client:
        response = client.get(f"{endpoint}/cluster", headers={"api-key": f"{binding.account}:{binding.password}"})
        response.raise_for_status()
        data = response.json()

    storage = data.get("usage", {}).get("storage", {})
    row_based = int(storage.get("row_based") or 0)
    columnar = int(storage.get("columnar") or 0)
    return row_based + columnar


def _extract_qdrant_endpoint(cluster_response: dict[str, Any]) -> str | None:
    endpoints = cluster_response.get("endpoints") or {}
    public = endpoints.get("public") or {}
    host = public.get("host")
    if host:
        return f"https://qdrant-{host}"
    return None


def _fetch_qdrant_endpoint(binding: TidbAuthBinding, timeout: float) -> str | None:
    if not (dify_config.TIDB_API_URL and dify_config.TIDB_PUBLIC_KEY and dify_config.TIDB_PRIVATE_KEY):
        return None

    try:
        with httpx.Client(timeout=timeout) as client:
            response = client.get(
                f"{dify_config.TIDB_API_URL.rstrip('/')}/clusters/{binding.cluster_id}",
                auth=httpx.DigestAuth(dify_config.TIDB_PUBLIC_KEY, dify_config.TIDB_PRIVATE_KEY),
            )
            response.raise_for_status()
            return _extract_qdrant_endpoint(response.json())
    except Exception:
        return None


def _binding_qdrant_endpoint(binding: TidbAuthBinding, timeout: float) -> str | None:
    return binding.qdrant_endpoint or dify_config.TIDB_ON_QDRANT_URL or _fetch_qdrant_endpoint(binding, timeout)


def _extract_vector_size(collection_payload: dict[str, Any]) -> int | None:
    vectors = (
        collection_payload.get("result", {})
        .get("config", {})
        .get("params", {})
        .get("vectors")
    )
    if isinstance(vectors, dict):
        size = vectors.get("size")
        if isinstance(size, int):
            return size
        for vector_config in vectors.values():
            if isinstance(vector_config, dict) and isinstance(vector_config.get("size"), int):
                return vector_config["size"]
    return None


def _qdrant_collection_dim(
    binding: TidbAuthBinding,
    collection_name: str,
    timeout: float,
    dim_cache: dict[str, int | None],
) -> int | None:
    if collection_name in dim_cache:
        return dim_cache[collection_name]
    endpoint = _binding_qdrant_endpoint(binding, timeout)
    if not endpoint:
        dim_cache[collection_name] = None
        return None

    endpoint = endpoint.rstrip("/")
    try:
        with httpx.Client(timeout=timeout, verify=False) as client:
            response = client.get(
                f"{endpoint}/collections/{collection_name}",
                headers={"api-key": f"{binding.account}:{binding.password}"},
            )
            if response.status_code == 404:
                dim_cache[collection_name] = None
                return None
            response.raise_for_status()
            dim = _extract_vector_size(response.json())
            dim_cache[collection_name] = dim
            return dim
    except Exception:
        dim_cache[collection_name] = None
        return None


def _dataset_vector_type(dataset: Dataset) -> str | None:
    if dataset.index_struct_dict:
        return dataset.index_struct_dict.get("type")
    return dify_config.VECTOR_STORE


def _dataset_collection_name(dataset: Dataset) -> str:
    if dataset.index_struct_dict:
        vector_store = dataset.index_struct_dict.get("vector_store") or {}
        collection_name = vector_store.get("class_prefix")
        if collection_name:
            return collection_name
    if dataset.collection_binding_id:
        binding = db.session.get(DatasetCollectionBinding, dataset.collection_binding_id)
        if binding:
            return binding.collection_name
    return Dataset.gen_collection_name_by_id(dataset.id)


def _completed_document_filter() -> tuple[Any, ...]:
    return (
        DatasetDocument.indexing_status == IndexingStatus.COMPLETED,
        DatasetDocument.enabled == True,
        DatasetDocument.archived == False,
    )


def _completed_segment_filter() -> tuple[Any, ...]:
    return (
        DocumentSegment.status == SegmentStatus.COMPLETED,
        DocumentSegment.enabled == True,
        DocumentSegment.index_node_id.is_not(None),
    )


def _tenant_has_local_points(tenant_id: str) -> bool:
    return bool(
        db.session.scalar(
            select(DocumentSegment.id)
            .join(DatasetDocument, DatasetDocument.id == DocumentSegment.document_id)
            .where(
                DocumentSegment.tenant_id == tenant_id,
                DatasetDocument.doc_form != IndexStructureType.PARENT_CHILD_INDEX,
                *_completed_document_filter(),
                *_completed_segment_filter(),
            )
            .limit(1)
        )
    )


def _active_tidb_bindings(
    tenant_ids: tuple[str, ...],
    limit: int,
    offset: int,
    candidate_page_size: int,
    max_candidates: int,
    random_offset: bool,
    quiet: bool,
) -> list[TidbAuthBinding]:
    active_binding_filters = (
        TidbAuthBinding.tenant_id.is_not(None),
        TidbAuthBinding.active == True,
        TidbAuthBinding.status == TidbAuthBindingStatus.ACTIVE,
    )
    base_stmt = select(TidbAuthBinding).where(*active_binding_filters)
    if tenant_ids:
        stmt = base_stmt.where(TidbAuthBinding.tenant_id.in_(tenant_ids)).order_by(TidbAuthBinding.created_at.desc())
        return list(db.session.scalars(stmt).all())

    selected = []
    scanned = 0
    skipped_used = 0
    active_binding_count = db.session.scalar(select(func.count(TidbAuthBinding.id)).where(*active_binding_filters)) or 0
    if active_binding_count <= 0:
        return []

    scan_start_offset = offset
    if random_offset:
        max_start_offset = max(int(active_binding_count) - 1, 0)
        scan_start_offset = secrets.randbelow(max_start_offset + 1)
        _log(
            f"Random active binding scan start: offset={scan_start_offset}, active_bindings={active_binding_count}.",
            quiet,
        )

    page_offset = scan_start_offset
    wrapped = False
    while len(selected) < limit and scanned < max_candidates:
        page_limit = min(candidate_page_size, max_candidates - scanned)
        stmt = base_stmt.order_by(TidbAuthBinding.created_at.desc()).limit(page_limit).offset(page_offset)
        candidates = list(db.session.scalars(stmt).all())
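        # If a random start offset exhausted the tail of the list, wrap around once
        # and continue scanning from the beginning.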
        if not candidates and random_offset and not wrapped and scan_start_offset > 0:
            page_offset = 0
            wrapped = True
            continue
        if not candidates:
            break

        _log(
            f"Scanning {len(candidates)} active TiDB binding candidate(s) "
            f"from offset={page_offset}; selected={len(selected)}/{limit}.",
            quiet,
        )
        for binding in candidates:
            scanned += 1
            if binding.tenant_id and _tenant_has_local_points(binding.tenant_id):
                selected.append(binding)
                if len(selected) >= limit:
                    break
            else:
                skipped_used += 1

        page_offset += len(candidates)

    _log(
        f"Candidate scan finished: scanned={scanned}, selected={len(selected)}, skipped_empty={skipped_used}.",
        quiet,
    )
    return selected


def _count_dataset_points(dataset: Dataset) -> CollectionPointStats:
    segment_points = (
        db.session.scalar(
            select(func.count(DocumentSegment.id))
            .join(DatasetDocument, DatasetDocument.id == DocumentSegment.document_id)
            .where(
                DocumentSegment.tenant_id == dataset.tenant_id,
                DocumentSegment.dataset_id == dataset.id,
                DatasetDocument.doc_form != IndexStructureType.PARENT_CHILD_INDEX,
                *_completed_document_filter(),
                *_completed_segment_filter(),
            )
        )
        or 0
    )

    child_chunk_points = (
        db.session.scalar(
            select(func.count(ChildChunk.id))
            .join(DatasetDocument, DatasetDocument.id == ChildChunk.document_id)
            .where(
                ChildChunk.tenant_id == dataset.tenant_id,
                ChildChunk.dataset_id == dataset.id,
                ChildChunk.index_node_id.is_not(None),
                *_completed_document_filter(),
            )
        )
        or 0
    )

    summary_points = (
        db.session.scalar(
            select(func.count(DocumentSegmentSummary.id))
            .join(DatasetDocument, DatasetDocument.id == DocumentSegmentSummary.document_id)
            .where(
                DocumentSegmentSummary.dataset_id == dataset.id,
                DocumentSegmentSummary.enabled == True,
                DocumentSegmentSummary.status == SummaryStatus.COMPLETED,
                DocumentSegmentSummary.summary_index_node_id.is_not(None),
                *_completed_document_filter(),
            )
        )
        or 0
    )

    attachment_points = 0
    if dataset.is_multimodal:
        attachment_points = (
            db.session.scalar(
                select(func.count(sa.distinct(SegmentAttachmentBinding.attachment_id)))
                .join(DocumentSegment, DocumentSegment.id == SegmentAttachmentBinding.segment_id)
                .join(DatasetDocument, DatasetDocument.id == SegmentAttachmentBinding.document_id)
                .where(
                    SegmentAttachmentBinding.tenant_id == dataset.tenant_id,
                    SegmentAttachmentBinding.dataset_id == dataset.id,
                    *_completed_document_filter(),
                    *_completed_segment_filter(),
                )
            )
            or 0
        )

    return CollectionPointStats(
        collection_name=_dataset_collection_name(dataset),
        source_type="dataset",
        source_id=dataset.id,
        model_provider=dataset.embedding_model_provider,
        model_name=dataset.embedding_model,
        segment_points=int(segment_points),
        child_chunk_points=int(child_chunk_points),
        summary_points=int(summary_points),
        attachment_points=int(attachment_points),
    )


def _dataset_stats_for_tenant(tenant_id: str) -> list[CollectionPointStats]:
    datasets = db.session.scalars(
        select(Dataset).where(
            Dataset.tenant_id == tenant_id,
            Dataset.indexing_technique == IndexTechniqueType.HIGH_QUALITY,
        )
    ).all()

    stats = []
    for dataset in datasets:
        if _dataset_vector_type(dataset) != VectorType.TIDB_ON_QDRANT:
            continue
        dataset_stats = _count_dataset_points(dataset)
        if dataset_stats.total_points > 0:
            stats.append(dataset_stats)
    return stats


def _annotation_stats_for_tenant(tenant_id: str) -> list[CollectionPointStats]:
    rows = db.session.execute(
        select(
            App.id,
            DatasetCollectionBinding.provider_name,
            DatasetCollectionBinding.model_name,
            DatasetCollectionBinding.collection_name,
            func.count(MessageAnnotation.id),
        )
        .join(AppAnnotationSetting, AppAnnotationSetting.app_id == App.id)
        .join(DatasetCollectionBinding, DatasetCollectionBinding.id == AppAnnotationSetting.collection_binding_id)
        .join(MessageAnnotation, MessageAnnotation.app_id == App.id)
        .where(App.tenant_id == tenant_id)
        .group_by(
            App.id,
            DatasetCollectionBinding.provider_name,
            DatasetCollectionBinding.model_name,
            DatasetCollectionBinding.collection_name,
        )
    ).all()

    return [
        CollectionPointStats(
            collection_name=row[3],
            source_type="annotation",
            source_id=row[0],
            model_provider=row[1],
            model_name=row[2],
            annotation_points=int(row[4] or 0),
        )
        for row in rows
        if int(row[4] or 0) > 0
    ]


def _resolve_dim(
    stat: CollectionPointStats,
    binding: TidbAuthBinding,
    default_dim: int,
    fetch_qdrant_dim: bool,
    timeout: float,
    dim_cache: dict[str, int | None],
) -> tuple[int, str]:
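    # Resolution order: built-in model dimension map first, then the live Qdrant
    # collection config, then the --default-dim fallback.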
    if stat.model_provider and stat.model_name:
        builtin_dim = COMMON_EMBEDDING_MODEL_DIMS.get(_normalize_model_name(stat.model_name))
        if builtin_dim:
            return builtin_dim, "builtin_model_map"

    if fetch_qdrant_dim:
        qdrant_dim = _qdrant_collection_dim(binding, stat.collection_name, timeout, dim_cache)
        if qdrant_dim:
            return qdrant_dim, "qdrant"

    return default_dim, "default"


def _mb(value: int | float | Decimal) -> float:
    return round(float(value) / 1024 / 1024, 4)


def _log(message: str, quiet: bool) -> None:
    if not quiet:
        click.echo(message, err=True)


@click.command(
    "sample-vector-space-usage",
    help="Sample TiDB vector storage usage and compare it with local formula estimates.",
)
@click.option("--tenant-id", multiple=True, help="Tenant ID to sample. Can be repeated.")
@click.option(
    "--limit",
    default=20,
    show_default=True,
    help="Number of active TiDB tenants with local vector points to sample.",
)
@click.option("--offset", default=0, show_default=True, help="Offset when sampling active TiDB tenants.")
@click.option("--default-dim", default=3072, show_default=True, help="Fallback embedding dimension.")
@click.option(
    "--overheads",
    default="3584,5120,8192",
    show_default=True,
    help="Comma-separated per-point overhead bytes to compare.",
)
@click.option("--fetch-qdrant-dim/--no-fetch-qdrant-dim", default=True, show_default=True)
@click.option("--include-annotations/--exclude-annotations", default=True, show_default=True)
@click.option(
    "--candidate-page-size",
    default=200,
    show_default=True,
    help="Number of active TiDB bindings to inspect per candidate scan page.",
)
@click.option(
    "--max-candidates",
    default=2000,
    show_default=True,
    help="Maximum active TiDB bindings to inspect when tenant IDs are not specified.",
)
@click.option(
    "--random-offset/--no-random-offset",
    default=True,
    show_default=True,
    help="Start candidate scan from a random active TiDB binding offset.",
)
@click.option("--timeout", default=10.0, show_default=True, help="HTTP timeout for TiDB/Qdrant calls.")
@click.option("--output", type=click.Path(dir_okay=False, path_type=Path), help="CSV output path. Defaults to stdout.")
@click.option("--quiet", is_flag=True, help="Suppress progress logs. CSV output is unaffected.")
def sample_vector_space_usage(
    tenant_id: tuple[str, ...],
    limit: int,
    offset: int,
    default_dim: int,
    overheads: str,
    fetch_qdrant_dim: bool,
    include_annotations: bool,
    candidate_page_size: int,
    max_candidates: int,
    random_offset: bool,
    timeout: float,
    output: Path | None,
    quiet: bool,
):
    overhead_values = _parse_overheads(overheads)
    bindings = _active_tidb_bindings(
        tenant_id,
        limit,
        offset,
        candidate_page_size,
        max_candidates,
        random_offset,
        quiet,
    )
    sample_scope = (
        f" for tenant_id={','.join(tenant_id)}"
        if tenant_id
        else f" with local vector points, limit={limit}, offset={offset}, max_candidates={max_candidates}"
    )
    _log(
        f"Sampling {len(bindings)} active TiDB binding(s){sample_scope}.",
        quiet,
    )
    if not bindings:
        _log("No active TiDB bindings with local vector points found. Nothing to sample.", quiet)

    fieldnames = [
        "tenant_id",
        "cluster_id",
        "tidb_actual_mb",
        "total_points",
        "segment_points",
        "child_chunk_points",
        "summary_points",
        "attachment_points",
        "annotation_points",
        "collection_count",
        "dim_sources",
        "dims",
        "errors",
    ]
    for overhead in overhead_values:
        fieldnames.extend(
            [
                f"estimated_mb_o{overhead}",
                f"diff_mb_o{overhead}",
                f"ratio_o{overhead}",
            ]
        )

    output_file = output.open("w", newline="") if output else None
    try:
        writer = csv.DictWriter(output_file or click.get_text_stream("stdout"), fieldnames=fieldnames)
        writer.writeheader()

        for index, binding in enumerate(bindings, start=1):
            assert binding.tenant_id is not None
            tenant = binding.tenant_id
            errors = []
            dim_cache: dict[str, int | None] = {}
            _log(f"[{index}/{len(bindings)}] tenant={tenant} cluster={binding.cluster_id}: fetching TiDB usage", quiet)

            try:
                actual_bytes = _tidb_storage_usage_bytes(binding, timeout)
                _log(
                    f"[{index}/{len(bindings)}] tenant={tenant}: TiDB actual={_mb(actual_bytes)} MB",
                    quiet,
                )
            except Exception as exc:
                actual_bytes = 0
                errors.append(f"tidb_usage:{exc.__class__.__name__}:{exc}")
                _log(
                    f"[{index}/{len(bindings)}] tenant={tenant}: failed to fetch TiDB usage: "
                    f"{exc.__class__.__name__}: {exc}",
                    quiet,
                )

            _log(f"[{index}/{len(bindings)}] tenant={tenant}: counting local vector points", quiet)
            collection_stats = _dataset_stats_for_tenant(tenant)
            if include_annotations:
                collection_stats.extend(_annotation_stats_for_tenant(tenant))

            total_points = 0
            segment_points = 0
            child_chunk_points = 0
            summary_points = 0
            attachment_points = 0
            annotation_points = 0
            dim_sources: dict[str, int] = {}
            dims: dict[str, int] = {}
            estimated_by_overhead = dict.fromkeys(overhead_values, 0)

            for stat in collection_stats:
                dim, dim_source = _resolve_dim(
                    stat,
                    binding,
                    default_dim,
                    fetch_qdrant_dim,
                    timeout,
                    dim_cache,
                )
                dim_sources[dim_source] = dim_sources.get(dim_source, 0) + 1
                dims[str(dim)] = dims.get(str(dim), 0) + stat.total_points

                total_points += stat.total_points
                segment_points += stat.segment_points
                child_chunk_points += stat.child_chunk_points
                summary_points += stat.summary_points
                attachment_points += stat.attachment_points
                annotation_points += stat.annotation_points

                for overhead in overhead_values:
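                    # float32 vectors: 4 bytes per dimension plus a fixed per-point overhead.
                    # e.g. 1,000 points at dim 1536 with 3584 B overhead:
                    # 1000 * (1536 * 4 + 3584) = 9,728,000 B ≈ 9.28 MB.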
                    estimated_by_overhead[overhead] += stat.total_points * (dim * 4 + overhead)

            _log(
                f"[{index}/{len(bindings)}] tenant={tenant}: points={total_points}, "
                f"collections={len(collection_stats)}, dim_sources={json.dumps(dim_sources, sort_keys=True)}",
                quiet,
            )

            row: dict[str, Any] = {
                "tenant_id": tenant,
                "cluster_id": binding.cluster_id,
                "tidb_actual_mb": _mb(actual_bytes),
                "total_points": total_points,
                "segment_points": segment_points,
                "child_chunk_points": child_chunk_points,
                "summary_points": summary_points,
                "attachment_points": attachment_points,
                "annotation_points": annotation_points,
                "collection_count": len(collection_stats),
                "dim_sources": json.dumps(dim_sources, sort_keys=True),
                "dims": json.dumps(dims, sort_keys=True),
                "errors": ";".join(errors),
            }

            for overhead, estimated_bytes in estimated_by_overhead.items():
                diff_bytes = estimated_bytes - actual_bytes
                ratio = round(estimated_bytes / actual_bytes, 6) if actual_bytes > 0 else ""
                row[f"estimated_mb_o{overhead}"] = _mb(estimated_bytes)
                row[f"diff_mb_o{overhead}"] = _mb(diff_bytes)
                row[f"ratio_o{overhead}"] = ratio

            writer.writerow(row)
            _log(f"[{index}/{len(bindings)}] tenant={tenant}: row written", quiet)
    finally:
        if output_file:
            output_file.close()
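The command above is registered with the application's CLI in the hunks that follow. A sketch of a possible invocation — the `flask` entry point and flag values are illustrative assumptions, not taken from this diff:

```bash
cd api
# Sample 5 randomly chosen active TiDB tenants and write the comparison CSV to a file.
flask sample-vector-space-usage --limit 5 --output vector_usage_sample.csv
# Restrict sampling to one tenant and print the CSV to stdout without progress logs.
flask sample-vector-space-usage --tenant-id <tenant-uuid> --quiet
```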
@@ -28,6 +28,7 @@ def init_app(app: DifyApp):
         reset_encrypt_key_pair,
         reset_password,
         restore_workflow_runs,
+        sample_vector_space_usage,
         setup_datasource_oauth_client,
         setup_system_tool_oauth_client,
         setup_system_trigger_oauth_client,
@@ -68,6 +69,7 @@ def init_app(app: DifyApp):
         clean_workflow_runs,
         clean_expired_messages,
         export_app_messages,
+        sample_vector_space_usage,
     ]
     for cmd in cmds_to_register:
         app.cli.add_command(cmd)
docker/.env.default (new file, 51 lines)
@@ -0,0 +1,51 @@
# ------------------------------------------------------------------
# Minimal defaults for Docker Compose deployments.
#
# Keep local changes in .env. Use .env.example as the full reference
# for advanced and service-specific settings.
# ------------------------------------------------------------------

# Public URLs used when Dify generates links. Change these together when
# exposing Dify under another hostname, IP address, or port.
CONSOLE_WEB_URL=http://localhost
SERVICE_API_URL=http://localhost
APP_WEB_URL=http://localhost
FILES_URL=http://localhost
INTERNAL_FILES_URL=http://api:5001
TRIGGER_URL=http://localhost
ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
NEXT_PUBLIC_SOCKET_URL=ws://localhost
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
EXPOSE_PLUGIN_DEBUGGING_PORT=5003

# Built-in metadata database defaults.
DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=db_postgres
DB_PORT=5432
DB_DATABASE=dify

# Built-in Redis defaults.
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_PASSWORD=difyai123456

# Default file storage.
STORAGE_TYPE=opendal
OPENDAL_SCHEME=fs
OPENDAL_FS_ROOT=storage

# Default vector database.
VECTOR_STORE=weaviate

# Internal service authentication. Paired values must match.
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1

# Host ports.
EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443

# Docker Compose profiles for bundled services.
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql}
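Note that `COMPOSE_PROFILES` interpolates the two variables defined above it, so overriding `VECTOR_STORE` or `DB_TYPE` in `.env` also changes which bundled containers start. One way to inspect the effect — a sketch assuming the Docker Compose v2 CLI and the compose file in this directory:

```bash
cd docker
# List the services that would start with the defaults (weaviate and postgresql profiles).
docker compose --env-file .env.default config --services
# Overriding VECTOR_STORE activates a different vector database profile.
VECTOR_STORE=milvus docker compose --env-file .env.default config --services
```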
@@ -7,28 +7,28 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T
 - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\
   For more information, refer to `docker/certbot/README.md`.
 
-- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments.
+- **Persistent Environment Variables**: Default environment variables are managed through `.env.default`, while local overrides are stored in `.env`, ensuring that your configurations persist across deployments.
 
   > What is `.env`? </br> </br>
-  > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments.
+  > The `.env` file is a local override file. Keep it small by adding only the values that differ from `.env.default`. Use `.env.example` as the full reference when you need advanced configuration.
 
 - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file.
 
-- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades.
+- **Local .env Overrides**: The `dify-compose` and `dify-compose.ps1` wrappers create `.env` if it is missing and generate a persistent `SECRET_KEY` for this deployment.
 
 ### How to Deploy Dify with `docker-compose.yaml`
 
 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system.
 1. **Environment Setup**:
    - Navigate to the `docker` directory.
-   - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`.
-   - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options.
-   - **Optional (Recommended for upgrades)**:
-     You may use the environment synchronization tool to help keep your `.env` file aligned with the latest `.env.example` updates, while preserving your custom settings.
-     This is especially useful when upgrading Dify or managing a large, customized `.env` file.
+   - No copy step is required. The `dify-compose` wrappers create `.env` if it is missing and write a generated `SECRET_KEY` to it.
+   - When prompted on first run, press Enter to use the default deployment, or answer `y` to stop and edit `.env` first.
+   - Customize `.env` only when you need to override defaults from `.env.default`. Refer to `.env.example` for the full list of available variables.
+   - **Optional (for advanced deployments)**:
+     If you maintain a full `.env` file copied from `.env.example`, you may use the environment synchronization tool to keep it aligned with the latest `.env.example` updates while preserving your custom settings.
     See the [Environment Variables Synchronization](#environment-variables-synchronization) section below.
 1. **Running the Services**:
-   - Execute `docker compose up` from the `docker` directory to start the services.
+   - Execute `./dify-compose up -d` from the `docker` directory to start the services. On Windows PowerShell, run `.\dify-compose.ps1 up -d`.
    - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`.
 1. **SSL Certificate Setup**:
    - Refer to `docker/certbot/README.md` to set up SSL certificates using Certbot.
@@ -58,7 +58,13 @@ For users migrating from the `docker-legacy` setup:
 1. **Data Migration**:
    - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary.
 
-### Overview of `.env`
+### Overview of `.env.default`, `.env`, and `.env.example`
+
+- `.env.default` contains the minimal default configuration for Docker Compose deployments.
+- `.env` contains the generated `SECRET_KEY` plus any local overrides.
+- `.env.example` is the full reference for advanced configuration.
+
+The `dify-compose` wrappers merge `.env.default` and `.env` into a temporary environment file, append paired internal service keys when needed, and remove the temporary file after Docker Compose starts.
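To make the merge semantics concrete, here is a rough shell equivalent of the merge step — a sketch only; unlike the real wrappers it does not preserve first-seen key order, generate `SECRET_KEY`, or append the paired internal service keys:

```bash
# Later files win: a key set in .env overrides the same key from .env.default.
awk -F= '!/^[[:space:]]*#/ && /=/ { kv[$1] = $0 } END { for (k in kv) print kv[k] }' \
    .env.default .env > /tmp/dify-merged.env
```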
 
 #### Key Modules and Customization
@@ -118,9 +124,11 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w
 
 ### Environment Variables Synchronization
 
-When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example`.
+When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.default` or `.env.example`.
 
-To help keep your existing `.env` file up to date **without losing your custom values**, an optional environment variables synchronization tool is provided.
+If you use the default override-only workflow, review `.env.default` and add only the values you need to override to `.env`.
+
+If you maintain a full `.env` file copied from `.env.example`, an optional environment variables synchronization tool is provided.
 
 > This tool performs a **one-way synchronization** from `.env.example` to `.env`.
 > Existing values in `.env` are never overwritten automatically.
@@ -143,9 +151,9 @@ Before synchronization, the current `.env` file is saved to the `env-backup/` di
 
 **When to use**
 
-- After upgrading Dify to a newer version
+- After upgrading Dify to a newer version with a full `.env` file
 - When `.env.example` has been updated with new environment variables
-- When managing a large or heavily customized `.env` file
+- When managing a large or heavily customized `.env` file copied from `.env.example`
 
 **Usage**
 
docker/dify-compose (new executable file, 334 lines)
@@ -0,0 +1,334 @@
#!/usr/bin/env bash

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

DEFAULT_ENV_FILE=".env.default"
USER_ENV_FILE=".env"

log() {
    printf '%s\n' "$*" >&2
}

die() {
    printf 'Error: %s\n' "$*" >&2
    exit 1
}

detect_compose() {
    if docker compose version >/dev/null 2>&1; then
        COMPOSE_CMD=(docker compose)
        return
    fi

    if command -v docker-compose >/dev/null 2>&1; then
        COMPOSE_CMD=(docker-compose)
        return
    fi

    die "Docker Compose is not available. Install Docker Compose, then run this command again."
}

generate_secret_key() {
    if command -v openssl >/dev/null 2>&1; then
        openssl rand -base64 42
        return
    fi

    if command -v dd >/dev/null 2>&1 && command -v base64 >/dev/null 2>&1; then
        dd if=/dev/urandom bs=42 count=1 2>/dev/null | base64 | tr -d '\n'
        printf '\n'
        return
    fi

    return 1
}

ensure_env_files() {
    [[ -f "$DEFAULT_ENV_FILE" ]] || die "$DEFAULT_ENV_FILE is missing."

    if [[ -f "$USER_ENV_FILE" ]]; then
        return
    fi

    : >"$USER_ENV_FILE"

    if [[ ! -t 0 ]]; then
        log "Created $USER_ENV_FILE for local overrides."
        return
    fi

    printf 'Created %s for local overrides.\n' "$USER_ENV_FILE"
    printf 'Do you need a custom deployment now? (Most users can press Enter to skip.) [y/N] '
    read -r answer

    case "${answer:-}" in
        y | Y | yes | YES | Yes)
            cat <<'EOF'
Edit .env with the settings you want to override, using .env.example as the full reference.
Run ./dify-compose up -d again when you are ready.
EOF
            exit 0
            ;;
    esac
}

user_env_value() {
    local key="$1"
    awk -F= -v target="$key" '
        /^[[:space:]]*#/ || !/=/ { next }
        {
            key = $1
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
            if (key == target) {
                value = substr($0, index($0, "=") + 1)
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", value)
                if ((value ~ /^".*"$/) || (value ~ /^'\''.*'\''$/)) {
                    value = substr(value, 2, length(value) - 2)
                }
                result = value
            }
        }
        END { print result }
    ' "$USER_ENV_FILE"
}

set_user_env_value() {
    local key="$1"
    local value="$2"
    local temp_file

    temp_file="$(mktemp "${TMPDIR:-/tmp}/dify-env.XXXXXX")"
    awk -F= -v target="$key" -v replacement="$key=$value" '
        BEGIN { replaced = 0 }
        /^[[:space:]]*#/ || !/=/ { print; next }
        {
            key = $1
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
            if (key == target) {
                if (!replaced) {
                    print replacement
                    replaced = 1
                }
                next
            }
            print
        }
        END {
            if (!replaced) {
                print replacement
            }
        }
    ' "$USER_ENV_FILE" >"$temp_file"
    mv "$temp_file" "$USER_ENV_FILE"
}

ensure_secret_key() {
    local current_secret_key
    local secret_key

    current_secret_key="$(user_env_value SECRET_KEY)"
    if [[ -n "$current_secret_key" ]]; then
        return
    fi

    secret_key="$(generate_secret_key)" || die "Unable to generate SECRET_KEY. Install openssl or configure SECRET_KEY in .env."
    set_user_env_value SECRET_KEY "$secret_key"
    log "Generated SECRET_KEY in $USER_ENV_FILE."
}

env_value() {
    local key="$1"
    awk -F= -v target="$key" '
        /^[[:space:]]*#/ || !/=/ { next }
        {
            key = $1
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
            if (key == target) {
                value = substr($0, index($0, "=") + 1)
                gsub(/^[[:space:]]+|[[:space:]]+$/, "", value)
                if ((value ~ /^".*"$/) || (value ~ /^'\''.*'\''$/)) {
                    value = substr(value, 2, length(value) - 2)
                }
                result = value
            }
        }
        END { print result }
    ' "$DEFAULT_ENV_FILE" "$USER_ENV_FILE"
}

user_overrides() {
    local key="$1"
    grep -Eq "^[[:space:]]*${key}[[:space:]]*=" "$USER_ENV_FILE"
}

write_merged_env() {
    awk '
        function trim(s) {
            sub(/^[[:space:]]+/, "", s)
            sub(/[[:space:]]+$/, "", s)
            return s
        }

        /^[[:space:]]*#/ || !/=/ { next }

        {
            key = $0
            sub(/=.*/, "", key)
            key = trim(key)
            if (key == "") {
                next
            }

            value = substr($0, index($0, "=") + 1)
            value = trim(value)

            if (!(key in seen)) {
                order[++count] = key
                seen[key] = 1
            }

            values[key] = value
        }

        END {
            for (i = 1; i <= count; i++) {
                key = order[i]
                print key "=" values[key]
            }
        }
    ' "$DEFAULT_ENV_FILE" "$USER_ENV_FILE" >"$MERGED_ENV_FILE"
}

set_merged_env_value() {
    local key="$1"
    local value="$2"
    local temp_file

    temp_file="$(mktemp "${TMPDIR:-/tmp}/dify-compose-env.XXXXXX")"
    awk -F= -v target="$key" -v replacement="$key=$value" '
        BEGIN { replaced = 0 }
        /^[[:space:]]*#/ || !/=/ { print; next }
        {
            key = $1
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
            if (key == target) {
                if (!replaced) {
                    print replacement
                    replaced = 1
                }
                next
            }
            print
        }
        END {
            if (!replaced) {
                print replacement
            }
        }
    ' "$MERGED_ENV_FILE" >"$temp_file"
    mv "$temp_file" "$MERGED_ENV_FILE"
}

set_if_not_overridden() {
    local key="$1"
    local value="$2"

    if user_overrides "$key"; then
        return
    fi

    set_merged_env_value "$key" "$value"
}

metadata_db_host() {
    case "$1" in
        mysql) printf 'db_mysql' ;;
        postgresql | '') printf 'db_postgres' ;;
        *) printf '%s' "$(env_value DB_HOST)" ;;
    esac
}

metadata_db_port() {
    case "$1" in
        mysql) printf '3306' ;;
        postgresql | '') printf '5432' ;;
        *) printf '%s' "$(env_value DB_PORT)" ;;
    esac
}

metadata_db_user() {
    case "$1" in
        mysql) printf 'root' ;;
        postgresql | '') printf 'postgres' ;;
        *) printf '%s' "$(env_value DB_USERNAME)" ;;
    esac
}

build_merged_env() {
    MERGED_ENV_FILE="$(mktemp "${TMPDIR:-/tmp}/dify-compose.XXXXXX")"
    trap 'rm -f "$MERGED_ENV_FILE"' EXIT

    write_merged_env

    local db_type
    local redis_host
    local redis_port
    local redis_username
    local redis_password
    local redis_auth
    local code_execution_api_key
    local weaviate_api_key

    db_type="$(env_value DB_TYPE)"

    set_if_not_overridden DB_HOST "$(metadata_db_host "$db_type")"
    set_if_not_overridden DB_PORT "$(metadata_db_port "$db_type")"
    set_if_not_overridden DB_USERNAME "$(metadata_db_user "$db_type")"

    if ! user_overrides CELERY_BROKER_URL; then
        redis_host="$(env_value REDIS_HOST)"
        redis_port="$(env_value REDIS_PORT)"
        redis_username="$(env_value REDIS_USERNAME)"
        redis_password="$(env_value REDIS_PASSWORD)"
        redis_auth=""

        if [[ -n "$redis_username" && -n "$redis_password" ]]; then
            redis_auth="${redis_username}:${redis_password}@"
        elif [[ -n "$redis_password" ]]; then
            redis_auth=":${redis_password}@"
        elif [[ -n "$redis_username" ]]; then
            redis_auth="${redis_username}@"
        fi

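        # Broker URL shape: redis://[user][:password]@host:port/1 (Celery uses Redis DB 1 here).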
        set_merged_env_value CELERY_BROKER_URL "redis://${redis_auth}${redis_host:-redis}:${redis_port:-6379}/1"
    fi

    if ! user_overrides SANDBOX_API_KEY; then
        code_execution_api_key="$(env_value CODE_EXECUTION_API_KEY)"
        set_if_not_overridden SANDBOX_API_KEY "${code_execution_api_key:-dify-sandbox}"
    fi

    if ! user_overrides WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS; then
        weaviate_api_key="$(env_value WEAVIATE_API_KEY)"
        set_if_not_overridden WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS \
            "${weaviate_api_key:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}"
    fi
}

main() {
    detect_compose
    ensure_env_files
    ensure_secret_key
    build_merged_env

    if [[ "$#" -eq 0 ]]; then
        set -- up -d
    fi

    "${COMPOSE_CMD[@]}" --env-file "$MERGED_ENV_FILE" "$@"
}

main "$@"
docker/dify-compose.ps1 (new file, 317 lines)
@@ -0,0 +1,317 @@
$ErrorActionPreference = "Stop"
Set-StrictMode -Version Latest

$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
Set-Location $ScriptDir

$DefaultEnvFile = ".env.default"
$UserEnvFile = ".env"
$MergedEnvFile = $null
$Utf8NoBom = New-Object System.Text.UTF8Encoding -ArgumentList $false

function Write-Info {
    param([string]$Message)
    [Console]::Error.WriteLine($Message)
}

function Fail {
    param([string]$Message)
    [Console]::Error.WriteLine("Error: $Message")
    exit 1
}

function Test-CommandSuccess {
    param([string[]]$Command)

    try {
        $Executable = $Command[0]
        $CommandArgs = @()
        if ($Command.Length -gt 1) {
            $CommandArgs = @($Command[1..($Command.Length - 1)])
        }

        & $Executable @CommandArgs *> $null
        return $LASTEXITCODE -eq 0
    }
    catch {
        return $false
    }
}

function Get-ComposeCommand {
    if (Test-CommandSuccess @("docker", "compose", "version")) {
        return @("docker", "compose")
    }

    if ((Get-Command "docker-compose" -ErrorAction SilentlyContinue) -and (Test-CommandSuccess @("docker-compose", "version"))) {
        return @("docker-compose")
    }

    Fail "Docker Compose is not available. Install Docker Compose, then run this command again."
}

function New-SecretKey {
    $Bytes = New-Object byte[] 42
    $Generator = [System.Security.Cryptography.RandomNumberGenerator]::Create()

    try {
        $Generator.GetBytes($Bytes)
    }
    finally {
        $Generator.Dispose()
    }

    return [Convert]::ToBase64String($Bytes)
}

function Ensure-EnvFiles {
    if (-not (Test-Path $DefaultEnvFile -PathType Leaf)) {
        Fail "$DefaultEnvFile is missing."
    }

    if (Test-Path $UserEnvFile -PathType Leaf) {
        return
    }

    New-Item -ItemType File -Path $UserEnvFile | Out-Null

    if ([Console]::IsInputRedirected) {
        Write-Info "Created $UserEnvFile for local overrides."
        return
    }

    Write-Info "Created $UserEnvFile for local overrides."
    $Answer = Read-Host "Do you need a custom deployment now? (Most users can press Enter to skip.) [y/N]"

    if ($Answer -match "^(y|yes)$") {
        Write-Output "Edit .env with the settings you want to override, using .env.example as the full reference."
        Write-Output "Run .\dify-compose.ps1 up -d again when you are ready."
        exit 0
    }
}

function Read-EnvFile {
    param([string]$Path)

    $Values = [ordered]@{}

    if (-not (Test-Path $Path -PathType Leaf)) {
        return $Values
    }

    foreach ($Line in Get-Content -Path $Path) {
        if ($Line -match "^\s*#" -or $Line -notmatch "=") {
            continue
        }

        $SeparatorIndex = $Line.IndexOf("=")
        $Key = $Line.Substring(0, $SeparatorIndex).Trim()
        $Value = $Line.Substring($SeparatorIndex + 1).Trim()

        if (($Value.StartsWith('"') -and $Value.EndsWith('"')) -or ($Value.StartsWith("'") -and $Value.EndsWith("'"))) {
            $Value = $Value.Substring(1, $Value.Length - 2)
        }

        if ($Key.Length -gt 0) {
            $Values[$Key] = $Value
        }
    }

    return $Values
}

function Set-UserEnvValue {
    param(
        [string]$Key,
        [string]$Value
    )

    $Path = [string](Resolve-Path $UserEnvFile)
    $Lines = [System.IO.File]::ReadAllLines($Path, [System.Text.Encoding]::UTF8)
    $Output = New-Object System.Collections.Generic.List[string]
    $Replaced = $false

    foreach ($Line in $Lines) {
        if ($Line -match "^\s*#" -or $Line -notmatch "=") {
            $Output.Add($Line)
            continue
        }

        $SeparatorIndex = $Line.IndexOf("=")
        $CurrentKey = $Line.Substring(0, $SeparatorIndex).Trim()

        if ($CurrentKey -eq $Key) {
            if (-not $Replaced) {
                $Output.Add("$Key=$Value")
                $Replaced = $true
            }
            continue
        }

        $Output.Add($Line)
    }

    if (-not $Replaced) {
        $Output.Add("$Key=$Value")
    }

    [System.IO.File]::WriteAllLines($Path, $Output, $Utf8NoBom)
}

function Ensure-SecretKey {
    $Values = Read-EnvFile $UserEnvFile

    if ($Values.Contains("SECRET_KEY") -and $Values["SECRET_KEY"]) {
        return
    }

    Set-UserEnvValue "SECRET_KEY" (New-SecretKey)
    Write-Info "Generated SECRET_KEY in $UserEnvFile."
}

function Merge-EnvValues {
    $Values = [ordered]@{}

    foreach ($Entry in (Read-EnvFile $DefaultEnvFile).GetEnumerator()) {
        $Values[$Entry.Key] = $Entry.Value
    }

    foreach ($Entry in (Read-EnvFile $UserEnvFile).GetEnumerator()) {
        $Values[$Entry.Key] = $Entry.Value
    }

    return $Values
}

function User-Overrides {
    param([string]$Key)

    if (-not (Test-Path $UserEnvFile -PathType Leaf)) {
        return $false
    }

    return [bool](Select-String -Path $UserEnvFile -Pattern "^\s*$([regex]::Escape($Key))\s*=" -Quiet)
}

function Metadata-DbHost {
    param([string]$DbType, $Values)

    switch ($DbType) {
        "mysql" { return "db_mysql" }
        "postgresql" { return "db_postgres" }
        "" { return "db_postgres" }
        default { return $Values["DB_HOST"] }
    }
}

function Metadata-DbPort {
    param([string]$DbType, $Values)

    switch ($DbType) {
        "mysql" { return "3306" }
        "postgresql" { return "5432" }
        "" { return "5432" }
        default { return $Values["DB_PORT"] }
    }
}

function Metadata-DbUser {
    param([string]$DbType, $Values)

    switch ($DbType) {
        "mysql" { return "root" }
        "postgresql" { return "postgres" }
        "" { return "postgres" }
        default { return $Values["DB_USERNAME"] }
    }
}

function Write-MergedEnv {
    param($Values)

    $Output = New-Object System.Collections.Generic.List[string]

    foreach ($Entry in $Values.GetEnumerator()) {
        $Output.Add("$($Entry.Key)=$($Entry.Value)")
    }

    [System.IO.File]::WriteAllLines($MergedEnvFile, $Output, $Utf8NoBom)
}

function Build-MergedEnv {
    $Values = Merge-EnvValues
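    # script: scope keeps the temp file path visible to the cleanup block at the bottom.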
    $script:MergedEnvFile = [System.IO.Path]::GetTempFileName()

    $DbType = if ($Values.Contains("DB_TYPE")) { $Values["DB_TYPE"] } else { "postgresql" }

    if (-not (User-Overrides "DB_HOST")) {
        $Values["DB_HOST"] = Metadata-DbHost $DbType $Values
    }

    if (-not (User-Overrides "DB_PORT")) {
        $Values["DB_PORT"] = Metadata-DbPort $DbType $Values
    }

    if (-not (User-Overrides "DB_USERNAME")) {
        $Values["DB_USERNAME"] = Metadata-DbUser $DbType $Values
    }

    if (-not (User-Overrides "CELERY_BROKER_URL")) {
        $RedisHost = if ($Values.Contains("REDIS_HOST") -and $Values["REDIS_HOST"]) { $Values["REDIS_HOST"] } else { "redis" }
        $RedisPort = if ($Values.Contains("REDIS_PORT") -and $Values["REDIS_PORT"]) { $Values["REDIS_PORT"] } else { "6379" }
        $RedisUsername = if ($Values.Contains("REDIS_USERNAME")) { $Values["REDIS_USERNAME"] } else { "" }
        $RedisPassword = if ($Values.Contains("REDIS_PASSWORD")) { $Values["REDIS_PASSWORD"] } else { "" }
        $RedisAuth = ""

        if ($RedisUsername -and $RedisPassword) {
            $RedisAuth = "${RedisUsername}:${RedisPassword}@"
        }
        elseif ($RedisPassword) {
            $RedisAuth = ":${RedisPassword}@"
        }
        elseif ($RedisUsername) {
            $RedisAuth = "${RedisUsername}@"
        }

        $Values["CELERY_BROKER_URL"] = "redis://$RedisAuth${RedisHost}:${RedisPort}/1"
    }

    if (-not (User-Overrides "SANDBOX_API_KEY")) {
        $CodeExecutionApiKey = if ($Values.Contains("CODE_EXECUTION_API_KEY") -and $Values["CODE_EXECUTION_API_KEY"]) { $Values["CODE_EXECUTION_API_KEY"] } else { "dify-sandbox" }
        $Values["SANDBOX_API_KEY"] = $CodeExecutionApiKey
    }

    if (-not (User-Overrides "WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS")) {
        $WeaviateApiKey = if ($Values.Contains("WEAVIATE_API_KEY") -and $Values["WEAVIATE_API_KEY"]) { $Values["WEAVIATE_API_KEY"] } else { "WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih" }
        $Values["WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS"] = $WeaviateApiKey
    }

    Write-MergedEnv $Values
}

$ComposeCommand = Get-ComposeCommand

try {
    Ensure-EnvFiles
    Ensure-SecretKey
    Build-MergedEnv

    $ComposeArgs = @($args)
    if ($ComposeArgs.Count -eq 0) {
        $ComposeArgs = @("up", "-d")
    }

    $ComposeCommandArgs = @()
    if ($ComposeCommand.Length -gt 1) {
        $ComposeCommandArgs = @($ComposeCommand[1..($ComposeCommand.Length - 1)])
    }

    $ComposeExecutable = $ComposeCommand[0]
    & $ComposeExecutable @ComposeCommandArgs --env-file $MergedEnvFile @ComposeArgs
    exit $LASTEXITCODE
}
finally {
    if ($MergedEnvFile -and (Test-Path $MergedEnvFile -PathType Leaf)) {
        Remove-Item -Force $MergedEnvFile
    }
}
docker/docker-compose-template.yaml (new file, 953 lines)
@@ -0,0 +1,953 @@
x-shared-env: &shared-api-worker-env
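# YAML anchor for the env block shared by the api, worker, and worker_beat services;
# each service pulls it in below via the merge key `<<: *shared-api-worker-env`.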
services:
|
||||
# Init container to fix permissions
|
||||
init_permissions:
|
||||
image: busybox:latest
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
FLAG_FILE="/app/api/storage/.init_permissions"
|
||||
if [ -f "$${FLAG_FILE}" ]; then
|
||||
echo "Permissions already initialized. Exiting."
|
||||
exit 0
|
||||
fi
|
||||
echo "Initializing permissions for /app/api/storage"
|
||||
chown -R 1001:1001 /app/api/storage && touch "$${FLAG_FILE}"
|
||||
echo "Permissions initialized. Exiting."
|
||||
volumes:
|
||||
- ./volumes/app/storage:/app/api/storage
|
||||
restart: "no"

  # API service
  api:
    image: langgenius/dify-api:1.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:5001/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
    networks:
      - ssrf_proxy_network
      - default
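  # Note: `<<: *shared-api-worker-env` is a standard YAML merge key. It expands
  # the mapping anchored as `x-shared-env` at the top of this file into each
  # service's `environment` block, so shared variables are declared once.
  # A minimal sketch of the pattern (illustrative, not part of this file):
  #
  #   x-shared: &shared
  #     FOO: bar
  #   services:
  #     app:
  #       environment:
  #         <<: *shared   # app inherits FOO: bar
  #         MODE: api     # plus its own keys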

  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing all queues.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    healthcheck:
      test: ["CMD-SHELL", "celery -A celery_healthcheck.celery inspect ping"]
      interval: ${COMPOSE_WORKER_HEALTHCHECK_INTERVAL:-30s}
      timeout: ${COMPOSE_WORKER_HEALTHCHECK_TIMEOUT:-30s}
      retries: 3
      start_period: 60s
      disable: ${COMPOSE_WORKER_HEALTHCHECK_DISABLED:-true}
    networks:
      - ssrf_proxy_network
      - default

  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.14.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
      init_permissions:
        condition: service_completed_successfully
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false
      redis:
        condition: service_started
    healthcheck:
      test: ["CMD-SHELL", "celery -A celery_healthcheck.celery inspect ping"]
      interval: ${COMPOSE_WORKER_HEALTHCHECK_INTERVAL:-30s}
      timeout: ${COMPOSE_WORKER_HEALTHCHECK_TIMEOUT:-30s}
      retries: 3
      start_period: 60s
      disable: ${COMPOSE_WORKER_HEALTHCHECK_DISABLED:-true}
    networks:
      - ssrf_proxy_network
      - default

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.14.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
      NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
      NEXT_PUBLIC_SOCKET_URL: ${NEXT_PUBLIC_SOCKET_URL:-ws://localhost}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      EXPERIMENTAL_ENABLE_VINEXT: ${EXPERIMENTAL_ENABLE_VINEXT:-false}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      ALLOW_EMBED: ${ALLOW_EMBED:-false}
      ALLOW_INLINE_STYLES: ${ALLOW_INLINE_STYLES:-false}
      ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
      LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
      MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
      MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
      MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
      ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
      ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
      ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}

  # The PostgreSQL database.
  db_postgres:
    image: postgres:15-alpine
    profiles:
      - postgresql
    restart: always
    environment:
      POSTGRES_USER: ${DB_USERNAME:-postgres}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-difyai123456}
      POSTGRES_DB: ${DB_DATABASE:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
      -c 'statement_timeout=${POSTGRES_STATEMENT_TIMEOUT:-0}'
      -c 'idle_in_transaction_session_timeout=${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-h",
          "db_postgres",
          "-U",
          "${DB_USERNAME:-postgres}",
          "-d",
          "${DB_DATABASE:-dify}",
        ]
      interval: 1s
      timeout: 3s
      retries: 60

  # The MySQL database.
  db_mysql:
    image: mysql:8.0
    profiles:
      - mysql
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456}
      MYSQL_DATABASE: ${DB_DATABASE:-dify}
    command: >
      --max_connections=1000
      --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
      --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
      --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
    volumes:
      - ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}:/var/lib/mysql
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-u",
          "root",
          "-p${DB_PASSWORD:-difyai123456}",
        ]
      interval: 1s
      timeout: 3s
      retries: 30

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG",
        ]
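  # All three occurrences of REDIS_PASSWORD above resolve from the same
  # variable, so a single override in docker/.env (e.g. REDIS_PASSWORD=my-strong-pass,
  # an assumed example value) keeps the server, CLI auth and healthcheck in sync.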

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.15
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure to change this key to a strong one for your deployment.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8194/health"]
    networks:
      - ssrf_proxy_network
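  # e.g. put SANDBOX_API_KEY=<output of `openssl rand -base64 42`> into
  # docker/.env; when no explicit override exists, the compose wrapper above
  # falls back to CODE_EXECUTION_API_KEY and finally to "dify-sandbox".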

  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.6.0-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      DB_SSL_MODE: ${DB_SSL_MODE:-disable}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
      PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
      PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
      S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
      AWS_REGION: ${PLUGIN_AWS_REGION:-}
      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
      ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
      ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
      ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
      ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
      ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
      ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
      VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
      VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
      VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
      VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
      SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
      SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db_postgres:
        condition: service_healthy
        required: false
      db_mysql:
        condition: service_healthy
        required: false
      oceanbase:
        condition: service_healthy
        required: false
      seekdb:
        condition: service_healthy
        required: false

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#18-why-is-ssrf-proxy-needed%3F
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      # Please modify the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default

  # Certbot service
  # use `docker-compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL:-}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN:-}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: ["/docker-entrypoint.sh"]
    command: ["tail", "-f", "/dev/null"]

  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint:
      [
        "sh",
        "-c",
        "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
      ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}"
      - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}"

  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.27.0
    profiles:
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
      DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
      ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
      ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
      ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5-lts
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      OB_SERVER_IP: 127.0.0.1
      MODE: mini
      LANG: C.UTF-8
      LC_ALL: C.UTF-8
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'obclient -h127.0.0.1 -P2881 -uroot@test -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 10s
      retries: 30
      start_period: 30s
      timeout: 10s

  # seekdb vector database
  seekdb:
    image: oceanbase/seekdb:latest
    container_name: seekdb
    profiles:
      - seekdb
    restart: always
    volumes:
      - ./volumes/seekdb:/var/lib/oceanbase
    environment:
      ROOT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
      REPORTER: dify-ai-seekdb
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'mysql -h127.0.0.1 -P2881 -uroot -p${OCEANBASE_VECTOR_PASSWORD:-difyai123456} -e "SELECT 1;"',
        ]
      interval: 5s
      retries: 60
      timeout: 5s

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.8.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
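  # The vector stores in this file are gated behind Compose profiles. To run
  # one, set VECTOR_STORE accordingly in docker/.env and enable its profile,
  # e.g. `docker compose --profile qdrant up -d` (sketch; the COMPOSE_PROFILES
  # environment variable is an equivalent way to select profiles).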

  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [""]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test:
        [
          "CMD-SHELL",
          "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1",
        ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s

  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
      # pg_bigm module for full text search
      PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
      PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
      - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
    entrypoint: ["/docker-entrypoint.sh"]
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # get image from https://www.vastdata.com.cn/
  vastbase:
    image: vastdata/vastbase-vector
    profiles:
      - vastbase
    restart: always
    environment:
      - VB_DBCOMPATIBILITY=PG
      - VB_DB=dify
      - VB_USERNAME=dify
      - VB_PASSWORD=Difyai123456
    ports:
      - "5434:5432"
    volumes:
      - ./vastbase/lic:/home/vastbase/vastbase/lic
      - ./vastbase/data:/home/vastbase/data
      - ./vastbase/backup:/home/vastbase/backup
      - ./vastbase/backup_log:/home/vastbase/backup_log
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD", "pg_isready"]
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # InterSystems IRIS vector database
  iris:
    image: containers.intersystems.com/intersystems/iris-community:2025.3
    profiles:
      - iris
    container_name: iris
    restart: always
    init: true
    ports:
      - "${IRIS_SUPER_SERVER_PORT:-1972}:1972"
      - "${IRIS_WEB_SERVER_PORT:-52773}:52773"
    volumes:
      - ./volumes/iris:/durable
      - ./iris/iris-init.script:/iris-init.script
      - ./iris/docker-entrypoint.sh:/custom-entrypoint.sh
    entrypoint: ["/custom-entrypoint.sh"]
    tty: true
    environment:
      TZ: ${IRIS_TIMEZONE:-UTC}
      ISC_DATA_DIRECTORY: /durable/iris

  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}

  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.6.3
    profiles:
      - milvus
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus

  # Opensearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # opengauss vector database.
  opengauss:
    image: opengauss/opengauss:7.0.0-RC1
    profiles:
      - opengauss
    privileged: true
    restart: always
    environment:
      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
      GS_PORT: ${OPENGAUSS_PORT:-6600}
      GS_DB: ${OPENGAUSS_DATABASE:-dify}
    volumes:
      - ./volumes/opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
      interval: 10s
      timeout: 10s
      retries: 10
    ports:
      - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}

  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # Matrixone vector store.
  matrixone:
    hostname: matrixone
    image: matrixorigin/matrixone:2.1.1
    profiles:
      - matrixone
    restart: always
    volumes:
      - ./volumes/matrixone/data:/mo-data
    ports:
      - ${MATRIXONE_PORT:-6001}:${MATRIXONE_PORT:-6001}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: "true"
      xpack.security.enrollment.enabled: "false"
      xpack.security.http.ssl.enabled: "false"
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"]
    healthcheck:
      test:
        ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: "true"
      XPACK_SECURITY_ENROLLMENT_ENABLED: "false"
      XPACK_SECURITY_HTTP_SSL_ENABLED: "false"
      XPACK_FLEET_ISAIRGAPPED: "true"
      I18N_LOCALE: zh-CN
      SERVER_PORT: "5601"
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # The Unstructured ETL service.
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # create a network between sandbox, api and ssrf_proxy that cannot access the outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data:
@@ -1,3 +1,728 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================

x-shared-env: &shared-api-worker-env
  CONSOLE_API_URL: ${CONSOLE_API_URL:-}
  CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
  SERVICE_API_URL: ${SERVICE_API_URL:-}
  TRIGGER_URL: ${TRIGGER_URL:-http://localhost}
  APP_API_URL: ${APP_API_URL:-}
  APP_WEB_URL: ${APP_WEB_URL:-}
  FILES_URL: ${FILES_URL:-}
  INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-}
  LANG: ${LANG:-C.UTF-8}
  LC_ALL: ${LC_ALL:-C.UTF-8}
  PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8}
  UV_CACHE_DIR: ${UV_CACHE_DIR:-/tmp/.uv-cache}
  LOG_LEVEL: ${LOG_LEVEL:-INFO}
  LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text}
  LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
  LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
  LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
  LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
  LOG_TZ: ${LOG_TZ:-UTC}
  DEBUG: ${DEBUG:-false}
  FLASK_DEBUG: ${FLASK_DEBUG:-false}
  ENABLE_REQUEST_LOGGING: ${ENABLE_REQUEST_LOGGING:-False}
  SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
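  # Override SECRET_KEY in docker/.env rather than editing this generated file;
  # a strong value can be produced with e.g. `openssl rand -base64 42` (the
  # default above is only a placeholder).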
  INIT_PASSWORD: ${INIT_PASSWORD:-}
  DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
  CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
  OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
  MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
  FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
  ENABLE_COLLABORATION_MODE: ${ENABLE_COLLABORATION_MODE:-false}
  ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
  REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
  APP_DEFAULT_ACTIVE_REQUESTS: ${APP_DEFAULT_ACTIVE_REQUESTS:-0}
  APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
  APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
  DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
  DIFY_PORT: ${DIFY_PORT:-5001}
  SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
  SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
  SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
  CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
  GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
  CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-4}
  CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
  CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
  CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
  API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
  API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
  ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true}
  ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true}
  ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true}
  NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false}
  DB_TYPE: ${DB_TYPE:-postgresql}
  DB_USERNAME: ${DB_USERNAME:-postgres}
  DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
  DB_HOST: ${DB_HOST:-db_postgres}
  DB_PORT: ${DB_PORT:-5432}
  DB_DATABASE: ${DB_DATABASE:-dify}
  SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
  SQLALCHEMY_MAX_OVERFLOW: ${SQLALCHEMY_MAX_OVERFLOW:-10}
  SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
  SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
  SQLALCHEMY_POOL_PRE_PING: ${SQLALCHEMY_POOL_PRE_PING:-false}
  SQLALCHEMY_POOL_USE_LIFO: ${SQLALCHEMY_POOL_USE_LIFO:-false}
  SQLALCHEMY_POOL_TIMEOUT: ${SQLALCHEMY_POOL_TIMEOUT:-30}
  POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-200}
  POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
  POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
  POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
  POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
  POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0}
  POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0}
  MYSQL_MAX_CONNECTIONS: ${MYSQL_MAX_CONNECTIONS:-1000}
  MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M}
  MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE:-128M}
  MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT: ${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2}
  REDIS_HOST: ${REDIS_HOST:-redis}
  REDIS_PORT: ${REDIS_PORT:-6379}
  REDIS_USERNAME: ${REDIS_USERNAME:-}
  REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
  REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
  REDIS_SSL_CERT_REQS: ${REDIS_SSL_CERT_REQS:-CERT_NONE}
  REDIS_SSL_CA_CERTS: ${REDIS_SSL_CA_CERTS:-}
  REDIS_SSL_CERTFILE: ${REDIS_SSL_CERTFILE:-}
  REDIS_SSL_KEYFILE: ${REDIS_SSL_KEYFILE:-}
  REDIS_DB: ${REDIS_DB:-0}
  REDIS_KEY_PREFIX: ${REDIS_KEY_PREFIX:-}
  REDIS_MAX_CONNECTIONS: ${REDIS_MAX_CONNECTIONS:-}
  REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
  REDIS_SENTINELS: ${REDIS_SENTINELS:-}
  REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
  REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
  REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
  REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
  REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
  REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
  REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
  REDIS_RETRY_RETRIES: ${REDIS_RETRY_RETRIES:-3}
  REDIS_RETRY_BACKOFF_BASE: ${REDIS_RETRY_BACKOFF_BASE:-1.0}
  REDIS_RETRY_BACKOFF_CAP: ${REDIS_RETRY_BACKOFF_CAP:-10.0}
  REDIS_SOCKET_TIMEOUT: ${REDIS_SOCKET_TIMEOUT:-5.0}
  REDIS_SOCKET_CONNECT_TIMEOUT: ${REDIS_SOCKET_CONNECT_TIMEOUT:-5.0}
  REDIS_HEALTH_CHECK_INTERVAL: ${REDIS_HEALTH_CHECK_INTERVAL:-30}
  CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
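  # The default above matches what the compose wrapper derives from the REDIS_*
  # values: redis://[username][:password]@host:port/1. Note the broker uses
  # Redis DB 1 while REDIS_DB for the application cache defaults to 0.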
  CELERY_BACKEND: ${CELERY_BACKEND:-redis}
  BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
  CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
  CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
  CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-}
  CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
  CELERY_TASK_ANNOTATIONS: ${CELERY_TASK_ANNOTATIONS:-null}
  WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
  CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
  COOKIE_DOMAIN: ${COOKIE_DOMAIN:-}
  NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-}
  NEXT_PUBLIC_SOCKET_URL: ${NEXT_PUBLIC_SOCKET_URL:-ws://localhost}
  NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5}
  STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
  OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
  OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
  CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user}
  CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-}
  CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_}
  CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km}
  S3_ENDPOINT: ${S3_ENDPOINT:-}
  S3_REGION: ${S3_REGION:-us-east-1}
  S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
  S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
  S3_SECRET_KEY: ${S3_SECRET_KEY:-}
  S3_ADDRESS_STYLE: ${S3_ADDRESS_STYLE:-auto}
  S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
  ARCHIVE_STORAGE_ENABLED: ${ARCHIVE_STORAGE_ENABLED:-false}
  ARCHIVE_STORAGE_ENDPOINT: ${ARCHIVE_STORAGE_ENDPOINT:-}
  ARCHIVE_STORAGE_ARCHIVE_BUCKET: ${ARCHIVE_STORAGE_ARCHIVE_BUCKET:-}
  ARCHIVE_STORAGE_EXPORT_BUCKET: ${ARCHIVE_STORAGE_EXPORT_BUCKET:-}
  ARCHIVE_STORAGE_ACCESS_KEY: ${ARCHIVE_STORAGE_ACCESS_KEY:-}
  ARCHIVE_STORAGE_SECRET_KEY: ${ARCHIVE_STORAGE_SECRET_KEY:-}
  ARCHIVE_STORAGE_REGION: ${ARCHIVE_STORAGE_REGION:-auto}
  AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
  AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
  AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
  AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://<your_account_name>.blob.core.windows.net}
  GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
  GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
  ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
  ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
  ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
  ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com}
  ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
  ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
  TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
  TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
  TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
  TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
  TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
  TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain}
  OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com}
  OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
  OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
  OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
  OCI_REGION: ${OCI_REGION:-us-ashburn-1}
  HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
  HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
  HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
  HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
  HUAWEI_OBS_PATH_STYLE: ${HUAWEI_OBS_PATH_STYLE:-false}
  VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
  VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
  VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
  VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
  VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
  BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
  BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
  BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
  BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
  SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
  SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
  SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
  VECTOR_STORE: ${VECTOR_STORE:-weaviate}
  VECTOR_INDEX_NAME_PREFIX: ${VECTOR_INDEX_NAME_PREFIX:-Vector_index}
  WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
  WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
  WEAVIATE_GRPC_ENDPOINT: ${WEAVIATE_GRPC_ENDPOINT:-grpc://weaviate:50051}
  WEAVIATE_TOKENIZATION: ${WEAVIATE_TOKENIZATION:-word}
  OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
  OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
  OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
  OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
  OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
  OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
  OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
  OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false}
  OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik}
  SEEKDB_MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G}
  QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
  QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
  QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
  QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
  QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
  QDRANT_REPLICATION_FACTOR: ${QDRANT_REPLICATION_FACTOR:-1}
  MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530}
  MILVUS_DATABASE: ${MILVUS_DATABASE:-}
  MILVUS_TOKEN: ${MILVUS_TOKEN:-}
  MILVUS_USER: ${MILVUS_USER:-}
  MILVUS_PASSWORD: ${MILVUS_PASSWORD:-}
  MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
  MILVUS_ANALYZER_PARAMS: ${MILVUS_ANALYZER_PARAMS:-}
  MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
  MYSCALE_PORT: ${MYSCALE_PORT:-8123}
  MYSCALE_USER: ${MYSCALE_USER:-default}
  MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
  MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
  MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
  COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server}
  COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
  COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
  COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
  COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
  HOLOGRES_HOST: ${HOLOGRES_HOST:-}
  HOLOGRES_PORT: ${HOLOGRES_PORT:-80}
  HOLOGRES_DATABASE: ${HOLOGRES_DATABASE:-}
  HOLOGRES_ACCESS_KEY_ID: ${HOLOGRES_ACCESS_KEY_ID:-}
  HOLOGRES_ACCESS_KEY_SECRET: ${HOLOGRES_ACCESS_KEY_SECRET:-}
  HOLOGRES_SCHEMA: ${HOLOGRES_SCHEMA:-public}
  HOLOGRES_TOKENIZER: ${HOLOGRES_TOKENIZER:-jieba}
  HOLOGRES_DISTANCE_METHOD: ${HOLOGRES_DISTANCE_METHOD:-Cosine}
  HOLOGRES_BASE_QUANTIZATION_TYPE: ${HOLOGRES_BASE_QUANTIZATION_TYPE:-rabitq}
  HOLOGRES_MAX_DEGREE: ${HOLOGRES_MAX_DEGREE:-64}
  HOLOGRES_EF_CONSTRUCTION: ${HOLOGRES_EF_CONSTRUCTION:-400}
  PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
  PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
  PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
  PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
  PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
  PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
  PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
  PGVECTOR_PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
  PGVECTOR_PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
  VASTBASE_HOST: ${VASTBASE_HOST:-vastbase}
  VASTBASE_PORT: ${VASTBASE_PORT:-5432}
  VASTBASE_USER: ${VASTBASE_USER:-dify}
  VASTBASE_PASSWORD: ${VASTBASE_PASSWORD:-Difyai123456}
  VASTBASE_DATABASE: ${VASTBASE_DATABASE:-dify}
  VASTBASE_MIN_CONNECTION: ${VASTBASE_MIN_CONNECTION:-1}
  VASTBASE_MAX_CONNECTION: ${VASTBASE_MAX_CONNECTION:-5}
  PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
  PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
  PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
  PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
  PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
  ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
  ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
  ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
  ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
  ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
  ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
  ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
  ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
  ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
  ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
  ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
  ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
  TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
  TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
  TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
  TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
  TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
  MATRIXONE_HOST: ${MATRIXONE_HOST:-matrixone}
  MATRIXONE_PORT: ${MATRIXONE_PORT:-6001}
  MATRIXONE_USER: ${MATRIXONE_USER:-dump}
  MATRIXONE_PASSWORD: ${MATRIXONE_PASSWORD:-111}
  MATRIXONE_DATABASE: ${MATRIXONE_DATABASE:-dify}
  TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
  TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
  TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
  TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
  TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
  TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
  TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
  TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
  TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
  TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
  TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
  TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
  CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
  CHROMA_PORT: ${CHROMA_PORT:-8000}
  CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
  CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
  CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
  CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
  ORACLE_USER: ${ORACLE_USER:-dify}
  ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
  ORACLE_DSN: ${ORACLE_DSN:-oracle:1521/FREEPDB1}
  ORACLE_CONFIG_DIR: ${ORACLE_CONFIG_DIR:-/app/api/storage/wallet}
  ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet}
  ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify}
  ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false}
  ALIBABACLOUD_MYSQL_HOST: ${ALIBABACLOUD_MYSQL_HOST:-127.0.0.1}
  ALIBABACLOUD_MYSQL_PORT: ${ALIBABACLOUD_MYSQL_PORT:-3306}
  ALIBABACLOUD_MYSQL_USER: ${ALIBABACLOUD_MYSQL_USER:-root}
  ALIBABACLOUD_MYSQL_PASSWORD: ${ALIBABACLOUD_MYSQL_PASSWORD:-difyai123456}
  ALIBABACLOUD_MYSQL_DATABASE: ${ALIBABACLOUD_MYSQL_DATABASE:-dify}
  ALIBABACLOUD_MYSQL_MAX_CONNECTION: ${ALIBABACLOUD_MYSQL_MAX_CONNECTION:-5}
  ALIBABACLOUD_MYSQL_HNSW_M: ${ALIBABACLOUD_MYSQL_HNSW_M:-6}
  RELYT_HOST: ${RELYT_HOST:-db}
  RELYT_PORT: ${RELYT_PORT:-5432}
  RELYT_USER: ${RELYT_USER:-postgres}
  RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
  RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
  OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
  OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
  OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
  OPENSEARCH_VERIFY_CERTS: ${OPENSEARCH_VERIFY_CERTS:-true}
  OPENSEARCH_AUTH_METHOD: ${OPENSEARCH_AUTH_METHOD:-basic}
  OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
  OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
  OPENSEARCH_AWS_REGION: ${OPENSEARCH_AWS_REGION:-ap-southeast-1}
  OPENSEARCH_AWS_SERVICE: ${OPENSEARCH_AWS_SERVICE:-aoss}
  TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
  TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
  TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
  TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
  TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
  TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
  TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
  TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: ${TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH:-false}
  ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
  ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
  ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
  ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
  KIBANA_PORT: ${KIBANA_PORT:-5601}
  ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false}
  ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL}
  ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY}
  ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False}
  ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-}
  ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000}
  ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True}
  ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10}
  BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
  BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
  BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
  BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
  BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
  BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
  BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
  BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: ${BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER:-DEFAULT_ANALYZER}
  BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: ${BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE:-COARSE_MODE}
  BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT:-500}
  BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO:-0.05}
  BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS: ${BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS:-300}
  VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
  VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
  VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
  VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
  VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
  VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
  VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
  LINDORM_URL: ${LINDORM_URL:-http://localhost:30070}
  LINDORM_USERNAME: ${LINDORM_USERNAME:-admin}
  LINDORM_PASSWORD: ${LINDORM_PASSWORD:-admin}
  LINDORM_USING_UGC: ${LINDORM_USING_UGC:-True}
  LINDORM_QUERY_TIMEOUT: ${LINDORM_QUERY_TIMEOUT:-1}
  OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss}
  OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600}
  OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres}
  OPENGAUSS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
  OPENGAUSS_DATABASE: ${OPENGAUSS_DATABASE:-dify}
  OPENGAUSS_MIN_CONNECTION: ${OPENGAUSS_MIN_CONNECTION:-1}
  OPENGAUSS_MAX_CONNECTION: ${OPENGAUSS_MAX_CONNECTION:-5}
  OPENGAUSS_ENABLE_PQ: ${OPENGAUSS_ENABLE_PQ:-false}
  HUAWEI_CLOUD_HOSTS: ${HUAWEI_CLOUD_HOSTS:-https://127.0.0.1:9200}
  HUAWEI_CLOUD_USER: ${HUAWEI_CLOUD_USER:-admin}
  HUAWEI_CLOUD_PASSWORD: ${HUAWEI_CLOUD_PASSWORD:-admin}
  UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
  UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
  TABLESTORE_ENDPOINT: ${TABLESTORE_ENDPOINT:-https://instance-name.cn-hangzhou.ots.aliyuncs.com}
  TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name}
  TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx}
  TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx}
  TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false}
  CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-}
  CLICKZETTA_PASSWORD: ${CLICKZETTA_PASSWORD:-}
  CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-}
  CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com}
  CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start}
  CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap}
  CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify}
  CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100}
  CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true}
  CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese}
  CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart}
  CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance}
  IRIS_HOST: ${IRIS_HOST:-iris}
  IRIS_SUPER_SERVER_PORT: ${IRIS_SUPER_SERVER_PORT:-1972}
  IRIS_WEB_SERVER_PORT: ${IRIS_WEB_SERVER_PORT:-52773}
  IRIS_USER: ${IRIS_USER:-_SYSTEM}
  IRIS_PASSWORD: ${IRIS_PASSWORD:-Dify@1234}
  IRIS_DATABASE: ${IRIS_DATABASE:-USER}
  IRIS_SCHEMA: ${IRIS_SCHEMA:-dify}
  IRIS_CONNECTION_URL: ${IRIS_CONNECTION_URL:-}
  IRIS_MIN_CONNECTION: ${IRIS_MIN_CONNECTION:-1}
  IRIS_MAX_CONNECTION: ${IRIS_MAX_CONNECTION:-3}
  IRIS_TEXT_INDEX: ${IRIS_TEXT_INDEX:-true}
  IRIS_TEXT_INDEX_LANGUAGE: ${IRIS_TEXT_INDEX_LANGUAGE:-en}
  IRIS_TIMEZONE: ${IRIS_TIMEZONE:-UTC}
  UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
  UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
  UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-}
  SINGLE_CHUNK_ATTACHMENT_LIMIT: ${SINGLE_CHUNK_ATTACHMENT_LIMIT:-10}
  IMAGE_FILE_BATCH_LIMIT: ${IMAGE_FILE_BATCH_LIMIT:-10}
  ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: ${ATTACHMENT_IMAGE_FILE_SIZE_LIMIT:-2}
  ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: ${ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT:-60}
  ETL_TYPE: ${ETL_TYPE:-dify}
  UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
  UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
  SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
  PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
  CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
  PLUGIN_BASED_TOKEN_COUNTING_ENABLED: ${PLUGIN_BASED_TOKEN_COUNTING_ENABLED:-false}
  MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
  UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
|
||||
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
|
||||
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
|
||||
SENTRY_DSN: ${SENTRY_DSN:-}
|
||||
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
|
||||
PLUGIN_SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false}
|
||||
PLUGIN_SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-}
|
||||
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
|
||||
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
|
||||
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
|
||||
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
|
||||
MAIL_TYPE: ${MAIL_TYPE:-}
|
||||
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
|
||||
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
|
||||
RESEND_API_KEY: ${RESEND_API_KEY:-}
|
||||
SMTP_SERVER: ${SMTP_SERVER:-}
|
||||
SMTP_PORT: ${SMTP_PORT:-465}
|
||||
SMTP_USERNAME: ${SMTP_USERNAME:-}
|
||||
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
|
||||
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
|
||||
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
|
||||
SMTP_LOCAL_HOSTNAME: ${SMTP_LOCAL_HOSTNAME:-}
|
||||
SENDGRID_API_KEY: ${SENDGRID_API_KEY:-}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
|
||||
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
|
||||
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
|
||||
EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES: ${EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES:-5}
|
||||
CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: ${CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES:-5}
|
||||
OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES: ${OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES:-5}
|
||||
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
|
||||
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
|
||||
CODE_EXECUTION_SSL_VERIFY: ${CODE_EXECUTION_SSL_VERIFY:-True}
|
||||
CODE_EXECUTION_POOL_MAX_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_CONNECTIONS:-100}
|
||||
CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS:-20}
|
||||
CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY: ${CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY:-5.0}
|
||||
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
|
||||
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
|
||||
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
|
||||
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
|
||||
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-400000}
|
||||
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
|
||||
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
|
||||
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
|
||||
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
|
||||
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
|
||||
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
|
||||
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-400000}
|
||||
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
|
||||
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
|
||||
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
|
||||
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
|
||||
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
|
||||
GRAPH_ENGINE_MIN_WORKERS: ${GRAPH_ENGINE_MIN_WORKERS:-1}
|
||||
GRAPH_ENGINE_MAX_WORKERS: ${GRAPH_ENGINE_MAX_WORKERS:-10}
|
||||
GRAPH_ENGINE_SCALE_UP_THRESHOLD: ${GRAPH_ENGINE_SCALE_UP_THRESHOLD:-3}
|
||||
GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME: ${GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME:-5.0}
|
||||
WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms}
|
||||
CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository}
|
||||
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository}
|
||||
API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository}
|
||||
API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository}
|
||||
WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false}
|
||||
WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30}
|
||||
WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100}
|
||||
WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS: ${WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS:-}
|
||||
ALIYUN_SLS_ACCESS_KEY_ID: ${ALIYUN_SLS_ACCESS_KEY_ID:-}
|
||||
ALIYUN_SLS_ACCESS_KEY_SECRET: ${ALIYUN_SLS_ACCESS_KEY_SECRET:-}
|
||||
ALIYUN_SLS_ENDPOINT: ${ALIYUN_SLS_ENDPOINT:-}
|
||||
ALIYUN_SLS_REGION: ${ALIYUN_SLS_REGION:-}
|
||||
ALIYUN_SLS_PROJECT_NAME: ${ALIYUN_SLS_PROJECT_NAME:-}
|
||||
ALIYUN_SLS_LOGSTORE_TTL: ${ALIYUN_SLS_LOGSTORE_TTL:-365}
|
||||
LOGSTORE_DUAL_WRITE_ENABLED: ${LOGSTORE_DUAL_WRITE_ENABLED:-false}
|
||||
LOGSTORE_DUAL_READ_ENABLED: ${LOGSTORE_DUAL_READ_ENABLED:-true}
|
||||
LOGSTORE_ENABLE_PUT_GRAPH_FIELD: ${LOGSTORE_ENABLE_PUT_GRAPH_FIELD:-true}
|
||||
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
|
||||
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
|
||||
HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True}
|
||||
HTTP_REQUEST_MAX_CONNECT_TIMEOUT: ${HTTP_REQUEST_MAX_CONNECT_TIMEOUT:-10}
|
||||
HTTP_REQUEST_MAX_READ_TIMEOUT: ${HTTP_REQUEST_MAX_READ_TIMEOUT:-600}
|
||||
HTTP_REQUEST_MAX_WRITE_TIMEOUT: ${HTTP_REQUEST_MAX_WRITE_TIMEOUT:-600}
|
||||
WEBHOOK_REQUEST_BODY_MAX_SIZE: ${WEBHOOK_REQUEST_BODY_MAX_SIZE:-10485760}
|
||||
RESPECT_XFORWARD_HEADERS_ENABLED: ${RESPECT_XFORWARD_HEADERS_ENABLED:-false}
|
||||
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
|
||||
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
|
||||
LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
|
||||
MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
|
||||
MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
|
||||
MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
EXPERIMENTAL_ENABLE_VINEXT: ${EXPERIMENTAL_ENABLE_VINEXT:-false}
|
||||
ALLOW_INLINE_STYLES: ${ALLOW_INLINE_STYLES:-false}
|
||||
ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false}
|
||||
MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
MYSQL_HOST_VOLUME: ${MYSQL_HOST_VOLUME:-./volumes/mysql/data}
|
||||
SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
|
||||
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
WEAVIATE_DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false}
|
||||
WEAVIATE_ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false}
|
||||
WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false}
|
||||
WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false}
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
|
||||
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
|
||||
CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
|
||||
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
|
||||
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
|
||||
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
|
||||
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
|
||||
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
|
||||
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
|
||||
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
|
||||
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
|
||||
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
|
||||
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
|
||||
MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
|
||||
PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
|
||||
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
|
||||
OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
|
||||
OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
|
||||
OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
|
||||
OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
|
||||
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
|
||||
OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
|
||||
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
|
||||
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
|
||||
NGINX_PORT: ${NGINX_PORT:-80}
|
||||
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
|
||||
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
|
||||
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
|
||||
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3}
|
||||
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
|
||||
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M}
|
||||
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
|
||||
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
|
||||
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
|
||||
CERTBOT_EMAIL: ${CERTBOT_EMAIL:-}
|
||||
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
|
||||
CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
|
||||
SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5}
|
||||
SSRF_POOL_MAX_CONNECTIONS: ${SSRF_POOL_MAX_CONNECTIONS:-100}
|
||||
SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS: ${SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS:-20}
|
||||
SSRF_POOL_KEEPALIVE_EXPIRY: ${SSRF_POOL_KEEPALIVE_EXPIRY:-5.0}
|
||||
EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
|
||||
EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
|
||||
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
|
||||
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
|
||||
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
|
||||
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
|
||||
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
|
||||
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
|
||||
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
|
||||
DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002}
|
||||
PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PLUGIN_MODEL_SCHEMA_CACHE_TTL: ${PLUGIN_MODEL_SCHEMA_CACHE_TTL:-3600}
|
||||
PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
|
||||
PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
|
||||
EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
|
||||
EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
|
||||
ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
|
||||
MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
|
||||
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
|
||||
CREATORS_PLATFORM_FEATURES_ENABLED: ${CREATORS_PLATFORM_FEATURES_ENABLED:-true}
|
||||
CREATORS_PLATFORM_API_URL: ${CREATORS_PLATFORM_API_URL:-https://creators.dify.ai}
|
||||
CREATORS_PLATFORM_OAUTH_CLIENT_ID: ${CREATORS_PLATFORM_OAUTH_CLIENT_ID:-}
|
||||
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
|
||||
ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES: ${ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES:-true}
|
||||
PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024}
|
||||
PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880}
|
||||
PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
|
||||
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
|
||||
PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0}
|
||||
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
|
||||
PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
|
||||
PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
|
||||
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
|
||||
PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
|
||||
PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
|
||||
PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
|
||||
PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
|
||||
PLUGIN_S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false}
|
||||
PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
|
||||
PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
|
||||
PLUGIN_S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
|
||||
PLUGIN_AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
|
||||
PLUGIN_AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
|
||||
PLUGIN_AWS_REGION: ${PLUGIN_AWS_REGION:-}
|
||||
PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
|
||||
PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
|
||||
PLUGIN_TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
|
||||
PLUGIN_TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
|
||||
PLUGIN_TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
|
||||
PLUGIN_ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-}
|
||||
PLUGIN_ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-}
|
||||
PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-}
|
||||
PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-}
|
||||
PLUGIN_ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4}
|
||||
PLUGIN_ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-}
|
||||
PLUGIN_VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-}
|
||||
PLUGIN_VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-}
|
||||
PLUGIN_VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-}
|
||||
PLUGIN_VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-}
|
||||
ENABLE_OTEL: ${ENABLE_OTEL:-false}
|
||||
OTLP_TRACE_ENDPOINT: ${OTLP_TRACE_ENDPOINT:-}
|
||||
OTLP_METRIC_ENDPOINT: ${OTLP_METRIC_ENDPOINT:-}
|
||||
OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318}
|
||||
OTLP_API_KEY: ${OTLP_API_KEY:-}
|
||||
OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-}
|
||||
OTEL_EXPORTER_TYPE: ${OTEL_EXPORTER_TYPE:-otlp}
|
||||
OTEL_SAMPLING_RATE: ${OTEL_SAMPLING_RATE:-0.1}
|
||||
OTEL_BATCH_EXPORT_SCHEDULE_DELAY: ${OTEL_BATCH_EXPORT_SCHEDULE_DELAY:-5000}
|
||||
OTEL_MAX_QUEUE_SIZE: ${OTEL_MAX_QUEUE_SIZE:-2048}
|
||||
OTEL_MAX_EXPORT_BATCH_SIZE: ${OTEL_MAX_EXPORT_BATCH_SIZE:-512}
|
||||
OTEL_METRIC_EXPORT_INTERVAL: ${OTEL_METRIC_EXPORT_INTERVAL:-60000}
|
||||
OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000}
|
||||
OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000}
|
||||
ALLOW_EMBED: ${ALLOW_EMBED:-false}
|
||||
QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200}
|
||||
QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-}
|
||||
QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30}
|
||||
SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-false}
|
||||
SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html}
|
||||
DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true}
|
||||
DATASET_MAX_SEGMENTS_PER_REQUEST: ${DATASET_MAX_SEGMENTS_PER_REQUEST:-0}
|
||||
ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false}
|
||||
ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false}
|
||||
ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false}
|
||||
ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false}
|
||||
ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false}
|
||||
ENABLE_WORKFLOW_RUN_CLEANUP_TASK: ${ENABLE_WORKFLOW_RUN_CLEANUP_TASK:-false}
|
||||
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: ${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false}
|
||||
ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false}
|
||||
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true}
|
||||
ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK: ${ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK:-true}
|
||||
WORKFLOW_SCHEDULE_POLLER_INTERVAL: ${WORKFLOW_SCHEDULE_POLLER_INTERVAL:-1}
|
||||
WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: ${WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE:-100}
|
||||
WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: ${WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK:-0}
|
||||
TENANT_ISOLATED_TASK_CONCURRENCY: ${TENANT_ISOLATED_TASK_CONCURRENCY:-1}
|
||||
ANNOTATION_IMPORT_FILE_SIZE_LIMIT: ${ANNOTATION_IMPORT_FILE_SIZE_LIMIT:-2}
|
||||
ANNOTATION_IMPORT_MAX_RECORDS: ${ANNOTATION_IMPORT_MAX_RECORDS:-10000}
|
||||
ANNOTATION_IMPORT_MIN_RECORDS: ${ANNOTATION_IMPORT_MIN_RECORDS:-1}
|
||||
ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE:-5}
|
||||
ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR:-20}
|
||||
ANNOTATION_IMPORT_MAX_CONCURRENT: ${ANNOTATION_IMPORT_MAX_CONCURRENT:-5}
|
||||
AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-}
|
||||
SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21}
|
||||
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000}
|
||||
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL:-200}
|
||||
SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30}
|
||||
EVENT_BUS_REDIS_URL: ${EVENT_BUS_REDIS_URL:-}
|
||||
EVENT_BUS_REDIS_CHANNEL_TYPE: ${EVENT_BUS_REDIS_CHANNEL_TYPE:-pubsub}
|
||||
EVENT_BUS_REDIS_USE_CLUSTERS: ${EVENT_BUS_REDIS_USE_CLUSTERS:-false}
|
||||
ENABLE_HUMAN_INPUT_TIMEOUT_TASK: ${ENABLE_HUMAN_INPUT_TIMEOUT_TASK:-true}
|
||||
HUMAN_INPUT_TIMEOUT_TASK_INTERVAL: ${HUMAN_INPUT_TIMEOUT_TASK_INTERVAL:-1}
|
||||
SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL:-90000}
|
||||
|
||||
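Each entry above uses Compose's `${VAR:-default}` interpolation: at render time the value is taken from the shell environment or `.env` when set, and falls back to the literal after `:-` otherwise. A minimal sketch of checking a resolved value without starting any containers (the `NGINX_PORT=8080` override is purely illustrative):

```bash
cd docker
# Render the fully interpolated config; nothing is started.
# The inline NGINX_PORT applies to this one invocation only; unset
# variables resolve to their ${VAR:-default} fallbacks instead.
NGINX_PORT=8080 docker compose config | grep "NGINX_PORT:"
```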
services:
  # Init container to fix permissions
  init_permissions:
@ -22,12 +747,9 @@ services:
  api:
    image: langgenius/dify-api:1.14.0
    restart: always
    env_file:
      # Defaults are checked into git via .env.example; user overrides go in .env.
      # Run `cp .env.example .env` once before `docker compose up`.
      - .env.example
      - .env
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
@ -73,10 +795,9 @@ services:
  worker:
    image: langgenius/dify-api:1.14.0
    restart: always
    env_file:
      - .env.example
      - .env
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing all queues.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
@ -120,10 +841,9 @@ services:
  worker_beat:
    image: langgenius/dify-api:1.14.0
    restart: always
    env_file:
      - .env.example
      - .env
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks.
      MODE: beat
    depends_on:
@ -298,10 +1018,9 @@ services:
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.6.0-local
    restart: always
    env_file:
      - .env.example
      - .env
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      DB_SSL_MODE: ${DB_SSL_MODE:-disable}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
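The `env_file` lists above are order-sensitive: Compose loads `.env.example` first and `.env` second, so later files win and `.env` only needs the values you actually change. A hedged sketch of that override flow (the `UPLOAD_FILE_SIZE_LIMIT` value is illustrative; the `touch` is there because Compose errors out if a listed env file is missing):

```bash
cd docker
touch .env                                 # must exist, even if empty
echo "UPLOAD_FILE_SIZE_LIMIT=100" >> .env  # overrides the .env.example default of 15
docker compose up -d
```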
127 docker/generate_docker_compose Executable file
@ -0,0 +1,127 @@
#!/usr/bin/env python3
import os
import re
import sys

# Variables that exist only for Docker Compose orchestration and must NOT be
# injected into containers as environment variables.
SHARED_ENV_EXCLUDE = frozenset(
    [
        # Docker Compose profile selection
        "COMPOSE_PROFILES",
        # Worker health check orchestration flags (consumed by docker-compose,
        # not by the application running inside the container)
        "COMPOSE_WORKER_HEALTHCHECK_DISABLED",
        "COMPOSE_WORKER_HEALTHCHECK_INTERVAL",
        "COMPOSE_WORKER_HEALTHCHECK_TIMEOUT",
    ]
)


def parse_env_example(file_path):
    """
    Parses the .env.example file and returns a dictionary mapping variable names to default values.
    """
    env_vars = {}
    with open(file_path, "r", encoding="utf-8") as f:
        for line_number, line in enumerate(f, 1):
            line = line.strip()
            # Ignore empty lines and comments
            if not line or line.startswith("#"):
                continue
            # Use regex to parse KEY=VALUE
            match = re.match(r"^([^=]+)=(.*)$", line)
            if match:
                key = match.group(1).strip()
                value = match.group(2).strip()
                # Remove possible quotes around the value
                if (value.startswith('"') and value.endswith('"')) or (
                    value.startswith("'") and value.endswith("'")
                ):
                    value = value[1:-1]
                env_vars[key] = value
            else:
                print(f"Warning: Unable to parse line {line_number}: {line}")
    return env_vars


def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"):
    """
    Generates a shared environment variables block as a YAML string.
    """
    lines = [f"x-shared-env: &{anchor_name}"]
    for key, default in env_vars.items():
        if key in SHARED_ENV_EXCLUDE:
            continue
        # If the default value is empty, emit ${KEY:-} so an unset variable resolves to "".
        if default == "":
            lines.append(f"  {key}: ${{{key}:-}}")
        else:
            # Defaults are emitted verbatim: values containing ":" or whitespace
            # (e.g. "TLSv1.2 TLSv1.3") remain valid inside the ${KEY:-default}
            # interpolation form, so no extra quoting is applied.
            lines.append(f"  {key}: ${{{key}:-{default}}}")
    return "\n".join(lines)


def insert_shared_env(template_path, output_path, shared_env_block, header_comments):
    """
    Inserts the shared environment variables block and header comments into the template file,
    removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file.
    Always writes with LF line endings.
    """
    with open(template_path, "r", encoding="utf-8") as f:
        template_content = f.read()

    # Remove existing x-shared-env: &shared-api-worker-env lines
    template_content = re.sub(
        r"^x-shared-env: &shared-api-worker-env\s*\n?",
        "",
        template_content,
        flags=re.MULTILINE,
    )

    # Prepare the final content with header comments and shared env block
    final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}"

    with open(output_path, "w", encoding="utf-8", newline="\n") as f:
        f.write(final_content)
    print(f"Generated {output_path}")


def main():
    env_example_path = ".env.example"
    template_path = "docker-compose-template.yaml"
    output_path = "docker-compose.yaml"
    anchor_name = "shared-api-worker-env"  # Can be modified as needed

    # Define header comments to be added at the top of docker-compose.yaml
    header_comments = (
        "# ==================================================================\n"
        "# WARNING: This file is auto-generated by generate_docker_compose\n"
        "# Do not modify this file directly. Instead, update the .env.example\n"
        "# or docker-compose-template.yaml and regenerate this file.\n"
        "# ==================================================================\n"
    )

    # Check that required files exist
    for path in [env_example_path, template_path]:
        if not os.path.isfile(path):
            print(f"Error: File {path} does not exist.")
            sys.exit(1)

    # Parse the .env.example file
    env_vars = parse_env_example(env_example_path)

    if not env_vars:
        print("Warning: No environment variables found in .env.example.")

    # Generate the shared environment variables block
    shared_env_block = generate_shared_env_block(env_vars, anchor_name)

    # Insert the shared environment variables block and header comments into the template
    insert_shared_env(template_path, output_path, shared_env_block, header_comments)


if __name__ == "__main__":
    main()
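The script takes no arguments and resolves all three paths relative to the current directory, so it is meant to be run from `docker/`. A minimal sketch of the regeneration round-trip (the `docker compose config --quiet` validation step is an optional extra, not part of the script):

```bash
cd docker
./generate_docker_compose      # rewrites docker-compose.yaml from .env.example + the template
docker compose config --quiet  # optional: exits non-zero if the generated YAML is invalid
```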