mirror of https://github.com/langgenius/dify.git (synced 2026-01-27 07:15:51 +08:00)
Compare commits
2 Commits
feature/ta... ... feat/llm-f...
| Author | SHA1 | Date |
| --- | --- | --- |
|  | bfc1583626 |  |
|  | 5db06175de |  |
4 .github/workflows/autofix.yml (vendored)

@@ -16,14 +16,14 @@ jobs:
       - name: Check Docker Compose inputs
         id: docker-compose-changes
-        uses: tj-actions/changed-files@v47
+        uses: tj-actions/changed-files@v46
         with:
           files: |
             docker/generate_docker_compose
             docker/.env.example
             docker/docker-compose-template.yaml
             docker/docker-compose.yaml
-      - uses: actions/setup-python@v6
+      - uses: actions/setup-python@v5
         with:
           python-version: "3.11"
2 .github/workflows/build-push.yml (vendored)

@@ -112,7 +112,7 @@ jobs:
           context: "web"
     steps:
       - name: Download digests
-        uses: actions/download-artifact@v7
+        uses: actions/download-artifact@v4
         with:
           path: /tmp/digests
           pattern: digests-${{ matrix.context }}-*
2 .github/workflows/deploy-agent-dev.yml (vendored)

@@ -19,7 +19,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/agent-dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v1
+        uses: appleboy/ssh-action@v0.1.8
         with:
           host: ${{ secrets.AGENT_DEV_SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
2 .github/workflows/deploy-dev.yml (vendored)

@@ -16,7 +16,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v1
+        uses: appleboy/ssh-action@v0.1.8
         with:
           host: ${{ secrets.SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
2 .github/workflows/deploy-hitl.yml (vendored)

@@ -20,7 +20,7 @@ jobs:
       )
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v1
+        uses: appleboy/ssh-action@v0.1.8
         with:
           host: ${{ secrets.HITL_SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
2 .github/workflows/stale.yml (vendored)

@@ -18,7 +18,7 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/stale@v10
+      - uses: actions/stale@v5
        with:
          days-before-issue-stale: 15
          days-before-issue-close: 3
13 .github/workflows/style.yml (vendored)

@@ -65,9 +65,6 @@ jobs:
     defaults:
       run:
         working-directory: ./web
-    permissions:
-      checks: write
-      pull-requests: read

     steps:
       - name: Checkout code
@@ -106,15 +103,7 @@ jobs:
         if: steps.changed-files.outputs.any_changed == 'true'
         working-directory: ./web
         run: |
-          pnpm run lint:report
-        continue-on-error: true
-
-      # - name: Annotate Code
-      #   if: steps.changed-files.outputs.any_changed == 'true' && github.event_name == 'pull_request'
-      #   uses: DerLev/eslint-annotations@51347b3a0abfb503fc8734d5ae31c4b151297fae
-      #   with:
-      #     eslint-report: web/eslint_report.json
-      #     github-token: ${{ secrets.GITHUB_TOKEN }}
+          pnpm run lint

       - name: Web type check
         if: steps.changed-files.outputs.any_changed == 'true'
2 .github/workflows/trigger-i18n-sync.yml (vendored)

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
@@ -12,8 +12,12 @@ The codebase is split into:

## Backend Workflow

- Read `api/AGENTS.md` for details
- Run backend CLI commands through `uv run --project api <command>`.
- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
- Integration tests are CI-only and are not expected to run in the local environment.

## Frontend Workflow
12 Makefile

@@ -61,8 +61,7 @@ check:

lint:
        @echo "🔧 Running ruff format, check with fixes, import linter, and dotenv-linter..."
-       @uv run --project api --dev ruff format ./api
-       @uv run --project api --dev ruff check --fix ./api
+       @uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
        @uv run --directory api --dev lint-imports
        @uv run --project api --dev dotenv-linter ./api/.env.example ./web/.env.example
        @echo "✅ Linting complete"
@@ -74,12 +73,7 @@ type-check:

test:
        @echo "🧪 Running backend unit tests..."
-       @if [ -n "$(TARGET_TESTS)" ]; then \
-               echo "Target: $(TARGET_TESTS)"; \
-               uv run --project api --dev pytest $(TARGET_TESTS); \
-       else \
-               uv run --project api --dev dev/pytest/pytest_unit_tests.sh; \
-       fi
+       @uv run --project api --dev dev/pytest/pytest_unit_tests.sh
        @echo "✅ Tests complete"

# Build Docker images
@@ -131,7 +125,7 @@ help:
        @echo "  make check       - Check code with ruff"
        @echo "  make lint        - Format, fix, and lint code (ruff, imports, dotenv)"
        @echo "  make type-check  - Run type checking with basedpyright"
-       @echo "  make test        - Run backend unit tests (or TARGET_TESTS=./api/tests/<target_tests>)"
+       @echo "  make test        - Run backend unit tests"
        @echo ""
        @echo "Docker Build Targets:"
        @echo "  make build-web   - Build web Docker image"
248 api/AGENTS.md

@@ -1,236 +1,62 @@
-# API Agent Guide
-
-## Agent Notes (must-check)
-
-Before you start work on any backend file under `api/`, you MUST check whether a related note exists under:
-
-- `agent-notes/<same-relative-path-as-target-file>.md`
-
-Rules:
-
-- **Path mapping**: for a target file `<path>/<name>.py`, the note must be `agent-notes/<path>/<name>.py.md` (same folder structure, same filename, plus `.md`).
-- **Before working**:
-  - If the note exists, read it first and follow any constraints/decisions recorded there.
-  - If the note conflicts with the current code, or references an "origin" file/path that has been deleted, renamed, or migrated, treat the **code as the single source of truth** and update the note to match reality.
-  - If the note does not exist, create it with a short architecture/intent summary and any relevant invariants/edge cases.
-- **During working**:
-  - Keep the note in sync as you discover constraints, make decisions, or change approach.
-  - If you move/rename a file, migrate its note to the new mapped path (and fix any outdated references inside the note).
-  - Record non-obvious edge cases, trade-offs, and the test/verification plan as you go (not just at the end).
-  - Keep notes **coherent**: integrate new findings into the relevant sections and rewrite for clarity; avoid append-only "recent fix" / changelog-style additions unless the note is explicitly intended to be a changelog.
-- **When finishing work**:
-  - Update the related note(s) to reflect what changed, why, and any new edge cases/tests.
-  - If a file is deleted, remove or clearly deprecate the corresponding note so it cannot be mistaken as current guidance.
-  - Keep notes concise and accurate; they are meant to prevent repeated rediscovery.
-
-## Skill Index
+# Agent Skill Index

 Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.

-### Platform Foundations
+______________________________________________________________________

-#### [Infrastructure Overview](agent_skills/infra.md)
+## Platform Foundations

+- **[Infrastructure Overview](agent_skills/infra.md)**\
+  When to read this:

-- **When to read this**
   - You need to understand where a feature belongs in the architecture.
   - You’re wiring storage, Redis, vector stores, or OTEL.
-  - You’re about to add CLI commands or async jobs.
-- **What it covers**
-  - Configuration stack (`configs/app_config.py`, remote settings)
-  - Storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`)
-  - Redis conventions (`extensions/ext_redis.py`)
-  - Plugin runtime topology
-  - Vector-store factory (`core/rag/datasource/vdb/*`)
-  - Observability hooks
-  - SSRF proxy usage
-  - Core CLI commands
+  - You’re about to add CLI commands or async jobs.\
+  What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.

-### Plugin & Extension Development
+- **[Coding Style](agent_skills/coding_style.md)**\
+  When to read this:

-#### [Plugin Systems](agent_skills/plugin.md)
+  - You’re writing or reviewing backend code and need the authoritative checklist.
+  - You’re unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
+  - You want the exact lint/type/test commands used in PRs.\
+  Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.

+______________________________________________________________________
+
+## Plugin & Extension Development
+
+- **[Plugin Systems](agent_skills/plugin.md)**\
+  When to read this:

-- **When to read this**
   - You’re building or debugging a marketplace plugin.
-  - You need to know how manifests, providers, daemons, and migrations fit together.
-- **What it covers**
-  - Plugin manifests (`core/plugin/entities/plugin.py`)
-  - Installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands)
-  - Runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent)
-  - Daemon coordination (`core/plugin/entities/plugin_daemon.py`)
-  - How provider registries surface capabilities to the rest of the platform
+  - You need to know how manifests, providers, daemons, and migrations fit together.\
+  What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.

-#### [Plugin OAuth](agent_skills/plugin_oauth.md)
+- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
+  When to read this:

-- **When to read this**
   - You must integrate OAuth for a plugin or datasource.
-  - You’re handling credential encryption or refresh flows.
-- **Topics**
-  - Credential storage
-  - Encryption helpers (`core/helper/provider_encryption.py`)
-  - OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`)
-  - How console/API layers expose the flows
+  - You’re handling credential encryption or refresh flows.\
+  Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.

-### Workflow Entry & Execution
+______________________________________________________________________

-#### [Trigger Concepts](agent_skills/trigger.md)
+## Workflow Entry & Execution

-- **When to read this**
+- **[Trigger Concepts](agent_skills/trigger.md)**\
+  When to read this:
   - You’re debugging why a workflow didn’t start.
   - You’re adding a new trigger type or hook.
-  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.
-- **Details**
-  - Start-node taxonomy
-  - Webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`)
-  - Async orchestration (`services/async_workflow_service.py`, Celery queues)
-  - Debug event bus
-  - Storage/logging interactions
+  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
+  Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.

-## General Reminders
+______________________________________________________________________

-- All skill docs assume you follow the coding style rules below—run the lint/type/test commands before submitting changes.
+## Additional Notes for Agents
+
+- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
 - When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
 - If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
 - Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
 - When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.
-
-## Coding Style
-
-This is the default standard for backend code in this repo. Follow it for new code and use it as the checklist when reviewing changes.
-
-### Linting & Formatting
-
-- Use Ruff for formatting and linting (follow `.ruff.toml`).
-- Keep each line under 120 characters (including spaces).
-
-### Naming Conventions
-
-- Use `snake_case` for variables and functions.
-- Use `PascalCase` for classes.
-- Use `UPPER_CASE` for constants.
-
-### Typing & Class Layout
-
-- Code should usually include type annotations that match the repo’s current Python version (avoid untyped public APIs and “mystery” values).
-- Prefer modern typing forms (e.g. `list[str]`, `dict[str, int]`) and avoid `Any` unless there’s a strong reason.
-- For classes, declare member variables at the top of the class body (before `__init__`) so the class shape is obvious at a glance:
-
-```python
-from datetime import datetime
-
-
-class Example:
-    user_id: str
-    created_at: datetime
-
-    def __init__(self, user_id: str, created_at: datetime) -> None:
-        self.user_id = user_id
-        self.created_at = created_at
-```
-
-### General Rules
-
-- Use Pydantic v2 conventions.
-- Use `uv` for Python package management in this repo (usually with `--project api`).
-- Prefer simple functions over small “utility classes” for lightweight helpers.
-- Avoid implementing dunder methods unless it’s clearly needed and matches existing patterns.
-- Never start long-running services as part of agent work (`uv run app.py`, `flask run`, etc.); running tests is allowed.
-- Keep files below ~800 lines; split when necessary.
-- Keep code readable and explicit—avoid clever hacks.
-
-### Architecture & Boundaries
-
-- Mirror the layered architecture: controller → service → core/domain.
-- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
-- Optimise for observability: deterministic control flow, clear logging, actionable errors.
-
-### Logging & Errors
-
-- Never use `print`; use a module-level logger:
-  - `logger = logging.getLogger(__name__)`
-- Include tenant/app/workflow identifiers in log context when relevant.
-- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate them into HTTP responses in controllers.
-- Log retryable events at `warning`, terminal failures at `error`.
-
-### SQLAlchemy Patterns
-
-- Models inherit from `models.base.TypeBase`; do not create ad-hoc metadata or engines.
-- Open sessions with context managers:
-
-```python
-from sqlalchemy.orm import Session
-
-with Session(db.engine, expire_on_commit=False) as session:
-    stmt = select(Workflow).where(
-        Workflow.id == workflow_id,
-        Workflow.tenant_id == tenant_id,
-    )
-    workflow = session.execute(stmt).scalar_one_or_none()
-```
-
-- Prefer SQLAlchemy expressions; avoid raw SQL unless necessary.
-- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
-- Introduce repository abstractions only for very large tables (e.g., workflow executions) or when alternative storage strategies are required.
-
-### Storage & External I/O
-
-- Access storage via `extensions.ext_storage.storage`.
-- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
-- Background tasks that touch storage must be idempotent, and should log relevant object identifiers.
-
-### Pydantic Usage
-
-- Define DTOs with Pydantic v2 models and forbid extras by default.
-- Use `@field_validator` / `@model_validator` for domain rules.
-
-Example:
-
-```python
-from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
-
-
-class TriggerConfig(BaseModel):
-    endpoint: HttpUrl
-    secret: str
-
-    model_config = ConfigDict(extra="forbid")
-
-    @field_validator("secret")
-    def ensure_secret_prefix(cls, value: str) -> str:
-        if not value.startswith("dify_"):
-            raise ValueError("secret must start with dify_")
-        return value
-```
-
-### Generics & Protocols
-
-- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
-- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
-- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
-
-### Tooling & Checks
-
-Quick checks while iterating:
-
-- Format: `make format`
-- Lint (includes auto-fix): `make lint`
-- Type check: `make type-check`
-- Targeted tests: `make test TARGET_TESTS=./api/tests/<target_tests>`
-
-Before opening a PR / submitting:
-
-- `make lint`
-- `make type-check`
-- `make test`
-
-### Controllers & Services
-
-- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
-- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
-- Document non-obvious behaviour with concise comments.
-
-### Miscellaneous
-
-- Use `configs.dify_config` for configuration—never read environment variables directly.
-- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
-- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
-- Keep experimental scripts under `dev/`; do not ship them in production builds.
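The path-mapping rule in the removed Agent Notes section is mechanical enough to express directly; a minimal sketch of the same rule (the helper name is illustrative, not from the repo):

```python
from pathlib import Path


def note_path(target: Path) -> Path:
    # Rule from the guide: agent-notes/<path>/<name>.py.md
    # (same tree, same filename, plus ".md").
    return Path("agent-notes") / (str(target) + ".md")


assert note_path(Path("core/workflow/entry.py")) == Path("agent-notes/core/workflow/entry.py.md")
```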
115 api/agent_skills/coding_style.md (Normal file)

@@ -0,0 +1,115 @@
+## Linter
+
+- Always follow `.ruff.toml`.
+- Run `uv run ruff check --fix --unsafe-fixes`.
+- Keep each line under 100 characters (including spaces).
+
+## Code Style
+
+- `snake_case` for variables and functions.
+- `PascalCase` for classes.
+- `UPPER_CASE` for constants.
+
+## Rules
+
+- Use Pydantic v2 standard.
+- Use `uv` for package management.
+- Do not override dunder methods like `__init__`, `__iadd__`, etc.
+- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
+- Prefer simple functions over classes for lightweight helpers.
+- Keep files below 800 lines; split when necessary.
+- Keep code readable—no clever hacks.
+- Never use `print`; log with `logger = logging.getLogger(__name__)`.
+
+## Guiding Principles
+
+- Mirror the project’s layered architecture: controller → service → core/domain.
+- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
+- Optimise for observability: deterministic control flow, clear logging, actionable errors.
+
+## SQLAlchemy Patterns
+
+- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.
+- Open sessions with context managers:
+
+```python
+from sqlalchemy.orm import Session
+
+with Session(db.engine, expire_on_commit=False) as session:
+    stmt = select(Workflow).where(
+        Workflow.id == workflow_id,
+        Workflow.tenant_id == tenant_id,
+    )
+    workflow = session.execute(stmt).scalar_one_or_none()
+```
+
+- Use SQLAlchemy expressions; avoid raw SQL unless necessary.
+- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.
+- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
+
+## Storage & External IO
+
+- Access storage via `extensions.ext_storage.storage`.
+- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
+- Background tasks that touch storage must be idempotent and log the relevant object identifiers.
+
+## Pydantic Usage
+
+- Define DTOs with Pydantic v2 models and forbid extras by default.
+- Use `@field_validator` / `@model_validator` for domain rules.
+- Example:
+
+```python
+from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
+
+
+class TriggerConfig(BaseModel):
+    endpoint: HttpUrl
+    secret: str
+
+    model_config = ConfigDict(extra="forbid")
+
+    @field_validator("secret")
+    def ensure_secret_prefix(cls, value: str) -> str:
+        if not value.startswith("dify_"):
+            raise ValueError("secret must start with dify_")
+        return value
+```
+
+## Generics & Protocols
+
+- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
+- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
+- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
+
+## Error Handling & Logging
+
+- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
+- Declare `logger = logging.getLogger(__name__)` at module top.
+- Include tenant/app/workflow identifiers in log context.
+- Log retryable events at `warning`, terminal failures at `error`.
+
+## Tooling & Checks
+
+- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
+- Type checks: `uv run --directory api --dev basedpyright`.
+- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
+- Run all of the above before submitting your work.
+
+## Controllers & Services
+
+- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
+- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
+- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
+- Document non-obvious behaviour with concise comments.
+
+## Miscellaneous
+
+- Use `configs.dify_config` for configuration—never read environment variables directly.
+- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
+- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
+- Keep experimental scripts under `dev/`; do not ship them in production builds.
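Unlike the SQLAlchemy and Pydantic sections above, the Generics & Protocols bullets in the new file ship without an example; a minimal self-contained sketch of the pattern they describe (all names illustrative, not from the repo):

```python
from typing import Generic, Protocol, TypeVar

T = TypeVar("T")


class Cache(Protocol[T]):
    """Behavioural contract: anything with get/set of T satisfies it."""

    def get(self, key: str) -> T | None: ...
    def set(self, key: str, value: T) -> None: ...


class InMemoryCache(Generic[T]):
    """Matches Cache[T] structurally; no inheritance required."""

    def __init__(self) -> None:
        self._data: dict[str, T] = {}

    def get(self, key: str) -> T | None:
        return self._data.get(key)

    def set(self, key: str, value: T) -> None:
        self._data[key] = value


def warm(cache: Cache[str], pairs: dict[str, str]) -> None:
    # Accepts any Cache[str] implementation, checked structurally by the type checker.
    for k, v in pairs.items():
        cache.set(k, v)
```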
@@ -3,7 +3,6 @@ import datetime
 import json
 import logging
 import secrets
 import time
 from typing import Any

 import click
@@ -47,8 +46,6 @@ from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpi
 from services.plugin.data_migration import PluginDataMigration
 from services.plugin.plugin_migration import PluginMigration
 from services.plugin.plugin_service import PluginService
-from services.retention.conversation.messages_clean_policy import create_message_clean_policy
-from services.retention.conversation.messages_clean_service import MessagesCleanService
 from services.retention.workflow_run.clear_free_plan_expired_workflow_run_logs import WorkflowRunCleanup
 from tasks.remove_app_and_related_data_task import delete_draft_variables_batch

@@ -2175,79 +2172,3 @@ def migrate_oss(
     except Exception as e:
         db.session.rollback()
         click.echo(click.style(f"Failed to update DB storage_type: {str(e)}", fg="red"))
-
-
-@click.command("clean-expired-messages", help="Clean expired messages.")
-@click.option(
-    "--start-from",
-    type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
-    required=True,
-    help="Lower bound (inclusive) for created_at.",
-)
-@click.option(
-    "--end-before",
-    type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
-    required=True,
-    help="Upper bound (exclusive) for created_at.",
-)
-@click.option("--batch-size", default=1000, show_default=True, help="Batch size for selecting messages.")
-@click.option(
-    "--graceful-period",
-    default=21,
-    show_default=True,
-    help="Graceful period in days after subscription expiration, will be ignored when billing is disabled.",
-)
-@click.option("--dry-run", is_flag=True, default=False, help="Show messages logs would be cleaned without deleting")
-def clean_expired_messages(
-    batch_size: int,
-    graceful_period: int,
-    start_from: datetime.datetime,
-    end_before: datetime.datetime,
-    dry_run: bool,
-):
-    """
-    Clean expired messages and related data for tenants based on clean policy.
-    """
-    click.echo(click.style("clean_messages: start clean messages.", fg="green"))
-
-    start_at = time.perf_counter()
-
-    try:
-        # Create policy based on billing configuration
-        # NOTE: graceful_period will be ignored when billing is disabled.
-        policy = create_message_clean_policy(graceful_period_days=graceful_period)
-
-        # Create and run the cleanup service
-        service = MessagesCleanService.from_time_range(
-            policy=policy,
-            start_from=start_from,
-            end_before=end_before,
-            batch_size=batch_size,
-            dry_run=dry_run,
-        )
-        stats = service.run()
-
-        end_at = time.perf_counter()
-        click.echo(
-            click.style(
-                f"clean_messages: completed successfully\n"
-                f"  - Latency: {end_at - start_at:.2f}s\n"
-                f"  - Batches processed: {stats['batches']}\n"
-                f"  - Total messages scanned: {stats['total_messages']}\n"
-                f"  - Messages filtered: {stats['filtered_messages']}\n"
-                f"  - Messages deleted: {stats['total_deleted']}",
-                fg="green",
-            )
-        )
-    except Exception as e:
-        end_at = time.perf_counter()
-        logger.exception("clean_messages failed")
-        click.echo(
-            click.style(
-                f"clean_messages: failed after {end_at - start_at:.2f}s - {str(e)}",
-                fg="red",
-            )
-        )
-        raise
-
-    click.echo(click.style("messages cleanup completed.", fg="green"))
@@ -30,11 +30,6 @@ class TagBindingRemovePayload(BaseModel):
     type: Literal["knowledge", "app"] | None = Field(default=None, description="Tag type")


-class TagListQueryParam(BaseModel):
-    type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
-    keyword: str | None = Field(None, description="Search keyword")
-
-
 register_schema_models(
     console_ns,
     TagBasePayload,
@@ -48,15 +43,12 @@ class TagListApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
     @console_ns.doc(
         params={"type": 'Tag type filter. Can be "knowledge" or "app".', "keyword": "Search keyword for tag name."}
     )
     @marshal_with(dataset_tag_fields)
     def get(self):
         _, current_tenant_id = current_account_with_tenant()
-        raw_args = request.args.to_dict()
-        param = TagListQueryParam.model_validate(raw_args)
-        tags = TagService.get_tags(param.type, current_tenant_id, param.keyword)
+        tag_type = request.args.get("type", type=str, default="")
+        keyword = request.args.get("keyword", default=None, type=str)
+        tags = TagService.get_tags(tag_type, current_tenant_id, keyword)

         return tags, 200
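As context for the change above, the Pydantic path validates the query string at the boundary; a self-contained sketch of that pattern (data values illustrative):

```python
from typing import Literal

from pydantic import BaseModel, Field, ValidationError


class TagListQueryParam(BaseModel):
    type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
    keyword: str | None = Field(None, description="Search keyword")


param = TagListQueryParam.model_validate({"type": "app", "keyword": "sales"})
print(param.type, param.keyword)  # -> app sales

try:
    TagListQueryParam.model_validate({"type": "bogus"})
except ValidationError as exc:
    print(exc.error_count(), "validation error")  # unknown tag types are rejected
```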
@@ -71,8 +71,8 @@ class LLMGenerator:
         response: LLMResult = model_instance.invoke_llm(
             prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
         )
-        answer = response.message.get_text_content()
-        if answer == "":
+        answer = cast(str, response.message.content)
+        if answer is None:
             return ""
         try:
             result_dict = json.loads(answer)
@@ -184,7 +184,7 @@ class LLMGenerator:
             prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
         )

-        rule_config["prompt"] = response.message.get_text_content()
+        rule_config["prompt"] = cast(str, response.message.content)

     except InvokeError as e:
         error = str(e)
@@ -237,11 +237,13 @@ class LLMGenerator:

             return rule_config

-        rule_config["prompt"] = prompt_content.message.get_text_content()
+        rule_config["prompt"] = cast(str, prompt_content.message.content)

+        if not isinstance(prompt_content.message.content, str):
+            raise NotImplementedError("prompt content is not a string")
         parameter_generate_prompt = parameter_template.format(
             inputs={
-                "INPUT_TEXT": prompt_content.message.get_text_content(),
+                "INPUT_TEXT": prompt_content.message.content,
             },
             remove_template_variables=False,
         )
@@ -251,7 +253,7 @@ class LLMGenerator:
         statement_generate_prompt = statement_template.format(
             inputs={
                 "TASK_DESCRIPTION": instruction,
-                "INPUT_TEXT": prompt_content.message.get_text_content(),
+                "INPUT_TEXT": prompt_content.message.content,
             },
             remove_template_variables=False,
         )
@@ -261,7 +263,7 @@ class LLMGenerator:
         parameter_content: LLMResult = model_instance.invoke_llm(
             prompt_messages=list(parameter_messages), model_parameters=model_parameters, stream=False
         )
-        rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.get_text_content())
+        rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', cast(str, parameter_content.message.content))
     except InvokeError as e:
         error = str(e)
         error_step = "generate variables"
@@ -270,7 +272,7 @@ class LLMGenerator:
         statement_content: LLMResult = model_instance.invoke_llm(
             prompt_messages=list(statement_messages), model_parameters=model_parameters, stream=False
         )
-        rule_config["opening_statement"] = statement_content.message.get_text_content()
+        rule_config["opening_statement"] = cast(str, statement_content.message.content)
     except InvokeError as e:
         error = str(e)
         error_step = "generate conversation opener"
@@ -313,7 +315,7 @@ class LLMGenerator:
             prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
         )

-        generated_code = response.message.get_text_content()
+        generated_code = cast(str, response.message.content)
         return {"code": generated_code, "language": code_language, "error": ""}

     except InvokeError as e:
@@ -349,7 +351,7 @@ class LLMGenerator:
             raise TypeError("Expected LLMResult when stream=False")
         response = result

-        answer = response.message.get_text_content()
+        answer = cast(str, response.message.content)
         return answer.strip()

     @classmethod
@@ -373,7 +375,10 @@ class LLMGenerator:
             prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
         )

-        raw_content = response.message.get_text_content()
+        raw_content = response.message.content

+        if not isinstance(raw_content, str):
+            raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
+
         try:
             parsed_content = json.loads(raw_content)
@@ -21,6 +21,7 @@ from core.model_runtime.model_providers.__base.speech2text_model import Speech2T
 from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
 from core.model_runtime.model_providers.__base.tts_model import TTSModel
 from core.provider_manager import ProviderManager
+from core.workflow.utils.generator_timeout import with_first_token_timeout
 from extensions.ext_redis import redis_client
 from models.provider import ProviderType
 from services.enterprise.plugin_manager_service import PluginCredentialType
@@ -109,6 +110,7 @@ class ModelInstance:
         stream: Literal[True] = True,
         user: str | None = None,
         callbacks: list[Callback] | None = None,
+        first_token_timeout: float | None = None,
     ) -> Generator: ...

     @overload
@@ -121,6 +123,7 @@ class ModelInstance:
         stream: Literal[False] = False,
         user: str | None = None,
         callbacks: list[Callback] | None = None,
+        first_token_timeout: float | None = None,
     ) -> LLMResult: ...

     @overload
@@ -133,6 +136,7 @@ class ModelInstance:
         stream: bool = True,
         user: str | None = None,
         callbacks: list[Callback] | None = None,
+        first_token_timeout: float | None = None,
     ) -> Union[LLMResult, Generator]: ...

     def invoke_llm(
@@ -144,6 +148,7 @@ class ModelInstance:
         stream: bool = True,
         user: str | None = None,
         callbacks: list[Callback] | None = None,
+        first_token_timeout: float | None = None,
     ) -> Union[LLMResult, Generator]:
         """
         Invoke large language model
@@ -155,26 +160,31 @@ class ModelInstance:
         :param stream: is stream response
         :param user: unique user id
         :param callbacks: callbacks
+        :param first_token_timeout: timeout in seconds for receiving first token (streaming only)
         :return: full response or stream response chunk generator result
         """
         if not isinstance(self.model_type_instance, LargeLanguageModel):
             raise Exception("Model type instance is not LargeLanguageModel")
-        return cast(
-            Union[LLMResult, Generator],
-            self._round_robin_invoke(
-                function=self.model_type_instance.invoke,
-                model=self.model,
-                credentials=self.credentials,
-                prompt_messages=prompt_messages,
-                model_parameters=model_parameters,
-                tools=tools,
-                stop=stop,
-                stream=stream,
-                user=user,
-                callbacks=callbacks,
-            ),
+        result = self._round_robin_invoke(
+            function=self.model_type_instance.invoke,
+            model=self.model,
+            credentials=self.credentials,
+            prompt_messages=prompt_messages,
+            model_parameters=model_parameters,
+            tools=tools,
+            stop=stop,
+            stream=stream,
+            user=user,
+            callbacks=callbacks,
         )
+
+        # Apply first token timeout wrapper for streaming responses
+        if stream and first_token_timeout and first_token_timeout > 0 and isinstance(result, Generator):
+            result = with_first_token_timeout(result, first_token_timeout)
+
+        return cast(Union[LLMResult, Generator], result)

     def get_llm_num_tokens(
         self, prompt_messages: Sequence[PromptMessage], tools: Sequence[PromptMessageTool] | None = None
     ) -> int:
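A sketch of how a caller might use the new parameter (surrounding objects are assumed to be set up as elsewhere in this diff; per `with_first_token_timeout` below, the error is raised when the first chunk arrives later than the budget, not by interrupting a blocked wait):

```python
from core.workflow.utils.generator_timeout import FirstTokenTimeoutError

# Assumed context: model_instance and prompt_messages prepared as in the code above.
chunks = model_instance.invoke_llm(
    prompt_messages=prompt_messages,
    model_parameters={"temperature": 0.7},
    stream=True,
    first_token_timeout=5.0,  # seconds; monitored only until the first chunk arrives
)
try:
    for chunk in chunks:
        print(chunk)  # consume the stream
except FirstTokenTimeoutError as exc:
    print(f"first token exceeded {exc.timeout_ms}ms")
```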
@@ -23,10 +23,22 @@ class RetryConfig(BaseModel):
     retry_interval: int = 0  # retry interval in milliseconds
     retry_enabled: bool = False  # whether retry is enabled

+    # First token timeout for LLM nodes (milliseconds), 0 means no timeout
+    first_token_timeout: int = 0
+
+    @property
+    def first_token_timeout_seconds(self) -> float:
+        return self.first_token_timeout / 1000
+
     @property
     def retry_interval_seconds(self) -> float:
         return self.retry_interval / 1000

+    @property
+    def has_first_token_timeout(self) -> bool:
+        """Check if first token timeout should be applied (retry enabled and timeout > 0)."""
+        return self.retry_enabled and self.first_token_timeout > 0
+

 class VariableSelector(BaseModel):
     """
@@ -237,6 +237,13 @@ class LLMNode(Node[LLMNodeData]):
         )

         # handle invoke result
+        # Get first token timeout from retry config if enabled (convert ms to seconds)
+        first_token_timeout = (
+            self.node_data.retry_config.first_token_timeout_seconds
+            if self.node_data.retry_config.has_first_token_timeout
+            else None
+        )
+
         generator = LLMNode.invoke_llm(
             node_data_model=self.node_data.model,
             model_instance=model_instance,
@@ -250,6 +257,7 @@ class LLMNode(Node[LLMNodeData]):
             node_id=self._node_id,
             node_type=self.node_type,
             reasoning_format=self.node_data.reasoning_format,
+            first_token_timeout=first_token_timeout,
         )

         structured_output: LLMStructuredOutput | None = None
@@ -367,6 +375,7 @@ class LLMNode(Node[LLMNodeData]):
         node_id: str,
         node_type: NodeType,
         reasoning_format: Literal["separated", "tagged"] = "tagged",
+        first_token_timeout: float | None = None,
     ) -> Generator[NodeEventBase | LLMStructuredOutput, None, None]:
         model_schema = model_instance.model_type_instance.get_model_schema(
             node_data_model.name, model_instance.credentials
@@ -400,6 +409,7 @@ class LLMNode(Node[LLMNodeData]):
             stop=list(stop or []),
             stream=True,
             user=user_id,
+            first_token_timeout=first_token_timeout,
         )

         return LLMNode.handle_invoke_result(
56 api/core/workflow/utils/generator_timeout.py (Normal file)

@@ -0,0 +1,56 @@
+"""
+Generator timeout utilities for workflow nodes.
+
+Provides timeout wrappers for streaming generators, primarily used for
+LLM response streaming where we need to enforce time-to-first-token limits.
+"""
+
+import time
+from collections.abc import Generator
+from typing import TypeVar
+
+T = TypeVar("T")
+
+
+class FirstTokenTimeoutError(Exception):
+    """Raised when a generator fails to yield its first item within the configured timeout."""
+
+    def __init__(self, timeout_ms: int):
+        self.timeout_ms = timeout_ms
+        super().__init__(f"Generator timed out after {timeout_ms}ms without yielding first item")
+
+
+def with_first_token_timeout(
+    generator: Generator[T, None, None],
+    timeout_seconds: float,
+) -> Generator[T, None, None]:
+    """
+    Wrap a generator with first token timeout monitoring.
+
+    Only monitors the time until the FIRST item is yielded.
+    Once the first item arrives, timeout monitoring stops and
+    subsequent items are yielded without timeout checks.
+
+    Args:
+        generator: The source generator to wrap
+        timeout_seconds: Maximum time to wait for first item (in seconds)
+
+    Yields:
+        Items from the source generator
+
+    Raises:
+        FirstTokenTimeoutError: If first item doesn't arrive within timeout
+    """
+    start_time = time.monotonic()
+
+    # Handle first item separately to check timeout only once
+    try:
+        first_item = next(generator)
+        if time.monotonic() - start_time > timeout_seconds:
+            raise FirstTokenTimeoutError(int(timeout_seconds * 1000))
+        yield first_item
+    except StopIteration:
+        return
+
+    # Yield remaining items without timeout checks
+    yield from generator
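A minimal, self-contained usage sketch of the new module (caller names illustrative). Note the semantics of the implementation above: `next(generator)` blocks until the upstream yields, so the error is raised when the first item arrives later than the budget rather than by interrupting the wait:

```python
import time

from core.workflow.utils.generator_timeout import FirstTokenTimeoutError, with_first_token_timeout


def slow_tokens():
    time.sleep(2)  # simulate a slow time-to-first-token
    yield "hello"
    yield "world"


wrapped = with_first_token_timeout(slow_tokens(), timeout_seconds=1.0)
try:
    for token in wrapped:
        print(token)
except FirstTokenTimeoutError as exc:
    print(f"first token exceeded budget: {exc.timeout_ms}ms")  # -> 1000ms
```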
@@ -189,7 +189,8 @@ class WorkflowEntry:
         )

         try:
-            generator = cls._traced_node_run(node)
+            # run node
+            generator = node.run()
         except Exception as e:
             logger.exception(
                 "error while running node, workflow_id=%s, node_id=%s, node_type=%s, node_version=%s",
@@ -322,7 +323,8 @@ class WorkflowEntry:
                 tenant_id=tenant_id,
             )

-            generator = cls._traced_node_run(node)
+            # run node
+            generator = node.run()

             return node, generator
         except Exception as e:
@@ -428,26 +430,3 @@ class WorkflowEntry:
             input_value = current_variable.value | input_value

         variable_pool.add([variable_node_id] + variable_key_list, input_value)
-
-    @staticmethod
-    def _traced_node_run(node: Node) -> Generator[GraphNodeEventBase, None, None]:
-        """
-        Wraps a node's run method with OpenTelemetry tracing and returns a generator.
-        """
-        # Wrap node.run() with ObservabilityLayer hooks to produce node-level spans
-        layer = ObservabilityLayer()
-        layer.on_graph_start()
-        node.ensure_execution_id()
-
-        def _gen():
-            error: Exception | None = None
-            layer.on_node_run_start(node)
-            try:
-                yield from node.run()
-            except Exception as exc:
-                error = exc
-                raise
-            finally:
-                layer.on_node_run_end(node, error)
-
-        return _gen()
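The removed `_gen` wrapper is a reusable shape: run a start hook, re-yield the stream, and guarantee the end hook observes any error. A generic sketch of the same pattern (names illustrative, not from the repo):

```python
from collections.abc import Callable, Generator


def with_hooks(
    gen: Generator,
    on_start: Callable[[], None],
    on_end: Callable[[Exception | None], None],
) -> Generator:
    # As a generator function, nothing runs until the first next() call.
    on_start()
    error: Exception | None = None
    try:
        yield from gen
    except Exception as exc:
        error = exc
        raise
    finally:
        on_end(error)  # fires on normal exhaustion, error, or early close
```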
@@ -6,7 +6,6 @@ from .create_site_record_when_app_created import handle as handle_create_site_re
 from .delete_tool_parameters_cache_when_sync_draft_workflow import (
     handle as handle_delete_tool_parameters_cache_when_sync_draft_workflow,
 )
-from .queue_credential_sync_when_tenant_created import handle as handle_queue_credential_sync_when_tenant_created
 from .sync_plugin_trigger_when_app_created import handle as handle_sync_plugin_trigger_when_app_created
 from .sync_webhook_when_app_created import handle as handle_sync_webhook_when_app_created
 from .sync_workflow_schedule_when_app_published import handle as handle_sync_workflow_schedule_when_app_published
@@ -31,7 +30,6 @@ __all__ = [
     "handle_create_installed_app_when_app_created",
     "handle_create_site_record_when_app_created",
     "handle_delete_tool_parameters_cache_when_sync_draft_workflow",
-    "handle_queue_credential_sync_when_tenant_created",
     "handle_sync_plugin_trigger_when_app_created",
     "handle_sync_webhook_when_app_created",
     "handle_sync_workflow_schedule_when_app_published",

@@ -1,19 +0,0 @@
-from configs import dify_config
-from events.tenant_event import tenant_was_created
-from services.enterprise.workspace_sync import WorkspaceSyncService
-
-
-@tenant_was_created.connect
-def handle(sender, **kwargs):
-    """Queue credential sync when a tenant/workspace is created."""
-    # Only queue sync tasks if plugin manager (enterprise feature) is enabled
-    if not dify_config.ENTERPRISE_ENABLED:
-        return
-
-    tenant = sender
-
-    # Determine source from kwargs if available, otherwise use generic
-    source = kwargs.get("source", "tenant_created")
-
-    # Queue credential sync task to Redis for enterprise backend to process
-    WorkspaceSyncService.queue_credential_sync(tenant.id, source=source)
@@ -4,7 +4,6 @@ from dify_app import DifyApp

 def init_app(app: DifyApp):
     from commands import (
         add_qdrant_index,
-        clean_expired_messages,
         clean_workflow_runs,
         cleanup_orphaned_draft_variables,
         clear_free_plan_tenant_expired_logs,
@@ -59,7 +58,6 @@ def init_app(app: DifyApp):
         transform_datasource_credentials,
         install_rag_pipeline_plugins,
         clean_workflow_runs,
-        clean_expired_messages,
     ]
     for cmd in cmds_to_register:
         app.cli.add_command(cmd)
@@ -1,33 +0,0 @@
-"""feat: add created_at id index to messages
-
-Revision ID: 3334862ee907
-Revises: 905527cc8fd3
-Create Date: 2026-01-12 17:29:44.846544
-
-"""
-from alembic import op
-import models as models
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = '3334862ee907'
-down_revision = '905527cc8fd3'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('messages', schema=None) as batch_op:
-        batch_op.create_index('message_created_at_id_idx', ['created_at', 'id'], unique=False)
-
-    # ### end Alembic commands ###
-
-
-def downgrade():
-    # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table('messages', schema=None) as batch_op:
-        batch_op.drop_index('message_created_at_id_idx')
-
-    # ### end Alembic commands ###
@@ -968,7 +968,6 @@ class Message(Base):
         Index("message_workflow_run_id_idx", "conversation_id", "workflow_run_id"),
         Index("message_created_at_idx", "created_at"),
         Index("message_app_mode_idx", "app_mode"),
-        Index("message_created_at_id_idx", "created_at", "id"),
     )

     id: Mapped[str] = mapped_column(StringUUID, default=lambda: str(uuid4()))
@@ -1,6 +1,6 @@
 [project]
 name = "dify-api"
-version = "1.11.4"
+version = "1.11.3"
 requires-python = ">=3.11,<3.13"

 dependencies = [
@@ -1,62 +1,90 @@
+import datetime
 import logging
 import time

 import click
+from sqlalchemy.exc import SQLAlchemyError

 import app
 from configs import dify_config
-from services.retention.conversation.messages_clean_policy import create_message_clean_policy
-from services.retention.conversation.messages_clean_service import MessagesCleanService
+from enums.cloud_plan import CloudPlan
+from extensions.ext_database import db
+from extensions.ext_redis import redis_client
+from models.model import (
+    App,
+    Message,
+    MessageAgentThought,
+    MessageAnnotation,
+    MessageChain,
+    MessageFeedback,
+    MessageFile,
+)
+from models.web import SavedMessage
+from services.feature_service import FeatureService

 logger = logging.getLogger(__name__)


-@app.celery.task(queue="retention")
+@app.celery.task(queue="dataset")
 def clean_messages():
-    """
-    Clean expired messages based on clean policy.
-
-    This task uses MessagesCleanService to efficiently clean messages in batches.
-    The behavior depends on BILLING_ENABLED configuration:
-    - BILLING_ENABLED=True: only delete messages from sandbox tenants (with whitelist/grace period)
-    - BILLING_ENABLED=False: delete all messages within the time range
-    """
-    click.echo(click.style("clean_messages: start clean messages.", fg="green"))
+    click.echo(click.style("Start clean messages.", fg="green"))
     start_at = time.perf_counter()
-
-    try:
-        # Create policy based on billing configuration
-        policy = create_message_clean_policy(
-            graceful_period_days=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD,
-        )
-
-        # Create and run the cleanup service
-        service = MessagesCleanService.from_days(
-            policy=policy,
-            days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS,
-            batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE,
-        )
-        stats = service.run()
-
-        end_at = time.perf_counter()
-        click.echo(
-            click.style(
-                f"clean_messages: completed successfully\n"
-                f"  - Latency: {end_at - start_at:.2f}s\n"
-                f"  - Batches processed: {stats['batches']}\n"
-                f"  - Total messages scanned: {stats['total_messages']}\n"
-                f"  - Messages filtered: {stats['filtered_messages']}\n"
-                f"  - Messages deleted: {stats['total_deleted']}",
-                fg="green",
-            )
-        )
-    except Exception as e:
-        end_at = time.perf_counter()
-        logger.exception("clean_messages failed")
-        click.echo(
-            click.style(
-                f"clean_messages: failed after {end_at - start_at:.2f}s - {str(e)}",
-                fg="red",
-            )
-        )
-        raise
+    plan_sandbox_clean_message_day = datetime.datetime.now() - datetime.timedelta(
+        days=dify_config.PLAN_SANDBOX_CLEAN_MESSAGE_DAY_SETTING
+    )
+    while True:
+        try:
+            # Main query with join and filter
+            messages = (
+                db.session.query(Message)
+                .where(Message.created_at < plan_sandbox_clean_message_day)
+                .order_by(Message.created_at.desc())
+                .limit(100)
+                .all()
+            )
+        except SQLAlchemyError:
+            raise
+        if not messages:
+            break
+        for message in messages:
+            app = db.session.query(App).filter_by(id=message.app_id).first()
+            if not app:
+                logger.warning(
+                    "Expected App record to exist, but none was found, app_id=%s, message_id=%s",
+                    message.app_id,
+                    message.id,
+                )
+                continue
+            features_cache_key = f"features:{app.tenant_id}"
+            plan_cache = redis_client.get(features_cache_key)
+            if plan_cache is None:
+                features = FeatureService.get_features(app.tenant_id)
+                redis_client.setex(features_cache_key, 600, features.billing.subscription.plan)
+                plan = features.billing.subscription.plan
+            else:
+                plan = plan_cache.decode()
+            if plan == CloudPlan.SANDBOX:
+                # clean related message
+                db.session.query(MessageFeedback).where(MessageFeedback.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(MessageAnnotation).where(MessageAnnotation.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(MessageChain).where(MessageChain.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(MessageAgentThought).where(MessageAgentThought.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(MessageFile).where(MessageFile.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(SavedMessage).where(SavedMessage.message_id == message.id).delete(
+                    synchronize_session=False
+                )
+                db.session.query(Message).where(Message.id == message.id).delete()
+                db.session.commit()
+    end_at = time.perf_counter()
+    click.echo(click.style(f"Cleaned messages from db success latency: {end_at - start_at}", fg="green"))
@@ -1,58 +0,0 @@
-import json
-import logging
-import uuid
-from datetime import UTC, datetime
-
-from redis import RedisError
-
-from extensions.ext_redis import redis_client
-
-logger = logging.getLogger(__name__)
-
-WORKSPACE_SYNC_QUEUE = "enterprise:workspace:sync:queue"
-WORKSPACE_SYNC_PROCESSING = "enterprise:workspace:sync:processing"
-
-
-class WorkspaceSyncService:
-    """Service to publish workspace sync tasks to Redis queue for enterprise backend consumption"""
-
-    @staticmethod
-    def queue_credential_sync(workspace_id: str, *, source: str) -> bool:
-        """
-        Queue a credential sync task for a newly created workspace.
-
-        This publishes a task to Redis that will be consumed by the enterprise backend
-        worker to sync credentials with the plugin-manager.
-
-        Args:
-            workspace_id: The workspace/tenant ID to sync credentials for
-            source: Source of the sync request (for debugging/tracking)
-
-        Returns:
-            bool: True if task was queued successfully, False otherwise
-        """
-        try:
-            task = {
-                "task_id": str(uuid.uuid4()),
-                "workspace_id": workspace_id,
-                "retry_count": 0,
-                "created_at": datetime.now(UTC).isoformat(),
-                "source": source,
-            }
-
-            # Push to Redis list (queue) - LPUSH adds to the head, worker consumes from tail with RPOP
-            redis_client.lpush(WORKSPACE_SYNC_QUEUE, json.dumps(task))
-
-            logger.info(
-                "Queued credential sync task for workspace %s, task_id: %s, source: %s",
-                workspace_id,
-                task["task_id"],
-                source,
-            )
-            return True
-
-        except (RedisError, TypeError) as e:
-            logger.error("Failed to queue credential sync for workspace %s: %s", workspace_id, str(e), exc_info=True)
-            # Don't raise - we don't want to fail workspace creation if queueing fails
-            # The scheduled task will catch it later
-            return False
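The worker side of the LPUSH/RPOP queue described in the comment above is not part of this diff; a hypothetical consumer for the same key (client setup assumed to match the producer):

```python
import json

from extensions.ext_redis import redis_client  # same client as the producer above

WORKSPACE_SYNC_QUEUE = "enterprise:workspace:sync:queue"


def pop_sync_task() -> dict | None:
    # Consume from the tail; the producer pushes to the head with LPUSH.
    raw = redis_client.rpop(WORKSPACE_SYNC_QUEUE)
    return json.loads(raw) if raw else None
```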
@@ -1,216 +0,0 @@
import datetime
import logging
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from dataclasses import dataclass

from configs import dify_config
from enums.cloud_plan import CloudPlan
from services.billing_service import BillingService, SubscriptionPlan

logger = logging.getLogger(__name__)


@dataclass
class SimpleMessage:
    id: str
    app_id: str
    created_at: datetime.datetime


class MessagesCleanPolicy(ABC):
    """
    Abstract base class for message cleanup policies.

    A policy determines which messages from a batch should be deleted.
    """

    @abstractmethod
    def filter_message_ids(
        self,
        messages: Sequence[SimpleMessage],
        app_to_tenant: dict[str, str],
    ) -> Sequence[str]:
        """
        Filter messages and return IDs of messages that should be deleted.

        Args:
            messages: Batch of messages to evaluate
            app_to_tenant: Mapping from app_id to tenant_id

        Returns:
            List of message IDs that should be deleted
        """
        ...


class BillingDisabledPolicy(MessagesCleanPolicy):
    """
    Policy for the community or enterprise edition (billing disabled).

    No special filter logic; just returns all message IDs.
    """

    def filter_message_ids(
        self,
        messages: Sequence[SimpleMessage],
        app_to_tenant: dict[str, str],
    ) -> Sequence[str]:
        return [msg.id for msg in messages]


class BillingSandboxPolicy(MessagesCleanPolicy):
    """
    Policy for sandbox plan tenants in the cloud edition (billing enabled).

    Filters messages based on sandbox plan expiration rules:
    - Skip tenants in the whitelist
    - Only delete messages from sandbox plan tenants
    - Respect the grace period after subscription expiration
    - Safe default: if the tenant mapping or plan is missing, do NOT delete
    """

    def __init__(
        self,
        plan_provider: Callable[[Sequence[str]], dict[str, SubscriptionPlan]],
        graceful_period_days: int = 21,
        tenant_whitelist: Sequence[str] | None = None,
        current_timestamp: int | None = None,
    ) -> None:
        self._graceful_period_days = graceful_period_days
        self._tenant_whitelist: Sequence[str] = tenant_whitelist or []
        self._plan_provider = plan_provider
        self._current_timestamp = current_timestamp

    def filter_message_ids(
        self,
        messages: Sequence[SimpleMessage],
        app_to_tenant: dict[str, str],
    ) -> Sequence[str]:
        """
        Filter messages based on sandbox plan expiration rules.

        Args:
            messages: Batch of messages to evaluate
            app_to_tenant: Mapping from app_id to tenant_id

        Returns:
            List of message IDs that should be deleted
        """
        if not messages or not app_to_tenant:
            return []

        # Get unique tenant_ids and fetch subscription plans
        tenant_ids = list(set(app_to_tenant.values()))
        tenant_plans = self._plan_provider(tenant_ids)

        if not tenant_plans:
            return []

        # Apply sandbox deletion rules
        return self._filter_expired_sandbox_messages(
            messages=messages,
            app_to_tenant=app_to_tenant,
            tenant_plans=tenant_plans,
        )

    def _filter_expired_sandbox_messages(
        self,
        messages: Sequence[SimpleMessage],
        app_to_tenant: dict[str, str],
        tenant_plans: dict[str, SubscriptionPlan],
    ) -> list[str]:
        """
        Filter messages that should be deleted based on sandbox plan expiration.

        A message should be deleted if:
        1. It belongs to a sandbox tenant, AND
        2. Either:
           a) The tenant has no previous subscription (expiration_date == -1), OR
           b) The subscription expired more than graceful_period_days ago

        Args:
            messages: List of message objects with id and app_id attributes
            app_to_tenant: Mapping from app_id to tenant_id
            tenant_plans: Mapping from tenant_id to subscription plan info

        Returns:
            List of message IDs that should be deleted
        """
        current_timestamp = self._current_timestamp
        if current_timestamp is None:
            current_timestamp = int(datetime.datetime.now(datetime.UTC).timestamp())

        sandbox_message_ids: list[str] = []
        graceful_period_seconds = self._graceful_period_days * 24 * 60 * 60

        for msg in messages:
            # Get the tenant_id for this message's app
            tenant_id = app_to_tenant.get(msg.app_id)
            if not tenant_id:
                continue

            # Skip messages from whitelisted tenants
            if tenant_id in self._tenant_whitelist:
                continue

            # Get the subscription plan for this tenant
            tenant_plan = tenant_plans.get(tenant_id)
            if not tenant_plan:
                continue

            plan = str(tenant_plan["plan"])
            expiration_date = int(tenant_plan["expiration_date"])

            # Only process sandbox plans
            if plan != CloudPlan.SANDBOX:
                continue

            # Case 1: No previous subscription (-1 means never had a paid subscription)
            if expiration_date == -1:
                sandbox_message_ids.append(msg.id)
                continue

            # Case 2: Subscription expired beyond the grace period
            if current_timestamp - expiration_date > graceful_period_seconds:
                sandbox_message_ids.append(msg.id)

        return sandbox_message_ids


def create_message_clean_policy(
    graceful_period_days: int = 21,
    current_timestamp: int | None = None,
) -> MessagesCleanPolicy:
    """
    Factory function to create the appropriate message clean policy.

    Determines which policy to use based on the BILLING_ENABLED configuration:
    - If BILLING_ENABLED is True: returns BillingSandboxPolicy
    - If BILLING_ENABLED is False: returns BillingDisabledPolicy

    Args:
        graceful_period_days: Grace period in days after subscription expiration (default: 21)
        current_timestamp: Current Unix timestamp for testing (default: None, uses current time)
    """
    if not dify_config.BILLING_ENABLED:
        logger.info("create_message_clean_policy: billing disabled, using BillingDisabledPolicy")
        return BillingDisabledPolicy()

    # Billing enabled - fetch the whitelist from BillingService
    tenant_whitelist = BillingService.get_expired_subscription_cleanup_whitelist()
    plan_provider = BillingService.get_plan_bulk_with_cache

    logger.info(
        "create_message_clean_policy: billing enabled, using BillingSandboxPolicy "
        "(graceful_period_days=%s, whitelist=%s)",
        graceful_period_days,
        tenant_whitelist,
    )

    return BillingSandboxPolicy(
        plan_provider=plan_provider,
        graceful_period_days=graceful_period_days,
        tenant_whitelist=tenant_whitelist,
        current_timestamp=current_timestamp,
    )
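
(Editor's note: a quick illustration of how the policy layer composes, using only the interfaces defined above; the inline dicts and tenant/app IDs are hypothetical stand-ins for real SubscriptionPlan values:)

# Illustrative wiring only
policy = BillingSandboxPolicy(
    plan_provider=lambda tenant_ids: {
        "tenant-a": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},  # never paid -> eligible for cleanup
    },
    graceful_period_days=21,
    current_timestamp=1_700_000_000,
)
messages = [SimpleMessage(id="m1", app_id="app-a", created_at=datetime.datetime(2024, 1, 1))]
assert list(policy.filter_message_ids(messages, app_to_tenant={"app-a": "tenant-a"})) == ["m1"]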
@@ -1,334 +0,0 @@
import datetime
import logging
import random
from collections.abc import Sequence
from typing import cast

from sqlalchemy import delete, select
from sqlalchemy.engine import CursorResult
from sqlalchemy.orm import Session

from extensions.ext_database import db
from models.model import (
    App,
    AppAnnotationHitHistory,
    DatasetRetrieverResource,
    Message,
    MessageAgentThought,
    MessageAnnotation,
    MessageChain,
    MessageFeedback,
    MessageFile,
)
from models.web import SavedMessage
from services.retention.conversation.messages_clean_policy import (
    MessagesCleanPolicy,
    SimpleMessage,
)

logger = logging.getLogger(__name__)


class MessagesCleanService:
    """
    Service for cleaning expired messages based on retention policies.

    Compatible with the non-cloud edition (billing disabled): all messages in the time range will be deleted.
    If billing is enabled: only sandbox plan tenant messages are deleted (with whitelist and grace period support).
    """

    def __init__(
        self,
        policy: MessagesCleanPolicy,
        end_before: datetime.datetime,
        start_from: datetime.datetime | None = None,
        batch_size: int = 1000,
        dry_run: bool = False,
    ) -> None:
        """
        Initialize the service with cleanup parameters.

        Args:
            policy: The policy that determines which messages to delete
            end_before: End time (exclusive) of the range
            start_from: Optional start time (inclusive) of the range
            batch_size: Number of messages to process per batch
            dry_run: Whether to perform a dry run (no actual deletion)
        """
        self._policy = policy
        self._end_before = end_before
        self._start_from = start_from
        self._batch_size = batch_size
        self._dry_run = dry_run

    @classmethod
    def from_time_range(
        cls,
        policy: MessagesCleanPolicy,
        start_from: datetime.datetime,
        end_before: datetime.datetime,
        batch_size: int = 1000,
        dry_run: bool = False,
    ) -> "MessagesCleanService":
        """
        Create a service instance for cleaning messages within a specific time range.

        The time range is [start_from, end_before).

        Args:
            policy: The policy that determines which messages to delete
            start_from: Start time (inclusive) of the range
            end_before: End time (exclusive) of the range
            batch_size: Number of messages to process per batch
            dry_run: Whether to perform a dry run (no actual deletion)

        Returns:
            MessagesCleanService instance

        Raises:
            ValueError: If start_from >= end_before or parameters are invalid
        """
        if start_from >= end_before:
            raise ValueError(f"start_from ({start_from}) must be less than end_before ({end_before})")

        if batch_size <= 0:
            raise ValueError(f"batch_size ({batch_size}) must be greater than 0")

        logger.info(
            "clean_messages: start_from=%s, end_before=%s, batch_size=%s, policy=%s",
            start_from,
            end_before,
            batch_size,
            policy.__class__.__name__,
        )

        return cls(
            policy=policy,
            end_before=end_before,
            start_from=start_from,
            batch_size=batch_size,
            dry_run=dry_run,
        )

    @classmethod
    def from_days(
        cls,
        policy: MessagesCleanPolicy,
        days: int = 30,
        batch_size: int = 1000,
        dry_run: bool = False,
    ) -> "MessagesCleanService":
        """
        Create a service instance for cleaning messages older than the specified number of days.

        Args:
            policy: The policy that determines which messages to delete
            days: Number of days to look back from now
            batch_size: Number of messages to process per batch
            dry_run: Whether to perform a dry run (no actual deletion)

        Returns:
            MessagesCleanService instance

        Raises:
            ValueError: If parameters are invalid
        """
        if days < 0:
            raise ValueError(f"days ({days}) must be greater than or equal to 0")

        if batch_size <= 0:
            raise ValueError(f"batch_size ({batch_size}) must be greater than 0")

        end_before = datetime.datetime.now() - datetime.timedelta(days=days)

        logger.info(
            "clean_messages: days=%s, end_before=%s, batch_size=%s, policy=%s",
            days,
            end_before,
            batch_size,
            policy.__class__.__name__,
        )

        return cls(policy=policy, end_before=end_before, start_from=None, batch_size=batch_size, dry_run=dry_run)

    def run(self) -> dict[str, int]:
        """
        Execute the message cleanup operation.

        Returns:
            Dict with statistics: batches, filtered_messages, total_deleted
        """
        return self._clean_messages_by_time_range()

    def _clean_messages_by_time_range(self) -> dict[str, int]:
        """
        Clean messages within a time range using cursor-based pagination.

        The time range is [start_from, end_before).

        Steps:
        1. Iterate messages using cursor pagination (by created_at, id)
        2. Query the app_id -> tenant_id mapping
        3. Delegate to the policy to determine which messages to delete
        4. Batch delete messages and their relations

        Returns:
            Dict with statistics: batches, filtered_messages, total_deleted
        """
        stats = {
            "batches": 0,
            "total_messages": 0,
            "filtered_messages": 0,
            "total_deleted": 0,
        }

        # Cursor-based pagination using (created_at, id) to avoid infinite loops
        # and ensure proper ordering with time-based filtering
        _cursor: tuple[datetime.datetime, str] | None = None

        logger.info(
            "clean_messages: start cleaning messages (dry_run=%s), start_from=%s, end_before=%s",
            self._dry_run,
            self._start_from,
            self._end_before,
        )

        while True:
            stats["batches"] += 1

            # Step 1: Fetch a batch of messages using the cursor
            with Session(db.engine, expire_on_commit=False) as session:
                msg_stmt = (
                    select(Message.id, Message.app_id, Message.created_at)
                    .where(Message.created_at < self._end_before)
                    .order_by(Message.created_at, Message.id)
                    .limit(self._batch_size)
                )

                if self._start_from:
                    msg_stmt = msg_stmt.where(Message.created_at >= self._start_from)

                # Apply the cursor condition: (created_at, id) > (last_created_at, last_message_id)
                # This translates to:
                #   created_at > last_created_at OR (created_at = last_created_at AND id > last_message_id)
                if _cursor:
                    # Continuing from the previous batch
                    msg_stmt = msg_stmt.where(
                        (Message.created_at > _cursor[0])
                        | ((Message.created_at == _cursor[0]) & (Message.id > _cursor[1]))
                    )

                raw_messages = list(session.execute(msg_stmt).all())
                messages = [
                    SimpleMessage(id=msg_id, app_id=app_id, created_at=msg_created_at)
                    for msg_id, app_id, msg_created_at in raw_messages
                ]

                # Track the total messages fetched across all batches
                stats["total_messages"] += len(messages)

                if not messages:
                    logger.info("clean_messages (batch %s): no more messages to process", stats["batches"])
                    break

                # Update the cursor to the last message's (created_at, id)
                _cursor = (messages[-1].created_at, messages[-1].id)

                # Step 2: Extract app_ids and query tenant_ids
                app_ids = list({msg.app_id for msg in messages})

                if not app_ids:
                    logger.info("clean_messages (batch %s): no app_ids found, skip", stats["batches"])
                    continue

                app_stmt = select(App.id, App.tenant_id).where(App.id.in_(app_ids))
                apps = list(session.execute(app_stmt).all())

                if not apps:
                    logger.info("clean_messages (batch %s): no apps found, skip", stats["batches"])
                    continue

                # Build the app_id -> tenant_id mapping
                app_to_tenant: dict[str, str] = {app.id: app.tenant_id for app in apps}

            # Step 3: Delegate to the policy to determine which messages to delete
            message_ids_to_delete = self._policy.filter_message_ids(messages, app_to_tenant)

            if not message_ids_to_delete:
                logger.info("clean_messages (batch %s): no messages to delete, skip", stats["batches"])
                continue

            stats["filtered_messages"] += len(message_ids_to_delete)

            # Step 4: Batch delete messages and their relations
            if not self._dry_run:
                with Session(db.engine, expire_on_commit=False) as session:
                    # Delete related records first
                    self._batch_delete_message_relations(session, message_ids_to_delete)

                    # Delete messages
                    delete_stmt = delete(Message).where(Message.id.in_(message_ids_to_delete))
                    delete_result = cast(CursorResult, session.execute(delete_stmt))
                    messages_deleted = delete_result.rowcount
                    session.commit()

                stats["total_deleted"] += messages_deleted

                logger.info(
                    "clean_messages (batch %s): processed %s messages, deleted %s messages",
                    stats["batches"],
                    len(messages),
                    messages_deleted,
                )
            else:
                # Log a random sample of message IDs that would be deleted (up to 10)
                sample_size = min(10, len(message_ids_to_delete))
                sampled_ids = random.sample(list(message_ids_to_delete), sample_size)

                logger.info(
                    "clean_messages (batch %s, dry_run): would delete %s messages, sampling %s ids:",
                    stats["batches"],
                    len(message_ids_to_delete),
                    sample_size,
                )
                for msg_id in sampled_ids:
                    logger.info("clean_messages (batch %s, dry_run) sample: message_id=%s", stats["batches"], msg_id)

        logger.info(
            "clean_messages completed: total batches: %s, total messages: %s, filtered messages: %s, total deleted: %s",
            stats["batches"],
            stats["total_messages"],
            stats["filtered_messages"],
            stats["total_deleted"],
        )

        return stats

    @staticmethod
    def _batch_delete_message_relations(session: Session, message_ids: Sequence[str]) -> None:
        """
        Batch delete all related records for the given message IDs.

        Args:
            session: Database session
            message_ids: List of message IDs to delete relations for
        """
        if not message_ids:
            return

        # Delete all related records in batch
        session.execute(delete(MessageFeedback).where(MessageFeedback.message_id.in_(message_ids)))
        session.execute(delete(MessageAnnotation).where(MessageAnnotation.message_id.in_(message_ids)))
        session.execute(delete(MessageChain).where(MessageChain.message_id.in_(message_ids)))
        session.execute(delete(MessageAgentThought).where(MessageAgentThought.message_id.in_(message_ids)))
        session.execute(delete(MessageFile).where(MessageFile.message_id.in_(message_ids)))
        session.execute(delete(SavedMessage).where(SavedMessage.message_id.in_(message_ids)))
        session.execute(delete(AppAnnotationHitHistory).where(AppAnnotationHitHistory.message_id.in_(message_ids)))
        session.execute(delete(DatasetRetrieverResource).where(DatasetRetrieverResource.message_id.in_(message_ids)))
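
(Editor's note: putting the policy and service together, a typical invocation would look roughly like this; a sketch built only from the factory methods above, with dry_run=True so nothing is deleted while previewing:)

from services.retention.conversation.messages_clean_policy import create_message_clean_policy
from services.retention.conversation.messages_clean_service import MessagesCleanService

policy = create_message_clean_policy(graceful_period_days=21)
service = MessagesCleanService.from_days(policy=policy, days=30, batch_size=1000, dry_run=True)
stats = service.run()
# stats holds batches / total_messages / filtered_messages / total_deleted;
# total_deleted stays 0 on a dry run - flip dry_run=False to actually delete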
(File diff suppressed because it is too large)
@@ -0,0 +1,395 @@
"""Tests for LLM Node first token timeout retry functionality."""

import time
from collections.abc import Generator
from unittest import mock

import pytest

from core.model_runtime.entities.llm_entities import LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage
from core.workflow.nodes.base.entities import RetryConfig
from core.workflow.utils.generator_timeout import FirstTokenTimeoutError, with_first_token_timeout


class TestRetryConfigFirstTokenTimeout:
    """Test cases for RetryConfig first token timeout fields."""

    def test_default_values(self):
        """Test that first token timeout fields have correct default values."""
        config = RetryConfig()

        assert config.first_token_timeout == 0
        assert config.has_first_token_timeout is False

    def test_has_first_token_timeout_when_retry_enabled_and_positive(self):
        """Test has_first_token_timeout returns True when retry enabled with positive timeout."""
        config = RetryConfig(
            retry_enabled=True,
            first_token_timeout=3000,  # 3000ms = 3s
        )

        assert config.has_first_token_timeout is True
        assert config.first_token_timeout_seconds == 3.0

    def test_has_first_token_timeout_when_retry_disabled(self):
        """Test has_first_token_timeout returns False when retry is disabled."""
        config = RetryConfig(
            retry_enabled=False,
            first_token_timeout=60,
        )

        assert config.has_first_token_timeout is False

    def test_has_first_token_timeout_when_zero_timeout(self):
        """Test has_first_token_timeout returns False when timeout is 0."""
        config = RetryConfig(
            retry_enabled=True,
            first_token_timeout=0,
        )

        assert config.has_first_token_timeout is False

    def test_backward_compatibility(self):
        """Test that existing workflows without first_token_timeout work correctly."""
        old_config_data = {
            "max_retries": 3,
            "retry_interval": 1000,
            "retry_enabled": True,
        }

        config = RetryConfig.model_validate(old_config_data)

        assert config.max_retries == 3
        assert config.retry_interval == 1000
        assert config.retry_enabled is True
        assert config.first_token_timeout == 0
        # has_first_token_timeout is False because the timeout is 0
        assert config.has_first_token_timeout is False

    def test_full_config_serialization(self):
        """Test that the full config can be serialized and deserialized."""
        config = RetryConfig(
            max_retries=5,
            retry_interval=2000,
            retry_enabled=True,
            first_token_timeout=120,
        )

        config_dict = config.model_dump()
        restored_config = RetryConfig.model_validate(config_dict)

        assert restored_config.max_retries == 5
        assert restored_config.retry_interval == 2000
        assert restored_config.retry_enabled is True
        assert restored_config.first_token_timeout == 120
        assert restored_config.has_first_token_timeout is True

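(Editor's note: the RetryConfig model itself is not shown in this diff. From the assertions above, its shape is roughly the following sketch; the field names and the has_first_token_timeout / first_token_timeout_seconds semantics are pinned by the tests, while the pydantic base class and the retry_interval default are assumptions:)

from pydantic import BaseModel

class RetryConfig(BaseModel):
    max_retries: int = 0
    retry_interval: int = 1000  # milliseconds; this default is a guess, not asserted above
    retry_enabled: bool = False
    first_token_timeout: int = 0  # milliseconds; 0 means disabled

    @property
    def first_token_timeout_seconds(self) -> float:
        return self.first_token_timeout / 1000

    @property
    def has_first_token_timeout(self) -> bool:
        # Only meaningful when retry is enabled and a positive timeout is set
        return self.retry_enabled and self.first_token_timeout > 0
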
class TestWithFirstTokenTimeout:
    """Test cases for the with_first_token_timeout function."""

    @staticmethod
    def _create_mock_chunk(text: str = "test") -> LLMResultChunk:
        """Helper to create a mock LLMResultChunk."""
        return LLMResultChunk(
            model="test-model",
            prompt_messages=[],
            delta=LLMResultChunkDelta(
                index=0,
                message=AssistantPromptMessage(content=text),
            ),
        )

    def test_first_token_arrives_within_timeout(self):
        """Test that chunks are yielded normally when the first token arrives in time."""

        def mock_generator() -> Generator[LLMResultChunk, None, None]:
            yield self._create_mock_chunk("Hello")
            yield self._create_mock_chunk(" world")

        wrapped = with_first_token_timeout(mock_generator(), timeout_seconds=10)
        chunks = list(wrapped)

        assert len(chunks) == 2

    def test_first_token_timeout_raises_error(self, monkeypatch):
        """Test that a timeout error is raised when the first token doesn't arrive in time."""
        call_count = 0

        def mock_monotonic():
            nonlocal call_count
            call_count += 1
            # First call: start_time = 0
            # Second call (when checking): current_time = 11 (exceeds the 10-second timeout)
            if call_count == 1:
                return 0.0
            return 11.0

        monkeypatch.setattr(time, "monotonic", mock_monotonic)

        def slow_generator() -> Generator[LLMResultChunk, None, None]:
            # This chunk arrives "after timeout"
            yield self._create_mock_chunk("Late token")

        wrapped = with_first_token_timeout(slow_generator(), timeout_seconds=10)

        with pytest.raises(FirstTokenTimeoutError) as exc_info:
            list(wrapped)

        # The error message shows milliseconds (10 seconds = 10000ms)
        assert "10000ms" in str(exc_info.value)

    def test_no_timeout_check_after_first_token(self, monkeypatch):
        """Test that subsequent chunks are not subject to the timeout after the first token is received."""
        call_count = 0

        def mock_monotonic():
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return 0.0  # start_time
            elif call_count == 2:
                return 5.0  # first token arrives at 5s (within the 10s timeout)
            else:
                # Subsequent calls simulate long delays for the remaining chunks
                # These should NOT trigger a timeout because the first token was already received
                return 100.0 + call_count

        monkeypatch.setattr(time, "monotonic", mock_monotonic)

        def generator_with_slow_subsequent_chunks() -> Generator[LLMResultChunk, None, None]:
            yield self._create_mock_chunk("First")
            yield self._create_mock_chunk("Second")
            yield self._create_mock_chunk("Third")

        wrapped = with_first_token_timeout(
            generator_with_slow_subsequent_chunks(),
            timeout_seconds=10,
        )

        # Should not raise, even though "time" passes beyond the timeout after the first token
        chunks = list(wrapped)
        assert len(chunks) == 3

    def test_empty_generator_no_error(self):
        """Test that an empty generator doesn't raise a timeout error (no chunks to check)."""

        def empty_generator() -> Generator[LLMResultChunk, None, None]:
            return
            yield  # unreachable, but makes this a generator

        wrapped = with_first_token_timeout(empty_generator(), timeout_seconds=10)
        chunks = list(wrapped)

        assert chunks == []

    def test_exact_timeout_boundary(self, monkeypatch):
        """Test behavior at the exact timeout boundary (should not raise when equal)."""
        call_count = 0

        def mock_monotonic():
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return 0.0
            # Exactly at the boundary: current_time - start_time = 10, timeout_seconds = 10
            # Since the check uses > not >=, this should NOT raise
            return 10.0

        monkeypatch.setattr(time, "monotonic", mock_monotonic)

        def generator() -> Generator[LLMResultChunk, None, None]:
            yield self._create_mock_chunk("Token at boundary")

        wrapped = with_first_token_timeout(generator(), timeout_seconds=10)

        # Should not raise because 10 is not > 10
        chunks = list(wrapped)
        assert len(chunks) == 1

    def test_just_over_timeout_boundary(self, monkeypatch):
        """Test behavior just over the timeout boundary (should raise)."""
        call_count = 0

        def mock_monotonic():
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return 0.0
            # Just over the boundary
            return 10.001

        monkeypatch.setattr(time, "monotonic", mock_monotonic)

        def generator() -> Generator[LLMResultChunk, None, None]:
            yield self._create_mock_chunk("Late token")

        wrapped = with_first_token_timeout(generator(), timeout_seconds=10)

        with pytest.raises(FirstTokenTimeoutError):
            list(wrapped)


class TestLLMNodeInvokeLLMWithTimeout:
    """Test cases for LLMNode.invoke_llm with the first_token_timeout parameter."""

    def test_invoke_llm_without_timeout(self):
        """Test that invoke_llm works normally when first_token_timeout is None."""
        from core.workflow.nodes.llm.node import LLMNode

        with mock.patch.object(LLMNode, "handle_invoke_result") as mock_handle:
            mock_handle.return_value = iter([])

            # Mock model_instance.invoke_llm to return an empty generator
            mock_model_instance = mock.MagicMock()
            mock_model_instance.invoke_llm.return_value = iter([])
            mock_model_instance.model_type_instance.get_model_schema.return_value = mock.MagicMock()

            mock_node_data_model = mock.MagicMock()
            mock_node_data_model.completion_params = {}

            result = LLMNode.invoke_llm(
                node_data_model=mock_node_data_model,
                model_instance=mock_model_instance,
                prompt_messages=[],
                user_id="test-user",
                structured_output_enabled=False,
                structured_output=None,
                file_saver=mock.MagicMock(),
                file_outputs=[],
                node_id="test-node",
                node_type=mock.MagicMock(),
                first_token_timeout=None,  # No timeout
            )

            list(result)  # Consume the generator
            mock_handle.assert_called_once()

    def test_invoke_llm_with_timeout_passes_to_model_instance(self):
        """Test that invoke_llm passes first_token_timeout to model_instance.invoke_llm."""
        from core.workflow.nodes.llm.node import LLMNode

        with mock.patch.object(LLMNode, "handle_invoke_result") as mock_handle:
            mock_handle.return_value = iter([])

            mock_model_instance = mock.MagicMock()
            mock_model_instance.invoke_llm.return_value = iter([])
            mock_model_instance.model_type_instance.get_model_schema.return_value = mock.MagicMock()

            mock_node_data_model = mock.MagicMock()
            mock_node_data_model.completion_params = {}

            result = LLMNode.invoke_llm(
                node_data_model=mock_node_data_model,
                model_instance=mock_model_instance,
                prompt_messages=[],
                user_id="test-user",
                structured_output_enabled=False,
                structured_output=None,
                file_saver=mock.MagicMock(),
                file_outputs=[],
                node_id="test-node",
                node_type=mock.MagicMock(),
                first_token_timeout=60,  # With timeout
            )

            list(result)  # Consume the generator

            # Verify model_instance.invoke_llm was called with first_token_timeout
            mock_model_instance.invoke_llm.assert_called_once()
            call_kwargs = mock_model_instance.invoke_llm.call_args.kwargs
            assert call_kwargs.get("first_token_timeout") == 60

    def test_invoke_llm_with_zero_timeout_passes_zero(self):
        """Test that invoke_llm passes a zero timeout to model_instance."""
        from core.workflow.nodes.llm.node import LLMNode

        with mock.patch.object(LLMNode, "handle_invoke_result") as mock_handle:
            mock_handle.return_value = iter([])

            mock_model_instance = mock.MagicMock()
            mock_model_instance.invoke_llm.return_value = iter([])
            mock_model_instance.model_type_instance.get_model_schema.return_value = mock.MagicMock()

            mock_node_data_model = mock.MagicMock()
            mock_node_data_model.completion_params = {}

            result = LLMNode.invoke_llm(
                node_data_model=mock_node_data_model,
                model_instance=mock_model_instance,
                prompt_messages=[],
                user_id="test-user",
                structured_output_enabled=False,
                structured_output=None,
                file_saver=mock.MagicMock(),
                file_outputs=[],
                node_id="test-node",
                node_type=mock.MagicMock(),
                first_token_timeout=0,  # Zero timeout
            )

            list(result)  # Consume the generator

            # Verify model_instance.invoke_llm was called with a zero timeout
            mock_model_instance.invoke_llm.assert_called_once()
            call_kwargs = mock_model_instance.invoke_llm.call_args.kwargs
            assert call_kwargs.get("first_token_timeout") == 0


class TestRetryConfigIntegration:
    """Integration tests for RetryConfig with LLM node data."""

    def test_retry_config_in_node_data(self):
        """Test that RetryConfig can be properly configured in LLMNodeData."""
        from core.model_runtime.entities.llm_entities import LLMMode
        from core.workflow.nodes.llm.entities import ContextConfig, LLMNodeData, ModelConfig

        node_data = LLMNodeData(
            title="Test LLM",
            model=ModelConfig(
                provider="openai",
                name="gpt-4",
                mode=LLMMode.CHAT,
                completion_params={},
            ),
            prompt_template=[],
            context=ContextConfig(enabled=False),
            structured_output_enabled=False,
            retry_config=RetryConfig(
                max_retries=3,
                retry_interval=1000,
                retry_enabled=True,
                first_token_timeout=3000,  # 3000ms = 3s
            ),
        )

        assert node_data.retry_config.max_retries == 3
        assert node_data.retry_config.retry_enabled is True
        assert node_data.retry_config.first_token_timeout == 3000
        assert node_data.retry_config.first_token_timeout_seconds == 3.0
        assert node_data.retry_config.has_first_token_timeout is True

    def test_default_retry_config_in_node_data(self):
        """Test the default RetryConfig in LLMNodeData."""
        from core.model_runtime.entities.llm_entities import LLMMode
        from core.workflow.nodes.llm.entities import ContextConfig, LLMNodeData, ModelConfig

        node_data = LLMNodeData(
            title="Test LLM",
            model=ModelConfig(
                provider="openai",
                name="gpt-4",
                mode=LLMMode.CHAT,
                completion_params={},
            ),
            prompt_template=[],
            context=ContextConfig(enabled=False),
            structured_output_enabled=False,
        )

        # Should have the default RetryConfig
        assert node_data.retry_config.max_retries == 0
        assert node_data.retry_config.retry_enabled is False
        assert node_data.retry_config.first_token_timeout == 0
        assert node_data.retry_config.has_first_token_timeout is False
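
(Editor's note: the generator_timeout implementation itself sits in the diff suppressed above. A minimal sketch consistent with the behavior these tests pin down - strict > comparison, millisecond error message, no checks after the first chunk - might look like this; the real implementation may enforce the timeout concurrently rather than checking only after a chunk arrives:)

import time
from collections.abc import Generator

class FirstTokenTimeoutError(Exception):
    """Raised when the first streamed chunk takes longer than the configured timeout."""

def with_first_token_timeout(generator, timeout_seconds):
    """Wrap a chunk generator; raise if the first chunk arrives after timeout_seconds."""
    start_time = time.monotonic()
    received_first = False
    for chunk in generator:
        if not received_first:
            # Strict >: a chunk arriving exactly at the boundary is still accepted
            if time.monotonic() - start_time > timeout_seconds:
                raise FirstTokenTimeoutError(
                    f"First token not received within {int(timeout_seconds * 1000)}ms"
                )
            received_first = True
        yield chunk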
@@ -1,627 +0,0 @@
import datetime
from unittest.mock import MagicMock, patch

import pytest

from enums.cloud_plan import CloudPlan
from services.retention.conversation.messages_clean_policy import (
    BillingDisabledPolicy,
    BillingSandboxPolicy,
    SimpleMessage,
    create_message_clean_policy,
)
from services.retention.conversation.messages_clean_service import MessagesCleanService


def make_simple_message(msg_id: str, app_id: str) -> SimpleMessage:
    """Helper to create a SimpleMessage with a fixed created_at timestamp."""
    return SimpleMessage(id=msg_id, app_id=app_id, created_at=datetime.datetime(2024, 1, 1))


def make_plan_provider(tenant_plans: dict) -> MagicMock:
    """Helper to create a mock plan_provider that returns the given tenant_plans."""
    provider = MagicMock()
    provider.return_value = tenant_plans
    return provider


class TestBillingSandboxPolicyFilterMessageIds:
    """Unit tests for the BillingSandboxPolicy.filter_message_ids method."""

    # Fixed timestamp for deterministic tests
    CURRENT_TIMESTAMP = 1000000
    GRACEFUL_PERIOD_DAYS = 8
    GRACEFUL_PERIOD_SECONDS = GRACEFUL_PERIOD_DAYS * 24 * 60 * 60

    def test_missing_tenant_mapping_excluded(self):
        """Test that messages with a missing app-to-tenant mapping are excluded."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
        ]
        app_to_tenant = {}  # No mapping
        tenant_plans = {"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1}}
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert
        assert list(result) == []

    def test_missing_tenant_plan_excluded(self):
        """Test that messages with a missing tenant plan are excluded (safe default)."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
        tenant_plans = {}  # No plans
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert
        assert list(result) == []

    def test_non_sandbox_plan_excluded(self):
        """Test that messages from non-sandbox plans (PROFESSIONAL/TEAM) are excluded."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
            make_simple_message("msg3", "app3"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.PROFESSIONAL, "expiration_date": -1},
            "tenant2": {"plan": CloudPlan.TEAM, "expiration_date": -1},
            "tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},  # Only this one
        }
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - only msg3 (sandbox tenant) should be included
        assert set(result) == {"msg3"}

    def test_whitelist_skip(self):
        """Test that whitelisted tenants are excluded even if sandbox + expired."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),  # Whitelisted - excluded
            make_simple_message("msg2", "app2"),  # Not whitelisted - included
            make_simple_message("msg3", "app3"),  # Whitelisted - excluded
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
            "tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
            "tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
        }
        plan_provider = make_plan_provider(tenant_plans)
        tenant_whitelist = ["tenant1", "tenant3"]

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            tenant_whitelist=tenant_whitelist,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - only msg2 should be included
        assert set(result) == {"msg2"}

    def test_no_previous_subscription_included(self):
        """Test that messages with expiration_date=-1 (no previous subscription) are included."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
            "tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
        }
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - all messages should be included
        assert set(result) == {"msg1", "msg2"}

    def test_within_grace_period_excluded(self):
        """Test that messages within the grace period are excluded."""
        # Arrange
        now = self.CURRENT_TIMESTAMP
        expired_1_day_ago = now - (1 * 24 * 60 * 60)
        expired_5_days_ago = now - (5 * 24 * 60 * 60)
        expired_7_days_ago = now - (7 * 24 * 60 * 60)

        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
            make_simple_message("msg3", "app3"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_1_day_ago},
            "tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_5_days_ago},
            "tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_7_days_ago},
        }
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,  # 8 days
            current_timestamp=now,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - all within the 8-day grace period, none should be included
        assert list(result) == []

    def test_exactly_at_boundary_excluded(self):
        """Test that messages exactly at the grace period boundary are excluded (the code uses >)."""
        # Arrange
        now = self.CURRENT_TIMESTAMP
        expired_exactly_8_days_ago = now - self.GRACEFUL_PERIOD_SECONDS  # Exactly at boundary

        messages = [make_simple_message("msg1", "app1")]
        app_to_tenant = {"app1": "tenant1"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_exactly_8_days_ago},
        }
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=now,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - exactly at the boundary (==) should be excluded (the code uses >)
        assert list(result) == []

    def test_beyond_grace_period_included(self):
        """Test that messages beyond the grace period are included."""
        # Arrange
        now = self.CURRENT_TIMESTAMP
        expired_9_days_ago = now - (9 * 24 * 60 * 60)  # Just beyond the 8-day grace
        expired_30_days_ago = now - (30 * 24 * 60 * 60)  # Well beyond

        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_9_days_ago},
            "tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_30_days_ago},
        }
        plan_provider = make_plan_provider(tenant_plans)

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=now,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - both beyond the grace period, should be included
        assert set(result) == {"msg1", "msg2"}

    def test_empty_messages_returns_empty(self):
        """Test that an empty message list returns an empty list."""
        # Arrange
        messages: list[SimpleMessage] = []
        app_to_tenant = {"app1": "tenant1"}
        plan_provider = make_plan_provider({"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1}})

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert
        assert list(result) == []

    def test_plan_provider_called_with_correct_tenant_ids(self):
        """Test that plan_provider is called with the correct tenant_ids."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
            make_simple_message("msg3", "app3"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant1"}  # tenant1 appears twice
        plan_provider = make_plan_provider({})

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            current_timestamp=self.CURRENT_TIMESTAMP,
        )

        # Act
        policy.filter_message_ids(messages, app_to_tenant)

        # Assert - plan_provider should be called once with unique tenant_ids
        plan_provider.assert_called_once()
        called_tenant_ids = set(plan_provider.call_args[0][0])
        assert called_tenant_ids == {"tenant1", "tenant2"}

    def test_complex_mixed_scenario(self):
        """Test a complex scenario with mixed plans, expirations, whitelist, and missing mappings."""
        # Arrange
        now = self.CURRENT_TIMESTAMP
        sandbox_expired_old = now - (15 * 24 * 60 * 60)  # Beyond grace
        sandbox_expired_recent = now - (3 * 24 * 60 * 60)  # Within grace
        future_expiration = now + (30 * 24 * 60 * 60)

        messages = [
            make_simple_message("msg1", "app1"),  # Sandbox, no subscription - included
            make_simple_message("msg2", "app2"),  # Sandbox, expired old - included
            make_simple_message("msg3", "app3"),  # Sandbox, within grace - excluded
            make_simple_message("msg4", "app4"),  # Team plan, active - excluded
            make_simple_message("msg5", "app5"),  # No tenant mapping - excluded
            make_simple_message("msg6", "app6"),  # No plan info - excluded
            make_simple_message("msg7", "app7"),  # Sandbox, expired old, whitelisted - excluded
        ]
        app_to_tenant = {
            "app1": "tenant1",
            "app2": "tenant2",
            "app3": "tenant3",
            "app4": "tenant4",
            "app6": "tenant6",  # Has mapping but no plan
            "app7": "tenant7",
            # app5 has no mapping
        }
        tenant_plans = {
            "tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
            "tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_old},
            "tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_recent},
            "tenant4": {"plan": CloudPlan.TEAM, "expiration_date": future_expiration},
            "tenant7": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_old},
            # tenant6 has no plan
        }
        plan_provider = make_plan_provider(tenant_plans)
        tenant_whitelist = ["tenant7"]

        policy = BillingSandboxPolicy(
            plan_provider=plan_provider,
            graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
            tenant_whitelist=tenant_whitelist,
            current_timestamp=now,
        )

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - only msg1 and msg2 should be included
        assert set(result) == {"msg1", "msg2"}


class TestBillingDisabledPolicyFilterMessageIds:
    """Unit tests for the BillingDisabledPolicy.filter_message_ids method."""

    def test_returns_all_message_ids(self):
        """Test that all message IDs are returned (order-preserving)."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
            make_simple_message("msg3", "app3"),
        ]
        app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}

        policy = BillingDisabledPolicy()

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - all message IDs returned in order
        assert list(result) == ["msg1", "msg2", "msg3"]

    def test_ignores_app_to_tenant(self):
        """Test that the app_to_tenant mapping is ignored."""
        # Arrange
        messages = [
            make_simple_message("msg1", "app1"),
            make_simple_message("msg2", "app2"),
        ]
        app_to_tenant: dict[str, str] = {}  # Empty - should be ignored

        policy = BillingDisabledPolicy()

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert - all message IDs still returned
        assert list(result) == ["msg1", "msg2"]

    def test_empty_messages_returns_empty(self):
        """Test that an empty message list returns an empty list."""
        # Arrange
        messages: list[SimpleMessage] = []
        app_to_tenant = {"app1": "tenant1"}

        policy = BillingDisabledPolicy()

        # Act
        result = policy.filter_message_ids(messages, app_to_tenant)

        # Assert
        assert list(result) == []


class TestCreateMessageCleanPolicy:
    """Unit tests for the create_message_clean_policy factory function."""

    @patch("services.retention.conversation.messages_clean_policy.dify_config")
    def test_billing_disabled_returns_billing_disabled_policy(self, mock_config):
        """Test that BILLING_ENABLED=False returns BillingDisabledPolicy."""
        # Arrange
        mock_config.BILLING_ENABLED = False

        # Act
        policy = create_message_clean_policy(graceful_period_days=21)

        # Assert
        assert isinstance(policy, BillingDisabledPolicy)

    @patch("services.retention.conversation.messages_clean_policy.BillingService")
    @patch("services.retention.conversation.messages_clean_policy.dify_config")
    def test_billing_enabled_policy_has_correct_internals(self, mock_config, mock_billing_service):
        """Test that BillingSandboxPolicy is created with the correct internal values."""
        # Arrange
        mock_config.BILLING_ENABLED = True
        whitelist = ["tenant1", "tenant2"]
        mock_billing_service.get_expired_subscription_cleanup_whitelist.return_value = whitelist
        mock_plan_provider = MagicMock()
        mock_billing_service.get_plan_bulk_with_cache = mock_plan_provider

        # Act
        policy = create_message_clean_policy(graceful_period_days=14, current_timestamp=1234567)

        # Assert
        mock_billing_service.get_expired_subscription_cleanup_whitelist.assert_called_once()
        assert isinstance(policy, BillingSandboxPolicy)
        assert policy._graceful_period_days == 14
        assert list(policy._tenant_whitelist) == whitelist
        assert policy._plan_provider == mock_plan_provider
        assert policy._current_timestamp == 1234567


class TestMessagesCleanServiceFromTimeRange:
    """Unit tests for the MessagesCleanService.from_time_range factory method."""

    def test_start_from_end_before_raises_value_error(self):
        """Test that start_from >= end_before raises ValueError."""
        policy = BillingDisabledPolicy()

        # Arrange
        same_time = datetime.datetime(2024, 1, 1, 12, 0, 0)

        # Act & Assert
        with pytest.raises(ValueError, match="start_from .* must be less than end_before"):
            MessagesCleanService.from_time_range(
                policy=policy,
                start_from=same_time,
                end_before=same_time,
            )

        # Arrange
        start_from = datetime.datetime(2024, 12, 31)
        end_before = datetime.datetime(2024, 1, 1)

        # Act & Assert
        with pytest.raises(ValueError, match="start_from .* must be less than end_before"):
            MessagesCleanService.from_time_range(
                policy=policy,
                start_from=start_from,
                end_before=end_before,
            )

    def test_batch_size_raises_value_error(self):
        """Test that batch_size <= 0 raises ValueError."""
        # Arrange
        start_from = datetime.datetime(2024, 1, 1)
        end_before = datetime.datetime(2024, 2, 1)
        policy = BillingDisabledPolicy()

        # Act & Assert
        with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
            MessagesCleanService.from_time_range(
                policy=policy,
                start_from=start_from,
                end_before=end_before,
                batch_size=0,
            )

        start_from = datetime.datetime(2024, 1, 1)
        end_before = datetime.datetime(2024, 2, 1)
        policy = BillingDisabledPolicy()

        # Act & Assert
        with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
            MessagesCleanService.from_time_range(
                policy=policy,
                start_from=start_from,
                end_before=end_before,
                batch_size=-100,
            )

    def test_valid_params_creates_instance(self):
        """Test that valid parameters create a correctly configured instance."""
        # Arrange
        start_from = datetime.datetime(2024, 1, 1, 0, 0, 0)
        end_before = datetime.datetime(2024, 12, 31, 23, 59, 59)
        policy = BillingDisabledPolicy()
        batch_size = 500
        dry_run = True

        # Act
        service = MessagesCleanService.from_time_range(
            policy=policy,
            start_from=start_from,
            end_before=end_before,
            batch_size=batch_size,
            dry_run=dry_run,
        )

        # Assert
        assert isinstance(service, MessagesCleanService)
        assert service._policy is policy
        assert service._start_from == start_from
        assert service._end_before == end_before
        assert service._batch_size == batch_size
        assert service._dry_run == dry_run

    def test_default_params(self):
        """Test that default parameters are applied correctly."""
        # Arrange
        start_from = datetime.datetime(2024, 1, 1)
        end_before = datetime.datetime(2024, 2, 1)
        policy = BillingDisabledPolicy()

        # Act
        service = MessagesCleanService.from_time_range(
            policy=policy,
            start_from=start_from,
            end_before=end_before,
        )

        # Assert
        assert service._batch_size == 1000  # default
        assert service._dry_run is False  # default


class TestMessagesCleanServiceFromDays:
    """Unit tests for the MessagesCleanService.from_days factory method."""

    def test_days_raises_value_error(self):
        """Test that days < 0 raises ValueError, while days=0 is allowed."""
        # Arrange
        policy = BillingDisabledPolicy()

        # Act & Assert
        with pytest.raises(ValueError, match="days .* must be greater than or equal to 0"):
            MessagesCleanService.from_days(policy=policy, days=-1)

        # Act
        with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
            fixed_now = datetime.datetime(2024, 6, 15, 14, 0, 0)
            mock_datetime.datetime.now.return_value = fixed_now
            mock_datetime.timedelta = datetime.timedelta

            service = MessagesCleanService.from_days(policy=policy, days=0)

        # Assert
        assert service._end_before == fixed_now

    def test_batch_size_raises_value_error(self):
        """Test that batch_size <= 0 raises ValueError."""
        # Arrange
        policy = BillingDisabledPolicy()

        # Act & Assert
        with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
            MessagesCleanService.from_days(policy=policy, days=30, batch_size=0)

        # Act & Assert
        with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
            MessagesCleanService.from_days(policy=policy, days=30, batch_size=-500)

    def test_valid_params_creates_instance(self):
        """Test that valid parameters create a correctly configured instance."""
        # Arrange
        policy = BillingDisabledPolicy()
        days = 90
        batch_size = 500
        dry_run = True

        # Act
        with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
            fixed_now = datetime.datetime(2024, 6, 15, 10, 30, 0)
            mock_datetime.datetime.now.return_value = fixed_now
            mock_datetime.timedelta = datetime.timedelta

            service = MessagesCleanService.from_days(
                policy=policy,
                days=days,
                batch_size=batch_size,
                dry_run=dry_run,
            )

        # Assert
        expected_end_before = fixed_now - datetime.timedelta(days=days)
        assert isinstance(service, MessagesCleanService)
        assert service._policy is policy
        assert service._start_from is None
        assert service._end_before == expected_end_before
        assert service._batch_size == batch_size
        assert service._dry_run == dry_run

    def test_default_params(self):
        """Test that default parameters are applied correctly."""
        # Arrange
        policy = BillingDisabledPolicy()

        # Act
        with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
            fixed_now = datetime.datetime(2024, 6, 15, 10, 30, 0)
            mock_datetime.datetime.now.return_value = fixed_now
            mock_datetime.timedelta = datetime.timedelta

            service = MessagesCleanService.from_days(policy=policy)

        # Assert
        expected_end_before = fixed_now - datetime.timedelta(days=30)  # default days=30
        assert service._end_before == expected_end_before
        assert service._batch_size == 1000  # default
        assert service._dry_run is False  # default
2
api/uv.lock
generated
@@ -1368,7 +1368,7 @@ wheels = [

[[package]]
name = "dify-api"
version = "1.11.4"
version = "1.11.3"
source = { virtual = "." }
dependencies = [
    { name = "aliyun-log-python-sdk" },

@ -21,7 +21,7 @@ services:

  # API service
  api:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -63,7 +63,7 @@ services:
  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -102,7 +102,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -132,7 +132,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.11.4
    image: langgenius/dify-web:1.11.3
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@ -705,7 +705,7 @@ services:

  # API service
  api:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -747,7 +747,7 @@ services:
  # worker service
  # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
  worker:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -786,7 +786,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.11.4
    image: langgenius/dify-api:1.11.3
    restart: always
    environment:
      # Use the shared environment variables.
@ -816,7 +816,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.11.4
    image: langgenius/dify-web:1.11.3
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@ -65,17 +65,15 @@ const CardView: FC<ICardViewProps> = ({ appId, isInPanel, className }) => {
      <div className="text-xs text-text-secondary">
        {t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
      </div>
      <a
        href={triggerDocUrl}
        target="_blank"
        rel="noopener noreferrer"
        className="block cursor-pointer text-xs font-medium text-text-accent hover:underline"
      <div
        className="cursor-pointer text-xs font-medium text-text-accent hover:underline"
        onClick={(event) => {
          event.stopPropagation()
          window.open(triggerDocUrl, '_blank')
        }}
      >
        {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
      </a>
      </div>
    </div>
  ), [t, triggerDocUrl])
@ -21,6 +21,7 @@ import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/
import FileUploadSetting from '@/app/components/workflow/nodes/_base/components/file-upload-setting'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import { ChangeType, InputVarType, SupportUploadFileTypes } from '@/app/components/workflow/types'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { AppModeEnum, TransferMethod } from '@/types/app'
import { checkKeys, getNewVarInWorkflow, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var'
@ -197,6 +198,8 @@ const ConfigModal: FC<IConfigModalProps> = ({
      if (type === InputVarType.multiFiles)
        draft.max_length = DEFAULT_FILE_UPLOAD_SETTING.max_length
    }
    if (type === InputVarType.paragraph)
      draft.max_length = DEFAULT_VALUE_MAX_LEN
  })
  setTempPayload(newPayload)
}, [tempPayload])
@ -15,6 +15,7 @@ import Confirm from '@/app/components/base/confirm'
import Toast from '@/app/components/base/toast'
import Tooltip from '@/app/components/base/tooltip'
import { InputVarType } from '@/app/components/workflow/types'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useModalContext } from '@/context/modal-context'
@ -57,6 +58,8 @@ const buildPromptVariableFromInput = (payload: InputVar): PromptVariable => {
    key: variable,
    name: label as string,
  }
  if (payload.type === InputVarType.textInput)
    nextItem.max_length = nextItem.max_length || DEFAULT_VALUE_MAX_LEN

  if (payload.type !== InputVarType.select)
    delete nextItem.options
@ -7,6 +7,7 @@ import Input from '@/app/components/base/input'
import Select from '@/app/components/base/select'
import Textarea from '@/app/components/base/textarea'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { cn } from '@/utils/classnames'

@ -87,7 +88,7 @@ const ChatUserInput = ({
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            placeholder={name}
            autoFocus={index === 0}
            maxLength={max_length}
            maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
          />
        )}
        {type === 'paragraph' && (
@ -114,7 +115,7 @@ const ChatUserInput = ({
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            placeholder={name}
            autoFocus={index === 0}
            maxLength={max_length}
            maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
          />
        )}
        {type === 'checkbox' && (
@ -20,6 +20,7 @@ import Select from '@/app/components/base/select'
import Textarea from '@/app/components/base/textarea'
import Tooltip from '@/app/components/base/tooltip'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { AppModeEnum, ModelModeType } from '@/types/app'
import { cn } from '@/utils/classnames'
@ -141,7 +142,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            placeholder={name}
            autoFocus={index === 0}
            maxLength={max_length}
            maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
          />
        )}
        {type === 'paragraph' && (
@ -169,7 +170,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            placeholder={name}
            autoFocus={index === 0}
            maxLength={max_length}
            maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
          />
        )}
        {type === 'checkbox' && (
@ -12,6 +12,7 @@ import { useDebounceFn } from 'ahooks'
import dynamic from 'next/dynamic'
import {
  useRouter,
  useSearchParams,
} from 'next/navigation'
import { parseAsString, useQueryState } from 'nuqs'
import { useCallback, useEffect, useRef, useState } from 'react'
@ -28,6 +29,7 @@ import { CheckModal } from '@/hooks/use-pay'
import { useInfiniteAppList } from '@/service/use-apps'
import { AppModeEnum } from '@/types/app'
import { cn } from '@/utils/classnames'
import { isServer } from '@/utils/client'
import AppCard from './app-card'
import { AppCardSkeleton } from './app-card-skeleton'
import Empty from './empty'
@ -57,6 +59,7 @@ const List = () => {
  const { t } = useTranslation()
  const { systemFeatures } = useGlobalPublicStore()
  const router = useRouter()
  const searchParams = useSearchParams()
  const { isCurrentWorkspaceEditor, isCurrentWorkspaceDatasetOperator, isLoadingCurrentWorkspace } = useAppContext()
  const showTagManagementModal = useTagStore(s => s.showTagManagementModal)
  const [activeTab, setActiveTab] = useQueryState(
@ -64,6 +67,33 @@ const List = () => {
    parseAsString.withDefault('all').withOptions({ history: 'push' }),
  )

  // valid tabs for apps list; anything else should fallback to 'all'

  // 1) Normalize legacy/incorrect query params like ?mode=discover -> ?category=all
  useEffect(() => {
    // avoid running on server
    if (isServer)
      return
    const mode = searchParams.get('mode')
    if (!mode)
      return
    const url = new URL(window.location.href)
    url.searchParams.delete('mode')
    if (validTabs.has(mode)) {
      // migrate to category key
      url.searchParams.set('category', mode)
    }
    else {
      url.searchParams.set('category', 'all')
    }
    router.replace(url.pathname + url.search)
  }, [router, searchParams])

  // 2) If category has an invalid value (e.g., 'discover'), reset to 'all'
  useEffect(() => {
    if (!validTabs.has(activeTab))
      setActiveTab('all')
  }, [activeTab, setActiveTab])
  const { query: { tagIDs = [], keywords = '', isCreatedByMe: queryIsCreatedByMe = false }, setQuery } = useAppsQueryState()
  const [isCreatedByMe, setIsCreatedByMe] = useState(queryIsCreatedByMe)
  const [tagFilterValue, setTagFilterValue] = useState<string[]>(tagIDs)
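Note: the normalization pair above leans on nuqs keeping the tab state in the URL. A minimal sketch of the behavior, assuming validTabs is a Set of strings declared near the hook (the member values below are illustrative, not taken from the diff):

import { parseAsString, useQueryState } from 'nuqs'

// Assumed whitelist, for illustration only.
const validTabs = new Set(['all', 'chat', 'workflow'])

function useCategoryTab() {
  // Reads ?category=..., falling back to 'all'; updates push a history entry.
  const [activeTab, setActiveTab] = useQueryState(
    'category',
    parseAsString.withDefault('all').withOptions({ history: 'push' }),
  )
  // Mirror of effect (2) above: anything outside the whitelist is treated as 'all'.
  const safeTab = validTabs.has(activeTab) ? activeTab : 'all'
  return [safeTab, setActiveTab] as const
}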
@ -16,7 +16,6 @@ import { Theme } from '@/types/app'
import SVGRenderer from '../svg-gallery' // Assumes svg-gallery.tsx is in /base directory

const Flowchart = dynamic(() => import('@/app/components/base/mermaid'), { ssr: false })
const QuadrantMatrix = dynamic(() => import('@/app/components/base/quadrant-matrix'), { ssr: false })

// Available language https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD
const capitalizationLanguageNameMap: Record<string, string> = {
@ -41,7 +40,6 @@ const capitalizationLanguageNameMap: Record<string, string> = {
  latex: 'Latex',
  svg: 'SVG',
  abc: 'ABC',
  quadrant: 'Quadrant',
}
const getCorrectCapitalizationLanguageName = (language: string) => {
  if (!language)
@ -411,12 +409,6 @@ const CodeBlock: any = memo(({ inline, className, children = '', ...props }: any
          <MarkdownMusic children={content} />
        </ErrorBoundary>
      )
    case 'quadrant':
      return (
        <ErrorBoundary>
          <QuadrantMatrix content={content} />
        </ErrorBoundary>
      )
    default:
      return (
        <SyntaxHighlighter
@ -1,153 +0,0 @@
'use client'
import type { FC } from 'react'
import type { QuadrantData } from './types'
import { RiExpandDiagonalLine } from '@remixicon/react'
import { useCallback, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import ActionButton from '@/app/components/base/action-button'
import FullScreenModal from '@/app/components/base/fullscreen-modal'
import QuadrantCard from './quadrant-card'
import { isValidQuadrantData, QUADRANT_CONFIGS } from './types'

type QuadrantMatrixProps = {
  content: string
}

const QuadrantMatrix: FC<QuadrantMatrixProps> = ({ content }) => {
  const { t } = useTranslation()
  const [isExpanded, setIsExpanded] = useState(false)

  const parsedData = useMemo<QuadrantData | null>(() => {
    try {
      const trimmed = content.trim()
      const data = JSON.parse(trimmed)

      if (!isValidQuadrantData(data))
        return null

      return data
    }
    catch {
      return null
    }
  }, [content])

  const handleExpand = useCallback(() => {
    setIsExpanded(true)
  }, [])

  const handleClose = useCallback(() => {
    setIsExpanded(false)
  }, [])

  if (!parsedData) {
    return (
      <div className="flex items-center justify-center rounded-xl bg-components-panel-bg-blur p-8">
        <div className="text-center text-text-secondary">
          <div className="system-md-semibold mb-2">{t('quadrantMatrix.invalidData', { ns: 'app' })}</div>
          <div className="text-sm text-text-tertiary">
            {t('quadrantMatrix.invalidDataDesc', { ns: 'app' })}
          </div>
        </div>
      </div>
    )
  }

  const totalTasks
    = parsedData.q1.length
      + parsedData.q2.length
      + parsedData.q3.length
      + parsedData.q4.length

  // Shared grid content component
  const renderGrid = (expanded: boolean) => (
    <div className="grid grid-cols-2 gap-3">
      {/* Row 1: Q1 (Do First), Q2 (Schedule) */}
      <QuadrantCard
        config={QUADRANT_CONFIGS.q1}
        tasks={parsedData.q1}
        expanded={expanded}
      />
      <QuadrantCard
        config={QUADRANT_CONFIGS.q2}
        tasks={parsedData.q2}
        expanded={expanded}
      />

      {/* Row 2: Q3 (Delegate), Q4 (Don't Do) */}
      <QuadrantCard
        config={QUADRANT_CONFIGS.q3}
        tasks={parsedData.q3}
        expanded={expanded}
      />
      <QuadrantCard
        config={QUADRANT_CONFIGS.q4}
        tasks={parsedData.q4}
        expanded={expanded}
      />
    </div>
  )

  return (
    <>
      <div className="w-full overflow-hidden rounded-xl bg-components-panel-bg-blur p-4">
        {/* Header */}
        <div className="mb-4 flex items-center justify-between">
          <div>
            <div className="system-md-semibold text-text-primary">
              {t('quadrantMatrix.title', { ns: 'app' })}
            </div>
            <div className="text-xs text-text-tertiary">
              {t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
            </div>
          </div>
          {/* Legend + Expand Button */}
          <div className="flex items-center gap-3">
            <div className="flex items-center gap-3 text-[11px] text-text-quaternary">
              <span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
              <span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
            </div>
            <ActionButton onClick={handleExpand}>
              <RiExpandDiagonalLine className="h-4 w-4" />
            </ActionButton>
          </div>
        </div>

        {/* 2x2 Grid */}
        {renderGrid(false)}
      </div>

      {/* Fullscreen Modal */}
      <FullScreenModal
        open={isExpanded}
        onClose={handleClose}
        closable
      >
        <div className="flex h-full flex-col p-6">
          {/* Modal Header */}
          <div className="mb-6 flex items-center justify-between">
            <div>
              <div className="text-xl font-semibold text-text-primary">
                {t('quadrantMatrix.title', { ns: 'app' })}
              </div>
              <div className="text-sm text-text-tertiary">
                {t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
              </div>
            </div>
            <div className="flex items-center gap-3 text-sm text-text-quaternary">
              <span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
              <span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
            </div>
          </div>

          {/* Expanded Grid */}
          <div className="min-h-0 flex-1">
            {renderGrid(true)}
          </div>
        </div>
      </FullScreenModal>
    </>
  )
}

export default QuadrantMatrix
@ -1,102 +0,0 @@
'use client'
import type { FC } from 'react'
import type { QuadrantConfig, Task } from './types'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'
import TaskItem from './task-item'

type QuadrantCardProps = {
  config: QuadrantConfig
  tasks: Task[]
  expanded?: boolean
  maxDisplay?: number
}

const QuadrantCard: FC<QuadrantCardProps> = ({
  config,
  tasks,
  expanded = false,
  maxDisplay = 3,
}) => {
  const { t } = useTranslation()
  const { number, titleKey, subtitleKey, bgClass, borderClass, titleClass } = config
  const displayLimit = expanded ? Infinity : maxDisplay
  const displayTasks = tasks.slice(0, displayLimit)
  const remainingCount = Math.max(0, tasks.length - displayLimit)

  return (
    <div
      className={cn(
        'flex min-w-0 flex-col rounded-xl border p-3',
        bgClass,
        borderClass,
        expanded ? 'min-h-[280px]' : 'min-h-[200px]',
      )}
    >
      {/* Header with numbered circle */}
      <div className="mb-2 shrink-0">
        <div className="flex items-center gap-2">
          {/* Numbered circle */}
          <span className={cn(
            'flex h-5 w-5 items-center justify-center rounded-full border text-xs font-semibold',
            borderClass,
            titleClass,
          )}
          >
            {number}
          </span>
          <span className={cn('system-sm-semibold', titleClass)}>{t(titleKey, { ns: 'app' })}</span>
          {tasks.length > 0 && (
            <span className="bg-components-badge-bg-gray rounded-full px-1.5 py-0.5 text-[10px] font-medium text-text-tertiary">
              {tasks.length}
            </span>
          )}
        </div>
        <div className="text-[11px] text-text-tertiary">{t(subtitleKey, { ns: 'app' })}</div>
      </div>

      {/* Task List */}
      <div className={cn(
        'flex min-h-0 flex-1 flex-col gap-2',
        expanded && 'overflow-y-auto',
      )}
      >
        {displayTasks.length > 0
          ? (
            displayTasks.map((task) => {
              const taskKey = [
                task.name,
                task.deadline ?? 'no-deadline',
                task.importance_score,
                task.urgency_score,
                task.description ?? '',
                task.action_advice ?? '',
              ].join('|')

              return (
                <TaskItem
                  key={taskKey}
                  task={task}
                  expanded={expanded}
                />
              )
            })
          )
          : (
            <div className="flex flex-1 items-center justify-center text-xs text-text-quaternary">
              {t('quadrantMatrix.noTasks', { ns: 'app' })}
            </div>
          )}
      </div>

      {/* More indicator (only in non-expanded mode) */}
      {!expanded && remainingCount > 0 && (
        <div className="mt-2 shrink-0 text-center text-[11px] text-text-tertiary">
          {t('quadrantMatrix.more', { ns: 'app', count: remainingCount })}
        </div>
      )}
    </div>
  )
}

export default QuadrantCard
@ -1,88 +0,0 @@
'use client'
import type { FC } from 'react'
import type { Task } from './types'
import { RiCalendarLine } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'

type TaskItemProps = {
  task: Task
  expanded?: boolean
  showScores?: boolean
}

const TaskItem: FC<TaskItemProps> = ({ task, expanded = false, showScores = true }) => {
  const { t } = useTranslation()
  const { name, description, deadline, importance_score, urgency_score, action_advice } = task

  return (
    <div className="group min-w-0 rounded-lg bg-components-panel-bg p-2.5 shadow-xs transition-all hover:shadow-sm">
      {/* Header: Task Name + Scores */}
      <div className="flex items-start justify-between gap-2">
        <div
          className={cn(
            'system-sm-medium min-w-0 flex-1 text-text-primary',
            !expanded && 'truncate',
          )}
          title={name}
        >
          {name}
        </div>
        {showScores && (
          <div className="flex shrink-0 items-center gap-1 text-[10px] font-medium">
            <span className="text-text-accent">
              I:
              {importance_score}
            </span>
            <span className="text-text-warning">
              U:
              {urgency_score}
            </span>
          </div>
        )}
      </div>

      {/* Description */}
      {description && (
        <div className={cn(
          'mt-1 text-xs text-text-tertiary',
          !expanded && 'line-clamp-2',
        )}
        >
          {description}
        </div>
      )}

      {/* Deadline Badge */}
      {deadline && (
        <div className="mt-1.5">
          <span className="bg-components-badge-bg-gray inline-flex items-center gap-1 rounded px-1.5 py-0.5 text-[10px] text-text-tertiary">
            <RiCalendarLine className="h-3 w-3" />
            <span>
              {t('quadrantMatrix.deadline', { ns: 'app' })}
              {' '}
              {deadline}
            </span>
          </span>
        </div>
      )}

      {/* Action Advice */}
      {action_advice && (
        <div className="mt-2 border-t border-divider-subtle pt-2">
          <p
            className={cn(
              'text-xs italic text-text-quaternary',
              !expanded && 'line-clamp-2',
            )}
            title={!expanded ? action_advice : undefined}
          >
            {action_advice}
          </p>
        </div>
      )}
    </div>
  )
}

export default TaskItem
@ -1,92 +0,0 @@
/**
 * Type definitions for Eisenhower Matrix (Task Quadrant) visualization
 */
import type { I18nKeysWithPrefix } from '@/types/i18n'

export type Task = {
  name: string
  description?: string
  deadline?: string // YYYY-MM-DD format
  importance_score: number // 0-100, based on goal alignment and long-term value
  urgency_score: number // 0-100, based on deadline pressure and delay penalty
  action_advice?: string // Suggested action for this task
}

export type QuadrantData = {
  q1: Task[] // Urgent & Important - Do First
  q2: Task[] // Not Urgent & Important - Schedule
  q3: Task[] // Urgent & Not Important - Delegate
  q4: Task[] // Not Urgent & Not Important - Don't Do
}

type QuadrantKeyBase = I18nKeysWithPrefix<'app', 'quadrantMatrix.q'>
type QuadrantTitleKey = Extract<QuadrantKeyBase, `${string}.title`>
type QuadrantSubtitleKey = Extract<QuadrantKeyBase, `${string}.subtitle`>

export type QuadrantConfig = {
  key: 'q1' | 'q2' | 'q3' | 'q4'
  number: number
  titleKey: QuadrantTitleKey // i18n key for title
  subtitleKey: QuadrantSubtitleKey // i18n key for subtitle
  bgClass: string
  borderClass: string
  titleClass: string
}

// Layout based on Eisenhower Matrix:
// Q1 (Do First) - top-left, Q2 (Schedule) - top-right
// Q3 (Delegate) - bottom-left, Q4 (Don't Do) - bottom-right
export const QUADRANT_CONFIGS: Record<string, QuadrantConfig> = {
  q1: {
    key: 'q1',
    number: 1,
    titleKey: 'quadrantMatrix.q1.title',
    subtitleKey: 'quadrantMatrix.q1.subtitle',
    bgClass: 'bg-state-destructive-hover',
    borderClass: 'border-state-destructive-border',
    titleClass: 'text-text-destructive',
  },
  q2: {
    key: 'q2',
    number: 2,
    titleKey: 'quadrantMatrix.q2.title',
    subtitleKey: 'quadrantMatrix.q2.subtitle',
    bgClass: 'bg-state-accent-hover',
    borderClass: 'border-state-accent-border',
    titleClass: 'text-text-accent',
  },
  q3: {
    key: 'q3',
    number: 3,
    titleKey: 'quadrantMatrix.q3.title',
    subtitleKey: 'quadrantMatrix.q3.subtitle',
    bgClass: 'bg-state-warning-hover',
    borderClass: 'border-state-warning-border',
    titleClass: 'text-text-warning',
  },
  q4: {
    key: 'q4',
    number: 4,
    titleKey: 'quadrantMatrix.q4.title',
    subtitleKey: 'quadrantMatrix.q4.subtitle',
    bgClass: 'bg-components-panel-on-panel-item-bg',
    borderClass: 'border-divider-regular',
    titleClass: 'text-text-tertiary',
  },
}

/**
 * Validates if the data structure matches QuadrantData interface
 */
export function isValidQuadrantData(data: unknown): data is QuadrantData {
  if (typeof data !== 'object' || data === null)
    return false

  const d = data as Record<string, unknown>
  return (
    Array.isArray(d.q1)
    && Array.isArray(d.q2)
    && Array.isArray(d.q3)
    && Array.isArray(d.q4)
  )
}
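Note: the "data is QuadrantData" return type makes isValidQuadrantData a TypeScript type guard, so unknown JSON can be narrowed before rendering. A minimal sketch mirroring the parsing in the component above:

// Sketch: guard untrusted model output before it reaches the matrix UI.
function parseQuadrantContent(content: string): QuadrantData | null {
  try {
    const data: unknown = JSON.parse(content.trim())
    // In the true branch, `data` is narrowed from unknown to QuadrantData.
    return isValidQuadrantData(data) ? data : null
  }
  catch {
    return null
  }
}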
@ -20,7 +20,6 @@ const SearchInput: FC<SearchInputProps> = ({
  white,
}) => {
  const { t } = useTranslation()
  const inputRef = useRef<HTMLInputElement>(null)
  const [focus, setFocus] = useState<boolean>(false)
  const isComposing = useRef<boolean>(false)
  const [compositionValue, setCompositionValue] = useState<string>('')
@ -37,7 +36,6 @@ const SearchInput: FC<SearchInputProps> = ({
        <RiSearchLine className="h-4 w-4 text-components-input-text-placeholder" aria-hidden="true" />
      </div>
      <input
        ref={inputRef}
        type="text"
        name="query"
        className={cn(
@ -67,17 +65,14 @@ const SearchInput: FC<SearchInputProps> = ({
        autoComplete="off"
      />
      {value && (
        <button
          type="button"
          aria-label={t('operation.clear', { ns: 'common' })}
          className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center border-none bg-transparent p-0"
        <div
          className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center"
          onClick={() => {
            onChange('')
            inputRef.current?.focus()
          }}
        >
          <RiCloseCircleFill className="h-4 w-4 text-text-quaternary group-hover/clear:text-text-tertiary" />
        </button>
        </div>
      )}
    </div>
  )
@ -442,10 +442,6 @@ const Completed: FC<ICompletedProps> = ({
    setFullScreen(!fullScreen)
  }, [fullScreen])

  const toggleCollapsed = useCallback(() => {
    setIsCollapsed(prev => !prev)
  }, [])

  const viewNewlyAddedChunk = useCallback(async () => {
    const totalPages = segmentListData?.total_pages || 0
    const total = segmentListData?.total || 0
@ -582,16 +578,15 @@ const Completed: FC<ICompletedProps> = ({
    return selectedStatus ? 1 : 0
  }, [selectedStatus])

  const contextValue = useMemo<SegmentListContextValue>(() => ({
    isCollapsed,
    fullScreen,
    toggleFullScreen,
    currSegment,
    currChildChunk,
  }), [isCollapsed, fullScreen, toggleFullScreen, currSegment, currChildChunk])

  return (
    <SegmentListContext.Provider value={contextValue}>
    <SegmentListContext.Provider value={{
      isCollapsed,
      fullScreen,
      toggleFullScreen,
      currSegment,
      currChildChunk,
    }}
    >
      {/* Menu Bar */}
      {!isFullDocMode && (
        <div className={s.docSearchWrapper}>
@ -623,7 +618,7 @@ const Completed: FC<ICompletedProps> = ({
            onClear={() => handleInputChange('')}
          />
          <Divider type="vertical" className="mx-3 h-3.5" />
          <DisplayToggle isCollapsed={isCollapsed} toggleCollapsed={toggleCollapsed} />
          <DisplayToggle isCollapsed={isCollapsed} toggleCollapsed={() => setIsCollapsed(!isCollapsed)} />
        </div>
      )}
      {/* Segment list */}
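Note: the change above swaps an inline provider object for a memoized one. An object literal passed straight to value={...} gets a new identity on every render, which forces every context consumer to re-render; useMemo keeps the identity stable until a listed dependency actually changes. A minimal sketch of the pattern:

import { useMemo } from 'react'

// Sketch: provider value identity only changes when a field changes.
function useStableContextValue(isCollapsed: boolean, fullScreen: boolean) {
  return useMemo(
    () => ({ isCollapsed, fullScreen }),
    [isCollapsed, fullScreen],
  )
}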
@ -1,5 +1,4 @@
import type { FC } from 'react'
import type { SegmentListContextValue } from '..'
import * as React from 'react'
import { Markdown } from '@/app/components/base/markdown'
import { cn } from '@/utils/classnames'
@ -15,15 +14,13 @@ type ChunkContentProps = {
  className?: string
}

const selectIsCollapsed = (s: SegmentListContextValue) => s.isCollapsed

const ChunkContent: FC<ChunkContentProps> = ({
  detail,
  isFullDocMode,
  className,
}) => {
  const { answer, content, sign_content } = detail
  const isCollapsed = useSegmentListContext(selectIsCollapsed)
  const isCollapsed = useSegmentListContext(s => s.isCollapsed)

  if (answer) {
    return (
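Note: hoisting selectIsCollapsed to module scope is most likely about identity: the selector function is created once instead of on every render, which suits selector-based context hooks and memoization-aware lint rules. A sketch of the idiom (the hook name follows the diff; its internals are assumed):

// Module-level selector: one stable function identity for the whole app.
const selectIsCollapsed = (s: SegmentListContextValue) => s.isCollapsed

// In the component, the hook always receives the same function reference:
const isCollapsed = useSegmentListContext(selectIsCollapsed)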
@ -2,9 +2,9 @@

import type { INavSelectorProps } from './nav-selector'
import Link from 'next/link'
import { useSelectedLayoutSegment } from 'next/navigation'
import { usePathname, useSearchParams, useSelectedLayoutSegment } from 'next/navigation'
import * as React from 'react'
import { useState } from 'react'
import { useEffect, useState } from 'react'
import { useStore as useAppStore } from '@/app/components/app/store'
import { ArrowNarrowLeft } from '@/app/components/base/icons/src/vender/line/arrows'
import { cn } from '@/utils/classnames'
@ -36,6 +36,14 @@ const Nav = ({
  const [hovered, setHovered] = useState(false)
  const segment = useSelectedLayoutSegment()
  const isActivated = Array.isArray(activeSegment) ? activeSegment.includes(segment!) : segment === activeSegment
  const pathname = usePathname()
  const searchParams = useSearchParams()
  const [linkLastSearchParams, setLinkLastSearchParams] = useState('')

  useEffect(() => {
    if (pathname === link)
      setLinkLastSearchParams(searchParams.toString())
  }, [pathname, searchParams])

  return (
    <div className={`
@ -44,7 +52,7 @@
      ${!curNav && !isActivated && 'hover:bg-components-main-nav-nav-button-bg-hover'}
    `}
    >
      <Link href={link}>
      <Link href={link + (linkLastSearchParams && `?${linkLastSearchParams}`)}>
        <div
          onClick={(e) => {
            // Don't clear state if opening in new tab/window
@ -1,3 +1,4 @@
import type { ActivePluginType } from './constants'
import type { PluginsSort, SearchParamsFromCollection } from './types'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { useQueryState } from 'nuqs'
@ -16,14 +17,32 @@ export function useSetMarketplaceSort() {
  return useSetAtom(marketplaceSortAtom)
}

/**
 * Preserve the state for marketplace
 */
export const preserveSearchStateInQueryAtom = atom<boolean>(false)

const searchPluginTextAtom = atom<string>('')
const activePluginTypeAtom = atom<ActivePluginType>('all')
const filterPluginTagsAtom = atom<string[]>([])

export function useSearchPluginText() {
  return useQueryState('q', marketplaceSearchParamsParsers.q)
  const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
  const queryState = useQueryState('q', marketplaceSearchParamsParsers.q)
  const atomState = useAtom(searchPluginTextAtom)
  return preserveSearchStateInQuery ? queryState : atomState
}
export function useActivePluginType() {
  return useQueryState('category', marketplaceSearchParamsParsers.category)
  const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
  const queryState = useQueryState('category', marketplaceSearchParamsParsers.category)
  const atomState = useAtom(activePluginTypeAtom)
  return preserveSearchStateInQuery ? queryState : atomState
}
export function useFilterPluginTags() {
  return useQueryState('tags', marketplaceSearchParamsParsers.tags)
  const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
  const queryState = useQueryState('tags', marketplaceSearchParamsParsers.tags)
  const atomState = useAtom(filterPluginTagsAtom)
  return preserveSearchStateInQuery ? queryState : atomState
}

/**
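Note: the rewritten hooks above work because useQueryState and jotai's useAtom both return a [value, setValue] tuple, so URL-backed and in-memory state are interchangeable behind one interface; both hooks are still called unconditionally, which respects the rules of hooks. A simplified sketch (the parser is reduced to parseAsString for illustration; the real code uses marketplaceSearchParamsParsers):

import { atom, useAtom } from 'jotai'
import { parseAsString, useQueryState } from 'nuqs'

const searchTextAtom = atom<string>('')

// Sketch: callers get the same tuple shape from either backing store.
function useSearchText(preserveInQuery: boolean) {
  const queryState = useQueryState('q', parseAsString.withDefault(''))
  const atomState = useAtom(searchTextAtom)
  return preserveInQuery ? queryState : atomState
}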
15
web/app/components/plugins/marketplace/hydration-client.tsx
Normal file
@ -0,0 +1,15 @@
'use client'

import { useHydrateAtoms } from 'jotai/utils'
import { preserveSearchStateInQueryAtom } from './atoms'

export function HydrateMarketplaceAtoms({
  preserveSearchStateInQuery,
  children,
}: {
  preserveSearchStateInQuery: boolean
  children: React.ReactNode
}) {
  useHydrateAtoms([[preserveSearchStateInQueryAtom, preserveSearchStateInQuery]])
  return <>{children}</>
}
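Note: useHydrateAtoms (from jotai/utils) writes the given atom/value pairs on the first render only, so a server component can seed client-side jotai state without an effect. A hedged usage sketch (the wrapper name below is illustrative):

// Sketch: the flag hydrates once on mount; later renders leave the atom alone.
export function MarketplaceProviders({ children }: { children: React.ReactNode }) {
  return (
    <HydrateMarketplaceAtoms preserveSearchStateInQuery>
      {children}
    </HydrateMarketplaceAtoms>
  )
}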
@ -1,6 +1,7 @@
import type { SearchParams } from 'nuqs'
import { TanstackQueryInitializer } from '@/context/query-client'
import Description from './description'
import { HydrateMarketplaceAtoms } from './hydration-client'
import { HydrateQueryClient } from './hydration-server'
import ListWrapper from './list/list-wrapper'
import StickySearchAndSwitchWrapper from './sticky-search-and-switch-wrapper'
@ -9,7 +10,8 @@ type MarketplaceProps = {
  showInstallButton?: boolean
  pluginTypeSwitchClassName?: string
  /**
   * Pass the search params from the request to prefetch data on the server.
   * Pass the search params from the request to prefetch data on the server
   * and preserve the search params in the URL.
   */
  searchParams?: Promise<SearchParams>
}
@ -22,13 +24,15 @@ const Marketplace = async ({
  return (
    <TanstackQueryInitializer>
      <HydrateQueryClient searchParams={searchParams}>
        <Description />
        <StickySearchAndSwitchWrapper
          pluginTypeSwitchClassName={pluginTypeSwitchClassName}
        />
        <ListWrapper
          showInstallButton={showInstallButton}
        />
        <HydrateMarketplaceAtoms preserveSearchStateInQuery={!!searchParams}>
          <Description />
          <StickySearchAndSwitchWrapper
            pluginTypeSwitchClassName={pluginTypeSwitchClassName}
          />
          <ListWrapper
            showInstallButton={showInstallButton}
          />
        </HydrateMarketplaceAtoms>
      </HydrateQueryClient>
    </TanstackQueryInitializer>
  )
@ -68,7 +68,7 @@ export const PluginPageContextProvider = ({
  const options = useMemo(() => {
    return enable_marketplace ? tabs : tabs.filter(tab => tab.value !== PLUGIN_PAGE_TABS_MAP.marketplace)
  }, [tabs, enable_marketplace])
  const [activeTab, setActiveTab] = useQueryState('tab', {
  const [activeTab, setActiveTab] = useQueryState('category', {
    defaultValue: options[0].value,
  })
@ -6,6 +6,7 @@ import { useTranslation } from 'react-i18next'
import { useFileSizeLimit } from '@/app/components/base/file-uploader/hooks'
import { InputFieldType } from '@/app/components/base/form/form-scenarios/input-field/types'
import { DEFAULT_FILE_UPLOAD_SETTING } from '@/app/components/workflow/constants'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import { PipelineInputVarType } from '@/models/pipeline'
import { useFileUploadConfig } from '@/service/use-common'
import { formatFileSize } from '@/utils/format'
@ -86,6 +87,8 @@ export const useConfigurations = (props: {
    if (type === PipelineInputVarType.multiFiles)
      setFieldValue('maxLength', DEFAULT_FILE_UPLOAD_SETTING.max_length)
  }
  if (type === PipelineInputVarType.paragraph)
    setFieldValue('maxLength', DEFAULT_VALUE_MAX_LEN)
}, [setFieldValue])

const handleVariableNameBlur = useCallback((value: string) => {
@ -779,6 +779,27 @@ describe('useConfigurations', () => {
    expect(mockSetFieldValue).toHaveBeenCalledWith('maxLength', expect.any(Number))
  })

  it('should call setFieldValue when type changes to paragraph', () => {
    // Arrange
    const mockGetFieldValue = vi.fn()
    const mockSetFieldValue = vi.fn()

    const { result } = renderHookWithProviders(() =>
      useConfigurations({
        getFieldValue: mockGetFieldValue,
        setFieldValue: mockSetFieldValue,
        supportFile: false,
      }),
    )

    // Act
    const typeConfig = result.current.find(config => config.variable === 'type')
    typeConfig?.listeners?.onChange?.(createMockEvent(PipelineInputVarType.paragraph))

    // Assert
    expect(mockSetFieldValue).toHaveBeenCalledWith('maxLength', 48) // DEFAULT_VALUE_MAX_LEN
  })

  it('should set label from variable name on blur when label is empty', () => {
    // Arrange
    const mockGetFieldValue = vi.fn().mockReturnValue('')
@ -26,7 +26,7 @@ import DifyLogo from '@/app/components/base/logo/dify-logo'
import Toast from '@/app/components/base/toast'
import Res from '@/app/components/share/text-generation/result'
import RunOnce from '@/app/components/share/text-generation/run-once'
import { appDefaultIconBackground, BATCH_CONCURRENCY } from '@/config'
import { appDefaultIconBackground, BATCH_CONCURRENCY, DEFAULT_VALUE_MAX_LEN } from '@/config'
import { useGlobalPublicStore } from '@/context/global-public-context'
import { useWebAppStore } from '@/context/web-app-context'
import { useAppFavicon } from '@/hooks/use-app-favicon'
@ -256,10 +256,11 @@ const TextGeneration: FC<IMainProps> = ({
    promptConfig?.prompt_variables.forEach((varItem, varIndex) => {
      if (errorRowIndex !== 0)
        return
      if (varItem.type === 'string' && varItem.max_length) {
        if (item[varIndex].length > varItem.max_length) {
      if (varItem.type === 'string') {
        const maxLen = varItem.max_length || DEFAULT_VALUE_MAX_LEN
        if (item[varIndex].length > maxLen) {
          moreThanMaxLengthVarName = varItem.name
          maxLength = varItem.max_length
          maxLength = maxLen
          errorRowIndex = index + 1
          return
        }
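Note: the batch validation above now clamps every string variable, not only those with an explicit max_length; an explicit per-variable limit still takes precedence. The effective rule, as a one-line sketch (48 is DEFAULT_VALUE_MAX_LEN per the test earlier in this diff):

// Sketch: fall back to the shared default when no per-variable limit is set.
const effectiveMaxLength = (maxLength?: number) => maxLength || DEFAULT_VALUE_MAX_LEN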
@ -236,46 +236,4 @@ describe('RunOnce', () => {
    const stopButton = screen.getByTestId('stop-button')
    expect(stopButton).toBeDisabled()
  })

  describe('maxLength behavior', () => {
    it('should not have maxLength attribute when max_length is not set', async () => {
      const promptConfig: PromptConfig = {
        prompt_template: 'template',
        prompt_variables: [
          createPromptVariable({
            key: 'textInput',
            name: 'Text Input',
            type: 'string',
            // max_length is not set
          }),
        ],
      }
      const { onInputsChange } = setup({ promptConfig, visionConfig: { ...baseVisionConfig, enabled: false } })
      await waitFor(() => {
        expect(onInputsChange).toHaveBeenCalled()
      })
      const input = screen.getByPlaceholderText('Text Input')
      expect(input).not.toHaveAttribute('maxLength')
    })

    it('should have maxLength attribute when max_length is set', async () => {
      const promptConfig: PromptConfig = {
        prompt_template: 'template',
        prompt_variables: [
          createPromptVariable({
            key: 'textInput',
            name: 'Text Input',
            type: 'string',
            max_length: 100,
          }),
        ],
      }
      const { onInputsChange } = setup({ promptConfig, visionConfig: { ...baseVisionConfig, enabled: false } })
      await waitFor(() => {
        expect(onInputsChange).toHaveBeenCalled()
      })
      const input = screen.getByPlaceholderText('Text Input')
      expect(input).toHaveAttribute('maxLength', '100')
    })
  })
})
@ -19,6 +19,7 @@ import Textarea from '@/app/components/base/textarea'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import { cn } from '@/utils/classnames'

@ -139,7 +140,7 @@ const RunOnce: FC<IRunOnceProps> = ({
            placeholder={item.name}
            value={inputs[item.key]}
            onChange={(e: ChangeEvent<HTMLInputElement>) => { handleInputsChange({ ...inputsRef.current, [item.key]: e.target.value }) }}
            maxLength={item.max_length}
            maxLength={item.max_length || DEFAULT_VALUE_MAX_LEN}
          />
        )}
        {item.type === 'paragraph' && (
@ -5,7 +5,6 @@ import type { NodeOutPutVar, Variable } from '@/app/components/workflow/types'
import { useBoolean } from 'ahooks'
import * as React from 'react'
import { useEffect, useRef, useState } from 'react'
import { createPortal } from 'react-dom'
import { useTranslation } from 'react-i18next'
import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
import { cn } from '@/utils/classnames'

@ -148,7 +147,7 @@ const CodeEditor: FC<Props> = ({
        onMount={onEditorMounted}
        placeholder={t('common.jinjaEditorPlaceholder', { ns: 'workflow' })!}
      />
      {isShowVarPicker && createPortal(
      {isShowVarPicker && (
        <div
          ref={popupRef}
          className="w-[228px] space-y-1 rounded-lg border border-components-panel-border bg-components-panel-bg p-1 shadow-lg"
@ -165,8 +164,7 @@ const CodeEditor: FC<Props> = ({
            onChange={handleSelectVar}
            isSupportFileVar={false}
          />
        </div>,
        document.body,
        </div>
      )}
    </div>
  )
@ -9,6 +9,10 @@ import Split from '@/app/components/workflow/nodes/_base/components/split'
import { useRetryConfig } from './hooks'
import s from './style.module.css'

// Nodes that support first token timeout configuration
// These nodes internally call LLM and have streaming response characteristics
const LLM_RELATED_NODE_TYPES = ['llm', 'agent', 'parameter-extractor', 'question-classifier']

type RetryOnPanelProps = Pick<Node, 'id' | 'data'>
const RetryOnPanel = ({
  id,
@ -16,10 +20,14 @@ const RetryOnPanel = ({
}: RetryOnPanelProps) => {
  const { t } = useTranslation()
  const { handleRetryConfigChange } = useRetryConfig(id)
  const { retry_config } = data
  const { retry_config, type } = data

  // Check if this is an LLM-related node that supports first token timeout
  const isLLMRelatedNode = LLM_RELATED_NODE_TYPES.includes(type)

  const handleRetryEnabledChange = (value: boolean) => {
    handleRetryConfigChange({
      ...retry_config,
      retry_enabled: value,
      max_retries: retry_config?.max_retries || 3,
      retry_interval: retry_config?.retry_interval || 1000,
@ -32,6 +40,7 @@ const RetryOnPanel = ({
    else if (value < 1)
      value = 1
    handleRetryConfigChange({
      ...retry_config,
      retry_enabled: true,
      max_retries: value,
      retry_interval: retry_config?.retry_interval || 1000,
@ -44,12 +53,27 @@ const RetryOnPanel = ({
    else if (value < 100)
      value = 100
    handleRetryConfigChange({
      ...retry_config,
      retry_enabled: true,
      max_retries: retry_config?.max_retries || 3,
      retry_interval: value,
    })
  }

  const handleFirstTokenTimeoutChange = (value: number) => {
    if (value > 60000)
      value = 60000
    else if (value < 0)
      value = 0
    handleRetryConfigChange({
      ...retry_config,
      retry_enabled: true,
      max_retries: retry_config?.max_retries || 3,
      retry_interval: retry_config?.retry_interval || 1000,
      first_token_timeout: value,
    })
  }

  return (
    <>
      <div className="pt-2">
@ -62,54 +86,76 @@ const RetryOnPanel = ({
          onChange={v => handleRetryEnabledChange(v)}
        />
      </div>
      {
        retry_config?.retry_enabled && (
          <div className="px-4 pb-2">
            <div className="mb-1 flex w-full items-center">
              <div className="system-xs-medium-uppercase mr-2 grow text-text-secondary">{t('nodes.common.retry.maxRetries', { ns: 'workflow' })}</div>
      {retry_config?.retry_enabled && (
        <div className="px-4 pb-2">
          <div className="mb-1 flex w-full items-center">
            <div className="system-xs-medium-uppercase mr-2 grow text-text-secondary">{t('nodes.common.retry.maxRetries', { ns: 'workflow' })}</div>
            <Slider
              className="mr-3 w-[108px]"
              value={retry_config?.max_retries || 3}
              onChange={handleMaxRetriesChange}
              min={1}
              max={10}
            />
            <Input
              type="number"
              wrapperClassName="w-[100px]"
              value={retry_config?.max_retries || 3}
              onChange={e =>
                handleMaxRetriesChange(Number.parseInt(e.currentTarget.value, 10) || 3)}
              min={1}
              max={10}
              unit={t('nodes.common.retry.times', { ns: 'workflow' }) || ''}
              className={s.input}
            />
          </div>
          <div className="mb-1 flex w-full items-center">
            <div className="system-xs-medium-uppercase mr-2 grow text-text-secondary">{t('nodes.common.retry.retryInterval', { ns: 'workflow' })}</div>
            <Slider
              className="mr-3 w-[108px]"
              value={retry_config?.retry_interval || 1000}
              onChange={handleRetryIntervalChange}
              min={100}
              max={5000}
            />
            <Input
              type="number"
              wrapperClassName="w-[100px]"
              value={retry_config?.retry_interval || 1000}
              onChange={e =>
                handleRetryIntervalChange(Number.parseInt(e.currentTarget.value, 10) || 1000)}
              min={100}
              max={5000}
              unit={t('nodes.common.retry.ms', { ns: 'workflow' }) || ''}
              className={s.input}
            />
          </div>
          {/* First token timeout - only for LLM-related nodes */}
          {isLLMRelatedNode && (
            <div className="flex w-full items-center">
              <div className="system-xs-medium-uppercase mr-2 grow text-text-secondary">{t('nodes.common.retry.firstTokenTimeout', { ns: 'workflow' })}</div>
              <Slider
                className="mr-3 w-[108px]"
                value={retry_config?.max_retries || 3}
                onChange={handleMaxRetriesChange}
                min={1}
                max={10}
                value={retry_config?.first_token_timeout ?? 3000}
                onChange={handleFirstTokenTimeoutChange}
                min={0}
                max={60000}
              />
              <Input
                type="number"
                wrapperClassName="w-[100px]"
                value={retry_config?.max_retries || 3}
                value={retry_config?.first_token_timeout ?? 3000}
                onChange={e =>
                  handleMaxRetriesChange(Number.parseInt(e.currentTarget.value, 10) || 3)}
                min={1}
                max={10}
                unit={t('nodes.common.retry.times', { ns: 'workflow' }) || ''}
                className={s.input}
              />
            </div>
            <div className="flex items-center">
              <div className="system-xs-medium-uppercase mr-2 grow text-text-secondary">{t('nodes.common.retry.retryInterval', { ns: 'workflow' })}</div>
              <Slider
                className="mr-3 w-[108px]"
                value={retry_config?.retry_interval || 1000}
                onChange={handleRetryIntervalChange}
                min={100}
                max={5000}
              />
              <Input
                type="number"
                wrapperClassName="w-[100px]"
                value={retry_config?.retry_interval || 1000}
                onChange={e =>
                  handleRetryIntervalChange(Number.parseInt(e.currentTarget.value, 10) || 1000)}
                min={100}
                max={5000}
                  handleFirstTokenTimeoutChange(Number.parseInt(e.currentTarget.value, 10) || 0)}
                min={0}
                max={60000}
                unit={t('nodes.common.retry.ms', { ns: 'workflow' }) || ''}
                className={s.input}
              />
            </div>
          </div>
        )
      }
          )}
        </div>
      )}
    </div>
  )}
</div>
<Split className="mx-4 mt-2" />
</>
@ -2,4 +2,6 @@ export type WorkflowRetryConfig = {
  max_retries: number
  retry_interval: number
  retry_enabled: boolean
  // First token timeout for LLM nodes (milliseconds), 0 means no timeout
  first_token_timeout?: number
}
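Note: a fully populated WorkflowRetryConfig might look like the sketch below; the values mirror the panel defaults and clamping ranges above, not required settings.

const retryConfig: WorkflowRetryConfig = {
  retry_enabled: true,
  max_retries: 3, // clamped to 1..10 by the panel
  retry_interval: 1000, // ms, clamped to 100..5000
  first_token_timeout: 3000, // ms, clamped to 0..60000; 0 disables the timeout
}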
@ -208,6 +208,7 @@ export const VAR_ITEM_TEMPLATE = {
  key: '',
  name: '',
  type: 'string',
  max_length: DEFAULT_VALUE_MAX_LEN,
  required: true,
}

@ -215,6 +216,7 @@ export const VAR_ITEM_TEMPLATE_IN_WORKFLOW = {
  variable: '',
  label: '',
  type: InputVarType.textInput,
  max_length: DEFAULT_VALUE_MAX_LEN,
  required: true,
  options: [],
}
@ -223,6 +225,7 @@ export const VAR_ITEM_TEMPLATE_IN_PIPELINE = {
  variable: '',
  label: '',
  type: PipelineInputVarType.textInput,
  max_length: DEFAULT_VALUE_MAX_LEN,
  required: true,
  options: [],
}
@ -26,8 +26,7 @@ export default antfu(
      'react-hooks/preserve-manual-memoization': 'warn',
      'react-hooks/purity': 'warn',
      'react-hooks/refs': 'warn',
      // prefer react-hooks-extra/no-direct-set-state-in-use-effect
      'react-hooks/set-state-in-effect': 'off',
      'react-hooks/set-state-in-effect': 'warn',
      'react-hooks/set-state-in-render': 'warn',
      'react-hooks/static-components': 'warn',
      'react-hooks/unsupported-syntax': 'warn',
@ -54,14 +53,6 @@ export default antfu(
      },
    },
  },
  {
    files: ['**/*.ts', '**/*.tsx'],
    settings: {
      'react-x': {
        additionalStateHooks: '/^use\\w*State(?:s)?|useAtom$/u',
      },
    },
  },
  // downgrade some rules from error to warn for gradual adoption
  // we should fix these in following pull requests
  {
@ -196,24 +196,6 @@
  "publishApp.notSet": "Not set",
  "publishApp.notSetDesc": "Currently nobody can access the web app. Please set permissions.",
  "publishApp.title": "Who can access web app",
  "quadrantMatrix.deadline": "DDL:",
  "quadrantMatrix.invalidData": "Invalid Quadrant Data",
  "quadrantMatrix.invalidDataDesc": "Expected JSON format with q1, q2, q3, q4 arrays",
  "quadrantMatrix.legend.importance": "I = Importance",
  "quadrantMatrix.legend.urgency": "U = Urgency",
  "quadrantMatrix.more": "+{{count}} more",
  "quadrantMatrix.noTasks": "No tasks",
  "quadrantMatrix.q1.subtitle": "Urgent & Important",
  "quadrantMatrix.q1.title": "Do First",
  "quadrantMatrix.q2.subtitle": "Important & Not Urgent",
  "quadrantMatrix.q2.title": "Schedule",
  "quadrantMatrix.q3.subtitle": "Urgent & Not Important",
  "quadrantMatrix.q3.title": "Delegate",
  "quadrantMatrix.q4.subtitle": "Not Urgent & Not Important",
  "quadrantMatrix.q4.title": "Don't Do",
  "quadrantMatrix.taskCount_one": "{{count}} task prioritized",
  "quadrantMatrix.taskCount_other": "{{count}} tasks prioritized",
  "quadrantMatrix.title": "Eisenhower Matrix",
  "removeOriginal": "Delete the original app",
  "roadmap": "See our roadmap",
  "showMyCreatedAppsOnly": "Created by me",
@ -433,6 +433,7 @@
  "nodes.common.memory.windowSize": "Window Size",
  "nodes.common.outputVars": "Output Variables",
  "nodes.common.pluginNotInstalled": "Plugin is not installed",
  "nodes.common.retry.firstTokenTimeout": "First token timeout",
  "nodes.common.retry.maxRetries": "max retries",
  "nodes.common.retry.ms": "ms",
  "nodes.common.retry.retries": "{{num}} Retries",
@ -444,6 +445,8 @@
  "nodes.common.retry.retrySuccessful": "Retry successful",
  "nodes.common.retry.retryTimes": "Retry {{times}} times on failure",
  "nodes.common.retry.retrying": "Retrying...",
  "nodes.common.retry.seconds": "s",
  "nodes.common.retry.timeoutDuration": "Timeout duration",
  "nodes.common.retry.times": "times",
  "nodes.common.typeSwitch.input": "Input value",
  "nodes.common.typeSwitch.variable": "Use variable",
@ -196,24 +196,6 @@
  "publishApp.notSet": "未设置",
  "publishApp.notSetDesc": "当前任何人都无法访问 Web 应用。请设置访问权限。",
  "publishApp.title": "谁可以访问 web 应用",
  "quadrantMatrix.deadline": "截止:",
  "quadrantMatrix.invalidData": "无效的象限数据",
  "quadrantMatrix.invalidDataDesc": "需要包含 q1, q2, q3, q4 数组的 JSON 格式",
  "quadrantMatrix.legend.importance": "I = 重要性",
  "quadrantMatrix.legend.urgency": "U = 紧急性",
  "quadrantMatrix.more": "+{{count}} 更多",
  "quadrantMatrix.noTasks": "暂无任务",
  "quadrantMatrix.q1.subtitle": "紧急且重要",
  "quadrantMatrix.q1.title": "立即执行",
  "quadrantMatrix.q2.subtitle": "重要但不紧急",
  "quadrantMatrix.q2.title": "计划安排",
  "quadrantMatrix.q3.subtitle": "紧急但不重要",
  "quadrantMatrix.q3.title": "委派他人",
  "quadrantMatrix.q4.subtitle": "不紧急也不重要",
  "quadrantMatrix.q4.title": "不要做",
  "quadrantMatrix.taskCount_one": "{{count}} 个任务已排序",
  "quadrantMatrix.taskCount_other": "{{count}} 个任务已排序",
  "quadrantMatrix.title": "艾森豪威尔矩阵",
  "removeOriginal": "删除原应用",
  "roadmap": "产品路线图",
  "showMyCreatedAppsOnly": "我创建的",
@ -433,6 +433,7 @@
  "nodes.common.memory.windowSize": "记忆窗口",
  "nodes.common.outputVars": "输出变量",
  "nodes.common.pluginNotInstalled": "插件未安装",
  "nodes.common.retry.firstTokenTimeout": "首个 Token 超时",
  "nodes.common.retry.maxRetries": "最大重试次数",
  "nodes.common.retry.ms": "毫秒",
  "nodes.common.retry.retries": "{{num}} 重试次数",
@ -444,6 +445,8 @@
  "nodes.common.retry.retrySuccessful": "重试成功",
  "nodes.common.retry.retryTimes": "失败时重试 {{times}} 次",
  "nodes.common.retry.retrying": "重试中...",
  "nodes.common.retry.seconds": "秒",
  "nodes.common.retry.timeoutDuration": "超时时长",
  "nodes.common.retry.times": "次",
  "nodes.common.typeSwitch.input": "输入值",
  "nodes.common.typeSwitch.variable": "使用变量",
@ -1,7 +1,7 @@
{
  "name": "dify-web",
  "type": "module",
  "version": "1.11.4",
  "version": "1.11.3",
  "private": true,
  "packageManager": "pnpm@10.27.0+sha512.72d699da16b1179c14ba9e64dc71c9a40988cbdc65c264cb0e489db7de917f20dcf4d64d8723625f2969ba52d4b7e2a1170682d9ac2a5dcaeaab732b7e16f04a",
  "imports": {
@ -28,10 +28,9 @@
    "build:docker": "next build && node scripts/optimize-standalone.js",
    "start": "node ./scripts/copy-and-start.mjs",
    "lint": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache",
    "lint:fix": "pnpm lint --fix",
    "lint:quiet": "pnpm lint --quiet",
    "lint:complexity": "pnpm lint --rule 'complexity: [error, {max: 15}]' --quiet",
    "lint:report": "pnpm lint --output-file eslint_report.json --format json",
    "lint:fix": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --fix",
    "lint:quiet": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --quiet",
    "lint:complexity": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --rule 'complexity: [error, {max: 15}]' --quiet",
    "type-check": "tsc --noEmit",
    "type-check:tsgo": "tsgo --noEmit",
    "prepare": "cd ../ && node -e \"if (process.env.NODE_ENV !== 'production'){process.exit(1)} \" || husky ./web/.husky",
@ -153,9 +152,9 @@
    "zustand": "^5.0.9"
  },
  "devDependencies": {
    "@antfu/eslint-config": "^7.0.1",
    "@antfu/eslint-config": "^6.7.3",
    "@chromatic-com/storybook": "^4.1.1",
    "@eslint-react/eslint-plugin": "^2.7.0",
    "@eslint-react/eslint-plugin": "^2.3.13",
    "@mdx-js/loader": "^3.1.1",
    "@mdx-js/react": "^3.1.1",
    "@next/bundle-analyzer": "15.5.9",
@ -190,7 +189,7 @@
    "@types/semver": "^7.7.1",
    "@types/sortablejs": "^1.15.8",
    "@types/uuid": "^10.0.0",
    "@typescript-eslint/parser": "^8.53.0",
    "@typescript-eslint/parser": "^8.50.0",
    "@typescript/native-preview": "^7.0.0-dev",
    "@vitejs/plugin-react": "^5.1.2",
    "@vitest/coverage-v8": "4.0.16",
@ -202,7 +201,7 @@
    "eslint-plugin-react-hooks": "^7.0.1",
    "eslint-plugin-react-refresh": "^0.4.26",
    "eslint-plugin-sonarjs": "^3.0.5",
    "eslint-plugin-storybook": "^10.1.11",
    "eslint-plugin-storybook": "^10.1.10",
    "eslint-plugin-tailwindcss": "^3.18.2",
    "husky": "^9.1.7",
    "jsdom": "^27.3.0",
@ -225,6 +224,7 @@
  },
  "pnpm": {
    "overrides": {
      "@eslint/plugin-kit@<0.3.4": "0.3.4",
      "@monaco-editor/loader": "1.5.0",
      "@nolyfill/safe-buffer": "npm:safe-buffer@^5.2.1",
      "array-includes": "npm:@nolyfill/array-includes@^1",
@ -275,6 +275,7 @@
    ]
  },
  "resolutions": {
    "@eslint/plugin-kit": "~0.3",
    "@types/react": "~19.2.7",
    "@types/react-dom": "~19.2.3",
    "brace-expansion": "~2.0",
803
web/pnpm-lock.yaml
generated
File diff suppressed because it is too large
@ -24,12 +24,8 @@ export type FetchOptionType = Omit<RequestInit, 'body'> & {
}

const afterResponse204: AfterResponseHook = async (_request, _options, response) => {
  if (response.status === 204) {
    return new Response(JSON.stringify({ result: 'success' }), {
      status: 200,
      headers: { 'Content-Type': ContentType.json },
    })
  }
  if (response.status === 204)
    return Response.json({ result: 'success' })
}

export type ResponseError = {
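Note: Response.json() is the standard static helper on the Fetch API Response class: it serializes the body, sets the Content-Type header to application/json, and defaults to status 200, which is why it can replace the hand-built constructor call above. Equivalence sketch:

// Both produce a 200 response with a JSON content type:
const viaHelper = Response.json({ result: 'success' })
const viaCtor = new Response(JSON.stringify({ result: 'success' }), {
  status: 200,
  headers: { 'Content-Type': 'application/json' },
})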
@ -30,7 +30,7 @@ export const getNewVar = (key: string, type: string) => {
}

export const getNewVarInWorkflow = (key: string, type = InputVarType.textInput): InputVar => {
  const { ...rest } = VAR_ITEM_TEMPLATE_IN_WORKFLOW
  const { max_length: _maxLength, ...rest } = VAR_ITEM_TEMPLATE_IN_WORKFLOW
  if (type !== InputVarType.textInput) {
    return {
      ...rest,
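Note: the rewritten line uses rest destructuring to omit max_length from the template object without mutating it; the underscore-prefixed binding marks the extracted key as intentionally unused for the linter. Sketch of the idiom:

// Sketch: drop one key from an object copy.
const template = { variable: '', label: '', max_length: 48, required: true }
const { max_length: _maxLength, ...rest } = template
// rest => { variable: '', label: '', required: true }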