mirror of https://github.com/langgenius/dify.git
synced 2026-02-08 12:35:27 +08:00

Compare commits: `build/clea…` → `feature/ta…` (14 commits)

| SHA1 |
|---|
| a0aa8cdb45 |
| ae8618877b |
| fad6fa141d |
| 30821fd26c |
| 1a9fdd9a65 |
| de610cbf39 |
| 1c55602445 |
| a3f1220d23 |
| d62e16b9bb |
| 13f2a43ccc |
| 6903c31b84 |
| b2cc9b255d |
| e9f0e1e839 |
| cd497a8c52 |
.github/workflows/autofix.yml (vendored, 4 changes)

@@ -16,14 +16,14 @@ jobs:
       - name: Check Docker Compose inputs
         id: docker-compose-changes
-        uses: tj-actions/changed-files@v46
+        uses: tj-actions/changed-files@v47
         with:
           files: |
             docker/generate_docker_compose
             docker/.env.example
             docker/docker-compose-template.yaml
             docker/docker-compose.yaml
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.11"
.github/workflows/build-push.yml (vendored, 2 changes)

@@ -112,7 +112,7 @@ jobs:
           context: "web"
     steps:
       - name: Download digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v7
         with:
           path: /tmp/digests
           pattern: digests-${{ matrix.context }}-*
.github/workflows/deploy-agent-dev.yml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/agent-dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
         with:
           host: ${{ secrets.AGENT_DEV_SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
.github/workflows/deploy-dev.yml (vendored, 2 changes)

@@ -16,7 +16,7 @@ jobs:
       github.event.workflow_run.head_branch == 'deploy/dev'
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
         with:
           host: ${{ secrets.SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
.github/workflows/deploy-hitl.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
       )
     steps:
       - name: Deploy to server
-        uses: appleboy/ssh-action@v0.1.8
+        uses: appleboy/ssh-action@v1
         with:
           host: ${{ secrets.HITL_SSH_HOST }}
           username: ${{ secrets.SSH_USER }}
.github/workflows/stale.yml (vendored, 2 changes)

@@ -18,7 +18,7 @@ jobs:
       pull-requests: write

     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v10
        with:
          days-before-issue-stale: 15
          days-before-issue-close: 3
.github/workflows/trigger-i18n-sync.yml (vendored, 2 changes)

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
@@ -12,12 +12,8 @@ The codebase is split into:

 ## Backend Workflow

 - Read `api/AGENTS.md` for details
 - Run backend CLI commands through `uv run --project api <command>`.
-- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
+- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
 - Integration tests are CI-only and are not expected to run in the local environment.

 ## Frontend Workflow
Makefile (12 changes)

@@ -61,7 +61,8 @@ check:

 lint:
 	@echo "🔧 Running ruff format, check with fixes, import linter, and dotenv-linter..."
-	@uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
+	@uv run --project api --dev ruff format ./api
+	@uv run --project api --dev ruff check --fix ./api
 	@uv run --directory api --dev lint-imports
 	@uv run --project api --dev dotenv-linter ./api/.env.example ./web/.env.example
 	@echo "✅ Linting complete"

@@ -73,7 +74,12 @@ type-check:

 test:
 	@echo "🧪 Running backend unit tests..."
-	@uv run --project api --dev dev/pytest/pytest_unit_tests.sh
+	@if [ -n "$(TARGET_TESTS)" ]; then \
+		echo "Target: $(TARGET_TESTS)"; \
+		uv run --project api --dev pytest $(TARGET_TESTS); \
+	else \
+		uv run --project api --dev dev/pytest/pytest_unit_tests.sh; \
+	fi
 	@echo "✅ Tests complete"

 # Build Docker images

@@ -125,7 +131,7 @@ help:
 	@echo " make check - Check code with ruff"
 	@echo " make lint - Format, fix, and lint code (ruff, imports, dotenv)"
 	@echo " make type-check - Run type checking with basedpyright"
-	@echo " make test - Run backend unit tests"
+	@echo " make test - Run backend unit tests (or TARGET_TESTS=./api/tests/<target_tests>)"
 	@echo ""
 	@echo "Docker Build Targets:"
 	@echo " make build-web - Build web Docker image"
agent-notes/.gitkeep (new file, empty)
api/AGENTS.md (248 changes)

@@ -1,62 +1,236 @@
-# Agent Skill Index
+# API Agent Guide

+## Agent Notes (must-check)
+
+Before you start work on any backend file under `api/`, you MUST check whether a related note exists under:
+
+- `agent-notes/<same-relative-path-as-target-file>.md`
+
+Rules:
+
+- **Path mapping**: for a target file `<path>/<name>.py`, the note must be `agent-notes/<path>/<name>.py.md` (same folder structure, same filename, plus `.md`).
+- **Before working**:
+  - If the note exists, read it first and follow any constraints/decisions recorded there.
+  - If the note conflicts with the current code, or references an "origin" file/path that has been deleted, renamed, or migrated, treat the **code as the single source of truth** and update the note to match reality.
+  - If the note does not exist, create it with a short architecture/intent summary and any relevant invariants/edge cases.
+- **During working**:
+  - Keep the note in sync as you discover constraints, make decisions, or change approach.
+  - If you move/rename a file, migrate its note to the new mapped path (and fix any outdated references inside the note).
+  - Record non-obvious edge cases, trade-offs, and the test/verification plan as you go (not just at the end).
+  - Keep notes **coherent**: integrate new findings into the relevant sections and rewrite for clarity; avoid append-only “recent fix” / changelog-style additions unless the note is explicitly intended to be a changelog.
+- **When finishing work**:
+  - Update the related note(s) to reflect what changed, why, and any new edge cases/tests.
+  - If a file is deleted, remove or clearly deprecate the corresponding note so it cannot be mistaken as current guidance.
+  - Keep notes concise and accurate; they are meant to prevent repeated rediscovery.
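The path mapping above is mechanical; a minimal sketch of it as a helper (hypothetical, not part of the repo):

```python
from pathlib import Path


def note_path_for(target: Path) -> Path:
    # agent-notes/<same-relative-path>/<name>.py.md
    return Path("agent-notes") / target.with_suffix(target.suffix + ".md")


# illustrative file name; any backend path under api/ maps the same way
assert note_path_for(Path("api/services/tag_service.py")) == Path(
    "agent-notes/api/services/tag_service.py.md"
)
```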
+
+## Skill Index

 Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.

-______________________________________________________________________
-
-## Platform Foundations
+### Platform Foundations

-- **[Infrastructure Overview](agent_skills/infra.md)**\
-  When to read this:
+#### [Infrastructure Overview](agent_skills/infra.md)
+
+- **When to read this**
   - You need to understand where a feature belongs in the architecture.
   - You’re wiring storage, Redis, vector stores, or OTEL.
-  - You’re about to add CLI commands or async jobs.\
-  What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.
+  - You’re about to add CLI commands or async jobs.
+- **What it covers**
+  - Configuration stack (`configs/app_config.py`, remote settings)
+  - Storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`)
+  - Redis conventions (`extensions/ext_redis.py`)
+  - Plugin runtime topology
+  - Vector-store factory (`core/rag/datasource/vdb/*`)
+  - Observability hooks
+  - SSRF proxy usage
+  - Core CLI commands

-- **[Coding Style](agent_skills/coding_style.md)**\
-  When to read this:
-  - You’re writing or reviewing backend code and need the authoritative checklist.
-  - You’re unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
-  - You want the exact lint/type/test commands used in PRs.\
-  Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.
-
-______________________________________________________________________
-
-## Plugin & Extension Development
+### Plugin & Extension Development

-- **[Plugin Systems](agent_skills/plugin.md)**\
-  When to read this:
+#### [Plugin Systems](agent_skills/plugin.md)
+
+- **When to read this**
   - You’re building or debugging a marketplace plugin.
-  - You need to know how manifests, providers, daemons, and migrations fit together.\
-  What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.
+  - You need to know how manifests, providers, daemons, and migrations fit together.
+- **What it covers**
+  - Plugin manifests (`core/plugin/entities/plugin.py`)
+  - Installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands)
+  - Runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent)
+  - Daemon coordination (`core/plugin/entities/plugin_daemon.py`)
+  - How provider registries surface capabilities to the rest of the platform

-- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
-  When to read this:
+#### [Plugin OAuth](agent_skills/plugin_oauth.md)
+
+- **When to read this**
   - You must integrate OAuth for a plugin or datasource.
-  - You’re handling credential encryption or refresh flows.\
-  Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.
+  - You’re handling credential encryption or refresh flows.
+- **Topics**
+  - Credential storage
+  - Encryption helpers (`core/helper/provider_encryption.py`)
+  - OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`)
+  - How console/API layers expose the flows

-______________________________________________________________________
-
-## Workflow Entry & Execution
+### Workflow Entry & Execution

-- **[Trigger Concepts](agent_skills/trigger.md)**\
-  When to read this:
+#### [Trigger Concepts](agent_skills/trigger.md)
+
+- **When to read this**
   - You’re debugging why a workflow didn’t start.
   - You’re adding a new trigger type or hook.
-  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
-  Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.
+  - You need to trace async execution, draft debugging, or webhook/schedule pipelines.
+- **Details**
+  - Start-node taxonomy
+  - Webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`)
+  - Async orchestration (`services/async_workflow_service.py`, Celery queues)
+  - Debug event bus
+  - Storage/logging interactions

-______________________________________________________________________
-
-## General Reminders
+## Additional Notes for Agents

-- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
+- All skill docs assume you follow the coding style rules below—run the lint/type/test commands before submitting changes.
 - When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
 - If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
 - Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
 - When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.

+## Coding Style
+
+This is the default standard for backend code in this repo. Follow it for new code and use it as the checklist when reviewing changes.
+
+### Linting & Formatting
+
+- Use Ruff for formatting and linting (follow `.ruff.toml`).
+- Keep each line under 120 characters (including spaces).
+
+### Naming Conventions
+
+- Use `snake_case` for variables and functions.
+- Use `PascalCase` for classes.
+- Use `UPPER_CASE` for constants.
+
+### Typing & Class Layout
+
+- Code should usually include type annotations that match the repo’s current Python version (avoid untyped public APIs and “mystery” values).
+- Prefer modern typing forms (e.g. `list[str]`, `dict[str, int]`) and avoid `Any` unless there’s a strong reason.
+- For classes, declare member variables at the top of the class body (before `__init__`) so the class shape is obvious at a glance:
+
+```python
+from datetime import datetime
+
+
+class Example:
+    user_id: str
+    created_at: datetime
+
+    def __init__(self, user_id: str, created_at: datetime) -> None:
+        self.user_id = user_id
+        self.created_at = created_at
+```
+
+### General Rules
+
+- Use Pydantic v2 conventions.
+- Use `uv` for Python package management in this repo (usually with `--project api`).
+- Prefer simple functions over small “utility classes” for lightweight helpers.
+- Avoid implementing dunder methods unless it’s clearly needed and matches existing patterns.
+- Never start long-running services as part of agent work (`uv run app.py`, `flask run`, etc.); running tests is allowed.
+- Keep files below ~800 lines; split when necessary.
+- Keep code readable and explicit—avoid clever hacks.
+
+### Architecture & Boundaries
+
+- Mirror the layered architecture: controller → service → core/domain.
+- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
+- Optimise for observability: deterministic control flow, clear logging, actionable errors.
+
+### Logging & Errors
+
+- Never use `print`; use a module-level logger:
+  - `logger = logging.getLogger(__name__)`
+- Include tenant/app/workflow identifiers in log context when relevant.
+- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate them into HTTP responses in controllers.
+- Log retryable events at `warning`, terminal failures at `error`.
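A small illustration of these logging rules (function name and identifiers are hypothetical):

```python
import logging

logger = logging.getLogger(__name__)


def sync_subscription(tenant_id: str) -> None:
    try:
        ...  # call the external billing backend here
    except TimeoutError:
        # retryable: warn with tenant context and let the caller retry
        logger.warning("billing sync timed out, will retry, tenant=%s", tenant_id)
        raise
    except ValueError:
        # terminal failure: error level, with traceback
        logger.exception("billing sync failed for tenant=%s", tenant_id)
        raise
```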
+
+### SQLAlchemy Patterns
+
+- Models inherit from `models.base.TypeBase`; do not create ad-hoc metadata or engines.
+- Open sessions with context managers:
+
+```python
+from sqlalchemy.orm import Session
+
+with Session(db.engine, expire_on_commit=False) as session:
+    stmt = select(Workflow).where(
+        Workflow.id == workflow_id,
+        Workflow.tenant_id == tenant_id,
+    )
+    workflow = session.execute(stmt).scalar_one_or_none()
+```
+
+- Prefer SQLAlchemy expressions; avoid raw SQL unless necessary.
+- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
+- Introduce repository abstractions only for very large tables (e.g., workflow executions) or when alternative storage strategies are required.
+
+### Storage & External I/O
+
+- Access storage via `extensions.ext_storage.storage`.
+- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
+- Background tasks that touch storage must be idempotent, and should log relevant object identifiers.
+
+### Pydantic Usage
+
+- Define DTOs with Pydantic v2 models and forbid extras by default.
+- Use `@field_validator` / `@model_validator` for domain rules.
+
+Example:
+
+```python
+from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
+
+
+class TriggerConfig(BaseModel):
+    endpoint: HttpUrl
+    secret: str
+
+    model_config = ConfigDict(extra="forbid")
+
+    @field_validator("secret")
+    def ensure_secret_prefix(cls, value: str) -> str:
+        if not value.startswith("dify_"):
+            raise ValueError("secret must start with dify_")
+        return value
+```
+
+### Generics & Protocols
+
+- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
+- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
+- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
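A minimal sketch of the Protocol-plus-generics pattern these bullets describe (the cache interface is hypothetical, not a repo API):

```python
from typing import Generic, Protocol, TypeVar

K = TypeVar("K")
V = TypeVar("V")


class Cache(Protocol[K, V]):
    # behavioural contract: any class with these methods satisfies it
    def get(self, key: K) -> V | None: ...
    def set(self, key: K, value: V) -> None: ...


class InMemoryCache(Generic[K, V]):
    def __init__(self) -> None:
        self._data: dict[K, V] = {}

    def get(self, key: K) -> V | None:
        return self._data.get(key)

    def set(self, key: K, value: V) -> None:
        self._data[key] = value


# structural typing: InMemoryCache satisfies Cache without inheriting it
plan_cache: Cache[str, str] = InMemoryCache()
plan_cache.set("tenant-a", "sandbox")
```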
+
+### Tooling & Checks
+
+Quick checks while iterating:
+
+- Format: `make format`
+- Lint (includes auto-fix): `make lint`
+- Type check: `make type-check`
+- Targeted tests: `make test TARGET_TESTS=./api/tests/<target_tests>`
+
+Before opening a PR / submitting:
+
+- `make lint`
+- `make type-check`
+- `make test`
+
+### Controllers & Services
+
+- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
+- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
+- Document non-obvious behaviour with concise comments.
+
+### Miscellaneous
+
+- Use `configs.dify_config` for configuration—never read environment variables directly.
+- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
+- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
+- Keep experimental scripts under `dev/`; do not ship them in production builds.
@ -1,115 +0,0 @@
|
||||
## Linter
|
||||
|
||||
- Always follow `.ruff.toml`.
|
||||
- Run `uv run ruff check --fix --unsafe-fixes`.
|
||||
- Keep each line under 100 characters (including spaces).
|
||||
|
||||
## Code Style
|
||||
|
||||
- `snake_case` for variables and functions.
|
||||
- `PascalCase` for classes.
|
||||
- `UPPER_CASE` for constants.
|
||||
|
||||
## Rules
|
||||
|
||||
- Use Pydantic v2 standard.
|
||||
- Use `uv` for package management.
|
||||
- Do not override dunder methods like `__init__`, `__iadd__`, etc.
|
||||
- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
|
||||
- Prefer simple functions over classes for lightweight helpers.
|
||||
- Keep files below 800 lines; split when necessary.
|
||||
- Keep code readable—no clever hacks.
|
||||
- Never use `print`; log with `logger = logging.getLogger(__name__)`.
|
||||
|
||||
## Guiding Principles
|
||||
|
||||
- Mirror the project’s layered architecture: controller → service → core/domain.
|
||||
- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
|
||||
- Optimise for observability: deterministic control flow, clear logging, actionable errors.
|
||||
|
||||
## SQLAlchemy Patterns
|
||||
|
||||
- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.
|
||||
|
||||
- Open sessions with context managers:
|
||||
|
||||
```python
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
with Session(db.engine, expire_on_commit=False) as session:
|
||||
stmt = select(Workflow).where(
|
||||
Workflow.id == workflow_id,
|
||||
Workflow.tenant_id == tenant_id,
|
||||
)
|
||||
workflow = session.execute(stmt).scalar_one_or_none()
|
||||
```
|
||||
|
||||
- Use SQLAlchemy expressions; avoid raw SQL unless necessary.
|
||||
|
||||
- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.
|
||||
|
||||
- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
|
||||
|
||||
## Storage & External IO
|
||||
|
||||
- Access storage via `extensions.ext_storage.storage`.
|
||||
- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
|
||||
- Background tasks that touch storage must be idempotent and log the relevant object identifiers.
|
||||
|
||||
## Pydantic Usage
|
||||
|
||||
- Define DTOs with Pydantic v2 models and forbid extras by default.
|
||||
|
||||
- Use `@field_validator` / `@model_validator` for domain rules.
|
||||
|
||||
- Example:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
|
||||
|
||||
class TriggerConfig(BaseModel):
|
||||
endpoint: HttpUrl
|
||||
secret: str
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
@field_validator("secret")
|
||||
def ensure_secret_prefix(cls, value: str) -> str:
|
||||
if not value.startswith("dify_"):
|
||||
raise ValueError("secret must start with dify_")
|
||||
return value
|
||||
```
|
||||
|
||||
## Generics & Protocols
|
||||
|
||||
- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
|
||||
- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
|
||||
- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
|
||||
|
||||
## Error Handling & Logging
|
||||
|
||||
- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
|
||||
- Declare `logger = logging.getLogger(__name__)` at module top.
|
||||
- Include tenant/app/workflow identifiers in log context.
|
||||
- Log retryable events at `warning`, terminal failures at `error`.
|
||||
|
||||
## Tooling & Checks
|
||||
|
||||
- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
|
||||
- Type checks: `uv run --directory api --dev basedpyright`.
|
||||
- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
|
||||
- Run all of the above before submitting your work.
|
||||
|
||||
## Controllers & Services
|
||||
|
||||
- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
|
||||
- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
|
||||
- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
|
||||
- Document non-obvious behaviour with concise comments.
|
||||
|
||||
## Miscellaneous
|
||||
|
||||
- Use `configs.dify_config` for configuration—never read environment variables directly.
|
||||
- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
|
||||
- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
|
||||
- Keep experimental scripts under `dev/`; do not ship them in production builds.
|
||||
@@ -881,25 +881,12 @@ def clear_free_plan_tenant_expired_logs(days: int, batch: int, tenant_ids: list[
     is_flag=True,
     help="Preview cleanup results without deleting any workflow run data.",
 )
-@click.option(
-    "--log-sql",
-    is_flag=True,
-    help="Log SQL statements and timings for cleanup queries.",
-)
-@click.option(
-    "--log-sql-min-ms",
-    default=0,
-    show_default=True,
-    help="Only log SQL statements slower than N milliseconds (0 logs all).",
-)
 def clean_workflow_runs(
     days: int,
     batch_size: int,
     start_from: datetime.datetime | None,
     end_before: datetime.datetime | None,
     dry_run: bool,
-    log_sql: bool,
-    log_sql_min_ms: int,
 ):
     """
     Clean workflow runs and related workflow data for free tenants.

@@ -916,8 +903,6 @@ def clean_workflow_runs(
         start_from=start_from,
         end_before=end_before,
         dry_run=dry_run,
-        log_sql=log_sql,
-        log_sql_min_ms=log_sql_min_ms,
     ).run()

     end_time = datetime.datetime.now(datetime.UTC)

@@ -30,6 +30,11 @@ class TagBindingRemovePayload(BaseModel):
     type: Literal["knowledge", "app"] | None = Field(default=None, description="Tag type")


+class TagListQueryParam(BaseModel):
+    type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
+    keyword: str | None = Field(None, description="Search keyword")
+
+
 register_schema_models(
     console_ns,
     TagBasePayload,

@@ -43,12 +48,15 @@ class TagListApi(Resource):
     @setup_required
     @login_required
     @account_initialization_required
+    @console_ns.doc(
+        params={"type": 'Tag type filter. Can be "knowledge" or "app".', "keyword": "Search keyword for tag name."}
+    )
     @marshal_with(dataset_tag_fields)
     def get(self):
         _, current_tenant_id = current_account_with_tenant()
-        tag_type = request.args.get("type", type=str, default="")
-        keyword = request.args.get("keyword", default=None, type=str)
-        tags = TagService.get_tags(tag_type, current_tenant_id, keyword)
+        raw_args = request.args.to_dict()
+        param = TagListQueryParam.model_validate(raw_args)
+        tags = TagService.get_tags(param.type, current_tenant_id, param.keyword)

         return tags, 200
@@ -71,8 +71,8 @@ class LLMGenerator:
         response: LLMResult = model_instance.invoke_llm(
             prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
         )
-        answer = cast(str, response.message.content)
-        if answer is None:
+        answer = response.message.get_text_content()
+        if answer == "":
             return ""
         try:
             result_dict = json.loads(answer)

@@ -184,7 +184,7 @@ class LLMGenerator:
                 prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
             )

-            rule_config["prompt"] = cast(str, response.message.content)
+            rule_config["prompt"] = response.message.get_text_content()

         except InvokeError as e:
             error = str(e)

@@ -237,13 +237,11 @@ class LLMGenerator:

             return rule_config

-        rule_config["prompt"] = cast(str, prompt_content.message.content)
+        rule_config["prompt"] = prompt_content.message.get_text_content()

-        if not isinstance(prompt_content.message.content, str):
-            raise NotImplementedError("prompt content is not a string")
         parameter_generate_prompt = parameter_template.format(
             inputs={
-                "INPUT_TEXT": prompt_content.message.content,
+                "INPUT_TEXT": prompt_content.message.get_text_content(),
             },
             remove_template_variables=False,
         )

@@ -253,7 +251,7 @@ class LLMGenerator:
         statement_generate_prompt = statement_template.format(
             inputs={
                 "TASK_DESCRIPTION": instruction,
-                "INPUT_TEXT": prompt_content.message.content,
+                "INPUT_TEXT": prompt_content.message.get_text_content(),
             },
             remove_template_variables=False,
         )

@@ -263,7 +261,7 @@ class LLMGenerator:
             parameter_content: LLMResult = model_instance.invoke_llm(
                 prompt_messages=list(parameter_messages), model_parameters=model_parameters, stream=False
             )
-            rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', cast(str, parameter_content.message.content))
+            rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.get_text_content())
         except InvokeError as e:
             error = str(e)
             error_step = "generate variables"

@@ -272,7 +270,7 @@ class LLMGenerator:
             statement_content: LLMResult = model_instance.invoke_llm(
                 prompt_messages=list(statement_messages), model_parameters=model_parameters, stream=False
             )
-            rule_config["opening_statement"] = cast(str, statement_content.message.content)
+            rule_config["opening_statement"] = statement_content.message.get_text_content()
         except InvokeError as e:
             error = str(e)
             error_step = "generate conversation opener"

@@ -315,7 +313,7 @@ class LLMGenerator:
                 prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
             )

-            generated_code = cast(str, response.message.content)
+            generated_code = response.message.get_text_content()
             return {"code": generated_code, "language": code_language, "error": ""}

         except InvokeError as e:

@@ -351,7 +349,7 @@ class LLMGenerator:
             raise TypeError("Expected LLMResult when stream=False")
         response = result

-        answer = cast(str, response.message.content)
+        answer = response.message.get_text_content()
         return answer.strip()

     @classmethod

@@ -375,10 +373,7 @@ class LLMGenerator:
             prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
         )

-        raw_content = response.message.content
-
-        if not isinstance(raw_content, str):
-            raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
+        raw_content = response.message.get_text_content()

         try:
             parsed_content = json.loads(raw_content)
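The recurring change in this file replaces `cast(str, response.message.content)` with `response.message.get_text_content()`: `content` may be a plain string, a list of content segments, or `None`, so the cast was unsafe. A simplified stand-in for the behaviour (assumed segment shape; the real method lives on the prompt-message class):

```python
def get_text_content(content: str | list[dict] | None) -> str:
    # simplified stand-in, assuming segments like {"type": "text", "text": "..."}
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    return "".join(seg.get("text", "") for seg in content if seg.get("type") == "text")


assert get_text_content("hi") == "hi"
assert get_text_content([{"type": "text", "text": "hi"}]) == "hi"
assert get_text_content(None) == ""
```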
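The next file's hunks swap a plain `workflow_run_id IN (...)` filter for a composite tuple `IN` that matches the table's composite index. A minimal, self-contained sketch of SQLAlchemy's `tuple_(...).in_(...)` (toy model, not the repo's):

```python
from sqlalchemy import String, select, tuple_
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class NodeExecution(Base):
    __tablename__ = "node_execution"
    id: Mapped[str] = mapped_column(String, primary_key=True)
    tenant_id: Mapped[str] = mapped_column(String)
    workflow_run_id: Mapped[str] = mapped_column(String)


# one tuple per (tenant_id, workflow_run_id) pair we want to target
pairs = [("tenant-a", "run-1"), ("tenant-a", "run-2")]
stmt = select(NodeExecution.id).where(
    tuple_(NodeExecution.tenant_id, NodeExecution.workflow_run_id).in_(pairs)
)
```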
@@ -9,7 +9,7 @@ from collections.abc import Sequence
 from datetime import datetime
 from typing import TypedDict, cast

-from sqlalchemy import asc, delete, desc, func, select
+from sqlalchemy import asc, delete, desc, func, select, tuple_
 from sqlalchemy.engine import CursorResult
 from sqlalchemy.orm import Session, sessionmaker

@@ -328,14 +328,39 @@ class DifyAPISQLAlchemyWorkflowNodeExecutionRepository(DifyAPIWorkflowNodeExecut
         """
         Delete node executions (and offloads) for the given workflow runs using indexed columns.

-        Uses the workflow_run_id index to target executions by run id.
+        Uses the composite index on (tenant_id, app_id, workflow_id, triggered_from, workflow_run_id)
+        by filtering on those columns with tuple IN.
         """
         if not runs:
             return 0, 0

-        run_ids = [run["run_id"] for run in runs]
-        run_id_filter = WorkflowNodeExecutionModel.workflow_run_id.in_(run_ids)
-        node_execution_ids = select(WorkflowNodeExecutionModel.id).where(run_id_filter)
+        tuple_values = [
+            (
+                run["tenant_id"],
+                run["app_id"],
+                run["workflow_id"],
+                DifyAPISQLAlchemyWorkflowNodeExecutionRepository._map_run_triggered_from_to_node_triggered_from(
+                    run["triggered_from"]
+                ),
+                run["run_id"],
+            )
+            for run in runs
+        ]
+
+        node_execution_ids = session.scalars(
+            select(WorkflowNodeExecutionModel.id).where(
+                tuple_(
+                    WorkflowNodeExecutionModel.tenant_id,
+                    WorkflowNodeExecutionModel.app_id,
+                    WorkflowNodeExecutionModel.workflow_id,
+                    WorkflowNodeExecutionModel.triggered_from,
+                    WorkflowNodeExecutionModel.workflow_run_id,
+                ).in_(tuple_values)
+            )
+        ).all()
+
+        if not node_execution_ids:
+            return 0, 0

         offloads_deleted = (
             cast(

@@ -353,7 +378,7 @@ class DifyAPISQLAlchemyWorkflowNodeExecutionRepository(DifyAPIWorkflowNodeExecut
             cast(
                 CursorResult,
                 session.execute(
-                    delete(WorkflowNodeExecutionModel).where(run_id_filter)
+                    delete(WorkflowNodeExecutionModel).where(WorkflowNodeExecutionModel.id.in_(node_execution_ids))
                 ),
             ).rowcount
             or 0

@@ -369,18 +394,38 @@ class DifyAPISQLAlchemyWorkflowNodeExecutionRepository(DifyAPIWorkflowNodeExecut
         if not runs:
             return 0, 0

-        run_ids = [run["run_id"] for run in runs]
-        run_id_filter = WorkflowNodeExecutionModel.workflow_run_id.in_(run_ids)
+        tuple_values = [
+            (
+                run["tenant_id"],
+                run["app_id"],
+                run["workflow_id"],
+                DifyAPISQLAlchemyWorkflowNodeExecutionRepository._map_run_triggered_from_to_node_triggered_from(
+                    run["triggered_from"]
+                ),
+                run["run_id"],
+            )
+            for run in runs
+        ]
+        tuple_filter = tuple_(
+            WorkflowNodeExecutionModel.tenant_id,
+            WorkflowNodeExecutionModel.app_id,
+            WorkflowNodeExecutionModel.workflow_id,
+            WorkflowNodeExecutionModel.triggered_from,
+            WorkflowNodeExecutionModel.workflow_run_id,
+        ).in_(tuple_values)

         node_executions_count = (
-            session.scalar(select(func.count()).select_from(WorkflowNodeExecutionModel).where(run_id_filter)) or 0
+            session.scalar(select(func.count()).select_from(WorkflowNodeExecutionModel).where(tuple_filter)) or 0
         )
-        node_execution_ids = select(WorkflowNodeExecutionModel.id).where(run_id_filter)
         offloads_count = (
             session.scalar(
                 select(func.count())
                 .select_from(WorkflowNodeExecutionOffload)
-                .where(WorkflowNodeExecutionOffload.node_execution_id.in_(node_execution_ids))
+                .join(
+                    WorkflowNodeExecutionModel,
+                    WorkflowNodeExecutionOffload.node_execution_id == WorkflowNodeExecutionModel.id,
+                )
+                .where(tuple_filter)
             )
             or 0
         )

@@ -42,7 +42,6 @@ class BillingService:
         params = {"tenant_id": tenant_id}

         billing_info = cls._send_request("GET", "/subscription/info", params=params)
-        cls._log_expiration_date_if_missing(tenant_id, billing_info, "get_info")
         return billing_info

     @classmethod

@@ -282,7 +281,6 @@ class BillingService:
             try:
                 subscription_plan = subscription_adapter.validate_python(plan)
                 results[tenant_id] = subscription_plan
-                cls._log_expiration_date_if_missing(tenant_id, subscription_plan, "get_plan_bulk")
             except Exception:
                 logger.exception(
                     "get_plan_bulk: failed to validate subscription plan for tenant(%s)", tenant_id

@@ -298,21 +296,6 @@ class BillingService:
     def _make_plan_cache_key(cls, tenant_id: str) -> str:
         return f"{cls._PLAN_CACHE_KEY_PREFIX}{tenant_id}"

-    @staticmethod
-    def _log_expiration_date_if_missing(tenant_id: str, payload: dict, source: str) -> None:
-        expiration_date = None
-        if isinstance(payload, dict):
-            if "expiration_date" in payload:
-                expiration_date = payload.get("expiration_date")
-            elif isinstance(payload.get("subscription"), dict):
-                expiration_date = payload["subscription"].get("expiration_date")
-        if expiration_date == -1:
-            logger.warning(
-                "billing %s: tenant %s returned expiration_date=-1",
-                source,
-                tenant_id,
-            )
-
     @classmethod
     def get_plan_bulk_with_cache(cls, tenant_ids: Sequence[str]) -> dict[str, SubscriptionPlan]:
         """

@@ -1,11 +1,8 @@
 import datetime
 import logging
-import time
 from collections.abc import Iterable, Sequence
-from contextlib import contextmanager

 import click
-from sqlalchemy import event
 from sqlalchemy.orm import Session, sessionmaker

 from configs import dify_config

@@ -31,8 +28,6 @@ class WorkflowRunCleanup:
         end_before: datetime.datetime | None = None,
         workflow_run_repo: APIWorkflowRunRepository | None = None,
         dry_run: bool = False,
-        log_sql: bool = False,
-        log_sql_min_ms: int = 0,
     ):
         if (start_from is None) ^ (end_before is None):
             raise ValueError("start_from and end_before must be both set or both omitted.")

@@ -50,8 +45,6 @@ class WorkflowRunCleanup:
         self.batch_size = batch_size
         self._cleanup_whitelist: set[str] | None = None
         self.dry_run = dry_run
-        self.log_sql = log_sql
-        self.log_sql_min_ms = max(0, log_sql_min_ms)
         self.free_plan_grace_period_days = dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD
         self.workflow_run_repo: APIWorkflowRunRepository
         if workflow_run_repo:

@@ -63,38 +56,6 @@ class WorkflowRunCleanup:
             session_maker = sessionmaker(bind=db.engine, expire_on_commit=False)
             self.workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker)

-    @contextmanager
-    def _sql_logger(self):
-        if not self.log_sql:
-            yield
-            return
-
-        def _before_cursor_execute(conn, cursor, statement, parameters, context, executemany) -> None:
-            context._dify_sql_start_time = time.monotonic()
-            context._dify_sql_statement = statement
-            context._dify_sql_parameters = parameters
-
-        def _after_cursor_execute(conn, cursor, statement, parameters, context, executemany) -> None:
-            start = getattr(context, "_dify_sql_start_time", None)
-            if start is None:
-                return
-            elapsed_ms = (time.monotonic() - start) * 1000
-            if elapsed_ms < self.log_sql_min_ms:
-                return
-            logged_statement = getattr(context, "_dify_sql_statement", statement)
-            logged_parameters = getattr(context, "_dify_sql_parameters", parameters)
-            click.echo(f"[sql] {elapsed_ms:.1f} ms {logged_statement}")
-            if logged_parameters:
-                click.echo(f"[sql] params: {logged_parameters}")
-
-        event.listen(db.engine, "before_cursor_execute", _before_cursor_execute)
-        event.listen(db.engine, "after_cursor_execute", _after_cursor_execute)
-        try:
-            yield
-        finally:
-            event.remove(db.engine, "before_cursor_execute", _before_cursor_execute)
-            event.remove(db.engine, "after_cursor_execute", _after_cursor_execute)
-
     def run(self) -> None:
         click.echo(
             click.style(

@@ -113,95 +74,74 @@ class WorkflowRunCleanup:
         batch_index = 0
         last_seen: tuple[datetime.datetime, str] | None = None

-        with self._sql_logger():
-            while True:
-                batch_start = time.monotonic()
-                run_rows = self.workflow_run_repo.get_runs_batch_by_time_range(
-                    start_from=self.window_start,
-                    end_before=self.window_end,
-                    last_seen=last_seen,
-                    batch_size=self.batch_size,
-                )
-                fetch_ms = (time.monotonic() - batch_start) * 1000
-                if not run_rows:
-                    break
+        while True:
+            run_rows = self.workflow_run_repo.get_runs_batch_by_time_range(
+                start_from=self.window_start,
+                end_before=self.window_end,
+                last_seen=last_seen,
+                batch_size=self.batch_size,
+            )
+            if not run_rows:
+                break

-                batch_index += 1
-                last_seen = (run_rows[-1].created_at, run_rows[-1].id)
-                tenant_ids = {row.tenant_id for row in run_rows}
-                billing_start = time.monotonic()
-                free_tenants = self._filter_free_tenants(tenant_ids)
-                billing_ms = (time.monotonic() - billing_start) * 1000
-                free_runs = [row for row in run_rows if row.tenant_id in free_tenants]
-                paid_or_skipped = len(run_rows) - len(free_runs)
+            batch_index += 1
+            last_seen = (run_rows[-1].created_at, run_rows[-1].id)
+            tenant_ids = {row.tenant_id for row in run_rows}
+            free_tenants = self._filter_free_tenants(tenant_ids)
+            free_runs = [row for row in run_rows if row.tenant_id in free_tenants]
+            paid_or_skipped = len(run_rows) - len(free_runs)

-                if self.log_sql:
-                    click.echo(
-                        click.style(
-                            f"[batch #{batch_index}] fetch_ms={fetch_ms:.1f} billing_ms={billing_ms:.1f}",
-                            fg="white",
-                        )
-                    )
-
-                if not free_runs:
-                    skipped_message = (
-                        f"[batch #{batch_index}] skipped (no sandbox runs in batch, "
-                        f"{paid_or_skipped} paid/unknown)"
-                    )
-                    click.echo(
-                        click.style(
-                            skipped_message,
-                            fg="yellow",
-                        )
-                    )
-                    continue
-
-                total_runs_targeted += len(free_runs)
-
-                if self.dry_run:
-                    count_start = time.monotonic()
-                    batch_counts = self.workflow_run_repo.count_runs_with_related(
-                        free_runs,
-                        count_node_executions=self._count_node_executions,
-                        count_trigger_logs=self._count_trigger_logs,
-                    )
-                    count_ms = (time.monotonic() - count_start) * 1000
-                    if self.log_sql:
-                        click.echo(click.style(f"[batch #{batch_index}] count_ms={count_ms:.1f}", fg="white"))
-                    if related_totals is not None:
-                        for key in related_totals:
-                            related_totals[key] += batch_counts.get(key, 0)
-                    sample_ids = ", ".join(run.id for run in free_runs[:5])
-                    click.echo(
-                        click.style(
-                            f"[batch #{batch_index}] would delete {len(free_runs)} runs "
-                            f"(sample ids: {sample_ids}) and skip {paid_or_skipped} paid/unknown",
-                            fg="yellow",
-                        )
-                    )
-                    continue
-
-                try:
-                    counts = self.workflow_run_repo.delete_runs_with_related(
-                        free_runs,
-                        delete_node_executions=self._delete_node_executions,
-                        delete_trigger_logs=self._delete_trigger_logs,
-                    )
-                except Exception:
-                    logger.exception("Failed to delete workflow runs batch ending at %s", last_seen[0])
-                    raise
-
-                total_runs_deleted += counts["runs"]
+            if not free_runs:
                 click.echo(
                     click.style(
-                        f"[batch #{batch_index}] deleted runs: {counts['runs']} "
-                        f"(nodes {counts['node_executions']}, offloads {counts['offloads']}, "
-                        f"app_logs {counts['app_logs']}, trigger_logs {counts['trigger_logs']}, "
-                        f"pauses {counts['pauses']}, pause_reasons {counts['pause_reasons']}); "
-                        f"skipped {paid_or_skipped} paid/unknown",
-                        fg="green",
+                        f"[batch #{batch_index}] skipped (no sandbox runs in batch, {paid_or_skipped} paid/unknown)",
+                        fg="yellow",
                     )
                 )
+                continue
+
+            total_runs_targeted += len(free_runs)
+
+            if self.dry_run:
+                batch_counts = self.workflow_run_repo.count_runs_with_related(
+                    free_runs,
+                    count_node_executions=self._count_node_executions,
+                    count_trigger_logs=self._count_trigger_logs,
+                )
+                if related_totals is not None:
+                    for key in related_totals:
+                        related_totals[key] += batch_counts.get(key, 0)
+                sample_ids = ", ".join(run.id for run in free_runs[:5])
+                click.echo(
+                    click.style(
+                        f"[batch #{batch_index}] would delete {len(free_runs)} runs "
+                        f"(sample ids: {sample_ids}) and skip {paid_or_skipped} paid/unknown",
+                        fg="yellow",
+                    )
+                )
+                continue
+
+            try:
+                counts = self.workflow_run_repo.delete_runs_with_related(
+                    free_runs,
+                    delete_node_executions=self._delete_node_executions,
+                    delete_trigger_logs=self._delete_trigger_logs,
+                )
+            except Exception:
+                logger.exception("Failed to delete workflow runs batch ending at %s", last_seen[0])
+                raise
+
+            total_runs_deleted += counts["runs"]
+            click.echo(
+                click.style(
+                    f"[batch #{batch_index}] deleted runs: {counts['runs']} "
+                    f"(nodes {counts['node_executions']}, offloads {counts['offloads']}, "
+                    f"app_logs {counts['app_logs']}, trigger_logs {counts['trigger_logs']}, "
+                    f"pauses {counts['pauses']}, pause_reasons {counts['pause_reasons']}); "
+                    f"skipped {paid_or_skipped} paid/unknown",
+                    fg="green",
                )
            )

         if self.dry_run:
             if self.window_start:

@@ -65,15 +65,17 @@ const CardView: FC<ICardViewProps> = ({ appId, isInPanel, className }) => {
       <div className="text-xs text-text-secondary">
         {t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
       </div>
-      <div
-        className="cursor-pointer text-xs font-medium text-text-accent hover:underline"
+      <a
+        href={triggerDocUrl}
+        target="_blank"
+        rel="noopener noreferrer"
+        className="block cursor-pointer text-xs font-medium text-text-accent hover:underline"
         onClick={(event) => {
           event.stopPropagation()
-          window.open(triggerDocUrl, '_blank')
         }}
       >
         {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
-      </div>
+      </a>
     </div>
   ), [t, triggerDocUrl])

@@ -16,6 +16,7 @@ import { Theme } from '@/types/app'
 import SVGRenderer from '../svg-gallery' // Assumes svg-gallery.tsx is in /base directory

 const Flowchart = dynamic(() => import('@/app/components/base/mermaid'), { ssr: false })
+const QuadrantMatrix = dynamic(() => import('@/app/components/base/quadrant-matrix'), { ssr: false })

 // Available language https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD
 const capitalizationLanguageNameMap: Record<string, string> = {

@@ -40,6 +41,7 @@ const capitalizationLanguageNameMap: Record<string, string> = {
   latex: 'Latex',
   svg: 'SVG',
   abc: 'ABC',
+  quadrant: 'Quadrant',
 }
 const getCorrectCapitalizationLanguageName = (language: string) => {
   if (!language)

@@ -409,6 +411,12 @@ const CodeBlock: any = memo(({ inline, className, children = '', ...props }: any
         <MarkdownMusic children={content} />
       </ErrorBoundary>
     )
+  case 'quadrant':
+    return (
+      <ErrorBoundary>
+        <QuadrantMatrix content={content} />
+      </ErrorBoundary>
+    )
   default:
     return (
       <SyntaxHighlighter
web/app/components/base/quadrant-matrix/index.tsx (new file, 153 lines)

@@ -0,0 +1,153 @@
'use client'
import type { FC } from 'react'
import type { QuadrantData } from './types'
import { RiExpandDiagonalLine } from '@remixicon/react'
import { useCallback, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import ActionButton from '@/app/components/base/action-button'
import FullScreenModal from '@/app/components/base/fullscreen-modal'
import QuadrantCard from './quadrant-card'
import { isValidQuadrantData, QUADRANT_CONFIGS } from './types'

type QuadrantMatrixProps = {
  content: string
}

const QuadrantMatrix: FC<QuadrantMatrixProps> = ({ content }) => {
  const { t } = useTranslation()
  const [isExpanded, setIsExpanded] = useState(false)

  const parsedData = useMemo<QuadrantData | null>(() => {
    try {
      const trimmed = content.trim()
      const data = JSON.parse(trimmed)

      if (!isValidQuadrantData(data))
        return null

      return data
    }
    catch {
      return null
    }
  }, [content])

  const handleExpand = useCallback(() => {
    setIsExpanded(true)
  }, [])

  const handleClose = useCallback(() => {
    setIsExpanded(false)
  }, [])

  if (!parsedData) {
    return (
      <div className="flex items-center justify-center rounded-xl bg-components-panel-bg-blur p-8">
        <div className="text-center text-text-secondary">
          <div className="system-md-semibold mb-2">{t('quadrantMatrix.invalidData', { ns: 'app' })}</div>
          <div className="text-sm text-text-tertiary">
            {t('quadrantMatrix.invalidDataDesc', { ns: 'app' })}
          </div>
        </div>
      </div>
    )
  }

  const totalTasks
    = parsedData.q1.length
    + parsedData.q2.length
    + parsedData.q3.length
    + parsedData.q4.length

  // Shared grid content component
  const renderGrid = (expanded: boolean) => (
    <div className="grid grid-cols-2 gap-3">
      {/* Row 1: Q1 (Do First), Q2 (Schedule) */}
      <QuadrantCard
        config={QUADRANT_CONFIGS.q1}
        tasks={parsedData.q1}
        expanded={expanded}
      />
      <QuadrantCard
        config={QUADRANT_CONFIGS.q2}
        tasks={parsedData.q2}
        expanded={expanded}
      />

      {/* Row 2: Q3 (Delegate), Q4 (Don't Do) */}
      <QuadrantCard
        config={QUADRANT_CONFIGS.q3}
        tasks={parsedData.q3}
        expanded={expanded}
      />
      <QuadrantCard
        config={QUADRANT_CONFIGS.q4}
        tasks={parsedData.q4}
        expanded={expanded}
      />
    </div>
  )

  return (
    <>
      <div className="w-full overflow-hidden rounded-xl bg-components-panel-bg-blur p-4">
        {/* Header */}
        <div className="mb-4 flex items-center justify-between">
          <div>
            <div className="system-md-semibold text-text-primary">
              {t('quadrantMatrix.title', { ns: 'app' })}
            </div>
            <div className="text-xs text-text-tertiary">
              {t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
            </div>
          </div>
          {/* Legend + Expand Button */}
          <div className="flex items-center gap-3">
            <div className="flex items-center gap-3 text-[11px] text-text-quaternary">
              <span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
              <span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
            </div>
            <ActionButton onClick={handleExpand}>
              <RiExpandDiagonalLine className="h-4 w-4" />
            </ActionButton>
          </div>
        </div>

        {/* 2x2 Grid */}
        {renderGrid(false)}
      </div>

      {/* Fullscreen Modal */}
      <FullScreenModal
        open={isExpanded}
        onClose={handleClose}
        closable
      >
        <div className="flex h-full flex-col p-6">
          {/* Modal Header */}
          <div className="mb-6 flex items-center justify-between">
            <div>
              <div className="text-xl font-semibold text-text-primary">
                {t('quadrantMatrix.title', { ns: 'app' })}
              </div>
              <div className="text-sm text-text-tertiary">
                {t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
              </div>
            </div>
            <div className="flex items-center gap-3 text-sm text-text-quaternary">
              <span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
              <span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
            </div>
          </div>

          {/* Expanded Grid */}
          <div className="min-h-0 flex-1">
            {renderGrid(true)}
          </div>
        </div>
      </FullScreenModal>
    </>
  )
}

export default QuadrantMatrix
web/app/components/base/quadrant-matrix/quadrant-card.tsx (new file, 102 lines)

@@ -0,0 +1,102 @@
'use client'
import type { FC } from 'react'
import type { QuadrantConfig, Task } from './types'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'
import TaskItem from './task-item'

type QuadrantCardProps = {
  config: QuadrantConfig
  tasks: Task[]
  expanded?: boolean
  maxDisplay?: number
}

const QuadrantCard: FC<QuadrantCardProps> = ({
  config,
  tasks,
  expanded = false,
  maxDisplay = 3,
}) => {
  const { t } = useTranslation()
  const { number, titleKey, subtitleKey, bgClass, borderClass, titleClass } = config
  const displayLimit = expanded ? Infinity : maxDisplay
  const displayTasks = tasks.slice(0, displayLimit)
  const remainingCount = Math.max(0, tasks.length - displayLimit)

  return (
    <div
      className={cn(
        'flex min-w-0 flex-col rounded-xl border p-3',
        bgClass,
        borderClass,
        expanded ? 'min-h-[280px]' : 'min-h-[200px]',
      )}
    >
      {/* Header with numbered circle */}
      <div className="mb-2 shrink-0">
        <div className="flex items-center gap-2">
          {/* Numbered circle */}
          <span className={cn(
            'flex h-5 w-5 items-center justify-center rounded-full border text-xs font-semibold',
            borderClass,
            titleClass,
          )}
          >
            {number}
          </span>
          <span className={cn('system-sm-semibold', titleClass)}>{t(titleKey, { ns: 'app' })}</span>
          {tasks.length > 0 && (
            <span className="bg-components-badge-bg-gray rounded-full px-1.5 py-0.5 text-[10px] font-medium text-text-tertiary">
              {tasks.length}
            </span>
          )}
        </div>
        <div className="text-[11px] text-text-tertiary">{t(subtitleKey, { ns: 'app' })}</div>
      </div>

      {/* Task List */}
      <div className={cn(
        'flex min-h-0 flex-1 flex-col gap-2',
        expanded && 'overflow-y-auto',
      )}
      >
        {displayTasks.length > 0
          ? (
            displayTasks.map((task) => {
              const taskKey = [
                task.name,
                task.deadline ?? 'no-deadline',
                task.importance_score,
                task.urgency_score,
                task.description ?? '',
                task.action_advice ?? '',
              ].join('|')

              return (
                <TaskItem
                  key={taskKey}
                  task={task}
                  expanded={expanded}
                />
              )
            })
          )
          : (
            <div className="flex flex-1 items-center justify-center text-xs text-text-quaternary">
              {t('quadrantMatrix.noTasks', { ns: 'app' })}
            </div>
          )}
      </div>

      {/* More indicator (only in non-expanded mode) */}
      {!expanded && remainingCount > 0 && (
        <div className="mt-2 shrink-0 text-center text-[11px] text-text-tertiary">
          {t('quadrantMatrix.more', { ns: 'app', count: remainingCount })}
        </div>
      )}
    </div>
  )
}

export default QuadrantCard
web/app/components/base/quadrant-matrix/task-item.tsx (new file, 88 lines)

@@ -0,0 +1,88 @@
'use client'
import type { FC } from 'react'
import type { Task } from './types'
import { RiCalendarLine } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'

type TaskItemProps = {
  task: Task
  expanded?: boolean
  showScores?: boolean
}

const TaskItem: FC<TaskItemProps> = ({ task, expanded = false, showScores = true }) => {
  const { t } = useTranslation()
  const { name, description, deadline, importance_score, urgency_score, action_advice } = task

  return (
    <div className="group min-w-0 rounded-lg bg-components-panel-bg p-2.5 shadow-xs transition-all hover:shadow-sm">
      {/* Header: Task Name + Scores */}
      <div className="flex items-start justify-between gap-2">
        <div
          className={cn(
            'system-sm-medium min-w-0 flex-1 text-text-primary',
            !expanded && 'truncate',
          )}
          title={name}
        >
          {name}
        </div>
        {showScores && (
          <div className="flex shrink-0 items-center gap-1 text-[10px] font-medium">
            <span className="text-text-accent">
              I:
              {importance_score}
            </span>
            <span className="text-text-warning">
              U:
              {urgency_score}
            </span>
          </div>
        )}
      </div>

      {/* Description */}
      {description && (
        <div className={cn(
          'mt-1 text-xs text-text-tertiary',
          !expanded && 'line-clamp-2',
        )}
        >
          {description}
        </div>
      )}

      {/* Deadline Badge */}
      {deadline && (
        <div className="mt-1.5">
          <span className="bg-components-badge-bg-gray inline-flex items-center gap-1 rounded px-1.5 py-0.5 text-[10px] text-text-tertiary">
            <RiCalendarLine className="h-3 w-3" />
            <span>
              {t('quadrantMatrix.deadline', { ns: 'app' })}
              {' '}
              {deadline}
            </span>
          </span>
        </div>
      )}

      {/* Action Advice */}
      {action_advice && (
        <div className="mt-2 border-t border-divider-subtle pt-2">
          <p
            className={cn(
              'text-xs italic text-text-quaternary',
              !expanded && 'line-clamp-2',
            )}
            title={!expanded ? action_advice : undefined}
          >
            {action_advice}
          </p>
        </div>
      )}
    </div>
  )
}

export default TaskItem
92
web/app/components/base/quadrant-matrix/types.ts
Normal file
@ -0,0 +1,92 @@
/**
 * Type definitions for Eisenhower Matrix (Task Quadrant) visualization
 */
import type { I18nKeysWithPrefix } from '@/types/i18n'

export type Task = {
  name: string
  description?: string
  deadline?: string // YYYY-MM-DD format
  importance_score: number // 0-100, based on goal alignment and long-term value
  urgency_score: number // 0-100, based on deadline pressure and delay penalty
  action_advice?: string // Suggested action for this task
}

export type QuadrantData = {
  q1: Task[] // Urgent & Important - Do First
  q2: Task[] // Not Urgent & Important - Schedule
  q3: Task[] // Urgent & Not Important - Delegate
  q4: Task[] // Not Urgent & Not Important - Don't Do
}

type QuadrantKeyBase = I18nKeysWithPrefix<'app', 'quadrantMatrix.q'>
type QuadrantTitleKey = Extract<QuadrantKeyBase, `${string}.title`>
type QuadrantSubtitleKey = Extract<QuadrantKeyBase, `${string}.subtitle`>

export type QuadrantConfig = {
  key: 'q1' | 'q2' | 'q3' | 'q4'
  number: number
  titleKey: QuadrantTitleKey // i18n key for title
  subtitleKey: QuadrantSubtitleKey // i18n key for subtitle
  bgClass: string
  borderClass: string
  titleClass: string
}

// Layout based on Eisenhower Matrix:
// Q1 (Do First) - top-left, Q2 (Schedule) - top-right
// Q3 (Delegate) - bottom-left, Q4 (Don't Do) - bottom-right
export const QUADRANT_CONFIGS: Record<string, QuadrantConfig> = {
  q1: {
    key: 'q1',
    number: 1,
    titleKey: 'quadrantMatrix.q1.title',
    subtitleKey: 'quadrantMatrix.q1.subtitle',
    bgClass: 'bg-state-destructive-hover',
    borderClass: 'border-state-destructive-border',
    titleClass: 'text-text-destructive',
  },
  q2: {
    key: 'q2',
    number: 2,
    titleKey: 'quadrantMatrix.q2.title',
    subtitleKey: 'quadrantMatrix.q2.subtitle',
    bgClass: 'bg-state-accent-hover',
    borderClass: 'border-state-accent-border',
    titleClass: 'text-text-accent',
  },
  q3: {
    key: 'q3',
    number: 3,
    titleKey: 'quadrantMatrix.q3.title',
    subtitleKey: 'quadrantMatrix.q3.subtitle',
    bgClass: 'bg-state-warning-hover',
    borderClass: 'border-state-warning-border',
    titleClass: 'text-text-warning',
  },
  q4: {
    key: 'q4',
    number: 4,
    titleKey: 'quadrantMatrix.q4.title',
    subtitleKey: 'quadrantMatrix.q4.subtitle',
    bgClass: 'bg-components-panel-on-panel-item-bg',
    borderClass: 'border-divider-regular',
    titleClass: 'text-text-tertiary',
  },
}

/**
 * Validates if the data structure matches QuadrantData interface
 */
export function isValidQuadrantData(data: unknown): data is QuadrantData {
  if (typeof data !== 'object' || data === null)
    return false

  const d = data as Record<string, unknown>
  return (
    Array.isArray(d.q1)
    && Array.isArray(d.q2)
    && Array.isArray(d.q3)
    && Array.isArray(d.q4)
  )
}
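A minimal sketch of the intended call pattern for isValidQuadrantData, assuming the quadrant data arrives as a raw JSON string from the model; parseQuadrantOutput, renderMatrix, and renderError are placeholders, not code from this diff.

import type { QuadrantData } from './types'
import { isValidQuadrantData } from './types'

// Placeholder sinks, for illustration only
declare function renderMatrix(data: QuadrantData): void
declare function renderError(): void

function parseQuadrantOutput(rawOutput: string) {
  let parsed: unknown = null
  try {
    parsed = JSON.parse(rawOutput)
  }
  catch {
    // malformed JSON: `parsed` stays null and the guard rejects it
  }

  if (isValidQuadrantData(parsed))
    renderMatrix(parsed) // narrowed to QuadrantData by the type guard
  else
    renderError() // pairs with the quadrantMatrix.invalidData strings below
}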
@ -20,6 +20,7 @@ const SearchInput: FC<SearchInputProps> = ({
  white,
}) => {
  const { t } = useTranslation()
  const inputRef = useRef<HTMLInputElement>(null)
  const [focus, setFocus] = useState<boolean>(false)
  const isComposing = useRef<boolean>(false)
  const [compositionValue, setCompositionValue] = useState<string>('')
@ -36,6 +37,7 @@ const SearchInput: FC<SearchInputProps> = ({
        <RiSearchLine className="h-4 w-4 text-components-input-text-placeholder" aria-hidden="true" />
      </div>
      <input
        ref={inputRef}
        type="text"
        name="query"
        className={cn(
@ -65,14 +67,17 @@ const SearchInput: FC<SearchInputProps> = ({
        autoComplete="off"
      />
      {value && (
        <div
          className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center"
        <button
          type="button"
          aria-label={t('operation.clear', { ns: 'common' })}
          className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center border-none bg-transparent p-0"
          onClick={() => {
            onChange('')
            inputRef.current?.focus()
          }}
        >
          <RiCloseCircleFill className="h-4 w-4 text-text-quaternary group-hover/clear:text-text-tertiary" />
        </div>
        </button>
      )}
    </div>
  )
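A note on the hunk above: swapping the clickable <div> for a native <button type="button"> gives the clear control keyboard focus and button semantics for assistive tech without extra ARIA wiring; the new aria-label supplies its accessible name, and refocusing the input after clearing keeps keyboard users in place.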
@ -5,6 +5,7 @@ import type { NodeOutPutVar, Variable } from '@/app/components/workflow/types'
import { useBoolean } from 'ahooks'
import * as React from 'react'
import { useEffect, useRef, useState } from 'react'
import { createPortal } from 'react-dom'
import { useTranslation } from 'react-i18next'
import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
import { cn } from '@/utils/classnames'
@ -147,7 +148,7 @@ const CodeEditor: FC<Props> = ({
        onMount={onEditorMounted}
        placeholder={t('common.jinjaEditorPlaceholder', { ns: 'workflow' })!}
      />
      {isShowVarPicker && (
      {isShowVarPicker && createPortal(
        <div
          ref={popupRef}
          className="w-[228px] space-y-1 rounded-lg border border-components-panel-border bg-components-panel-bg p-1 shadow-lg"
@ -164,7 +165,8 @@ const CodeEditor: FC<Props> = ({
            onChange={handleSelectVar}
            isSupportFileVar={false}
          />
        </div>
        </div>,
        document.body,
      )}
    </div>
  )
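The change above swaps a plain conditional render for createPortal(..., document.body), which mounts the variable-picker popup at the end of <body> rather than inside the editor subtree; that way ancestors with overflow: hidden or their own stacking contexts cannot clip or cover the popup, while the existing popupRef positioning logic is unchanged.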
@ -196,6 +196,24 @@
  "publishApp.notSet": "Not set",
  "publishApp.notSetDesc": "Currently nobody can access the web app. Please set permissions.",
  "publishApp.title": "Who can access web app",
  "quadrantMatrix.deadline": "DDL:",
  "quadrantMatrix.invalidData": "Invalid Quadrant Data",
  "quadrantMatrix.invalidDataDesc": "Expected JSON format with q1, q2, q3, q4 arrays",
  "quadrantMatrix.legend.importance": "I = Importance",
  "quadrantMatrix.legend.urgency": "U = Urgency",
  "quadrantMatrix.more": "+{{count}} more",
  "quadrantMatrix.noTasks": "No tasks",
  "quadrantMatrix.q1.subtitle": "Urgent & Important",
  "quadrantMatrix.q1.title": "Do First",
  "quadrantMatrix.q2.subtitle": "Important & Not Urgent",
  "quadrantMatrix.q2.title": "Schedule",
  "quadrantMatrix.q3.subtitle": "Urgent & Not Important",
  "quadrantMatrix.q3.title": "Delegate",
  "quadrantMatrix.q4.subtitle": "Not Urgent & Not Important",
  "quadrantMatrix.q4.title": "Don't Do",
  "quadrantMatrix.taskCount_one": "{{count}} task prioritized",
  "quadrantMatrix.taskCount_other": "{{count}} tasks prioritized",
  "quadrantMatrix.title": "Eisenhower Matrix",
  "removeOriginal": "Delete the original app",
  "roadmap": "See our roadmap",
  "showMyCreatedAppsOnly": "Created by me",
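The taskCount_one / taskCount_other pair uses i18next's plural-suffix convention: call sites reference the bare key and pass count, and i18next selects the right form. A sketch of the call (totalTasks is an assumed local):

// Resolves to "1 task prioritized" or "N tasks prioritized"
t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })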
@ -196,6 +196,24 @@
  "publishApp.notSet": "未设置",
  "publishApp.notSetDesc": "当前任何人都无法访问 Web 应用。请设置访问权限。",
  "publishApp.title": "谁可以访问 web 应用",
  "quadrantMatrix.deadline": "截止:",
  "quadrantMatrix.invalidData": "无效的象限数据",
  "quadrantMatrix.invalidDataDesc": "需要包含 q1, q2, q3, q4 数组的 JSON 格式",
  "quadrantMatrix.legend.importance": "I = 重要性",
  "quadrantMatrix.legend.urgency": "U = 紧急性",
  "quadrantMatrix.more": "+{{count}} 更多",
  "quadrantMatrix.noTasks": "暂无任务",
  "quadrantMatrix.q1.subtitle": "紧急且重要",
  "quadrantMatrix.q1.title": "立即执行",
  "quadrantMatrix.q2.subtitle": "重要但不紧急",
  "quadrantMatrix.q2.title": "计划安排",
  "quadrantMatrix.q3.subtitle": "紧急但不重要",
  "quadrantMatrix.q3.title": "委派他人",
  "quadrantMatrix.q4.subtitle": "不紧急也不重要",
  "quadrantMatrix.q4.title": "不要做",
  "quadrantMatrix.taskCount_one": "{{count}} 个任务已排序",
  "quadrantMatrix.taskCount_other": "{{count}} 个任务已排序",
  "quadrantMatrix.title": "艾森豪威尔矩阵",
  "removeOriginal": "删除原应用",
  "roadmap": "产品路线图",
  "showMyCreatedAppsOnly": "我创建的",
@ -24,8 +24,12 @@ export type FetchOptionType = Omit<RequestInit, 'body'> & {
}

const afterResponse204: AfterResponseHook = async (_request, _options, response) => {
  if (response.status === 204)
    return Response.json({ result: 'success' })
  if (response.status === 204) {
    return new Response(JSON.stringify({ result: 'success' }), {
      status: 200,
      headers: { 'Content-Type': ContentType.json },
    })
  }
}

export type ResponseError = {
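AfterResponseHook is ky's hook type, so afterResponse204 only takes effect once registered on a client. The client construction sits outside this hunk, so the following is a sketch of the assumed wiring, not code from this diff:

import ky from 'ky'

// Map 204 No Content to a JSON success body so downstream
// `await response.json()` calls never throw on an empty body.
const client = ky.create({
  hooks: {
    afterResponse: [afterResponse204],
  },
})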