Compare commits

...

27 Commits

Author SHA1 Message Date
yyh
a0aa8cdb45 Merge remote-tracking branch 'origin/main' into feature/task-quadrant-view 2026-01-16 18:20:29 +08:00
yyh
ae8618877b fix(web): quadrant matrix i18n 2026-01-16 18:17:28 +08:00
가은 정
fad6fa141d chore: improve accessibility for learn more link (#31120)
Co-authored-by: khmandarrin <jeong-ga-eun@jeong-ga-eun-ui-MacBookAir.local>
2026-01-16 18:12:07 +08:00
Pádraic Slattery
30821fd26c chore: Update outdated GitHub Actions versions (#31114) 2026-01-16 17:56:55 +08:00
Xiangxuan Qu
1a9fdd9a65 refactor: migrate tag list API query parameters to Pydantic (#31097)
Co-authored-by: fghpdf <fghpdf@users.noreply.github.com>
2026-01-16 17:49:52 +08:00
Stream
de610cbf39 fix: call get_text_content() instead of casting to str (#31121)
Signed-off-by: Stream <Stream_2@qq.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-16 18:41:00 +09:00
yyh
1c55602445 fix(web): add calendar icon and DDL label to deadline badge in task-item 2026-01-16 17:24:11 +08:00
yyh
a3f1220d23 feat(web): add fullscreen expand mode to quadrant-matrix component
- Add expand button in header to open FullScreenModal
- Add numbered circles (1-4) to quadrant headers
- Add expanded prop to show full content without line-clamp
- Reorder grid layout: Q1 top-left, Q2 top-right, Q3 bottom-left, Q4 bottom-right
- Remove axis labels for cleaner design
2026-01-16 17:16:13 +08:00
yyh
d62e16b9bb fix(web): improve quadrant-matrix layout and text overflow handling
- Simplify axis label layout with horizontal/vertical arrangement
- Add proper text truncation with line-clamp and tooltips
- Fix overflow issues by adding min-w-0 on flex children
- Move scores inline with task name for compact display
- Add task count badge to quadrant headers
- Reduce maxDisplay to 3 for better density
2026-01-16 16:58:57 +08:00
yyh
13f2a43ccc feat(web): add Eisenhower Matrix visualization component for task quadrants
Add a new quadrant-matrix component that renders tasks in a 2x2 grid based
on importance and urgency scores. Integrate with code-block as a new
'quadrant' language type for markdown rendering.
2026-01-16 16:58:56 +08:00
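For orientation, a minimal sketch of the importance/urgency-to-quadrant mapping such a component implies; the 0–1 score range, the 0.5 threshold, and the function name are illustrative assumptions, not the component's actual code:

```python
def quadrant(importance: float, urgency: float, threshold: float = 0.5) -> int:
    """Classic Eisenhower mapping (assumed scores in [0, 1]).

    Q1: important and urgent, Q2: important only,
    Q3: urgent only, Q4: neither.
    """
    if importance >= threshold:
        return 1 if urgency >= threshold else 2
    return 3 if urgency >= threshold else 4


assert quadrant(0.9, 0.8) == 1
assert quadrant(0.9, 0.1) == 2
```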
yyh
6903c31b84 fix(search-input): retain focus after clearing input (#31107) 2026-01-16 16:22:14 +08:00
盐粒 Yanli
b2cc9b255d chore: Update coding agent workflow for backend (#31093) 2026-01-16 14:28:47 +08:00
XiaoBa
e9f0e1e839 fix(web): replace Response.json with legacy Response constructor for pre-Chrome 105 compatibility(#31091) (#31095)
Co-authored-by: Xiaoba Yu <xb1823725853@gmail.com>
2026-01-16 14:26:23 +08:00
pavior
cd497a8c52 fix(web): use portal for variable picker in code editor (Fixes #31063) (#31066) 2026-01-16 13:31:57 +08:00
Stephen Zhou
7aab4529e6 chore: lint for state hooks (#31088) 2026-01-16 11:58:28 +08:00
E.G
4bff0cd0ab fix: resolve 'Expand all chunks' button not working (#31074)
Co-authored-by: GlobalStar117 <GlobalStar117@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2026-01-16 11:34:42 +08:00
byteforge
c98870c3f4 refactor: always preserve marketplace search state in URL (#31069)
Co-authored-by: Stephen Zhou <38493346+hyoban@users.noreply.github.com>
2026-01-16 08:52:53 +09:00
Stephen Zhou
b06c7c8f33 ci: disable limit annotation (#31072)
2026-01-15 23:04:26 +08:00
Stephen Zhou
1a2fce7055 ci: eslint annotation (#31056) 2026-01-15 21:49:46 +08:00
lif
2b021e8752 fix: remove hardcoded 48-character limit from text inputs (#30156)
Signed-off-by: majiayu000 <1835304752@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2026-01-15 17:43:00 +08:00
wangxiaolei
4a197b9458 fix: fix log updated_at is refreshed (#31045) 2026-01-15 15:42:46 +08:00
Xiyuan Chen
772ff636ec feat: credential sync fix for enterprise edition (#30626) 2026-01-14 23:33:24 -08:00
Stephen Zhou
ab1c5a2027 refactor: remove manual set query logic (#31039) 2026-01-15 15:25:43 +08:00
hj24
33e99f069b fix: message clean service ut (#31038) 2026-01-15 15:13:25 +08:00
hj24
52af829f1f refactor: enhance clean messages task (#29638)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: 非法操作 <hjlarry@163.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-15 14:03:17 +08:00
-LAN-
0ef8b5a0ca chore: bump version to 1.11.4 (#30961) 2026-01-15 11:36:15 +08:00
wangxiaolei
2bfc54314e feat: single run add opentelemetry (#31020) 2026-01-15 11:10:55 +08:00
65 changed files with 3767 additions and 881 deletions

View File

@@ -16,14 +16,14 @@ jobs:
- name: Check Docker Compose inputs
id: docker-compose-changes
uses: tj-actions/changed-files@v46
uses: tj-actions/changed-files@v47
with:
files: |
docker/generate_docker_compose
docker/.env.example
docker/docker-compose-template.yaml
docker/docker-compose.yaml
- uses: actions/setup-python@v5
- uses: actions/setup-python@v6
with:
python-version: "3.11"

View File

@@ -112,7 +112,7 @@ jobs:
context: "web"
steps:
- name: Download digests
uses: actions/download-artifact@v4
uses: actions/download-artifact@v7
with:
path: /tmp/digests
pattern: digests-${{ matrix.context }}-*

View File

@@ -19,7 +19,7 @@ jobs:
github.event.workflow_run.head_branch == 'deploy/agent-dev'
steps:
- name: Deploy to server
uses: appleboy/ssh-action@v0.1.8
uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.AGENT_DEV_SSH_HOST }}
username: ${{ secrets.SSH_USER }}

View File

@@ -16,7 +16,7 @@ jobs:
github.event.workflow_run.head_branch == 'deploy/dev'
steps:
- name: Deploy to server
uses: appleboy/ssh-action@v0.1.8
uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}

View File

@@ -20,7 +20,7 @@ jobs:
)
steps:
- name: Deploy to server
uses: appleboy/ssh-action@v0.1.8
uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.HITL_SSH_HOST }}
username: ${{ secrets.SSH_USER }}

View File

@@ -18,7 +18,7 @@ jobs:
pull-requests: write
steps:
- uses: actions/stale@v5
- uses: actions/stale@v10
with:
days-before-issue-stale: 15
days-before-issue-close: 3

View File

@@ -65,6 +65,9 @@ jobs:
defaults:
run:
working-directory: ./web
permissions:
checks: write
pull-requests: read
steps:
- name: Checkout code
@@ -103,7 +106,15 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web
run: |
pnpm run lint
pnpm run lint:report
continue-on-error: true
# - name: Annotate Code
# if: steps.changed-files.outputs.any_changed == 'true' && github.event_name == 'pull_request'
# uses: DerLev/eslint-annotations@51347b3a0abfb503fc8734d5ae31c4b151297fae
# with:
# eslint-report: web/eslint_report.json
# github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Web type check
if: steps.changed-files.outputs.any_changed == 'true'

View File

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 0

View File

@@ -12,12 +12,8 @@ The codebase is split into:
## Backend Workflow
- Read `api/AGENTS.md` for details
- Run backend CLI commands through `uv run --project api <command>`.
- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
- Integration tests are CI-only and are not expected to run in the local environment.
## Frontend Workflow

View File

@@ -61,7 +61,8 @@ check:
lint:
@echo "🔧 Running ruff format, check with fixes, import linter, and dotenv-linter..."
@uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
@uv run --project api --dev ruff format ./api
@uv run --project api --dev ruff check --fix ./api
@uv run --directory api --dev lint-imports
@uv run --project api --dev dotenv-linter ./api/.env.example ./web/.env.example
@echo "✅ Linting complete"
@@ -73,7 +74,12 @@ type-check:
test:
@echo "🧪 Running backend unit tests..."
@uv run --project api --dev dev/pytest/pytest_unit_tests.sh
@if [ -n "$(TARGET_TESTS)" ]; then \
echo "Target: $(TARGET_TESTS)"; \
uv run --project api --dev pytest $(TARGET_TESTS); \
else \
uv run --project api --dev dev/pytest/pytest_unit_tests.sh; \
fi
@echo "✅ Tests complete"
# Build Docker images
@@ -125,7 +131,7 @@ help:
@echo " make check - Check code with ruff"
@echo " make lint - Format, fix, and lint code (ruff, imports, dotenv)"
@echo " make type-check - Run type checking with basedpyright"
@echo " make test - Run backend unit tests"
@echo " make test - Run backend unit tests (or TARGET_TESTS=./api/tests/<target_tests>)"
@echo ""
@echo "Docker Build Targets:"
@echo " make build-web - Build web Docker image"

agent-notes/.gitkeep (new, empty file)
View File

View File

@@ -1,62 +1,236 @@
# Agent Skill Index
# API Agent Guide
## Agent Notes (must-check)
Before you start work on any backend file under `api/`, you MUST check whether a related note exists under:
- `agent-notes/<same-relative-path-as-target-file>.md`
Rules:
- **Path mapping**: for a target file `<path>/<name>.py`, the note must be `agent-notes/<path>/<name>.py.md` (same folder structure, same filename, plus `.md`); see the sketch after this list.
- **Before working**:
- If the note exists, read it first and follow any constraints/decisions recorded there.
- If the note conflicts with the current code, or references an "origin" file/path that has been deleted, renamed, or migrated, treat the **code as the single source of truth** and update the note to match reality.
- If the note does not exist, create it with a short architecture/intent summary and any relevant invariants/edge cases.
- **During working**:
- Keep the note in sync as you discover constraints, make decisions, or change approach.
- If you move/rename a file, migrate its note to the new mapped path (and fix any outdated references inside the note).
- Record non-obvious edge cases, trade-offs, and the test/verification plan as you go (not just at the end).
- Keep notes **coherent**: integrate new findings into the relevant sections and rewrite for clarity; avoid append-only “recent fix” / changelog-style additions unless the note is explicitly intended to be a changelog.
- **When finishing work**:
- Update the related note(s) to reflect what changed, why, and any new edge cases/tests.
- If a file is deleted, remove or clearly deprecate the corresponding note so it cannot be mistaken as current guidance.
- Keep notes concise and accurate; they are meant to prevent repeated rediscovery.
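A small sketch of the path-mapping rule above; the helper name is illustrative:

```python
from pathlib import Path


def note_path_for(target: str) -> Path:
    # <path>/<name>.py  ->  agent-notes/<path>/<name>.py.md
    return Path("agent-notes") / f"{target}.md"


assert note_path_for("services/plugin/plugin_service.py") == Path(
    "agent-notes/services/plugin/plugin_service.py.md"
)
```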
## Skill Index
Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.
______________________________________________________________________
### Platform Foundations
## Platform Foundations
- **[Infrastructure Overview](agent_skills/infra.md)**\
When to read this:
#### [Infrastructure Overview](agent_skills/infra.md)
- **When to read this**
- You need to understand where a feature belongs in the architecture.
- You're wiring storage, Redis, vector stores, or OTEL.
- You're about to add CLI commands or async jobs.\
What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.
- You're about to add CLI commands or async jobs.
- **What it covers**
- Configuration stack (`configs/app_config.py`, remote settings)
- Storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`)
- Redis conventions (`extensions/ext_redis.py`)
- Plugin runtime topology
- Vector-store factory (`core/rag/datasource/vdb/*`)
- Observability hooks
- SSRF proxy usage
- Core CLI commands
- **[Coding Style](agent_skills/coding_style.md)**\
When to read this:
### Plugin & Extension Development
- You're writing or reviewing backend code and need the authoritative checklist.
- You're unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
- You want the exact lint/type/test commands used in PRs.\
Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.
______________________________________________________________________
## Plugin & Extension Development
- **[Plugin Systems](agent_skills/plugin.md)**\
When to read this:
#### [Plugin Systems](agent_skills/plugin.md)
- **When to read this**
- You're building or debugging a marketplace plugin.
- You need to know how manifests, providers, daemons, and migrations fit together.\
What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.
- You need to know how manifests, providers, daemons, and migrations fit together.
- **What it covers**
- Plugin manifests (`core/plugin/entities/plugin.py`)
- Installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands)
- Runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent)
- Daemon coordination (`core/plugin/entities/plugin_daemon.py`)
- How provider registries surface capabilities to the rest of the platform
- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
When to read this:
#### [Plugin OAuth](agent_skills/plugin_oauth.md)
- **When to read this**
- You must integrate OAuth for a plugin or datasource.
- You're handling credential encryption or refresh flows.\
Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.
- You're handling credential encryption or refresh flows.
- **Topics**
- Credential storage
- Encryption helpers (`core/helper/provider_encryption.py`)
- OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`)
- How console/API layers expose the flows
______________________________________________________________________
### Workflow Entry & Execution
## Workflow Entry & Execution
#### [Trigger Concepts](agent_skills/trigger.md)
- **[Trigger Concepts](agent_skills/trigger.md)**\
When to read this:
- **When to read this**
- You're debugging why a workflow didn't start.
- You're adding a new trigger type or hook.
- You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.
- You need to trace async execution, draft debugging, or webhook/schedule pipelines.
- **Details**
- Start-node taxonomy
- Webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`)
- Async orchestration (`services/async_workflow_service.py`, Celery queues)
- Debug event bus
- Storage/logging interactions
______________________________________________________________________
## General Reminders
## Additional Notes for Agents
- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
- All skill docs assume you follow the coding style rules below—run the lint/type/test commands before submitting changes.
- When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
- If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
- Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
- When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.
## Coding Style
This is the default standard for backend code in this repo. Follow it for new code and use it as the checklist when reviewing changes.
### Linting & Formatting
- Use Ruff for formatting and linting (follow `.ruff.toml`).
- Keep each line under 120 characters (including spaces).
### Naming Conventions
- Use `snake_case` for variables and functions.
- Use `PascalCase` for classes.
- Use `UPPER_CASE` for constants.
### Typing & Class Layout
- Code should usually include type annotations that match the repo's current Python version (avoid untyped public APIs and “mystery” values).
- Prefer modern typing forms (e.g. `list[str]`, `dict[str, int]`) and avoid `Any` unless there's a strong reason.
- For classes, declare member variables at the top of the class body (before `__init__`) so the class shape is obvious at a glance:
```python
from datetime import datetime


class Example:
    user_id: str
    created_at: datetime

    def __init__(self, user_id: str, created_at: datetime) -> None:
        self.user_id = user_id
        self.created_at = created_at
```
### General Rules
- Use Pydantic v2 conventions.
- Use `uv` for Python package management in this repo (usually with `--project api`).
- Prefer simple functions over small “utility classes” for lightweight helpers.
- Avoid implementing dunder methods unless it's clearly needed and matches existing patterns.
- Never start long-running services as part of agent work (`uv run app.py`, `flask run`, etc.); running tests is allowed.
- Keep files below ~800 lines; split when necessary.
- Keep code readable and explicit—avoid clever hacks.
### Architecture & Boundaries
- Mirror the layered architecture: controller → service → core/domain.
- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
- Optimise for observability: deterministic control flow, clear logging, actionable errors.
### Logging & Errors
- Never use `print`; use a module-level logger:
- `logger = logging.getLogger(__name__)`
- Include tenant/app/workflow identifiers in log context when relevant.
- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate them into HTTP responses in controllers.
- Log retryable events at `warning`, terminal failures at `error`.
### SQLAlchemy Patterns
- Models inherit from `models.base.TypeBase`; do not create ad-hoc metadata or engines.
- Open sessions with context managers:
```python
from sqlalchemy.orm import Session

with Session(db.engine, expire_on_commit=False) as session:
    stmt = select(Workflow).where(
        Workflow.id == workflow_id,
        Workflow.tenant_id == tenant_id,
    )
    workflow = session.execute(stmt).scalar_one_or_none()
```
- Prefer SQLAlchemy expressions; avoid raw SQL unless necessary.
- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.); a sketch follows this list.
- Introduce repository abstractions only for very large tables (e.g., workflow executions) or when alternative storage strategies are required.
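A hedged sketch of the write-path safeguard mentioned above, reusing the names from the session example; the mutated field is illustrative:

```python
from sqlalchemy import select
from sqlalchemy.orm import Session

with Session(db.engine) as session:
    stmt = (
        select(Workflow)
        .where(Workflow.id == workflow_id, Workflow.tenant_id == tenant_id)
        .with_for_update()  # row lock protects the read-modify-write below
    )
    workflow = session.execute(stmt).scalar_one_or_none()
    if workflow is not None:
        workflow.name = "renamed"  # illustrative write
        session.commit()
```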
### Storage & External I/O
- Access storage via `extensions.ext_storage.storage`.
- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
- Background tasks that touch storage must be idempotent, and should log relevant object identifiers.
### Pydantic Usage
- Define DTOs with Pydantic v2 models and forbid extras by default.
- Use `@field_validator` / `@model_validator` for domain rules.
Example:
```python
from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator


class TriggerConfig(BaseModel):
    endpoint: HttpUrl
    secret: str

    model_config = ConfigDict(extra="forbid")

    @field_validator("secret")
    def ensure_secret_prefix(cls, value: str) -> str:
        if not value.startswith("dify_"):
            raise ValueError("secret must start with dify_")
        return value
```
### Generics & Protocols
- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
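A compact sketch of the Protocol-plus-generics pattern described in these bullets; the cache names are illustrative:

```python
from typing import Generic, Protocol, TypeVar

K = TypeVar("K")
V = TypeVar("V")


class Cache(Protocol[K, V]):
    def get(self, key: K) -> V | None: ...
    def set(self, key: K, value: V) -> None: ...


class InMemoryCache(Generic[K, V]):
    def __init__(self) -> None:
        self._data: dict[K, V] = {}

    def get(self, key: K) -> V | None:
        return self._data.get(key)

    def set(self, key: K, value: V) -> None:
        self._data[key] = value


# Structural typing: InMemoryCache satisfies Cache without inheriting from it.
provider_cache: Cache[str, str] = InMemoryCache()
provider_cache.set("tenant", "sandbox")
```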
### Tooling & Checks
Quick checks while iterating:
- Format: `make format`
- Lint (includes auto-fix): `make lint`
- Type check: `make type-check`
- Targeted tests: `make test TARGET_TESTS=./api/tests/<target_tests>`
Before opening a PR / submitting:
- `make lint`
- `make type-check`
- `make test`
### Controllers & Services
- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
- Document non-obvious behaviour with concise comments.
### Miscellaneous
- Use `configs.dify_config` for configuration—never read environment variables directly.
- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
- Keep experimental scripts under `dev/`; do not ship them in production builds.

View File

@@ -1,115 +0,0 @@
## Linter
- Always follow `.ruff.toml`.
- Run `uv run ruff check --fix --unsafe-fixes`.
- Keep each line under 100 characters (including spaces).
## Code Style
- `snake_case` for variables and functions.
- `PascalCase` for classes.
- `UPPER_CASE` for constants.
## Rules
- Use Pydantic v2 standard.
- Use `uv` for package management.
- Do not override dunder methods like `__init__`, `__iadd__`, etc.
- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
- Prefer simple functions over classes for lightweight helpers.
- Keep files below 800 lines; split when necessary.
- Keep code readable—no clever hacks.
- Never use `print`; log with `logger = logging.getLogger(__name__)`.
## Guiding Principles
- Mirror the project's layered architecture: controller → service → core/domain.
- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
- Optimise for observability: deterministic control flow, clear logging, actionable errors.
## SQLAlchemy Patterns
- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.
- Open sessions with context managers:
```python
from sqlalchemy.orm import Session

with Session(db.engine, expire_on_commit=False) as session:
    stmt = select(Workflow).where(
        Workflow.id == workflow_id,
        Workflow.tenant_id == tenant_id,
    )
    workflow = session.execute(stmt).scalar_one_or_none()
```
- Use SQLAlchemy expressions; avoid raw SQL unless necessary.
- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.
- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
## Storage & External IO
- Access storage via `extensions.ext_storage.storage`.
- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
- Background tasks that touch storage must be idempotent and log the relevant object identifiers.
## Pydantic Usage
- Define DTOs with Pydantic v2 models and forbid extras by default.
- Use `@field_validator` / `@model_validator` for domain rules.
- Example:
```python
from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator


class TriggerConfig(BaseModel):
    endpoint: HttpUrl
    secret: str

    model_config = ConfigDict(extra="forbid")

    @field_validator("secret")
    def ensure_secret_prefix(cls, value: str) -> str:
        if not value.startswith("dify_"):
            raise ValueError("secret must start with dify_")
        return value
```
## Generics & Protocols
- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
## Error Handling & Logging
- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
- Declare `logger = logging.getLogger(__name__)` at module top.
- Include tenant/app/workflow identifiers in log context.
- Log retryable events at `warning`, terminal failures at `error`.
## Tooling & Checks
- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
- Type checks: `uv run --directory api --dev basedpyright`.
- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
- Run all of the above before submitting your work.
## Controllers & Services
- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
- Document non-obvious behaviour with concise comments.
## Miscellaneous
- Use `configs.dify_config` for configuration—never read environment variables directly.
- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
- Keep experimental scripts under `dev/`; do not ship them in production builds.

View File

@@ -3,6 +3,7 @@ import datetime
import json
import logging
import secrets
import time
from typing import Any
import click
@@ -46,6 +47,8 @@ from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpi
from services.plugin.data_migration import PluginDataMigration
from services.plugin.plugin_migration import PluginMigration
from services.plugin.plugin_service import PluginService
from services.retention.conversation.messages_clean_policy import create_message_clean_policy
from services.retention.conversation.messages_clean_service import MessagesCleanService
from services.retention.workflow_run.clear_free_plan_expired_workflow_run_logs import WorkflowRunCleanup
from tasks.remove_app_and_related_data_task import delete_draft_variables_batch
@@ -2172,3 +2175,79 @@ def migrate_oss(
except Exception as e:
db.session.rollback()
click.echo(click.style(f"Failed to update DB storage_type: {str(e)}", fg="red"))
@click.command("clean-expired-messages", help="Clean expired messages.")
@click.option(
"--start-from",
type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
required=True,
help="Lower bound (inclusive) for created_at.",
)
@click.option(
"--end-before",
type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
required=True,
help="Upper bound (exclusive) for created_at.",
)
@click.option("--batch-size", default=1000, show_default=True, help="Batch size for selecting messages.")
@click.option(
"--graceful-period",
default=21,
show_default=True,
help="Graceful period in days after subscription expiration, will be ignored when billing is disabled.",
)
@click.option("--dry-run", is_flag=True, default=False, help="Show messages logs would be cleaned without deleting")
def clean_expired_messages(
batch_size: int,
graceful_period: int,
start_from: datetime.datetime,
end_before: datetime.datetime,
dry_run: bool,
):
"""
Clean expired messages and related data for tenants based on clean policy.
"""
click.echo(click.style("clean_messages: start clean messages.", fg="green"))
start_at = time.perf_counter()
try:
# Create policy based on billing configuration
# NOTE: graceful_period will be ignored when billing is disabled.
policy = create_message_clean_policy(graceful_period_days=graceful_period)
# Create and run the cleanup service
service = MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
batch_size=batch_size,
dry_run=dry_run,
)
stats = service.run()
end_at = time.perf_counter()
click.echo(
click.style(
f"clean_messages: completed successfully\n"
f" - Latency: {end_at - start_at:.2f}s\n"
f" - Batches processed: {stats['batches']}\n"
f" - Total messages scanned: {stats['total_messages']}\n"
f" - Messages filtered: {stats['filtered_messages']}\n"
f" - Messages deleted: {stats['total_deleted']}",
fg="green",
)
)
except Exception as e:
end_at = time.perf_counter()
logger.exception("clean_messages failed")
click.echo(
click.style(
f"clean_messages: failed after {end_at - start_at:.2f}s - {str(e)}",
fg="red",
)
)
raise
click.echo(click.style("messages cleanup completed.", fg="green"))

View File

@@ -30,6 +30,11 @@ class TagBindingRemovePayload(BaseModel):
type: Literal["knowledge", "app"] | None = Field(default=None, description="Tag type")
class TagListQueryParam(BaseModel):
type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
keyword: str | None = Field(None, description="Search keyword")
register_schema_models(
console_ns,
TagBasePayload,
@@ -43,12 +48,15 @@ class TagListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@console_ns.doc(
params={"type": 'Tag type filter. Can be "knowledge" or "app".', "keyword": "Search keyword for tag name."}
)
@marshal_with(dataset_tag_fields)
def get(self):
_, current_tenant_id = current_account_with_tenant()
tag_type = request.args.get("type", type=str, default="")
keyword = request.args.get("keyword", default=None, type=str)
tags = TagService.get_tags(tag_type, current_tenant_id, keyword)
raw_args = request.args.to_dict()
param = TagListQueryParam.model_validate(raw_args)
tags = TagService.get_tags(param.type, current_tenant_id, param.keyword)
return tags, 200
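For illustration, the same model shape validated standalone (reconstructed from the hunk above): `model_validate` on the raw query dict applies the defaults and `Literal` checks that the `request.args.get` calls previously handled by hand.

```python
from typing import Literal

from pydantic import BaseModel, Field, ValidationError


class TagListQueryParam(BaseModel):
    type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter")
    keyword: str | None = Field(None, description="Search keyword")


param = TagListQueryParam.model_validate({"type": "app"})
assert param.type == "app" and param.keyword is None

try:
    TagListQueryParam.model_validate({"type": "bogus"})
except ValidationError:
    pass  # invalid values now fail fast instead of silently passing through
```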

View File

@@ -71,8 +71,8 @@ class LLMGenerator:
response: LLMResult = model_instance.invoke_llm(
prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
)
answer = cast(str, response.message.content)
if answer is None:
answer = response.message.get_text_content()
if answer == "":
return ""
try:
result_dict = json.loads(answer)
@@ -184,7 +184,7 @@ class LLMGenerator:
prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
)
rule_config["prompt"] = cast(str, response.message.content)
rule_config["prompt"] = response.message.get_text_content()
except InvokeError as e:
error = str(e)
@@ -237,13 +237,11 @@ class LLMGenerator:
return rule_config
rule_config["prompt"] = cast(str, prompt_content.message.content)
rule_config["prompt"] = prompt_content.message.get_text_content()
if not isinstance(prompt_content.message.content, str):
raise NotImplementedError("prompt content is not a string")
parameter_generate_prompt = parameter_template.format(
inputs={
"INPUT_TEXT": prompt_content.message.content,
"INPUT_TEXT": prompt_content.message.get_text_content(),
},
remove_template_variables=False,
)
@@ -253,7 +251,7 @@ class LLMGenerator:
statement_generate_prompt = statement_template.format(
inputs={
"TASK_DESCRIPTION": instruction,
"INPUT_TEXT": prompt_content.message.content,
"INPUT_TEXT": prompt_content.message.get_text_content(),
},
remove_template_variables=False,
)
@@ -263,7 +261,7 @@ class LLMGenerator:
parameter_content: LLMResult = model_instance.invoke_llm(
prompt_messages=list(parameter_messages), model_parameters=model_parameters, stream=False
)
rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', cast(str, parameter_content.message.content))
rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.get_text_content())
except InvokeError as e:
error = str(e)
error_step = "generate variables"
@@ -272,7 +270,7 @@ class LLMGenerator:
statement_content: LLMResult = model_instance.invoke_llm(
prompt_messages=list(statement_messages), model_parameters=model_parameters, stream=False
)
rule_config["opening_statement"] = cast(str, statement_content.message.content)
rule_config["opening_statement"] = statement_content.message.get_text_content()
except InvokeError as e:
error = str(e)
error_step = "generate conversation opener"
@@ -315,7 +313,7 @@ class LLMGenerator:
prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
)
generated_code = cast(str, response.message.content)
generated_code = response.message.get_text_content()
return {"code": generated_code, "language": code_language, "error": ""}
except InvokeError as e:
@@ -351,7 +349,7 @@ class LLMGenerator:
raise TypeError("Expected LLMResult when stream=False")
response = result
answer = cast(str, response.message.content)
answer = response.message.get_text_content()
return answer.strip()
@classmethod
@@ -375,10 +373,7 @@ class LLMGenerator:
prompt_messages=list(prompt_messages), model_parameters=model_parameters, stream=False
)
raw_content = response.message.content
if not isinstance(raw_content, str):
raise ValueError(f"LLM response content must be a string, got: {type(raw_content)}")
raw_content = response.message.get_text_content()
try:
parsed_content = json.loads(raw_content)
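The change above matters because `cast(str, ...)` only changes the static type; at runtime `message.content` can still be a list of content parts, which would break `json.loads` or `.strip()` downstream. A minimal sketch of what a `get_text_content`-style helper does; the part shape here is hypothetical (the real method lives on the message class):

```python
from dataclasses import dataclass


@dataclass
class TextPart:  # hypothetical stand-in for a multimodal content part
    type: str
    data: str


def get_text_content(content: str | list[TextPart] | None) -> str:
    """Normalise message content to plain text (assumed behaviour)."""
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    return "".join(p.data for p in content if p.type == "text")


assert get_text_content([TextPart("text", "hi"), TextPart("image", "...")]) == "hi"
```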

View File

@@ -189,8 +189,7 @@ class WorkflowEntry:
)
try:
# run node
generator = node.run()
generator = cls._traced_node_run(node)
except Exception as e:
logger.exception(
"error while running node, workflow_id=%s, node_id=%s, node_type=%s, node_version=%s",
@@ -323,8 +322,7 @@ class WorkflowEntry:
tenant_id=tenant_id,
)
# run node
generator = node.run()
generator = cls._traced_node_run(node)
return node, generator
except Exception as e:
@@ -430,3 +428,26 @@ class WorkflowEntry:
input_value = current_variable.value | input_value
variable_pool.add([variable_node_id] + variable_key_list, input_value)
@staticmethod
def _traced_node_run(node: Node) -> Generator[GraphNodeEventBase, None, None]:
"""
Wraps a node's run method with OpenTelemetry tracing and returns a generator.
"""
# Wrap node.run() with ObservabilityLayer hooks to produce node-level spans
layer = ObservabilityLayer()
layer.on_graph_start()
node.ensure_execution_id()
def _gen():
error: Exception | None = None
layer.on_node_run_start(node)
try:
yield from node.run()
except Exception as exc:
error = exc
raise
finally:
layer.on_node_run_end(node, error)
return _gen()

View File

@@ -6,6 +6,7 @@ from .create_site_record_when_app_created import handle as handle_create_site_re
from .delete_tool_parameters_cache_when_sync_draft_workflow import (
handle as handle_delete_tool_parameters_cache_when_sync_draft_workflow,
)
from .queue_credential_sync_when_tenant_created import handle as handle_queue_credential_sync_when_tenant_created
from .sync_plugin_trigger_when_app_created import handle as handle_sync_plugin_trigger_when_app_created
from .sync_webhook_when_app_created import handle as handle_sync_webhook_when_app_created
from .sync_workflow_schedule_when_app_published import handle as handle_sync_workflow_schedule_when_app_published
@@ -30,6 +31,7 @@ __all__ = [
"handle_create_installed_app_when_app_created",
"handle_create_site_record_when_app_created",
"handle_delete_tool_parameters_cache_when_sync_draft_workflow",
"handle_queue_credential_sync_when_tenant_created",
"handle_sync_plugin_trigger_when_app_created",
"handle_sync_webhook_when_app_created",
"handle_sync_workflow_schedule_when_app_published",

View File

@@ -0,0 +1,19 @@
from configs import dify_config
from events.tenant_event import tenant_was_created
from services.enterprise.workspace_sync import WorkspaceSyncService
@tenant_was_created.connect
def handle(sender, **kwargs):
"""Queue credential sync when a tenant/workspace is created."""
# Only queue sync tasks if plugin manager (enterprise feature) is enabled
if not dify_config.ENTERPRISE_ENABLED:
return
tenant = sender
# Determine source from kwargs if available, otherwise use generic
source = kwargs.get("source", "tenant_created")
# Queue credential sync task to Redis for enterprise backend to process
WorkspaceSyncService.queue_credential_sync(tenant.id, source=source)
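For context, a sketch of the producing side: blinker signals pass the sender positionally and extra context as keyword arguments, so a caller could supply the optional `source` like this (the call site is hypothetical):

```python
# Illustrative emit; the handler above falls back to "tenant_created"
# when no source kwarg is provided.
tenant_was_created.send(tenant, source="console_signup")
```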

View File

@@ -4,6 +4,7 @@ from dify_app import DifyApp
def init_app(app: DifyApp):
from commands import (
add_qdrant_index,
clean_expired_messages,
clean_workflow_runs,
cleanup_orphaned_draft_variables,
clear_free_plan_tenant_expired_logs,
@@ -58,6 +59,7 @@ def init_app(app: DifyApp):
transform_datasource_credentials,
install_rag_pipeline_plugins,
clean_workflow_runs,
clean_expired_messages,
]
for cmd in cmds_to_register:
app.cli.add_command(cmd)

View File

@@ -0,0 +1,33 @@
"""feat: add created_at id index to messages
Revision ID: 3334862ee907
Revises: 905527cc8fd3
Create Date: 2026-01-12 17:29:44.846544
"""
from alembic import op
import models as models
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3334862ee907'
down_revision = '905527cc8fd3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('messages', schema=None) as batch_op:
batch_op.create_index('message_created_at_id_idx', ['created_at', 'id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('messages', schema=None) as batch_op:
batch_op.drop_index('message_created_at_id_idx')
# ### end Alembic commands ###
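The composite (`created_at`, `id`) index exists to serve keyset (cursor) pagination; a comment sketch of the query shape it accelerates, mirroring the `MessagesCleanService` scan further below:

```python
# SELECT id, app_id, created_at FROM messages
# WHERE created_at < :end_before
#   AND (created_at > :last_created_at
#        OR (created_at = :last_created_at AND id > :last_id))
# ORDER BY created_at, id
# LIMIT :batch_size;
```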

View File

@@ -968,6 +968,7 @@ class Message(Base):
Index("message_workflow_run_id_idx", "conversation_id", "workflow_run_id"),
Index("message_created_at_idx", "created_at"),
Index("message_app_mode_idx", "app_mode"),
Index("message_created_at_id_idx", "created_at", "id"),
)
id: Mapped[str] = mapped_column(StringUUID, default=lambda: str(uuid4()))

View File

@@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.11.3"
version = "1.11.4"
requires-python = ">=3.11,<3.13"
dependencies = [

View File

@@ -1,90 +1,62 @@
import datetime
import logging
import time
import click
from sqlalchemy.exc import SQLAlchemyError
import app
from configs import dify_config
from enums.cloud_plan import CloudPlan
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.model import (
App,
Message,
MessageAgentThought,
MessageAnnotation,
MessageChain,
MessageFeedback,
MessageFile,
)
from models.web import SavedMessage
from services.feature_service import FeatureService
from services.retention.conversation.messages_clean_policy import create_message_clean_policy
from services.retention.conversation.messages_clean_service import MessagesCleanService
logger = logging.getLogger(__name__)
@app.celery.task(queue="dataset")
@app.celery.task(queue="retention")
def clean_messages():
click.echo(click.style("Start clean messages.", fg="green"))
start_at = time.perf_counter()
plan_sandbox_clean_message_day = datetime.datetime.now() - datetime.timedelta(
days=dify_config.PLAN_SANDBOX_CLEAN_MESSAGE_DAY_SETTING
)
while True:
try:
# Main query with join and filter
messages = (
db.session.query(Message)
.where(Message.created_at < plan_sandbox_clean_message_day)
.order_by(Message.created_at.desc())
.limit(100)
.all()
)
"""
Clean expired messages based on clean policy.
except SQLAlchemyError:
raise
if not messages:
break
for message in messages:
app = db.session.query(App).filter_by(id=message.app_id).first()
if not app:
logger.warning(
"Expected App record to exist, but none was found, app_id=%s, message_id=%s",
message.app_id,
message.id,
)
continue
features_cache_key = f"features:{app.tenant_id}"
plan_cache = redis_client.get(features_cache_key)
if plan_cache is None:
features = FeatureService.get_features(app.tenant_id)
redis_client.setex(features_cache_key, 600, features.billing.subscription.plan)
plan = features.billing.subscription.plan
else:
plan = plan_cache.decode()
if plan == CloudPlan.SANDBOX:
# clean related message
db.session.query(MessageFeedback).where(MessageFeedback.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(MessageAnnotation).where(MessageAnnotation.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(MessageChain).where(MessageChain.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(MessageAgentThought).where(MessageAgentThought.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(MessageFile).where(MessageFile.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(SavedMessage).where(SavedMessage.message_id == message.id).delete(
synchronize_session=False
)
db.session.query(Message).where(Message.id == message.id).delete()
db.session.commit()
end_at = time.perf_counter()
click.echo(click.style(f"Cleaned messages from db success latency: {end_at - start_at}", fg="green"))
This task uses MessagesCleanService to efficiently clean messages in batches.
The behavior depends on BILLING_ENABLED configuration:
- BILLING_ENABLED=True: only delete messages from sandbox tenants (with whitelist/grace period)
- BILLING_ENABLED=False: delete all messages within the time range
"""
click.echo(click.style("clean_messages: start clean messages.", fg="green"))
start_at = time.perf_counter()
try:
# Create policy based on billing configuration
policy = create_message_clean_policy(
graceful_period_days=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD,
)
# Create and run the cleanup service
service = MessagesCleanService.from_days(
policy=policy,
days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS,
batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE,
)
stats = service.run()
end_at = time.perf_counter()
click.echo(
click.style(
f"clean_messages: completed successfully\n"
f" - Latency: {end_at - start_at:.2f}s\n"
f" - Batches processed: {stats['batches']}\n"
f" - Total messages scanned: {stats['total_messages']}\n"
f" - Messages filtered: {stats['filtered_messages']}\n"
f" - Messages deleted: {stats['total_deleted']}",
fg="green",
)
)
except Exception as e:
end_at = time.perf_counter()
logger.exception("clean_messages failed")
click.echo(
click.style(
f"clean_messages: failed after {end_at - start_at:.2f}s - {str(e)}",
fg="red",
)
)
raise

View File

@@ -0,0 +1,58 @@
import json
import logging
import uuid
from datetime import UTC, datetime
from redis import RedisError
from extensions.ext_redis import redis_client
logger = logging.getLogger(__name__)
WORKSPACE_SYNC_QUEUE = "enterprise:workspace:sync:queue"
WORKSPACE_SYNC_PROCESSING = "enterprise:workspace:sync:processing"
class WorkspaceSyncService:
"""Service to publish workspace sync tasks to Redis queue for enterprise backend consumption"""
@staticmethod
def queue_credential_sync(workspace_id: str, *, source: str) -> bool:
"""
Queue a credential sync task for a newly created workspace.
This publishes a task to Redis that will be consumed by the enterprise backend
worker to sync credentials with the plugin-manager.
Args:
workspace_id: The workspace/tenant ID to sync credentials for
source: Source of the sync request (for debugging/tracking)
Returns:
bool: True if task was queued successfully, False otherwise
"""
try:
task = {
"task_id": str(uuid.uuid4()),
"workspace_id": workspace_id,
"retry_count": 0,
"created_at": datetime.now(UTC).isoformat(),
"source": source,
}
# Push to Redis list (queue) - LPUSH adds to the head, worker consumes from tail with RPOP
redis_client.lpush(WORKSPACE_SYNC_QUEUE, json.dumps(task))
logger.info(
"Queued credential sync task for workspace %s, task_id: %s, source: %s",
workspace_id,
task["task_id"],
source,
)
return True
except (RedisError, TypeError) as e:
logger.error("Failed to queue credential sync for workspace %s: %s", workspace_id, str(e), exc_info=True)
# Don't raise - we don't want to fail workspace creation if queueing fails
# The scheduled task will catch it later
return False
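A hedged sketch of the consuming side described in the docstring: `LPUSH` at the head plus `RPOP` at the tail yields FIFO order. The worker itself lives in the enterprise backend; the client setup here is illustrative:

```python
import json

import redis

r = redis.Redis()
WORKSPACE_SYNC_QUEUE = "enterprise:workspace:sync:queue"


def consume_one() -> dict | None:
    """Pop the oldest sync task, if any (RPOP pairs with the LPUSH producer)."""
    raw = r.rpop(WORKSPACE_SYNC_QUEUE)
    return json.loads(raw) if raw else None
```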

View File

@@ -0,0 +1,216 @@
import datetime
import logging
from abc import ABC, abstractmethod
from collections.abc import Callable, Sequence
from dataclasses import dataclass
from configs import dify_config
from enums.cloud_plan import CloudPlan
from services.billing_service import BillingService, SubscriptionPlan
logger = logging.getLogger(__name__)
@dataclass
class SimpleMessage:
id: str
app_id: str
created_at: datetime.datetime
class MessagesCleanPolicy(ABC):
"""
Abstract base class for message cleanup policies.
A policy determines which messages from a batch should be deleted.
"""
@abstractmethod
def filter_message_ids(
self,
messages: Sequence[SimpleMessage],
app_to_tenant: dict[str, str],
) -> Sequence[str]:
"""
Filter messages and return IDs of messages that should be deleted.
Args:
messages: Batch of messages to evaluate
app_to_tenant: Mapping from app_id to tenant_id
Returns:
List of message IDs that should be deleted
"""
...
class BillingDisabledPolicy(MessagesCleanPolicy):
"""
Policy for community or enterprise edition (billing disabled).
No special filter logic; just return all message IDs.
"""
def filter_message_ids(
self,
messages: Sequence[SimpleMessage],
app_to_tenant: dict[str, str],
) -> Sequence[str]:
return [msg.id for msg in messages]
class BillingSandboxPolicy(MessagesCleanPolicy):
"""
Policy for sandbox plan tenants in cloud edition (billing enabled).
Filters messages based on sandbox plan expiration rules:
- Skip tenants in the whitelist
- Only delete messages from sandbox plan tenants
- Respect grace period after subscription expiration
- Safe default: if tenant mapping or plan is missing, do NOT delete
"""
def __init__(
self,
plan_provider: Callable[[Sequence[str]], dict[str, SubscriptionPlan]],
graceful_period_days: int = 21,
tenant_whitelist: Sequence[str] | None = None,
current_timestamp: int | None = None,
) -> None:
self._graceful_period_days = graceful_period_days
self._tenant_whitelist: Sequence[str] = tenant_whitelist or []
self._plan_provider = plan_provider
self._current_timestamp = current_timestamp
def filter_message_ids(
self,
messages: Sequence[SimpleMessage],
app_to_tenant: dict[str, str],
) -> Sequence[str]:
"""
Filter messages based on sandbox plan expiration rules.
Args:
messages: Batch of messages to evaluate
app_to_tenant: Mapping from app_id to tenant_id
Returns:
List of message IDs that should be deleted
"""
if not messages or not app_to_tenant:
return []
# Get unique tenant_ids and fetch subscription plans
tenant_ids = list(set(app_to_tenant.values()))
tenant_plans = self._plan_provider(tenant_ids)
if not tenant_plans:
return []
# Apply sandbox deletion rules
return self._filter_expired_sandbox_messages(
messages=messages,
app_to_tenant=app_to_tenant,
tenant_plans=tenant_plans,
)
def _filter_expired_sandbox_messages(
self,
messages: Sequence[SimpleMessage],
app_to_tenant: dict[str, str],
tenant_plans: dict[str, SubscriptionPlan],
) -> list[str]:
"""
Filter messages that should be deleted based on sandbox plan expiration.
A message should be deleted if:
1. It belongs to a sandbox tenant AND
2. Either:
a) The tenant has no previous subscription (expiration_date == -1), OR
b) The subscription expired more than graceful_period_days ago
Args:
messages: List of message objects with id and app_id attributes
app_to_tenant: Mapping from app_id to tenant_id
tenant_plans: Mapping from tenant_id to subscription plan info
Returns:
List of message IDs that should be deleted
"""
current_timestamp = self._current_timestamp
if current_timestamp is None:
current_timestamp = int(datetime.datetime.now(datetime.UTC).timestamp())
sandbox_message_ids: list[str] = []
graceful_period_seconds = self._graceful_period_days * 24 * 60 * 60
for msg in messages:
# Get tenant_id for this message's app
tenant_id = app_to_tenant.get(msg.app_id)
if not tenant_id:
continue
# Skip tenant messages in whitelist
if tenant_id in self._tenant_whitelist:
continue
# Get subscription plan for this tenant
tenant_plan = tenant_plans.get(tenant_id)
if not tenant_plan:
continue
plan = str(tenant_plan["plan"])
expiration_date = int(tenant_plan["expiration_date"])
# Only process sandbox plans
if plan != CloudPlan.SANDBOX:
continue
# Case 1: No previous subscription (-1 means never had a paid subscription)
if expiration_date == -1:
sandbox_message_ids.append(msg.id)
continue
# Case 2: Subscription expired beyond grace period
if current_timestamp - expiration_date > graceful_period_seconds:
sandbox_message_ids.append(msg.id)
return sandbox_message_ids
def create_message_clean_policy(
graceful_period_days: int = 21,
current_timestamp: int | None = None,
) -> MessagesCleanPolicy:
"""
Factory function to create the appropriate message clean policy.
Determines which policy to use based on BILLING_ENABLED configuration:
- If BILLING_ENABLED is True: returns BillingSandboxPolicy
- If BILLING_ENABLED is False: returns BillingDisabledPolicy
Args:
graceful_period_days: Grace period in days after subscription expiration (default: 21)
current_timestamp: Current Unix timestamp for testing (default: None, uses current time)
"""
if not dify_config.BILLING_ENABLED:
logger.info("create_message_clean_policy: billing disabled, using BillingDisabledPolicy")
return BillingDisabledPolicy()
# Billing enabled - fetch whitelist from BillingService
tenant_whitelist = BillingService.get_expired_subscription_cleanup_whitelist()
plan_provider = BillingService.get_plan_bulk_with_cache
logger.info(
"create_message_clean_policy: billing enabled, using BillingSandboxPolicy "
"(graceful_period_days=%s, whitelist=%s)",
graceful_period_days,
tenant_whitelist,
)
return BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=graceful_period_days,
tenant_whitelist=tenant_whitelist,
current_timestamp=current_timestamp,
)
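A worked check of the grace-period rule implemented above, with illustrative timestamps:

```python
GRACEFUL_PERIOD_SECONDS = 21 * 24 * 60 * 60  # default 21 days = 1,814,400 s

now = 1_700_000_000                      # illustrative current Unix timestamp
expired_30_days_ago = now - 30 * 86_400  # beyond the grace period -> delete
expired_10_days_ago = now - 10 * 86_400  # inside the grace period -> keep
never_subscribed = -1                    # sentinel: never paid -> delete

assert now - expired_30_days_ago > GRACEFUL_PERIOD_SECONDS
assert not (now - expired_10_days_ago > GRACEFUL_PERIOD_SECONDS)
```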

View File

@@ -0,0 +1,334 @@
import datetime
import logging
import random
from collections.abc import Sequence
from typing import cast
from sqlalchemy import delete, select
from sqlalchemy.engine import CursorResult
from sqlalchemy.orm import Session
from extensions.ext_database import db
from models.model import (
App,
AppAnnotationHitHistory,
DatasetRetrieverResource,
Message,
MessageAgentThought,
MessageAnnotation,
MessageChain,
MessageFeedback,
MessageFile,
)
from models.web import SavedMessage
from services.retention.conversation.messages_clean_policy import (
MessagesCleanPolicy,
SimpleMessage,
)
logger = logging.getLogger(__name__)
class MessagesCleanService:
"""
Service for cleaning expired messages based on retention policies.
Compatible with non-cloud editions (billing disabled): all messages in the time range will be deleted.
If billing is enabled: only sandbox plan tenant messages are deleted (with whitelist and grace period support).
"""
def __init__(
self,
policy: MessagesCleanPolicy,
end_before: datetime.datetime,
start_from: datetime.datetime | None = None,
batch_size: int = 1000,
dry_run: bool = False,
) -> None:
"""
Initialize the service with cleanup parameters.
Args:
policy: The policy that determines which messages to delete
end_before: End time (exclusive) of the range
start_from: Optional start time (inclusive) of the range
batch_size: Number of messages to process per batch
dry_run: Whether to perform a dry run (no actual deletion)
"""
self._policy = policy
self._end_before = end_before
self._start_from = start_from
self._batch_size = batch_size
self._dry_run = dry_run
@classmethod
def from_time_range(
cls,
policy: MessagesCleanPolicy,
start_from: datetime.datetime,
end_before: datetime.datetime,
batch_size: int = 1000,
dry_run: bool = False,
) -> "MessagesCleanService":
"""
Create a service instance for cleaning messages within a specific time range.
Time range is [start_from, end_before).
Args:
policy: The policy that determines which messages to delete
start_from: Start time (inclusive) of the range
end_before: End time (exclusive) of the range
batch_size: Number of messages to process per batch
dry_run: Whether to perform a dry run (no actual deletion)
Returns:
MessagesCleanService instance
Raises:
ValueError: If start_from >= end_before or invalid parameters
"""
if start_from >= end_before:
raise ValueError(f"start_from ({start_from}) must be less than end_before ({end_before})")
if batch_size <= 0:
raise ValueError(f"batch_size ({batch_size}) must be greater than 0")
logger.info(
"clean_messages: start_from=%s, end_before=%s, batch_size=%s, policy=%s",
start_from,
end_before,
batch_size,
policy.__class__.__name__,
)
return cls(
policy=policy,
end_before=end_before,
start_from=start_from,
batch_size=batch_size,
dry_run=dry_run,
)
@classmethod
def from_days(
cls,
policy: MessagesCleanPolicy,
days: int = 30,
batch_size: int = 1000,
dry_run: bool = False,
) -> "MessagesCleanService":
"""
Create a service instance for cleaning messages older than specified days.
Args:
policy: The policy that determines which messages to delete
days: Number of days to look back from now
batch_size: Number of messages to process per batch
dry_run: Whether to perform a dry run (no actual deletion)
Returns:
MessagesCleanService instance
Raises:
ValueError: If invalid parameters
"""
if days < 0:
raise ValueError(f"days ({days}) must be greater than or equal to 0")
if batch_size <= 0:
raise ValueError(f"batch_size ({batch_size}) must be greater than 0")
end_before = datetime.datetime.now() - datetime.timedelta(days=days)
logger.info(
"clean_messages: days=%s, end_before=%s, batch_size=%s, policy=%s",
days,
end_before,
batch_size,
policy.__class__.__name__,
)
return cls(policy=policy, end_before=end_before, start_from=None, batch_size=batch_size, dry_run=dry_run)
def run(self) -> dict[str, int]:
"""
Execute the message cleanup operation.
Returns:
Dict with statistics: batches, total_messages, filtered_messages, total_deleted
"""
return self._clean_messages_by_time_range()
def _clean_messages_by_time_range(self) -> dict[str, int]:
"""
Clean messages within a time range using cursor-based pagination.
Time range is [start_from, end_before)
Steps:
1. Iterate messages using cursor pagination (by created_at, id)
2. Query app_id -> tenant_id mapping
3. Delegate to policy to determine which messages to delete
4. Batch delete messages and their relations
Returns:
Dict with statistics: batches, total_messages, filtered_messages, total_deleted
"""
stats = {
"batches": 0,
"total_messages": 0,
"filtered_messages": 0,
"total_deleted": 0,
}
# Cursor-based pagination using (created_at, id) to avoid infinite loops
# and ensure proper ordering with time-based filtering
_cursor: tuple[datetime.datetime, str] | None = None
logger.info(
"clean_messages: start cleaning messages (dry_run=%s), start_from=%s, end_before=%s",
self._dry_run,
self._start_from,
self._end_before,
)
while True:
stats["batches"] += 1
# Step 1: Fetch a batch of messages using cursor
with Session(db.engine, expire_on_commit=False) as session:
msg_stmt = (
select(Message.id, Message.app_id, Message.created_at)
.where(Message.created_at < self._end_before)
.order_by(Message.created_at, Message.id)
.limit(self._batch_size)
)
if self._start_from:
msg_stmt = msg_stmt.where(Message.created_at >= self._start_from)
# Apply cursor condition: (created_at, id) > (last_created_at, last_message_id)
# This translates to:
# created_at > last_created_at OR (created_at = last_created_at AND id > last_message_id)
if _cursor:
# Continuing from previous batch
msg_stmt = msg_stmt.where(
(Message.created_at > _cursor[0])
| ((Message.created_at == _cursor[0]) & (Message.id > _cursor[1]))
)
raw_messages = list(session.execute(msg_stmt).all())
messages = [
SimpleMessage(id=msg_id, app_id=app_id, created_at=msg_created_at)
for msg_id, app_id, msg_created_at in raw_messages
]
# Track total messages fetched across all batches
stats["total_messages"] += len(messages)
if not messages:
logger.info("clean_messages (batch %s): no more messages to process", stats["batches"])
break
# Update cursor to the last message's (created_at, id)
_cursor = (messages[-1].created_at, messages[-1].id)
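# Note that the cursor advances even when the policy later filters out every row
# in this batch, so batches never repeat and pagination progress is independent
# of policy decisions.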
# Step 2: Extract app_ids and query tenant_ids
app_ids = list({msg.app_id for msg in messages})
if not app_ids:
logger.info("clean_messages (batch %s): no app_ids found, skip", stats["batches"])
continue
app_stmt = select(App.id, App.tenant_id).where(App.id.in_(app_ids))
apps = list(session.execute(app_stmt).all())
if not apps:
logger.info("clean_messages (batch %s): no apps found, skip", stats["batches"])
continue
# Build app_id -> tenant_id mapping
app_to_tenant: dict[str, str] = {app.id: app.tenant_id for app in apps}
# Step 3: Delegate to policy to determine which messages to delete
message_ids_to_delete = self._policy.filter_message_ids(messages, app_to_tenant)
if not message_ids_to_delete:
logger.info("clean_messages (batch %s): no messages to delete, skip", stats["batches"])
continue
stats["filtered_messages"] += len(message_ids_to_delete)
# Step 4: Batch delete messages and their relations
if not self._dry_run:
with Session(db.engine, expire_on_commit=False) as session:
# Delete related records first
self._batch_delete_message_relations(session, message_ids_to_delete)
# Delete messages
delete_stmt = delete(Message).where(Message.id.in_(message_ids_to_delete))
delete_result = cast(CursorResult, session.execute(delete_stmt))
messages_deleted = delete_result.rowcount
session.commit()
stats["total_deleted"] += messages_deleted
logger.info(
"clean_messages (batch %s): processed %s messages, deleted %s messages",
stats["batches"],
len(messages),
messages_deleted,
)
else:
# Log random sample of message IDs that would be deleted (up to 10)
sample_size = min(10, len(message_ids_to_delete))
sampled_ids = random.sample(list(message_ids_to_delete), sample_size)
logger.info(
"clean_messages (batch %s, dry_run): would delete %s messages, sampling %s ids:",
stats["batches"],
len(message_ids_to_delete),
sample_size,
)
for msg_id in sampled_ids:
logger.info("clean_messages (batch %s, dry_run) sample: message_id=%s", stats["batches"], msg_id)
logger.info(
"clean_messages completed: total batches: %s, total messages: %s, filtered messages: %s, total deleted: %s",
stats["batches"],
stats["total_messages"],
stats["filtered_messages"],
stats["total_deleted"],
)
return stats
@staticmethod
def _batch_delete_message_relations(session: Session, message_ids: Sequence[str]) -> None:
"""
Batch delete all related records for given message IDs.
Args:
session: Database session
message_ids: List of message IDs to delete relations for
"""
if not message_ids:
return
# Delete all related records in batch
session.execute(delete(MessageFeedback).where(MessageFeedback.message_id.in_(message_ids)))
session.execute(delete(MessageAnnotation).where(MessageAnnotation.message_id.in_(message_ids)))
session.execute(delete(MessageChain).where(MessageChain.message_id.in_(message_ids)))
session.execute(delete(MessageAgentThought).where(MessageAgentThought.message_id.in_(message_ids)))
session.execute(delete(MessageFile).where(MessageFile.message_id.in_(message_ids)))
session.execute(delete(SavedMessage).where(SavedMessage.message_id.in_(message_ids)))
session.execute(delete(AppAnnotationHitHistory).where(AppAnnotationHitHistory.message_id.in_(message_ids)))
session.execute(delete(DatasetRetrieverResource).where(DatasetRetrieverResource.message_id.in_(message_ids)))
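A dry-run sketch over an explicit window, using the from_time_range factory shown above; nothing is deleted, candidate IDs are only sampled into the log:

import datetime

from services.retention.conversation.messages_clean_policy import BillingDisabledPolicy
from services.retention.conversation.messages_clean_service import MessagesCleanService

# Preview the January 2024 window without touching the database.
service = MessagesCleanService.from_time_range(
    policy=BillingDisabledPolicy(),
    start_from=datetime.datetime(2024, 1, 1),
    end_before=datetime.datetime(2024, 2, 1),
    dry_run=True,
)
stats = service.run()
# In dry-run mode "total_deleted" stays 0; "filtered_messages" counts would-be deletions.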

View File

@@ -0,0 +1,627 @@
import datetime
from unittest.mock import MagicMock, patch
import pytest
from enums.cloud_plan import CloudPlan
from services.retention.conversation.messages_clean_policy import (
BillingDisabledPolicy,
BillingSandboxPolicy,
SimpleMessage,
create_message_clean_policy,
)
from services.retention.conversation.messages_clean_service import MessagesCleanService
def make_simple_message(msg_id: str, app_id: str) -> SimpleMessage:
"""Helper to create a SimpleMessage with a fixed created_at timestamp."""
return SimpleMessage(id=msg_id, app_id=app_id, created_at=datetime.datetime(2024, 1, 1))
def make_plan_provider(tenant_plans: dict) -> MagicMock:
"""Helper to create a mock plan_provider that returns the given tenant_plans."""
provider = MagicMock()
provider.return_value = tenant_plans
return provider
class TestBillingSandboxPolicyFilterMessageIds:
"""Unit tests for BillingSandboxPolicy.filter_message_ids method."""
# Fixed timestamp for deterministic tests
CURRENT_TIMESTAMP = 1000000
GRACEFUL_PERIOD_DAYS = 8
GRACEFUL_PERIOD_SECONDS = GRACEFUL_PERIOD_DAYS * 24 * 60 * 60
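# Worked example: 8 * 24 * 60 * 60 = 691200 seconds, so a tenant becomes eligible
# only when current_timestamp - expiration_date > 691200 (strictly greater; the
# boundary case is exercised in test_exactly_at_boundary_excluded below).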
def test_missing_tenant_mapping_excluded(self):
"""Test that messages with missing app-to-tenant mapping are excluded."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
]
app_to_tenant = {} # No mapping
tenant_plans = {"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1}}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert
assert list(result) == []
def test_missing_tenant_plan_excluded(self):
"""Test that messages with missing tenant plan are excluded (safe default)."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
tenant_plans = {} # No plans
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert
assert list(result) == []
def test_non_sandbox_plan_excluded(self):
"""Test that messages from non-sandbox plans (PROFESSIONAL/TEAM) are excluded."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
make_simple_message("msg3", "app3"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.PROFESSIONAL, "expiration_date": -1},
"tenant2": {"plan": CloudPlan.TEAM, "expiration_date": -1},
"tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": -1}, # Only this one
}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - only msg3 (sandbox tenant) should be included
assert set(result) == {"msg3"}
def test_whitelist_skip(self):
"""Test that whitelisted tenants are excluded even if sandbox + expired."""
# Arrange
messages = [
make_simple_message("msg1", "app1"), # Whitelisted - excluded
make_simple_message("msg2", "app2"), # Not whitelisted - included
make_simple_message("msg3", "app3"), # Whitelisted - excluded
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
"tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
"tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
}
plan_provider = make_plan_provider(tenant_plans)
tenant_whitelist = ["tenant1", "tenant3"]
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
tenant_whitelist=tenant_whitelist,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - only msg2 should be included
assert set(result) == {"msg2"}
def test_no_previous_subscription_included(self):
"""Test that messages with expiration_date=-1 (no previous subscription) are included."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
"tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - all messages should be included
assert set(result) == {"msg1", "msg2"}
def test_within_grace_period_excluded(self):
"""Test that messages within grace period are excluded."""
# Arrange
now = self.CURRENT_TIMESTAMP
expired_1_day_ago = now - (1 * 24 * 60 * 60)
expired_5_days_ago = now - (5 * 24 * 60 * 60)
expired_7_days_ago = now - (7 * 24 * 60 * 60)
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
make_simple_message("msg3", "app3"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant3"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_1_day_ago},
"tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_5_days_ago},
"tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_7_days_ago},
}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS, # 8 days
current_timestamp=now,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - all within 8-day grace period, none should be included
assert list(result) == []
def test_exactly_at_boundary_excluded(self):
"""Test that messages exactly at grace period boundary are excluded (code uses >)."""
# Arrange
now = self.CURRENT_TIMESTAMP
expired_exactly_8_days_ago = now - self.GRACEFUL_PERIOD_SECONDS # Exactly at boundary
messages = [make_simple_message("msg1", "app1")]
app_to_tenant = {"app1": "tenant1"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_exactly_8_days_ago},
}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=now,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - exactly at boundary (==) should be excluded (code uses >)
assert list(result) == []
def test_beyond_grace_period_included(self):
"""Test that messages beyond grace period are included."""
# Arrange
now = self.CURRENT_TIMESTAMP
expired_9_days_ago = now - (9 * 24 * 60 * 60) # Just beyond 8-day grace
expired_30_days_ago = now - (30 * 24 * 60 * 60) # Well beyond
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_9_days_ago},
"tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": expired_30_days_ago},
}
plan_provider = make_plan_provider(tenant_plans)
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=now,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - both beyond grace period, should be included
assert set(result) == {"msg1", "msg2"}
def test_empty_messages_returns_empty(self):
"""Test that empty messages returns empty list."""
# Arrange
messages: list[SimpleMessage] = []
app_to_tenant = {"app1": "tenant1"}
plan_provider = make_plan_provider({"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1}})
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert
assert list(result) == []
def test_plan_provider_called_with_correct_tenant_ids(self):
"""Test that plan_provider is called with correct tenant_ids."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
make_simple_message("msg3", "app3"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2", "app3": "tenant1"} # tenant1 appears twice
plan_provider = make_plan_provider({})
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
current_timestamp=self.CURRENT_TIMESTAMP,
)
# Act
policy.filter_message_ids(messages, app_to_tenant)
# Assert - plan_provider should be called once with unique tenant_ids
plan_provider.assert_called_once()
called_tenant_ids = set(plan_provider.call_args[0][0])
assert called_tenant_ids == {"tenant1", "tenant2"}
def test_complex_mixed_scenario(self):
"""Test complex scenario with mixed plans, expirations, whitelist, and missing mappings."""
# Arrange
now = self.CURRENT_TIMESTAMP
sandbox_expired_old = now - (15 * 24 * 60 * 60) # Beyond grace
sandbox_expired_recent = now - (3 * 24 * 60 * 60) # Within grace
future_expiration = now + (30 * 24 * 60 * 60)
messages = [
make_simple_message("msg1", "app1"), # Sandbox, no subscription - included
make_simple_message("msg2", "app2"), # Sandbox, expired old - included
make_simple_message("msg3", "app3"), # Sandbox, within grace - excluded
make_simple_message("msg4", "app4"), # Team plan, active - excluded
make_simple_message("msg5", "app5"), # No tenant mapping - excluded
make_simple_message("msg6", "app6"), # No plan info - excluded
make_simple_message("msg7", "app7"), # Sandbox, expired old, whitelisted - excluded
]
app_to_tenant = {
"app1": "tenant1",
"app2": "tenant2",
"app3": "tenant3",
"app4": "tenant4",
"app6": "tenant6", # Has mapping but no plan
"app7": "tenant7",
# app5 has no mapping
}
tenant_plans = {
"tenant1": {"plan": CloudPlan.SANDBOX, "expiration_date": -1},
"tenant2": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_old},
"tenant3": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_recent},
"tenant4": {"plan": CloudPlan.TEAM, "expiration_date": future_expiration},
"tenant7": {"plan": CloudPlan.SANDBOX, "expiration_date": sandbox_expired_old},
# tenant6 has no plan
}
plan_provider = make_plan_provider(tenant_plans)
tenant_whitelist = ["tenant7"]
policy = BillingSandboxPolicy(
plan_provider=plan_provider,
graceful_period_days=self.GRACEFUL_PERIOD_DAYS,
tenant_whitelist=tenant_whitelist,
current_timestamp=now,
)
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - only msg1 and msg2 should be included
assert set(result) == {"msg1", "msg2"}
class TestBillingDisabledPolicyFilterMessageIds:
"""Unit tests for BillingDisabledPolicy.filter_message_ids method."""
def test_returns_all_message_ids(self):
"""Test that all message IDs are returned (order-preserving)."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
make_simple_message("msg3", "app3"),
]
app_to_tenant = {"app1": "tenant1", "app2": "tenant2"}
policy = BillingDisabledPolicy()
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - all message IDs returned in order
assert list(result) == ["msg1", "msg2", "msg3"]
def test_ignores_app_to_tenant(self):
"""Test that app_to_tenant mapping is ignored."""
# Arrange
messages = [
make_simple_message("msg1", "app1"),
make_simple_message("msg2", "app2"),
]
app_to_tenant: dict[str, str] = {} # Empty - should be ignored
policy = BillingDisabledPolicy()
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert - all message IDs still returned
assert list(result) == ["msg1", "msg2"]
def test_empty_messages_returns_empty(self):
"""Test that empty messages returns empty list."""
# Arrange
messages: list[SimpleMessage] = []
app_to_tenant = {"app1": "tenant1"}
policy = BillingDisabledPolicy()
# Act
result = policy.filter_message_ids(messages, app_to_tenant)
# Assert
assert list(result) == []
class TestCreateMessageCleanPolicy:
"""Unit tests for create_message_clean_policy factory function."""
@patch("services.retention.conversation.messages_clean_policy.dify_config")
def test_billing_disabled_returns_billing_disabled_policy(self, mock_config):
"""Test that BILLING_ENABLED=False returns BillingDisabledPolicy."""
# Arrange
mock_config.BILLING_ENABLED = False
# Act
policy = create_message_clean_policy(graceful_period_days=21)
# Assert
assert isinstance(policy, BillingDisabledPolicy)
@patch("services.retention.conversation.messages_clean_policy.BillingService")
@patch("services.retention.conversation.messages_clean_policy.dify_config")
def test_billing_enabled_policy_has_correct_internals(self, mock_config, mock_billing_service):
"""Test that BillingSandboxPolicy is created with correct internal values."""
# Arrange
mock_config.BILLING_ENABLED = True
whitelist = ["tenant1", "tenant2"]
mock_billing_service.get_expired_subscription_cleanup_whitelist.return_value = whitelist
mock_plan_provider = MagicMock()
mock_billing_service.get_plan_bulk_with_cache = mock_plan_provider
# Act
policy = create_message_clean_policy(graceful_period_days=14, current_timestamp=1234567)
# Assert
mock_billing_service.get_expired_subscription_cleanup_whitelist.assert_called_once()
assert isinstance(policy, BillingSandboxPolicy)
assert policy._graceful_period_days == 14
assert list(policy._tenant_whitelist) == whitelist
assert policy._plan_provider == mock_plan_provider
assert policy._current_timestamp == 1234567
class TestMessagesCleanServiceFromTimeRange:
"""Unit tests for MessagesCleanService.from_time_range factory method."""
def test_start_from_end_before_raises_value_error(self):
"""Test that start_from == end_before raises ValueError."""
policy = BillingDisabledPolicy()
# Arrange
same_time = datetime.datetime(2024, 1, 1, 12, 0, 0)
# Act & Assert
with pytest.raises(ValueError, match="start_from .* must be less than end_before"):
MessagesCleanService.from_time_range(
policy=policy,
start_from=same_time,
end_before=same_time,
)
# Arrange
start_from = datetime.datetime(2024, 12, 31)
end_before = datetime.datetime(2024, 1, 1)
# Act & Assert
with pytest.raises(ValueError, match="start_from .* must be less than end_before"):
MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
)
def test_batch_size_raises_value_error(self):
"""Test that batch_size=0 raises ValueError."""
# Arrange
start_from = datetime.datetime(2024, 1, 1)
end_before = datetime.datetime(2024, 2, 1)
policy = BillingDisabledPolicy()
# Act & Assert
with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
batch_size=0,
)
start_from = datetime.datetime(2024, 1, 1)
end_before = datetime.datetime(2024, 2, 1)
policy = BillingDisabledPolicy()
# Act & Assert
with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
batch_size=-100,
)
def test_valid_params_creates_instance(self):
"""Test that valid parameters create a correctly configured instance."""
# Arrange
start_from = datetime.datetime(2024, 1, 1, 0, 0, 0)
end_before = datetime.datetime(2024, 12, 31, 23, 59, 59)
policy = BillingDisabledPolicy()
batch_size = 500
dry_run = True
# Act
service = MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
batch_size=batch_size,
dry_run=dry_run,
)
# Assert
assert isinstance(service, MessagesCleanService)
assert service._policy is policy
assert service._start_from == start_from
assert service._end_before == end_before
assert service._batch_size == batch_size
assert service._dry_run == dry_run
def test_default_params(self):
"""Test that default parameters are applied correctly."""
# Arrange
start_from = datetime.datetime(2024, 1, 1)
end_before = datetime.datetime(2024, 2, 1)
policy = BillingDisabledPolicy()
# Act
service = MessagesCleanService.from_time_range(
policy=policy,
start_from=start_from,
end_before=end_before,
)
# Assert
assert service._batch_size == 1000 # default
assert service._dry_run is False # default
class TestMessagesCleanServiceFromDays:
"""Unit tests for MessagesCleanService.from_days factory method."""
def test_days_raises_value_error(self):
"""Test that days < 0 raises ValueError."""
# Arrange
policy = BillingDisabledPolicy()
# Act & Assert
with pytest.raises(ValueError, match="days .* must be greater than or equal to 0"):
MessagesCleanService.from_days(policy=policy, days=-1)
# Act
with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
fixed_now = datetime.datetime(2024, 6, 15, 14, 0, 0)
mock_datetime.datetime.now.return_value = fixed_now
mock_datetime.timedelta = datetime.timedelta
service = MessagesCleanService.from_days(policy=policy, days=0)
# Assert
assert service._end_before == fixed_now
def test_batch_size_raises_value_error(self):
"""Test that batch_size=0 raises ValueError."""
# Arrange
policy = BillingDisabledPolicy()
# Act & Assert
with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
MessagesCleanService.from_days(policy=policy, days=30, batch_size=0)
# Act & Assert
with pytest.raises(ValueError, match="batch_size .* must be greater than 0"):
MessagesCleanService.from_days(policy=policy, days=30, batch_size=-500)
def test_valid_params_creates_instance(self):
"""Test that valid parameters create a correctly configured instance."""
# Arrange
policy = BillingDisabledPolicy()
days = 90
batch_size = 500
dry_run = True
# Act
with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
fixed_now = datetime.datetime(2024, 6, 15, 10, 30, 0)
mock_datetime.datetime.now.return_value = fixed_now
mock_datetime.timedelta = datetime.timedelta
service = MessagesCleanService.from_days(
policy=policy,
days=days,
batch_size=batch_size,
dry_run=dry_run,
)
# Assert
expected_end_before = fixed_now - datetime.timedelta(days=days)
assert isinstance(service, MessagesCleanService)
assert service._policy is policy
assert service._start_from is None
assert service._end_before == expected_end_before
assert service._batch_size == batch_size
assert service._dry_run == dry_run
def test_default_params(self):
"""Test that default parameters are applied correctly."""
# Arrange
policy = BillingDisabledPolicy()
# Act
with patch("services.retention.conversation.messages_clean_service.datetime") as mock_datetime:
fixed_now = datetime.datetime(2024, 6, 15, 10, 30, 0)
mock_datetime.datetime.now.return_value = fixed_now
mock_datetime.timedelta = datetime.timedelta
service = MessagesCleanService.from_days(policy=policy)
# Assert
expected_end_before = fixed_now - datetime.timedelta(days=30) # default days=30
assert service._end_before == expected_end_before
assert service._batch_size == 1000 # default
assert service._dry_run is False # default

api/uv.lock generated
View File

@@ -1368,7 +1368,7 @@ wheels = [
[[package]]
name = "dify-api"
version = "1.11.3"
version = "1.11.4"
source = { virtual = "." }
dependencies = [
{ name = "aliyun-log-python-sdk" },

View File

@@ -21,7 +21,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -63,7 +63,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -102,7 +102,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -132,7 +132,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.11.3
image: langgenius/dify-web:1.11.4
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@@ -705,7 +705,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -747,7 +747,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -786,7 +786,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.11.3
image: langgenius/dify-api:1.11.4
restart: always
environment:
# Use the shared environment variables.
@@ -816,7 +816,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.11.3
image: langgenius/dify-web:1.11.4
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@@ -65,15 +65,17 @@ const CardView: FC<ICardViewProps> = ({ appId, isInPanel, className }) => {
<div className="text-xs text-text-secondary">
{t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
</div>
<div
className="cursor-pointer text-xs font-medium text-text-accent hover:underline"
<a
href={triggerDocUrl}
target="_blank"
rel="noopener noreferrer"
className="block cursor-pointer text-xs font-medium text-text-accent hover:underline"
onClick={(event) => {
event.stopPropagation()
window.open(triggerDocUrl, '_blank')
}}
>
{t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
</div>
</a>
</div>
), [t, triggerDocUrl])

View File

@@ -21,7 +21,6 @@ import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/
import FileUploadSetting from '@/app/components/workflow/nodes/_base/components/file-upload-setting'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
import { ChangeType, InputVarType, SupportUploadFileTypes } from '@/app/components/workflow/types'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { AppModeEnum, TransferMethod } from '@/types/app'
import { checkKeys, getNewVarInWorkflow, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var'
@@ -198,8 +197,6 @@ const ConfigModal: FC<IConfigModalProps> = ({
if (type === InputVarType.multiFiles)
draft.max_length = DEFAULT_FILE_UPLOAD_SETTING.max_length
}
if (type === InputVarType.paragraph)
draft.max_length = DEFAULT_VALUE_MAX_LEN
})
setTempPayload(newPayload)
}, [tempPayload])

View File

@@ -15,7 +15,6 @@ import Confirm from '@/app/components/base/confirm'
import Toast from '@/app/components/base/toast'
import Tooltip from '@/app/components/base/tooltip'
import { InputVarType } from '@/app/components/workflow/types'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useModalContext } from '@/context/modal-context'
@@ -58,8 +57,6 @@ const buildPromptVariableFromInput = (payload: InputVar): PromptVariable => {
key: variable,
name: label as string,
}
if (payload.type === InputVarType.textInput)
nextItem.max_length = nextItem.max_length || DEFAULT_VALUE_MAX_LEN
if (payload.type !== InputVarType.select)
delete nextItem.options

View File

@@ -7,7 +7,6 @@ import Input from '@/app/components/base/input'
import Select from '@/app/components/base/select'
import Textarea from '@/app/components/base/textarea'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { cn } from '@/utils/classnames'
@@ -88,7 +87,7 @@ const ChatUserInput = ({
onChange={(e) => { handleInputValueChange(key, e.target.value) }}
placeholder={name}
autoFocus={index === 0}
maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
maxLength={max_length}
/>
)}
{type === 'paragraph' && (
@@ -115,7 +114,7 @@ const ChatUserInput = ({
onChange={(e) => { handleInputValueChange(key, e.target.value) }}
placeholder={name}
autoFocus={index === 0}
maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
maxLength={max_length}
/>
)}
{type === 'checkbox' && (

View File

@@ -20,7 +20,6 @@ import Select from '@/app/components/base/select'
import Textarea from '@/app/components/base/textarea'
import Tooltip from '@/app/components/base/tooltip'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { AppModeEnum, ModelModeType } from '@/types/app'
import { cn } from '@/utils/classnames'
@@ -142,7 +141,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
onChange={(e) => { handleInputValueChange(key, e.target.value) }}
placeholder={name}
autoFocus={index === 0}
maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
maxLength={max_length}
/>
)}
{type === 'paragraph' && (
@@ -170,7 +169,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
onChange={(e) => { handleInputValueChange(key, e.target.value) }}
placeholder={name}
autoFocus={index === 0}
maxLength={max_length || DEFAULT_VALUE_MAX_LEN}
maxLength={max_length}
/>
)}
{type === 'checkbox' && (

View File

@@ -12,7 +12,6 @@ import { useDebounceFn } from 'ahooks'
import dynamic from 'next/dynamic'
import {
useRouter,
useSearchParams,
} from 'next/navigation'
import { parseAsString, useQueryState } from 'nuqs'
import { useCallback, useEffect, useRef, useState } from 'react'
@@ -29,7 +28,6 @@ import { CheckModal } from '@/hooks/use-pay'
import { useInfiniteAppList } from '@/service/use-apps'
import { AppModeEnum } from '@/types/app'
import { cn } from '@/utils/classnames'
import { isServer } from '@/utils/client'
import AppCard from './app-card'
import { AppCardSkeleton } from './app-card-skeleton'
import Empty from './empty'
@@ -59,7 +57,6 @@ const List = () => {
const { t } = useTranslation()
const { systemFeatures } = useGlobalPublicStore()
const router = useRouter()
const searchParams = useSearchParams()
const { isCurrentWorkspaceEditor, isCurrentWorkspaceDatasetOperator, isLoadingCurrentWorkspace } = useAppContext()
const showTagManagementModal = useTagStore(s => s.showTagManagementModal)
const [activeTab, setActiveTab] = useQueryState(
@@ -67,33 +64,6 @@ const List = () => {
parseAsString.withDefault('all').withOptions({ history: 'push' }),
)
// valid tabs for apps list; anything else should fallback to 'all'
// 1) Normalize legacy/incorrect query params like ?mode=discover -> ?category=all
useEffect(() => {
// avoid running on server
if (isServer)
return
const mode = searchParams.get('mode')
if (!mode)
return
const url = new URL(window.location.href)
url.searchParams.delete('mode')
if (validTabs.has(mode)) {
// migrate to category key
url.searchParams.set('category', mode)
}
else {
url.searchParams.set('category', 'all')
}
router.replace(url.pathname + url.search)
}, [router, searchParams])
// 2) If category has an invalid value (e.g., 'discover'), reset to 'all'
useEffect(() => {
if (!validTabs.has(activeTab))
setActiveTab('all')
}, [activeTab, setActiveTab])
const { query: { tagIDs = [], keywords = '', isCreatedByMe: queryIsCreatedByMe = false }, setQuery } = useAppsQueryState()
const [isCreatedByMe, setIsCreatedByMe] = useState(queryIsCreatedByMe)
const [tagFilterValue, setTagFilterValue] = useState<string[]>(tagIDs)

View File

@@ -16,6 +16,7 @@ import { Theme } from '@/types/app'
import SVGRenderer from '../svg-gallery' // Assumes svg-gallery.tsx is in /base directory
const Flowchart = dynamic(() => import('@/app/components/base/mermaid'), { ssr: false })
const QuadrantMatrix = dynamic(() => import('@/app/components/base/quadrant-matrix'), { ssr: false })
// Available languages: https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD
const capitalizationLanguageNameMap: Record<string, string> = {
@@ -40,6 +41,7 @@ const capitalizationLanguageNameMap: Record<string, string> = {
latex: 'Latex',
svg: 'SVG',
abc: 'ABC',
quadrant: 'Quadrant',
}
const getCorrectCapitalizationLanguageName = (language: string) => {
if (!language)
@@ -409,6 +411,12 @@ const CodeBlock: any = memo(({ inline, className, children = '', ...props }: any
<MarkdownMusic children={content} />
</ErrorBoundary>
)
case 'quadrant':
return (
<ErrorBoundary>
<QuadrantMatrix content={content} />
</ErrorBoundary>
)
default:
return (
<SyntaxHighlighter

View File

@@ -0,0 +1,153 @@
'use client'
import type { FC } from 'react'
import type { QuadrantData } from './types'
import { RiExpandDiagonalLine } from '@remixicon/react'
import { useCallback, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import ActionButton from '@/app/components/base/action-button'
import FullScreenModal from '@/app/components/base/fullscreen-modal'
import QuadrantCard from './quadrant-card'
import { isValidQuadrantData, QUADRANT_CONFIGS } from './types'
type QuadrantMatrixProps = {
content: string
}
const QuadrantMatrix: FC<QuadrantMatrixProps> = ({ content }) => {
const { t } = useTranslation()
const [isExpanded, setIsExpanded] = useState(false)
const parsedData = useMemo<QuadrantData | null>(() => {
try {
const trimmed = content.trim()
const data = JSON.parse(trimmed)
if (!isValidQuadrantData(data))
return null
return data
}
catch {
return null
}
}, [content])
const handleExpand = useCallback(() => {
setIsExpanded(true)
}, [])
const handleClose = useCallback(() => {
setIsExpanded(false)
}, [])
if (!parsedData) {
return (
<div className="flex items-center justify-center rounded-xl bg-components-panel-bg-blur p-8">
<div className="text-center text-text-secondary">
<div className="system-md-semibold mb-2">{t('quadrantMatrix.invalidData', { ns: 'app' })}</div>
<div className="text-sm text-text-tertiary">
{t('quadrantMatrix.invalidDataDesc', { ns: 'app' })}
</div>
</div>
</div>
)
}
const totalTasks
= parsedData.q1.length
+ parsedData.q2.length
+ parsedData.q3.length
+ parsedData.q4.length
// Shared 2x2 grid renderer, reused by the inline view and the fullscreen modal
const renderGrid = (expanded: boolean) => (
<div className="grid grid-cols-2 gap-3">
{/* Row 1: Q1 (Do First), Q2 (Schedule) */}
<QuadrantCard
config={QUADRANT_CONFIGS.q1}
tasks={parsedData.q1}
expanded={expanded}
/>
<QuadrantCard
config={QUADRANT_CONFIGS.q2}
tasks={parsedData.q2}
expanded={expanded}
/>
{/* Row 2: Q3 (Delegate), Q4 (Don't Do) */}
<QuadrantCard
config={QUADRANT_CONFIGS.q3}
tasks={parsedData.q3}
expanded={expanded}
/>
<QuadrantCard
config={QUADRANT_CONFIGS.q4}
tasks={parsedData.q4}
expanded={expanded}
/>
</div>
)
return (
<>
<div className="w-full overflow-hidden rounded-xl bg-components-panel-bg-blur p-4">
{/* Header */}
<div className="mb-4 flex items-center justify-between">
<div>
<div className="system-md-semibold text-text-primary">
{t('quadrantMatrix.title', { ns: 'app' })}
</div>
<div className="text-xs text-text-tertiary">
{t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
</div>
</div>
{/* Legend + Expand Button */}
<div className="flex items-center gap-3">
<div className="flex items-center gap-3 text-[11px] text-text-quaternary">
<span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
<span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
</div>
<ActionButton onClick={handleExpand}>
<RiExpandDiagonalLine className="h-4 w-4" />
</ActionButton>
</div>
</div>
{/* 2x2 Grid */}
{renderGrid(false)}
</div>
{/* Fullscreen Modal */}
<FullScreenModal
open={isExpanded}
onClose={handleClose}
closable
>
<div className="flex h-full flex-col p-6">
{/* Modal Header */}
<div className="mb-6 flex items-center justify-between">
<div>
<div className="text-xl font-semibold text-text-primary">
{t('quadrantMatrix.title', { ns: 'app' })}
</div>
<div className="text-sm text-text-tertiary">
{t('quadrantMatrix.taskCount', { ns: 'app', count: totalTasks })}
</div>
</div>
<div className="flex items-center gap-3 text-sm text-text-quaternary">
<span>{t('quadrantMatrix.legend.importance', { ns: 'app' })}</span>
<span>{t('quadrantMatrix.legend.urgency', { ns: 'app' })}</span>
</div>
</div>
{/* Expanded Grid */}
<div className="min-h-0 flex-1">
{renderGrid(true)}
</div>
</div>
</FullScreenModal>
</>
)
}
export default QuadrantMatrix

View File

@@ -0,0 +1,102 @@
'use client'
import type { FC } from 'react'
import type { QuadrantConfig, Task } from './types'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'
import TaskItem from './task-item'
type QuadrantCardProps = {
config: QuadrantConfig
tasks: Task[]
expanded?: boolean
maxDisplay?: number
}
const QuadrantCard: FC<QuadrantCardProps> = ({
config,
tasks,
expanded = false,
maxDisplay = 3,
}) => {
const { t } = useTranslation()
const { number, titleKey, subtitleKey, bgClass, borderClass, titleClass } = config
const displayLimit = expanded ? Infinity : maxDisplay
const displayTasks = tasks.slice(0, displayLimit)
const remainingCount = Math.max(0, tasks.length - displayLimit)
return (
<div
className={cn(
'flex min-w-0 flex-col rounded-xl border p-3',
bgClass,
borderClass,
expanded ? 'min-h-[280px]' : 'min-h-[200px]',
)}
>
{/* Header with numbered circle */}
<div className="mb-2 shrink-0">
<div className="flex items-center gap-2">
{/* Numbered circle */}
<span className={cn(
'flex h-5 w-5 items-center justify-center rounded-full border text-xs font-semibold',
borderClass,
titleClass,
)}
>
{number}
</span>
<span className={cn('system-sm-semibold', titleClass)}>{t(titleKey, { ns: 'app' })}</span>
{tasks.length > 0 && (
<span className="bg-components-badge-bg-gray rounded-full px-1.5 py-0.5 text-[10px] font-medium text-text-tertiary">
{tasks.length}
</span>
)}
</div>
<div className="text-[11px] text-text-tertiary">{t(subtitleKey, { ns: 'app' })}</div>
</div>
{/* Task List */}
<div className={cn(
'flex min-h-0 flex-1 flex-col gap-2',
expanded && 'overflow-y-auto',
)}
>
{displayTasks.length > 0
? (
displayTasks.map((task) => {
const taskKey = [
task.name,
task.deadline ?? 'no-deadline',
task.importance_score,
task.urgency_score,
task.description ?? '',
task.action_advice ?? '',
].join('|')
return (
<TaskItem
key={taskKey}
task={task}
expanded={expanded}
/>
)
})
)
: (
<div className="flex flex-1 items-center justify-center text-xs text-text-quaternary">
{t('quadrantMatrix.noTasks', { ns: 'app' })}
</div>
)}
</div>
{/* More indicator (only in non-expanded mode) */}
{!expanded && remainingCount > 0 && (
<div className="mt-2 shrink-0 text-center text-[11px] text-text-tertiary">
{t('quadrantMatrix.more', { ns: 'app', count: remainingCount })}
</div>
)}
</div>
)
}
export default QuadrantCard

View File

@@ -0,0 +1,88 @@
'use client'
import type { FC } from 'react'
import type { Task } from './types'
import { RiCalendarLine } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import { cn } from '@/utils/classnames'
type TaskItemProps = {
task: Task
expanded?: boolean
showScores?: boolean
}
const TaskItem: FC<TaskItemProps> = ({ task, expanded = false, showScores = true }) => {
const { t } = useTranslation()
const { name, description, deadline, importance_score, urgency_score, action_advice } = task
return (
<div className="group min-w-0 rounded-lg bg-components-panel-bg p-2.5 shadow-xs transition-all hover:shadow-sm">
{/* Header: Task Name + Scores */}
<div className="flex items-start justify-between gap-2">
<div
className={cn(
'system-sm-medium min-w-0 flex-1 text-text-primary',
!expanded && 'truncate',
)}
title={name}
>
{name}
</div>
{showScores && (
<div className="flex shrink-0 items-center gap-1 text-[10px] font-medium">
<span className="text-text-accent">
I:
{importance_score}
</span>
<span className="text-text-warning">
U:
{urgency_score}
</span>
</div>
)}
</div>
{/* Description */}
{description && (
<div className={cn(
'mt-1 text-xs text-text-tertiary',
!expanded && 'line-clamp-2',
)}
>
{description}
</div>
)}
{/* Deadline Badge */}
{deadline && (
<div className="mt-1.5">
<span className="bg-components-badge-bg-gray inline-flex items-center gap-1 rounded px-1.5 py-0.5 text-[10px] text-text-tertiary">
<RiCalendarLine className="h-3 w-3" />
<span>
{t('quadrantMatrix.deadline', { ns: 'app' })}
{' '}
{deadline}
</span>
</span>
</div>
)}
{/* Action Advice */}
{action_advice && (
<div className="mt-2 border-t border-divider-subtle pt-2">
<p
className={cn(
'text-xs italic text-text-quaternary',
!expanded && 'line-clamp-2',
)}
title={!expanded ? action_advice : undefined}
>
{action_advice}
</p>
</div>
)}
</div>
)
}
export default TaskItem

View File

@@ -0,0 +1,92 @@
/**
* Type definitions for Eisenhower Matrix (Task Quadrant) visualization
*/
import type { I18nKeysWithPrefix } from '@/types/i18n'
export type Task = {
name: string
description?: string
deadline?: string // YYYY-MM-DD format
importance_score: number // 0-100, based on goal alignment and long-term value
urgency_score: number // 0-100, based on deadline pressure and delay penalty
action_advice?: string // Suggested action for this task
}
export type QuadrantData = {
q1: Task[] // Urgent & Important - Do First
q2: Task[] // Not Urgent & Important - Schedule
q3: Task[] // Urgent & Not Important - Delegate
q4: Task[] // Not Urgent & Not Important - Don't Do
}
type QuadrantKeyBase = I18nKeysWithPrefix<'app', 'quadrantMatrix.q'>
type QuadrantTitleKey = Extract<QuadrantKeyBase, `${string}.title`>
type QuadrantSubtitleKey = Extract<QuadrantKeyBase, `${string}.subtitle`>
export type QuadrantConfig = {
key: 'q1' | 'q2' | 'q3' | 'q4'
number: number
titleKey: QuadrantTitleKey // i18n key for title
subtitleKey: QuadrantSubtitleKey // i18n key for subtitle
bgClass: string
borderClass: string
titleClass: string
}
// Layout based on Eisenhower Matrix:
// Q1 (Do First) - top-left, Q2 (Schedule) - top-right
// Q3 (Delegate) - bottom-left, Q4 (Don't Do) - bottom-right
export const QUADRANT_CONFIGS: Record<string, QuadrantConfig> = {
q1: {
key: 'q1',
number: 1,
titleKey: 'quadrantMatrix.q1.title',
subtitleKey: 'quadrantMatrix.q1.subtitle',
bgClass: 'bg-state-destructive-hover',
borderClass: 'border-state-destructive-border',
titleClass: 'text-text-destructive',
},
q2: {
key: 'q2',
number: 2,
titleKey: 'quadrantMatrix.q2.title',
subtitleKey: 'quadrantMatrix.q2.subtitle',
bgClass: 'bg-state-accent-hover',
borderClass: 'border-state-accent-border',
titleClass: 'text-text-accent',
},
q3: {
key: 'q3',
number: 3,
titleKey: 'quadrantMatrix.q3.title',
subtitleKey: 'quadrantMatrix.q3.subtitle',
bgClass: 'bg-state-warning-hover',
borderClass: 'border-state-warning-border',
titleClass: 'text-text-warning',
},
q4: {
key: 'q4',
number: 4,
titleKey: 'quadrantMatrix.q4.title',
subtitleKey: 'quadrantMatrix.q4.subtitle',
bgClass: 'bg-components-panel-on-panel-item-bg',
borderClass: 'border-divider-regular',
titleClass: 'text-text-tertiary',
},
}
/**
* Validates that the top-level shape matches QuadrantData (q1 through q4 are arrays);
* individual Task fields are not checked here.
*/
export function isValidQuadrantData(data: unknown): data is QuadrantData {
if (typeof data !== 'object' || data === null)
return false
const d = data as Record<string, unknown>
return (
Array.isArray(d.q1)
&& Array.isArray(d.q2)
&& Array.isArray(d.q3)
&& Array.isArray(d.q4)
)
}
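For reference, a minimal payload that passes isValidQuadrantData and exercises all four quadrants. This is the JSON body of a quadrant code block in the model's markdown output; the task values below are illustrative only:

{
  "q1": [
    {
      "name": "Fix production outage",
      "deadline": "2026-01-17",
      "importance_score": 95,
      "urgency_score": 98,
      "action_advice": "Drop everything and resolve today"
    }
  ],
  "q2": [
    { "name": "Draft Q2 roadmap", "importance_score": 85, "urgency_score": 30 }
  ],
  "q3": [
    { "name": "Reply to vendor emails", "importance_score": 25, "urgency_score": 80 }
  ],
  "q4": []
}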

View File

@@ -20,6 +20,7 @@ const SearchInput: FC<SearchInputProps> = ({
white,
}) => {
const { t } = useTranslation()
const inputRef = useRef<HTMLInputElement>(null)
const [focus, setFocus] = useState<boolean>(false)
const isComposing = useRef<boolean>(false)
const [compositionValue, setCompositionValue] = useState<string>('')
@@ -36,6 +37,7 @@ const SearchInput: FC<SearchInputProps> = ({
<RiSearchLine className="h-4 w-4 text-components-input-text-placeholder" aria-hidden="true" />
</div>
<input
ref={inputRef}
type="text"
name="query"
className={cn(
@@ -65,14 +67,17 @@ const SearchInput: FC<SearchInputProps> = ({
autoComplete="off"
/>
{value && (
<div
className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center"
<button
type="button"
aria-label={t('operation.clear', { ns: 'common' })}
className="group/clear flex h-4 w-4 shrink-0 cursor-pointer items-center justify-center border-none bg-transparent p-0"
onClick={() => {
onChange('')
inputRef.current?.focus()
}}
>
<RiCloseCircleFill className="h-4 w-4 text-text-quaternary group-hover/clear:text-text-tertiary" />
</div>
</button>
)}
</div>
)

View File

@@ -442,6 +442,10 @@ const Completed: FC<ICompletedProps> = ({
setFullScreen(!fullScreen)
}, [fullScreen])
const toggleCollapsed = useCallback(() => {
setIsCollapsed(prev => !prev)
}, [])
const viewNewlyAddedChunk = useCallback(async () => {
const totalPages = segmentListData?.total_pages || 0
const total = segmentListData?.total || 0
@@ -578,15 +582,16 @@ const Completed: FC<ICompletedProps> = ({
return selectedStatus ? 1 : 0
}, [selectedStatus])
const contextValue = useMemo<SegmentListContextValue>(() => ({
isCollapsed,
fullScreen,
toggleFullScreen,
currSegment,
currChildChunk,
}), [isCollapsed, fullScreen, toggleFullScreen, currSegment, currChildChunk])
return (
<SegmentListContext.Provider value={{
isCollapsed,
fullScreen,
toggleFullScreen,
currSegment,
currChildChunk,
}}
>
<SegmentListContext.Provider value={contextValue}>
{/* Menu Bar */}
{!isFullDocMode && (
<div className={s.docSearchWrapper}>
@@ -618,7 +623,7 @@ const Completed: FC<ICompletedProps> = ({
onClear={() => handleInputChange('')}
/>
<Divider type="vertical" className="mx-3 h-3.5" />
<DisplayToggle isCollapsed={isCollapsed} toggleCollapsed={() => setIsCollapsed(!isCollapsed)} />
<DisplayToggle isCollapsed={isCollapsed} toggleCollapsed={toggleCollapsed} />
</div>
)}
{/* Segment list */}

View File

@@ -1,4 +1,5 @@
import type { FC } from 'react'
import type { SegmentListContextValue } from '..'
import * as React from 'react'
import { Markdown } from '@/app/components/base/markdown'
import { cn } from '@/utils/classnames'
@@ -14,13 +15,15 @@ type ChunkContentProps = {
className?: string
}
const selectIsCollapsed = (s: SegmentListContextValue) => s.isCollapsed
const ChunkContent: FC<ChunkContentProps> = ({
detail,
isFullDocMode,
className,
}) => {
const { answer, content, sign_content } = detail
const isCollapsed = useSegmentListContext(s => s.isCollapsed)
const isCollapsed = useSegmentListContext(selectIsCollapsed)
if (answer) {
return (

View File

@@ -2,9 +2,9 @@
import type { INavSelectorProps } from './nav-selector'
import Link from 'next/link'
import { usePathname, useSearchParams, useSelectedLayoutSegment } from 'next/navigation'
import { useSelectedLayoutSegment } from 'next/navigation'
import * as React from 'react'
import { useEffect, useState } from 'react'
import { useState } from 'react'
import { useStore as useAppStore } from '@/app/components/app/store'
import { ArrowNarrowLeft } from '@/app/components/base/icons/src/vender/line/arrows'
import { cn } from '@/utils/classnames'
@@ -36,14 +36,6 @@ const Nav = ({
const [hovered, setHovered] = useState(false)
const segment = useSelectedLayoutSegment()
const isActivated = Array.isArray(activeSegment) ? activeSegment.includes(segment!) : segment === activeSegment
const pathname = usePathname()
const searchParams = useSearchParams()
const [linkLastSearchParams, setLinkLastSearchParams] = useState('')
useEffect(() => {
if (pathname === link)
setLinkLastSearchParams(searchParams.toString())
}, [pathname, searchParams])
return (
<div className={`
@@ -52,7 +44,7 @@ const Nav = ({
${!curNav && !isActivated && 'hover:bg-components-main-nav-nav-button-bg-hover'}
`}
>
<Link href={link + (linkLastSearchParams && `?${linkLastSearchParams}`)}>
<Link href={link}>
<div
onClick={(e) => {
// Don't clear state if opening in new tab/window

View File

@@ -1,4 +1,3 @@
import type { ActivePluginType } from './constants'
import type { PluginsSort, SearchParamsFromCollection } from './types'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { useQueryState } from 'nuqs'
@@ -17,32 +16,14 @@ export function useSetMarketplaceSort() {
return useSetAtom(marketplaceSortAtom)
}
/**
* Preserve the state for marketplace
*/
export const preserveSearchStateInQueryAtom = atom<boolean>(false)
const searchPluginTextAtom = atom<string>('')
const activePluginTypeAtom = atom<ActivePluginType>('all')
const filterPluginTagsAtom = atom<string[]>([])
export function useSearchPluginText() {
const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
const queryState = useQueryState('q', marketplaceSearchParamsParsers.q)
const atomState = useAtom(searchPluginTextAtom)
return preserveSearchStateInQuery ? queryState : atomState
return useQueryState('q', marketplaceSearchParamsParsers.q)
}
export function useActivePluginType() {
const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
const queryState = useQueryState('category', marketplaceSearchParamsParsers.category)
const atomState = useAtom(activePluginTypeAtom)
return preserveSearchStateInQuery ? queryState : atomState
return useQueryState('category', marketplaceSearchParamsParsers.category)
}
export function useFilterPluginTags() {
const preserveSearchStateInQuery = useAtomValue(preserveSearchStateInQueryAtom)
const queryState = useQueryState('tags', marketplaceSearchParamsParsers.tags)
const atomState = useAtom(filterPluginTagsAtom)
return preserveSearchStateInQuery ? queryState : atomState
return useQueryState('tags', marketplaceSearchParamsParsers.tags)
}
/**

View File

@@ -1,15 +0,0 @@
'use client'
import { useHydrateAtoms } from 'jotai/utils'
import { preserveSearchStateInQueryAtom } from './atoms'
export function HydrateMarketplaceAtoms({
preserveSearchStateInQuery,
children,
}: {
preserveSearchStateInQuery: boolean
children: React.ReactNode
}) {
useHydrateAtoms([[preserveSearchStateInQueryAtom, preserveSearchStateInQuery]])
return <>{children}</>
}

View File

@@ -1,7 +1,6 @@
import type { SearchParams } from 'nuqs'
import { TanstackQueryInitializer } from '@/context/query-client'
import Description from './description'
import { HydrateMarketplaceAtoms } from './hydration-client'
import { HydrateQueryClient } from './hydration-server'
import ListWrapper from './list/list-wrapper'
import StickySearchAndSwitchWrapper from './sticky-search-and-switch-wrapper'
@@ -10,8 +9,7 @@ type MarketplaceProps = {
showInstallButton?: boolean
pluginTypeSwitchClassName?: string
/**
* Pass the search params from the request to prefetch data on the server
* and preserve the search params in the URL.
* Pass the search params from the request to prefetch data on the server.
*/
searchParams?: Promise<SearchParams>
}
@@ -24,15 +22,13 @@ const Marketplace = async ({
return (
<TanstackQueryInitializer>
<HydrateQueryClient searchParams={searchParams}>
<HydrateMarketplaceAtoms preserveSearchStateInQuery={!!searchParams}>
<Description />
<StickySearchAndSwitchWrapper
pluginTypeSwitchClassName={pluginTypeSwitchClassName}
/>
<ListWrapper
showInstallButton={showInstallButton}
/>
</HydrateMarketplaceAtoms>
<Description />
<StickySearchAndSwitchWrapper
pluginTypeSwitchClassName={pluginTypeSwitchClassName}
/>
<ListWrapper
showInstallButton={showInstallButton}
/>
</HydrateQueryClient>
</TanstackQueryInitializer>
)

View File

@@ -68,7 +68,7 @@ export const PluginPageContextProvider = ({
const options = useMemo(() => {
return enable_marketplace ? tabs : tabs.filter(tab => tab.value !== PLUGIN_PAGE_TABS_MAP.marketplace)
}, [tabs, enable_marketplace])
const [activeTab, setActiveTab] = useQueryState('category', {
const [activeTab, setActiveTab] = useQueryState('tab', {
defaultValue: options[0].value,
})

View File

@@ -6,7 +6,6 @@ import { useTranslation } from 'react-i18next'
import { useFileSizeLimit } from '@/app/components/base/file-uploader/hooks'
import { InputFieldType } from '@/app/components/base/form/form-scenarios/input-field/types'
import { DEFAULT_FILE_UPLOAD_SETTING } from '@/app/components/workflow/constants'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import { PipelineInputVarType } from '@/models/pipeline'
import { useFileUploadConfig } from '@/service/use-common'
import { formatFileSize } from '@/utils/format'
@@ -87,8 +86,6 @@ export const useConfigurations = (props: {
if (type === PipelineInputVarType.multiFiles)
setFieldValue('maxLength', DEFAULT_FILE_UPLOAD_SETTING.max_length)
}
if (type === PipelineInputVarType.paragraph)
setFieldValue('maxLength', DEFAULT_VALUE_MAX_LEN)
}, [setFieldValue])
const handleVariableNameBlur = useCallback((value: string) => {

View File

@@ -779,27 +779,6 @@ describe('useConfigurations', () => {
expect(mockSetFieldValue).toHaveBeenCalledWith('maxLength', expect.any(Number))
})
- it('should call setFieldValue when type changes to paragraph', () => {
- // Arrange
- const mockGetFieldValue = vi.fn()
- const mockSetFieldValue = vi.fn()
- const { result } = renderHookWithProviders(() =>
- useConfigurations({
- getFieldValue: mockGetFieldValue,
- setFieldValue: mockSetFieldValue,
- supportFile: false,
- }),
- )
- // Act
- const typeConfig = result.current.find(config => config.variable === 'type')
- typeConfig?.listeners?.onChange?.(createMockEvent(PipelineInputVarType.paragraph))
- // Assert
- expect(mockSetFieldValue).toHaveBeenCalledWith('maxLength', 48) // DEFAULT_VALUE_MAX_LEN
- })
it('should set label from variable name on blur when label is empty', () => {
// Arrange
const mockGetFieldValue = vi.fn().mockReturnValue('')

View File

@@ -26,7 +26,7 @@ import DifyLogo from '@/app/components/base/logo/dify-logo'
import Toast from '@/app/components/base/toast'
import Res from '@/app/components/share/text-generation/result'
import RunOnce from '@/app/components/share/text-generation/run-once'
- import { appDefaultIconBackground, BATCH_CONCURRENCY, DEFAULT_VALUE_MAX_LEN } from '@/config'
+ import { appDefaultIconBackground, BATCH_CONCURRENCY } from '@/config'
import { useGlobalPublicStore } from '@/context/global-public-context'
import { useWebAppStore } from '@/context/web-app-context'
import { useAppFavicon } from '@/hooks/use-app-favicon'
@@ -256,11 +256,10 @@ const TextGeneration: FC<IMainProps> = ({
promptConfig?.prompt_variables.forEach((varItem, varIndex) => {
if (errorRowIndex !== 0)
return
- if (varItem.type === 'string') {
- const maxLen = varItem.max_length || DEFAULT_VALUE_MAX_LEN
- if (item[varIndex].length > maxLen) {
+ if (varItem.type === 'string' && varItem.max_length) {
+ if (item[varIndex].length > varItem.max_length) {
moreThanMaxLengthVarName = varItem.name
- maxLength = maxLen
+ maxLength = varItem.max_length
errorRowIndex = index + 1
return
}
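The behavioral change in this hunk, restated: the length check now fires only when a variable explicitly declares max_length; the implicit DEFAULT_VALUE_MAX_LEN fallback is gone. A minimal sketch of the new predicate (types simplified for illustration):

```ts
type PromptVariable = { name: string, type: string, max_length?: number }

// true only for string variables with an explicit, exceeded limit
function exceedsMaxLength(varItem: PromptVariable, value: string): boolean {
  return varItem.type === 'string'
    && varItem.max_length !== undefined
    && value.length > varItem.max_length
}
```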

View File

@@ -236,4 +236,46 @@ describe('RunOnce', () => {
const stopButton = screen.getByTestId('stop-button')
expect(stopButton).toBeDisabled()
})
+ describe('maxLength behavior', () => {
+ it('should not have maxLength attribute when max_length is not set', async () => {
+ const promptConfig: PromptConfig = {
+ prompt_template: 'template',
+ prompt_variables: [
+ createPromptVariable({
+ key: 'textInput',
+ name: 'Text Input',
+ type: 'string',
+ // max_length is not set
+ }),
+ ],
+ }
+ const { onInputsChange } = setup({ promptConfig, visionConfig: { ...baseVisionConfig, enabled: false } })
+ await waitFor(() => {
+ expect(onInputsChange).toHaveBeenCalled()
+ })
+ const input = screen.getByPlaceholderText('Text Input')
+ expect(input).not.toHaveAttribute('maxLength')
+ })
+ it('should have maxLength attribute when max_length is set', async () => {
+ const promptConfig: PromptConfig = {
+ prompt_template: 'template',
+ prompt_variables: [
+ createPromptVariable({
+ key: 'textInput',
+ name: 'Text Input',
+ type: 'string',
+ max_length: 100,
+ }),
+ ],
+ }
+ const { onInputsChange } = setup({ promptConfig, visionConfig: { ...baseVisionConfig, enabled: false } })
+ await waitFor(() => {
+ expect(onInputsChange).toHaveBeenCalled()
+ })
+ const input = screen.getByPlaceholderText('Text Input')
+ expect(input).toHaveAttribute('maxLength', '100')
+ })
+ })
})

View File

@@ -19,7 +19,6 @@ import Textarea from '@/app/components/base/textarea'
import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input'
import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor'
import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
- import { DEFAULT_VALUE_MAX_LEN } from '@/config'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import { cn } from '@/utils/classnames'
@@ -140,7 +139,7 @@ const RunOnce: FC<IRunOnceProps> = ({
placeholder={item.name}
value={inputs[item.key]}
onChange={(e: ChangeEvent<HTMLInputElement>) => { handleInputsChange({ ...inputsRef.current, [item.key]: e.target.value }) }}
- maxLength={item.max_length || DEFAULT_VALUE_MAX_LEN}
+ maxLength={item.max_length}
/>
)}
{item.type === 'paragraph' && (
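Passing item.max_length straight through works because React drops null/undefined DOM props entirely, which is what the new RunOnce tests above assert. A tiny sketch of that behavior:

```tsx
import * as React from 'react'

const limited = <input maxLength={100} /> // renders <input maxlength="100">
const unlimited = <input maxLength={undefined} /> // renders a plain <input>, no limit
```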

View File

@@ -5,6 +5,7 @@ import type { NodeOutPutVar, Variable } from '@/app/components/workflow/types'
import { useBoolean } from 'ahooks'
import * as React from 'react'
import { useEffect, useRef, useState } from 'react'
+ import { createPortal } from 'react-dom'
import { useTranslation } from 'react-i18next'
import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
import { cn } from '@/utils/classnames'
@@ -147,7 +148,7 @@ const CodeEditor: FC<Props> = ({
onMount={onEditorMounted}
placeholder={t('common.jinjaEditorPlaceholder', { ns: 'workflow' })!}
/>
- {isShowVarPicker && (
+ {isShowVarPicker && createPortal(
<div
ref={popupRef}
className="w-[228px] space-y-1 rounded-lg border border-components-panel-border bg-components-panel-bg p-1 shadow-lg"
@@ -164,7 +165,8 @@ const CodeEditor: FC<Props> = ({
onChange={handleSelectVar}
isSupportFileVar={false}
/>
- </div>
+ </div>,
+ document.body,
)}
</div>
)
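Context for the portal change: rendering the picker into document.body lifts it out of any overflow-hidden or transformed ancestor, the usual cause of a popup being clipped inside an editor. A minimal sketch of the pattern:

```tsx
import * as React from 'react'
import { createPortal } from 'react-dom'

function Popup({ open, children }: { open: boolean, children: React.ReactNode }) {
  if (!open)
    return null
  // mounted under document.body, so no ancestor can clip or transform it
  return createPortal(<div className="fixed z-50">{children}</div>, document.body)
}
```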

View File

@@ -208,7 +208,6 @@ export const VAR_ITEM_TEMPLATE = {
key: '',
name: '',
type: 'string',
- max_length: DEFAULT_VALUE_MAX_LEN,
required: true,
}
@@ -216,7 +215,6 @@ export const VAR_ITEM_TEMPLATE_IN_WORKFLOW = {
variable: '',
label: '',
type: InputVarType.textInput,
- max_length: DEFAULT_VALUE_MAX_LEN,
required: true,
options: [],
}
@@ -225,7 +223,6 @@ export const VAR_ITEM_TEMPLATE_IN_PIPELINE = {
variable: '',
label: '',
type: PipelineInputVarType.textInput,
- max_length: DEFAULT_VALUE_MAX_LEN,
required: true,
options: [],
}

View File

@@ -26,7 +26,8 @@ export default antfu(
'react-hooks/preserve-manual-memoization': 'warn',
'react-hooks/purity': 'warn',
'react-hooks/refs': 'warn',
- 'react-hooks/set-state-in-effect': 'warn',
+ // prefer react-hooks-extra/no-direct-set-state-in-use-effect
+ 'react-hooks/set-state-in-effect': 'off',
'react-hooks/set-state-in-render': 'warn',
'react-hooks/static-components': 'warn',
'react-hooks/unsupported-syntax': 'warn',
@@ -53,6 +54,14 @@ export default antfu(
},
},
},
+ {
+ files: ['**/*.ts', '**/*.tsx'],
+ settings: {
+ 'react-x': {
+ additionalStateHooks: '/^use\\w*State(?:s)?|useAtom$/u',
+ },
+ },
+ },
// downgrade some rules from error to warn for gradual adoption
// we should fix these in following pull requests
{
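The rule swap and the new react-x setting target the same pattern: state written synchronously inside an effect. As I read the additionalStateHooks setting, setters returned by hooks matching the regex (useAtom included) are policed like useState setters. A sketch of the code shape these rules flag, with illustrative names:

```tsx
import { useEffect, useState } from 'react'

function Example({ value }: { value: string }) {
  const [copy, setCopy] = useState(value)
  useEffect(() => {
    setCopy(value) // flagged: direct set-state inside useEffect
  }, [value])
  return <span>{copy}</span>
}
```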

View File

@@ -196,6 +196,24 @@
"publishApp.notSet": "Not set",
"publishApp.notSetDesc": "Currently nobody can access the web app. Please set permissions.",
"publishApp.title": "Who can access web app",
"quadrantMatrix.deadline": "DDL:",
"quadrantMatrix.invalidData": "Invalid Quadrant Data",
"quadrantMatrix.invalidDataDesc": "Expected JSON format with q1, q2, q3, q4 arrays",
"quadrantMatrix.legend.importance": "I = Importance",
"quadrantMatrix.legend.urgency": "U = Urgency",
"quadrantMatrix.more": "+{{count}} more",
"quadrantMatrix.noTasks": "No tasks",
"quadrantMatrix.q1.subtitle": "Urgent & Important",
"quadrantMatrix.q1.title": "Do First",
"quadrantMatrix.q2.subtitle": "Important & Not Urgent",
"quadrantMatrix.q2.title": "Schedule",
"quadrantMatrix.q3.subtitle": "Urgent & Not Important",
"quadrantMatrix.q3.title": "Delegate",
"quadrantMatrix.q4.subtitle": "Not Urgent & Not Important",
"quadrantMatrix.q4.title": "Don't Do",
"quadrantMatrix.taskCount_one": "{{count}} task prioritized",
"quadrantMatrix.taskCount_other": "{{count}} tasks prioritized",
"quadrantMatrix.title": "Eisenhower Matrix",
"removeOriginal": "Delete the original app",
"roadmap": "See our roadmap",
"showMyCreatedAppsOnly": "Created by me",

View File

@@ -196,6 +196,24 @@
"publishApp.notSet": "未设置",
"publishApp.notSetDesc": "当前任何人都无法访问 Web 应用。请设置访问权限。",
"publishApp.title": "谁可以访问 web 应用",
"quadrantMatrix.deadline": "截止:",
"quadrantMatrix.invalidData": "无效的象限数据",
"quadrantMatrix.invalidDataDesc": "需要包含 q1, q2, q3, q4 数组的 JSON 格式",
"quadrantMatrix.legend.importance": "I = 重要性",
"quadrantMatrix.legend.urgency": "U = 紧急性",
"quadrantMatrix.more": "+{{count}} 更多",
"quadrantMatrix.noTasks": "暂无任务",
"quadrantMatrix.q1.subtitle": "紧急且重要",
"quadrantMatrix.q1.title": "立即执行",
"quadrantMatrix.q2.subtitle": "重要但不紧急",
"quadrantMatrix.q2.title": "计划安排",
"quadrantMatrix.q3.subtitle": "紧急但不重要",
"quadrantMatrix.q3.title": "委派他人",
"quadrantMatrix.q4.subtitle": "不紧急也不重要",
"quadrantMatrix.q4.title": "不要做",
"quadrantMatrix.taskCount_one": "{{count}} 个任务已排序",
"quadrantMatrix.taskCount_other": "{{count}} 个任务已排序",
"quadrantMatrix.title": "艾森豪威尔矩阵",
"removeOriginal": "删除原应用",
"roadmap": "产品路线图",
"showMyCreatedAppsOnly": "我创建的",

View File

@@ -1,7 +1,7 @@
{
"name": "dify-web",
"type": "module",
"version": "1.11.3",
"version": "1.11.4",
"private": true,
"packageManager": "pnpm@10.27.0+sha512.72d699da16b1179c14ba9e64dc71c9a40988cbdc65c264cb0e489db7de917f20dcf4d64d8723625f2969ba52d4b7e2a1170682d9ac2a5dcaeaab732b7e16f04a",
"imports": {
@@ -28,9 +28,10 @@
"build:docker": "next build && node scripts/optimize-standalone.js",
"start": "node ./scripts/copy-and-start.mjs",
"lint": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache",
"lint:fix": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --fix",
"lint:quiet": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --quiet",
"lint:complexity": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --rule 'complexity: [error, {max: 15}]' --quiet",
"lint:fix": "pnpm lint --fix",
"lint:quiet": "pnpm lint --quiet",
"lint:complexity": "pnpm lint --rule 'complexity: [error, {max: 15}]' --quiet",
"lint:report": "pnpm lint --output-file eslint_report.json --format json",
"type-check": "tsc --noEmit",
"type-check:tsgo": "tsgo --noEmit",
"prepare": "cd ../ && node -e \"if (process.env.NODE_ENV !== 'production'){process.exit(1)} \" || husky ./web/.husky",
@@ -152,9 +153,9 @@
"zustand": "^5.0.9"
},
"devDependencies": {
"@antfu/eslint-config": "^6.7.3",
"@antfu/eslint-config": "^7.0.1",
"@chromatic-com/storybook": "^4.1.1",
"@eslint-react/eslint-plugin": "^2.3.13",
"@eslint-react/eslint-plugin": "^2.7.0",
"@mdx-js/loader": "^3.1.1",
"@mdx-js/react": "^3.1.1",
"@next/bundle-analyzer": "15.5.9",
@@ -189,7 +190,7 @@
"@types/semver": "^7.7.1",
"@types/sortablejs": "^1.15.8",
"@types/uuid": "^10.0.0",
"@typescript-eslint/parser": "^8.50.0",
"@typescript-eslint/parser": "^8.53.0",
"@typescript/native-preview": "^7.0.0-dev",
"@vitejs/plugin-react": "^5.1.2",
"@vitest/coverage-v8": "4.0.16",
@@ -201,7 +202,7 @@
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.4.26",
"eslint-plugin-sonarjs": "^3.0.5",
"eslint-plugin-storybook": "^10.1.10",
"eslint-plugin-storybook": "^10.1.11",
"eslint-plugin-tailwindcss": "^3.18.2",
"husky": "^9.1.7",
"jsdom": "^27.3.0",
@@ -224,7 +225,6 @@
},
"pnpm": {
"overrides": {
"@eslint/plugin-kit@<0.3.4": "0.3.4",
"@monaco-editor/loader": "1.5.0",
"@nolyfill/safe-buffer": "npm:safe-buffer@^5.2.1",
"array-includes": "npm:@nolyfill/array-includes@^1",
@@ -275,7 +275,6 @@
]
},
"resolutions": {
"@eslint/plugin-kit": "~0.3",
"@types/react": "~19.2.7",
"@types/react-dom": "~19.2.3",
"brace-expansion": "~2.0",

web/pnpm-lock.yaml (generated, 803 changed lines): file diff suppressed because it is too large

View File

@@ -24,8 +24,12 @@ export type FetchOptionType = Omit<RequestInit, 'body'> & {
}
const afterResponse204: AfterResponseHook = async (_request, _options, response) => {
- if (response.status === 204)
- return Response.json({ result: 'success' })
+ if (response.status === 204) {
+ return new Response(JSON.stringify({ result: 'success' }), {
+ status: 200,
+ headers: { 'Content-Type': ContentType.json },
+ })
+ }
}
export type ResponseError = {
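What this hook does, restated: ky's afterResponse hook swaps a 204 No Content for a synthetic 200 JSON body, so downstream response.json() calls never throw on an empty body. The two constructions should be equivalent where both exist; the explicit form simply avoids the newer Response.json() static (my reading of the motivation, the diff itself does not say). A sketch with a literal content type in place of the project's ContentType.json enum:

```ts
// both produce a 200 response with a JSON body and JSON content type
const viaStatic = Response.json({ result: 'success' })
const viaExplicit = new Response(JSON.stringify({ result: 'success' }), {
  status: 200,
  headers: { 'Content-Type': 'application/json' },
})
```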

View File

@@ -30,7 +30,7 @@ export const getNewVar = (key: string, type: string) => {
}
export const getNewVarInWorkflow = (key: string, type = InputVarType.textInput): InputVar => {
- const { max_length: _maxLength, ...rest } = VAR_ITEM_TEMPLATE_IN_WORKFLOW
+ const { ...rest } = VAR_ITEM_TEMPLATE_IN_WORKFLOW
if (type !== InputVarType.textInput) {
return {
...rest,