Compare commits


15 Commits

Author SHA1 Message Date
wangxiaolei
2f75e38c08 fix: fix miss use db.session (#31971) 2026-02-05 15:59:37 +08:00
wangxiaolei
cd03e0a9ef fix: fix delete_draft_variables_batch cycle forever (#31934)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-02-04 19:42:50 +08:00
zxhlyh
df2421d187 fix: auto summary env (#31930) 2026-02-04 19:42:26 +08:00
QuantumGhost
0ba321d840 chore: bump version in docker-compose and package manager to 1.12.1 (#31947) 2026-02-04 19:41:50 +08:00
Stephen Zhou
d8402f686e fix: base url in client (#31902)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-02-04 12:09:22 +08:00
Tomo
8bd8dee767 fix(docker): improve IRIS data persistence with proper Durable %SYS (#31901)
Co-authored-by: Tomo Okuyama <tomo.okuyama@intersystems.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-02-04 11:39:26 +08:00
Tomo
05f2764d7c fix(docker): persist IRIS data across container recreation using Durable %SYS (#31899)
Co-authored-by: Tomo Okuyama <tomo.okuyama@intersystems.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-02-04 09:57:46 +08:00
Asuka Minato
f5d6c250ed fix: "refactor: port api/controllers/console/tag/tags.py to ov3" (#31887)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-02-03 22:18:53 +08:00
niveshdandyan
45daec7541 refactor: replace line-clamp package with native CSS (#31877)
Co-authored-by: OSS Contributor <oss-contributor@example.com>
Co-authored-by: Claude (claude-opus-4-5) <noreply@anthropic.com>
Co-authored-by: niveshdandyan <niveshdandyan@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-02-03 22:14:18 +08:00
盐粒 Yanli
c14a8bb437 chore(dev): use strict bash mode for pytest (#31873) 2026-02-03 19:42:42 +08:00
Stephen Zhou
b76c8fa853 test: fix test (#31880)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-02-03 18:47:05 +08:00
QuantumGhost
8c3e77cd0c chore: update version to 1.12.0 (#31878) 2026-02-03 18:08:15 +08:00
Stephen Zhou
476946f122 test: fix test (#31869) 2026-02-03 17:43:27 +08:00
Joel
62a698a883 fix: create app from template not support review (#31866) 2026-02-03 16:40:35 +08:00
Coding On Star
ebca36ffbb refactor: update oauth_new_user handling in AppInitializer to use parseAsBoolean (#31862)
Co-authored-by: CodingOnStar <hanxujiang@dify.com>
2026-02-03 15:20:26 +08:00
38 changed files with 860 additions and 1728 deletions

View File

@@ -1,14 +1,27 @@
from typing import Literal
from uuid import UUID
from flask import request
from flask_restx import Namespace, Resource, fields, marshal_with
from pydantic import BaseModel, Field
from werkzeug.exceptions import Forbidden
from controllers.common.schema import register_schema_models
from controllers.console import console_ns
from controllers.console.wraps import account_initialization_required, edit_permission_required, setup_required
from controllers.fastopenapi import console_router
from libs.login import current_account_with_tenant, login_required
from services.tag_service import TagService
dataset_tag_fields = {
"id": fields.String,
"name": fields.String,
"type": fields.String,
"binding_count": fields.String,
}
def build_dataset_tag_fields(api_or_ns: Namespace):
return api_or_ns.model("DataSetTag", dataset_tag_fields)
class TagBasePayload(BaseModel):
name: str = Field(description="Tag name", min_length=1, max_length=50)
@@ -32,129 +45,115 @@ class TagListQueryParam(BaseModel):
keyword: str | None = Field(None, description="Search keyword")
class TagResponse(BaseModel):
id: str = Field(description="Tag ID")
name: str = Field(description="Tag name")
type: str = Field(description="Tag type")
binding_count: int = Field(description="Number of bindings")
class TagBindingResult(BaseModel):
result: Literal["success"] = Field(description="Operation result", examples=["success"])
@console_router.get(
"/tags",
response_model=list[TagResponse],
tags=["console"],
register_schema_models(
console_ns,
TagBasePayload,
TagBindingPayload,
TagBindingRemovePayload,
TagListQueryParam,
)
@setup_required
@login_required
@account_initialization_required
def list_tags(query: TagListQueryParam) -> list[TagResponse]:
_, current_tenant_id = current_account_with_tenant()
tags = TagService.get_tags(query.type, current_tenant_id, query.keyword)
return [
TagResponse(
id=tag.id,
name=tag.name,
type=tag.type,
binding_count=int(tag.binding_count),
)
for tag in tags
]
@console_router.post(
"/tags",
response_model=TagResponse,
tags=["console"],
)
@setup_required
@login_required
@account_initialization_required
def create_tag(payload: TagBasePayload) -> TagResponse:
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, or editor
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
@console_ns.route("/tags")
class TagListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@console_ns.doc(
params={"type": 'Tag type filter. Can be "knowledge" or "app".', "keyword": "Search keyword for tag name."}
)
@marshal_with(dataset_tag_fields)
def get(self):
_, current_tenant_id = current_account_with_tenant()
raw_args = request.args.to_dict()
param = TagListQueryParam.model_validate(raw_args)
tags = TagService.get_tags(param.type, current_tenant_id, param.keyword)
tag = TagService.save_tags(payload.model_dump())
return tags, 200
return TagResponse(id=tag.id, name=tag.name, type=tag.type, binding_count=0)
@console_ns.expect(console_ns.models[TagBasePayload.__name__])
@setup_required
@login_required
@account_initialization_required
def post(self):
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, or editor
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
payload = TagBasePayload.model_validate(console_ns.payload or {})
tag = TagService.save_tags(payload.model_dump())
response = {"id": tag.id, "name": tag.name, "type": tag.type, "binding_count": 0}
return response, 200
@console_router.patch(
"/tags/<uuid:tag_id>",
response_model=TagResponse,
tags=["console"],
)
@setup_required
@login_required
@account_initialization_required
def update_tag(tag_id: UUID, payload: TagBasePayload) -> TagResponse:
current_user, _ = current_account_with_tenant()
tag_id_str = str(tag_id)
# The role of the current user in the tag table must be admin, owner, or editor
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
@console_ns.route("/tags/<uuid:tag_id>")
class TagUpdateDeleteApi(Resource):
@console_ns.expect(console_ns.models[TagBasePayload.__name__])
@setup_required
@login_required
@account_initialization_required
def patch(self, tag_id):
current_user, _ = current_account_with_tenant()
tag_id = str(tag_id)
# The role of the current user in the tag table must be admin, owner, or editor
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
tag = TagService.update_tags(payload.model_dump(), tag_id_str)
payload = TagBasePayload.model_validate(console_ns.payload or {})
tag = TagService.update_tags(payload.model_dump(), tag_id)
binding_count = TagService.get_tag_binding_count(tag_id_str)
binding_count = TagService.get_tag_binding_count(tag_id)
return TagResponse(id=tag.id, name=tag.name, type=tag.type, binding_count=binding_count)
response = {"id": tag.id, "name": tag.name, "type": tag.type, "binding_count": binding_count}
return response, 200
@setup_required
@login_required
@account_initialization_required
@edit_permission_required
def delete(self, tag_id):
tag_id = str(tag_id)
TagService.delete_tag(tag_id)
return 204
@console_router.delete(
"/tags/<uuid:tag_id>",
tags=["console"],
status_code=204,
)
@setup_required
@login_required
@account_initialization_required
@edit_permission_required
def delete_tag(tag_id: UUID) -> None:
tag_id_str = str(tag_id)
@console_ns.route("/tag-bindings/create")
class TagBindingCreateApi(Resource):
@console_ns.expect(console_ns.models[TagBindingPayload.__name__])
@setup_required
@login_required
@account_initialization_required
def post(self):
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, editor, or dataset_operator
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
TagService.delete_tag(tag_id_str)
payload = TagBindingPayload.model_validate(console_ns.payload or {})
TagService.save_tag_binding(payload.model_dump())
return {"result": "success"}, 200
@console_router.post(
"/tag-bindings/create",
response_model=TagBindingResult,
tags=["console"],
)
@setup_required
@login_required
@account_initialization_required
def create_tag_binding(payload: TagBindingPayload) -> TagBindingResult:
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, editor, or dataset_operator
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
@console_ns.route("/tag-bindings/remove")
class TagBindingDeleteApi(Resource):
@console_ns.expect(console_ns.models[TagBindingRemovePayload.__name__])
@setup_required
@login_required
@account_initialization_required
def post(self):
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, editor, or dataset_operator
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
TagService.save_tag_binding(payload.model_dump())
payload = TagBindingRemovePayload.model_validate(console_ns.payload or {})
TagService.delete_tag_binding(payload.model_dump())
return TagBindingResult(result="success")
@console_router.post(
"/tag-bindings/remove",
response_model=TagBindingResult,
tags=["console"],
)
@setup_required
@login_required
@account_initialization_required
def delete_tag_binding(payload: TagBindingRemovePayload) -> TagBindingResult:
current_user, _ = current_account_with_tenant()
# The role of the current user in the tag table must be admin, owner, editor, or dataset_operator
if not (current_user.has_edit_permission or current_user.is_dataset_editor):
raise Forbidden()
TagService.delete_tag_binding(payload.model_dump())
return TagBindingResult(result="success")
return {"result": "success"}, 200

View File

@@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.11.4"
version = "1.12.1"
requires-python = ">=3.11,<3.13"
dependencies = [

View File

@@ -24,7 +24,7 @@ class TagService:
escaped_keyword = escape_like_pattern(keyword)
query = query.where(sa.and_(Tag.name.ilike(f"%{escaped_keyword}%", escape="\\")))
query = query.group_by(Tag.id, Tag.type, Tag.name, Tag.created_at)
results = query.order_by(Tag.created_at.desc()).all()
results: list = query.order_by(Tag.created_at.desc()).all()
return results
@staticmethod
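The keyword filter above escapes the user-supplied string before it is wrapped in ILIKE wildcards. escape_like_pattern's implementation is not shown in this diff; a plausible stand-in, assuming it only makes the LIKE metacharacters literal:

def escape_like_pattern(keyword: str) -> str:
    # Escape the escape character first, then the LIKE wildcards, so a
    # keyword like "50%_off" matches literally instead of as a pattern.
    return (
        keyword.replace("\\", "\\\\")
        .replace("%", "\\%")
        .replace("_", "\\_")
    )

# The caller then adds the surrounding wildcards itself:
#   Tag.name.ilike(f"%{escape_like_pattern(keyword)}%", escape="\\")
assert escape_like_pattern("50%_off") == "50\\%\\_off"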

View File

@@ -8,7 +8,6 @@ from sqlalchemy import delete, select
from core.db.session_factory import session_factory
from core.indexing_runner import DocumentIsPausedError, IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
@@ -27,7 +26,7 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
logger.info(click.style(f"Start update document: {document_id}", fg="green"))
start_at = time.perf_counter()
with session_factory.create_session() as session:
with session_factory.create_session() as session, session.begin():
document = session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()
if not document:
@@ -36,7 +35,6 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
document.indexing_status = "parsing"
document.processing_started_at = naive_utc_now()
session.commit()
# delete all document segment and index
try:
@@ -56,7 +54,7 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
segment_ids = [segment.id for segment in segments]
segment_delete_stmt = delete(DocumentSegment).where(DocumentSegment.id.in_(segment_ids))
session.execute(segment_delete_stmt)
db.session.commit()
end_at = time.perf_counter()
logger.info(
click.style(
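This task now opens its session with session.begin(), so the transaction commits when the with block exits cleanly and rolls back on an exception; that is why the explicit session.commit() and the stray db.session.commit() could be dropped. A self-contained sketch of the pattern (SQLAlchemy 1.4+, toy model):

from sqlalchemy import Column, Integer, String, create_engine, delete
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
Session = sessionmaker(engine)

# Seed a row; session.begin() commits automatically on clean exit.
with Session() as session, session.begin():
    session.add(Item(name="doc"))

# Delete it in a second transaction; no explicit commit() is needed, and an
# exception anywhere in the block would roll the whole transaction back.
with Session() as session, session.begin():
    session.execute(delete(Item).where(Item.name == "doc"))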

View File

@@ -259,8 +259,8 @@ def _delete_app_workflow_app_logs(tenant_id: str, app_id: str):
def _delete_app_workflow_archive_logs(tenant_id: str, app_id: str):
def del_workflow_archive_log(workflow_archive_log_id: str):
db.session.query(WorkflowArchiveLog).where(WorkflowArchiveLog.id == workflow_archive_log_id).delete(
def del_workflow_archive_log(session, workflow_archive_log_id: str):
session.query(WorkflowArchiveLog).where(WorkflowArchiveLog.id == workflow_archive_log_id).delete(
synchronize_session=False
)
@@ -420,7 +420,7 @@ def delete_draft_variables_batch(app_id: str, batch_size: int = 1000) -> int:
total_files_deleted = 0
while True:
with session_factory.create_session() as session:
with session_factory.create_session() as session, session.begin():
# Get a batch of draft variable IDs along with their file_ids
query_sql = """
SELECT id, file_id FROM workflow_draft_variables
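delete_draft_variables_batch now wraps each batch in its own committed transaction. A plausible reading of the cycle-forever fix (#31934) is that deletes were previously never committed, so the next iteration's SELECT kept returning the same rows. A self-contained sketch of the batched-delete shape, using a stand-in model rather than Dify's actual schema:

from sqlalchemy import Column, String, create_engine, delete, select
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class DraftVariable(Base):  # stand-in for workflow_draft_variables
    __tablename__ = "draft_variables"
    id = Column(String, primary_key=True)
    app_id = Column(String, index=True)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
Session = sessionmaker(engine)

def delete_draft_variables_batch(app_id: str, batch_size: int = 1000) -> int:
    if batch_size <= 0:
        raise ValueError("batch_size must be positive")
    total = 0
    while True:
        # One committed transaction per batch; the loop terminates only when
        # a batch comes back empty, so every iteration must make progress.
        with Session() as session, session.begin():
            ids = session.scalars(
                select(DraftVariable.id)
                .where(DraftVariable.app_id == app_id)
                .limit(batch_size)
            ).all()
            if not ids:
                return total
            session.execute(
                delete(DraftVariable)
                .where(DraftVariable.id.in_(ids))
                .execution_options(synchronize_session=False)
            )
            total += len(ids)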

View File

@@ -10,7 +10,10 @@ from models import Tenant
from models.enums import CreatorUserRole
from models.model import App, UploadFile
from models.workflow import WorkflowDraftVariable, WorkflowDraftVariableFile
from tasks.remove_app_and_related_data_task import _delete_draft_variables, delete_draft_variables_batch
from tasks.remove_app_and_related_data_task import (
_delete_draft_variables,
delete_draft_variables_batch,
)
@pytest.fixture
@@ -297,12 +300,18 @@ class TestDeleteDraftVariablesWithOffloadIntegration:
def test_delete_draft_variables_with_offload_data(self, mock_storage, setup_offload_test_data):
data = setup_offload_test_data
app_id = data["app"].id
upload_file_ids = [uf.id for uf in data["upload_files"]]
variable_file_ids = [vf.id for vf in data["variable_files"]]
mock_storage.delete.return_value = None
with session_factory.create_session() as session:
draft_vars_before = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
var_files_before = session.query(WorkflowDraftVariableFile).count()
upload_files_before = session.query(UploadFile).count()
var_files_before = (
session.query(WorkflowDraftVariableFile)
.where(WorkflowDraftVariableFile.id.in_(variable_file_ids))
.count()
)
upload_files_before = session.query(UploadFile).where(UploadFile.id.in_(upload_file_ids)).count()
assert draft_vars_before == 3
assert var_files_before == 2
assert upload_files_before == 2
@@ -315,8 +324,12 @@ class TestDeleteDraftVariablesWithOffloadIntegration:
assert draft_vars_after == 0
with session_factory.create_session() as session:
var_files_after = session.query(WorkflowDraftVariableFile).count()
upload_files_after = session.query(UploadFile).count()
var_files_after = (
session.query(WorkflowDraftVariableFile)
.where(WorkflowDraftVariableFile.id.in_(variable_file_ids))
.count()
)
upload_files_after = session.query(UploadFile).where(UploadFile.id.in_(upload_file_ids)).count()
assert var_files_after == 0
assert upload_files_after == 0
@@ -329,6 +342,8 @@ class TestDeleteDraftVariablesWithOffloadIntegration:
def test_delete_draft_variables_storage_failure_continues_cleanup(self, mock_storage, setup_offload_test_data):
data = setup_offload_test_data
app_id = data["app"].id
upload_file_ids = [uf.id for uf in data["upload_files"]]
variable_file_ids = [vf.id for vf in data["variable_files"]]
mock_storage.delete.side_effect = [Exception("Storage error"), None]
deleted_count = delete_draft_variables_batch(app_id, batch_size=10)
@@ -339,8 +354,12 @@ class TestDeleteDraftVariablesWithOffloadIntegration:
assert draft_vars_after == 0
with session_factory.create_session() as session:
var_files_after = session.query(WorkflowDraftVariableFile).count()
upload_files_after = session.query(UploadFile).count()
var_files_after = (
session.query(WorkflowDraftVariableFile)
.where(WorkflowDraftVariableFile.id.in_(variable_file_ids))
.count()
)
upload_files_after = session.query(UploadFile).where(UploadFile.id.in_(upload_file_ids)).count()
assert var_files_after == 0
assert upload_files_after == 0
@@ -395,3 +414,275 @@ class TestDeleteDraftVariablesWithOffloadIntegration:
if app2_obj:
session.delete(app2_obj)
session.commit()
class TestDeleteDraftVariablesSessionCommit:
"""Test suite to verify session commit behavior in delete_draft_variables_batch."""
@pytest.fixture
def setup_offload_test_data(self, app_and_tenant):
"""Create test data with offload files for session commit tests."""
from core.variables.types import SegmentType
from libs.datetime_utils import naive_utc_now
tenant, app = app_and_tenant
with session_factory.create_session() as session:
upload_file1 = UploadFile(
tenant_id=tenant.id,
storage_type="local",
key="test/file1.json",
name="file1.json",
size=1024,
extension="json",
mime_type="application/json",
created_by_role=CreatorUserRole.ACCOUNT,
created_by=str(uuid.uuid4()),
created_at=naive_utc_now(),
used=False,
)
upload_file2 = UploadFile(
tenant_id=tenant.id,
storage_type="local",
key="test/file2.json",
name="file2.json",
size=2048,
extension="json",
mime_type="application/json",
created_by_role=CreatorUserRole.ACCOUNT,
created_by=str(uuid.uuid4()),
created_at=naive_utc_now(),
used=False,
)
session.add(upload_file1)
session.add(upload_file2)
session.flush()
var_file1 = WorkflowDraftVariableFile(
tenant_id=tenant.id,
app_id=app.id,
user_id=str(uuid.uuid4()),
upload_file_id=upload_file1.id,
size=1024,
length=10,
value_type=SegmentType.STRING,
)
var_file2 = WorkflowDraftVariableFile(
tenant_id=tenant.id,
app_id=app.id,
user_id=str(uuid.uuid4()),
upload_file_id=upload_file2.id,
size=2048,
length=20,
value_type=SegmentType.OBJECT,
)
session.add(var_file1)
session.add(var_file2)
session.flush()
draft_var1 = WorkflowDraftVariable.new_node_variable(
app_id=app.id,
node_id="node_1",
name="large_var_1",
value=StringSegment(value="truncated..."),
node_execution_id=str(uuid.uuid4()),
file_id=var_file1.id,
)
draft_var2 = WorkflowDraftVariable.new_node_variable(
app_id=app.id,
node_id="node_2",
name="large_var_2",
value=StringSegment(value="truncated..."),
node_execution_id=str(uuid.uuid4()),
file_id=var_file2.id,
)
draft_var3 = WorkflowDraftVariable.new_node_variable(
app_id=app.id,
node_id="node_3",
name="regular_var",
value=StringSegment(value="regular_value"),
node_execution_id=str(uuid.uuid4()),
)
session.add(draft_var1)
session.add(draft_var2)
session.add(draft_var3)
session.commit()
data = {
"app": app,
"tenant": tenant,
"upload_files": [upload_file1, upload_file2],
"variable_files": [var_file1, var_file2],
"draft_variables": [draft_var1, draft_var2, draft_var3],
}
yield data
with session_factory.create_session() as session:
for table, ids in [
(WorkflowDraftVariable, [v.id for v in data["draft_variables"]]),
(WorkflowDraftVariableFile, [vf.id for vf in data["variable_files"]]),
(UploadFile, [uf.id for uf in data["upload_files"]]),
]:
cleanup_query = delete(table).where(table.id.in_(ids)).execution_options(synchronize_session=False)
session.execute(cleanup_query)
session.commit()
@pytest.fixture
def setup_commit_test_data(self, app_and_tenant):
"""Create test data for session commit tests."""
tenant, app = app_and_tenant
variable_ids: list[str] = []
with session_factory.create_session() as session:
variables = []
for i in range(10):
var = WorkflowDraftVariable.new_node_variable(
app_id=app.id,
node_id=f"node_{i}",
name=f"var_{i}",
value=StringSegment(value="test_value"),
node_execution_id=str(uuid.uuid4()),
)
session.add(var)
variables.append(var)
session.commit()
variable_ids = [v.id for v in variables]
yield {
"app": app,
"tenant": tenant,
"variable_ids": variable_ids,
}
with session_factory.create_session() as session:
cleanup_query = (
delete(WorkflowDraftVariable)
.where(WorkflowDraftVariable.id.in_(variable_ids))
.execution_options(synchronize_session=False)
)
session.execute(cleanup_query)
session.commit()
def test_session_commit_is_called_after_each_batch(self, setup_commit_test_data):
"""Test that session.begin() is used for automatic transaction management."""
data = setup_commit_test_data
app_id = data["app"].id
# Since session.begin() is used, the transaction is automatically committed
# when the with block exits successfully. We verify this by checking that
# data is actually persisted.
deleted_count = delete_draft_variables_batch(app_id, batch_size=3)
# Verify all data was deleted (proves transaction was committed)
with session_factory.create_session() as session:
remaining_count = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
assert deleted_count == 10
assert remaining_count == 0
def test_data_persisted_after_batch_deletion(self, setup_commit_test_data):
"""Test that data is actually persisted to database after batch deletion with commits."""
data = setup_commit_test_data
app_id = data["app"].id
variable_ids = data["variable_ids"]
# Verify initial state
with session_factory.create_session() as session:
initial_count = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
assert initial_count == 10
# Perform deletion with small batch size to force multiple commits
deleted_count = delete_draft_variables_batch(app_id, batch_size=3)
assert deleted_count == 10
# Verify all data is deleted in a new session (proves commits worked)
with session_factory.create_session() as session:
final_count = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
assert final_count == 0
# Verify specific IDs are deleted
with session_factory.create_session() as session:
remaining_vars = (
session.query(WorkflowDraftVariable).where(WorkflowDraftVariable.id.in_(variable_ids)).count()
)
assert remaining_vars == 0
def test_session_commit_with_empty_dataset(self, setup_commit_test_data):
"""Test session behavior when deleting from an empty dataset."""
nonexistent_app_id = str(uuid.uuid4())
# Should not raise any errors and should return 0
deleted_count = delete_draft_variables_batch(nonexistent_app_id, batch_size=10)
assert deleted_count == 0
def test_session_commit_with_single_batch(self, setup_commit_test_data):
"""Test that commit happens correctly when all data fits in a single batch."""
data = setup_commit_test_data
app_id = data["app"].id
with session_factory.create_session() as session:
initial_count = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
assert initial_count == 10
# Delete all in a single batch
deleted_count = delete_draft_variables_batch(app_id, batch_size=100)
assert deleted_count == 10
# Verify data is persisted
with session_factory.create_session() as session:
final_count = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
assert final_count == 0
def test_invalid_batch_size_raises_error(self, setup_commit_test_data):
"""Test that invalid batch size raises ValueError."""
data = setup_commit_test_data
app_id = data["app"].id
with pytest.raises(ValueError, match="batch_size must be positive"):
delete_draft_variables_batch(app_id, batch_size=0)
with pytest.raises(ValueError, match="batch_size must be positive"):
delete_draft_variables_batch(app_id, batch_size=-1)
@patch("extensions.ext_storage.storage")
def test_session_commit_with_offload_data_cleanup(self, mock_storage, setup_offload_test_data):
"""Test that session commits correctly when cleaning up offload data."""
data = setup_offload_test_data
app_id = data["app"].id
upload_file_ids = [uf.id for uf in data["upload_files"]]
mock_storage.delete.return_value = None
# Verify initial state
with session_factory.create_session() as session:
draft_vars_before = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
var_files_before = (
session.query(WorkflowDraftVariableFile)
.where(WorkflowDraftVariableFile.id.in_([vf.id for vf in data["variable_files"]]))
.count()
)
upload_files_before = session.query(UploadFile).where(UploadFile.id.in_(upload_file_ids)).count()
assert draft_vars_before == 3
assert var_files_before == 2
assert upload_files_before == 2
# Delete variables with offload data
deleted_count = delete_draft_variables_batch(app_id, batch_size=10)
assert deleted_count == 3
# Verify all data is persisted (deleted) in new session
with session_factory.create_session() as session:
draft_vars_after = session.query(WorkflowDraftVariable).filter_by(app_id=app_id).count()
var_files_after = (
session.query(WorkflowDraftVariableFile)
.where(WorkflowDraftVariableFile.id.in_([vf.id for vf in data["variable_files"]]))
.count()
)
upload_files_after = session.query(UploadFile).where(UploadFile.id.in_(upload_file_ids)).count()
assert draft_vars_after == 0
assert var_files_after == 0
assert upload_files_after == 0
# Verify storage cleanup was called
assert mock_storage.delete.call_count == 2

View File

@@ -0,0 +1,182 @@
from unittest.mock import MagicMock, patch
import pytest
from faker import Faker
from models import Account, Tenant, TenantAccountJoin, TenantAccountRole
from models.dataset import Dataset, Document, DocumentSegment
from tasks.document_indexing_update_task import document_indexing_update_task
class TestDocumentIndexingUpdateTask:
@pytest.fixture
def mock_external_dependencies(self):
"""Patch external collaborators used by the update task.
- IndexProcessorFactory.init_index_processor().clean(...)
- IndexingRunner.run([...])
"""
with (
patch("tasks.document_indexing_update_task.IndexProcessorFactory") as mock_factory,
patch("tasks.document_indexing_update_task.IndexingRunner") as mock_runner,
):
processor_instance = MagicMock()
mock_factory.return_value.init_index_processor.return_value = processor_instance
runner_instance = MagicMock()
mock_runner.return_value = runner_instance
yield {
"factory": mock_factory,
"processor": processor_instance,
"runner": mock_runner,
"runner_instance": runner_instance,
}
def _create_dataset_document_with_segments(self, db_session_with_containers, *, segment_count: int = 2):
fake = Faker()
# Account and tenant
account = Account(
email=fake.email(),
name=fake.name(),
interface_language="en-US",
status="active",
)
db_session_with_containers.add(account)
db_session_with_containers.commit()
tenant = Tenant(name=fake.company(), status="normal")
db_session_with_containers.add(tenant)
db_session_with_containers.commit()
join = TenantAccountJoin(
tenant_id=tenant.id,
account_id=account.id,
role=TenantAccountRole.OWNER,
current=True,
)
db_session_with_containers.add(join)
db_session_with_containers.commit()
# Dataset and document
dataset = Dataset(
tenant_id=tenant.id,
name=fake.company(),
description=fake.text(max_nb_chars=64),
data_source_type="upload_file",
indexing_technique="high_quality",
created_by=account.id,
)
db_session_with_containers.add(dataset)
db_session_with_containers.commit()
document = Document(
tenant_id=tenant.id,
dataset_id=dataset.id,
position=0,
data_source_type="upload_file",
batch="test_batch",
name=fake.file_name(),
created_from="upload_file",
created_by=account.id,
indexing_status="waiting",
enabled=True,
doc_form="text_model",
)
db_session_with_containers.add(document)
db_session_with_containers.commit()
# Segments
node_ids = []
for i in range(segment_count):
node_id = f"node-{i + 1}"
seg = DocumentSegment(
tenant_id=tenant.id,
dataset_id=dataset.id,
document_id=document.id,
position=i,
content=fake.text(max_nb_chars=32),
answer=None,
word_count=10,
tokens=5,
index_node_id=node_id,
status="completed",
created_by=account.id,
)
db_session_with_containers.add(seg)
node_ids.append(node_id)
db_session_with_containers.commit()
# Refresh to ensure ORM state
db_session_with_containers.refresh(dataset)
db_session_with_containers.refresh(document)
return dataset, document, node_ids
def test_cleans_segments_and_reindexes(self, db_session_with_containers, mock_external_dependencies):
dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers)
# Act
document_indexing_update_task(dataset.id, document.id)
# Ensure we see committed changes from another session
db_session_with_containers.expire_all()
# Assert document status updated before reindex
updated = db_session_with_containers.query(Document).where(Document.id == document.id).first()
assert updated.indexing_status == "parsing"
assert updated.processing_started_at is not None
# Segments should be deleted
remaining = (
db_session_with_containers.query(DocumentSegment).where(DocumentSegment.document_id == document.id).count()
)
assert remaining == 0
# Assert index processor clean was called with expected args
clean_call = mock_external_dependencies["processor"].clean.call_args
assert clean_call is not None
args, kwargs = clean_call
# args[0] is a Dataset instance (from another session) — validate by id
assert getattr(args[0], "id", None) == dataset.id
# args[1] should contain our node_ids
assert set(args[1]) == set(node_ids)
assert kwargs.get("with_keywords") is True
assert kwargs.get("delete_child_chunks") is True
# Assert indexing runner invoked with the updated document
run_call = mock_external_dependencies["runner_instance"].run.call_args
assert run_call is not None
run_docs = run_call[0][0]
assert len(run_docs) == 1
first = run_docs[0]
assert getattr(first, "id", None) == document.id
def test_clean_error_is_logged_and_indexing_continues(self, db_session_with_containers, mock_external_dependencies):
dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers)
# Force clean to raise; task should continue to indexing
mock_external_dependencies["processor"].clean.side_effect = Exception("boom")
document_indexing_update_task(dataset.id, document.id)
# Ensure we see committed changes from another session
db_session_with_containers.expire_all()
# Indexing should still be triggered
mock_external_dependencies["runner_instance"].run.assert_called_once()
# Segments should remain (since clean failed before DB delete)
remaining = (
db_session_with_containers.query(DocumentSegment).where(DocumentSegment.document_id == document.id).count()
)
assert remaining > 0
def test_document_not_found_noop(self, db_session_with_containers, mock_external_dependencies):
fake = Faker()
# Act with non-existent document id
document_indexing_update_task(dataset_id=fake.uuid4(), document_id=fake.uuid4())
# Neither processor nor runner should be called
mock_external_dependencies["processor"].clean.assert_not_called()
mock_external_dependencies["runner_instance"].run.assert_not_called()

View File

@@ -1,222 +0,0 @@
import builtins
import contextlib
import importlib
import sys
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
import pytest
from flask import Flask
from flask.views import MethodView
from extensions import ext_fastopenapi
from extensions.ext_database import db
@pytest.fixture
def app():
app = Flask(__name__)
app.config["TESTING"] = True
app.config["SECRET_KEY"] = "test-secret"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db.init_app(app)
return app
@pytest.fixture(autouse=True)
def fix_method_view_issue(monkeypatch):
if not hasattr(builtins, "MethodView"):
monkeypatch.setattr(builtins, "MethodView", MethodView, raising=False)
def _create_isolated_router():
import controllers.fastopenapi
router_class = type(controllers.fastopenapi.console_router)
return router_class()
@contextlib.contextmanager
def _patch_auth_and_router(temp_router):
def noop(func):
return func
default_user = MagicMock(has_edit_permission=True, is_dataset_editor=False)
with (
patch("controllers.fastopenapi.console_router", temp_router),
patch("extensions.ext_fastopenapi.console_router", temp_router),
patch("controllers.console.wraps.setup_required", side_effect=noop),
patch("libs.login.login_required", side_effect=noop),
patch("controllers.console.wraps.account_initialization_required", side_effect=noop),
patch("controllers.console.wraps.edit_permission_required", side_effect=noop),
patch("libs.login.current_account_with_tenant", return_value=(default_user, "tenant-id")),
patch("configs.dify_config.EDITION", "CLOUD"),
):
import extensions.ext_fastopenapi
importlib.reload(extensions.ext_fastopenapi)
yield
def _force_reload_module(target_module: str, alias_module: str):
if target_module in sys.modules:
del sys.modules[target_module]
if alias_module in sys.modules:
del sys.modules[alias_module]
module = importlib.import_module(target_module)
sys.modules[alias_module] = sys.modules[target_module]
return module
def _dedupe_routes(router):
seen = set()
unique_routes = []
for path, method, endpoint in reversed(router.get_routes()):
key = (path, method, endpoint.__name__)
if key in seen:
continue
seen.add(key)
unique_routes.append((path, method, endpoint))
router._routes = list(reversed(unique_routes))
def _cleanup_modules(target_module: str, alias_module: str):
if target_module in sys.modules:
del sys.modules[target_module]
if alias_module in sys.modules:
del sys.modules[alias_module]
@pytest.fixture
def mock_tags_module_env():
target_module = "controllers.console.tag.tags"
alias_module = "api.controllers.console.tag.tags"
temp_router = _create_isolated_router()
try:
with _patch_auth_and_router(temp_router):
tags_module = _force_reload_module(target_module, alias_module)
_dedupe_routes(temp_router)
yield tags_module
finally:
_cleanup_modules(target_module, alias_module)
def test_list_tags_success(app: Flask, mock_tags_module_env):
# Arrange
tag = SimpleNamespace(id="tag-1", name="Alpha", type="app", binding_count=2)
with patch("controllers.console.tag.tags.TagService.get_tags", return_value=[tag]):
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.get("/console/api/tags?type=app&keyword=Alpha")
# Assert
assert response.status_code == 200
assert response.get_json() == [
{"id": "tag-1", "name": "Alpha", "type": "app", "binding_count": 2},
]
def test_create_tag_success(app: Flask, mock_tags_module_env):
# Arrange
tag = SimpleNamespace(id="tag-2", name="Beta", type="app")
with patch("controllers.console.tag.tags.TagService.save_tags", return_value=tag) as mock_save:
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.post("/console/api/tags", json={"name": "Beta", "type": "app"})
# Assert
assert response.status_code == 200
assert response.get_json() == {
"id": "tag-2",
"name": "Beta",
"type": "app",
"binding_count": 0,
}
mock_save.assert_called_once_with({"name": "Beta", "type": "app"})
def test_update_tag_success(app: Flask, mock_tags_module_env):
# Arrange
tag = SimpleNamespace(id="tag-3", name="Gamma", type="app")
with (
patch("controllers.console.tag.tags.TagService.update_tags", return_value=tag) as mock_update,
patch("controllers.console.tag.tags.TagService.get_tag_binding_count", return_value=4),
):
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.patch(
"/console/api/tags/11111111-1111-1111-1111-111111111111",
json={"name": "Gamma", "type": "app"},
)
# Assert
assert response.status_code == 200
assert response.get_json() == {
"id": "tag-3",
"name": "Gamma",
"type": "app",
"binding_count": 4,
}
mock_update.assert_called_once_with(
{"name": "Gamma", "type": "app"},
"11111111-1111-1111-1111-111111111111",
)
def test_delete_tag_success(app: Flask, mock_tags_module_env):
# Arrange
with patch("controllers.console.tag.tags.TagService.delete_tag") as mock_delete:
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.delete("/console/api/tags/11111111-1111-1111-1111-111111111111")
# Assert
assert response.status_code == 204
mock_delete.assert_called_once_with("11111111-1111-1111-1111-111111111111")
def test_create_tag_binding_success(app: Flask, mock_tags_module_env):
# Arrange
payload = {"tag_ids": ["tag-1", "tag-2"], "target_id": "target-1", "type": "app"}
with patch("controllers.console.tag.tags.TagService.save_tag_binding") as mock_bind:
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.post("/console/api/tag-bindings/create", json=payload)
# Assert
assert response.status_code == 200
assert response.get_json() == {"result": "success"}
mock_bind.assert_called_once_with(payload)
def test_delete_tag_binding_success(app: Flask, mock_tags_module_env):
# Arrange
payload = {"tag_id": "tag-1", "target_id": "target-1", "type": "app"}
with patch("controllers.console.tag.tags.TagService.delete_tag_binding") as mock_unbind:
ext_fastopenapi.init_app(app)
client = app.test_client()
# Act
response = client.post("/console/api/tag-bindings/remove", json=payload)
# Assert
assert response.status_code == 200
assert response.get_json() == {"result": "success"}
mock_unbind.assert_called_once_with(payload)

View File

@@ -350,7 +350,7 @@ class TestDeleteWorkflowArchiveLogs:
mock_query.where.return_value = mock_delete_query
mock_db.session.query.return_value = mock_query
delete_func("log-1")
delete_func(mock_db.session, "log-1")
mock_db.session.query.assert_called_once_with(WorkflowArchiveLog)
mock_query.where.assert_called_once()

api/uv.lock generated
View File

@@ -1368,7 +1368,7 @@ wheels = [
[[package]]
name = "dify-api"
version = "1.11.4"
version = "1.12.1"
source = { virtual = "." }
dependencies = [
{ name = "aliyun-log-python-sdk" },

View File

@@ -1,5 +1,5 @@
#!/bin/bash
set -x
set -euxo pipefail
SCRIPT_DIR="$(dirname "$(realpath "$0")")"
cd "$SCRIPT_DIR/../.."

View File

@@ -21,7 +21,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -63,7 +63,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -102,7 +102,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -132,7 +132,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.11.4
image: langgenius/dify-web:1.12.1
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -662,13 +662,14 @@ services:
- "${IRIS_SUPER_SERVER_PORT:-1972}:1972"
- "${IRIS_WEB_SERVER_PORT:-52773}:52773"
volumes:
- ./volumes/iris:/opt/iris
- ./volumes/iris:/durable
- ./iris/iris-init.script:/iris-init.script
- ./iris/docker-entrypoint.sh:/custom-entrypoint.sh
entrypoint: ["/custom-entrypoint.sh"]
tty: true
environment:
TZ: ${IRIS_TIMEZONE:-UTC}
ISC_DATA_DIRECTORY: /durable/iris
# Oracle vector database
oracle:

View File

@@ -707,7 +707,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -749,7 +749,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -788,7 +788,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.11.4
image: langgenius/dify-api:1.12.1
restart: always
environment:
# Use the shared environment variables.
@@ -818,7 +818,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.11.4
image: langgenius/dify-web:1.12.1
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -1348,13 +1348,14 @@ services:
- "${IRIS_SUPER_SERVER_PORT:-1972}:1972"
- "${IRIS_WEB_SERVER_PORT:-52773}:52773"
volumes:
- ./volumes/iris:/opt/iris
- ./volumes/iris:/durable
- ./iris/iris-init.script:/iris-init.script
- ./iris/docker-entrypoint.sh:/custom-entrypoint.sh
entrypoint: ["/custom-entrypoint.sh"]
tty: true
environment:
TZ: ${IRIS_TIMEZONE:-UTC}
ISC_DATA_DIRECTORY: /durable/iris
# Oracle vector database
oracle:

View File

@@ -1,15 +1,33 @@
#!/bin/bash
set -e
# IRIS configuration flag file
IRIS_CONFIG_DONE="/opt/iris/.iris-configured"
# IRIS configuration flag file (stored in durable directory to persist with data)
IRIS_CONFIG_DONE="/durable/.iris-configured"
# Function to wait for IRIS to be ready
wait_for_iris() {
echo "Waiting for IRIS to be ready..."
local max_attempts=30
local attempt=1
while [ "$attempt" -le "$max_attempts" ]; do
if iris qlist IRIS 2>/dev/null | grep -q "running"; then
echo "IRIS is ready."
return 0
fi
echo "Attempt $attempt/$max_attempts: IRIS not ready yet, waiting..."
sleep 2
attempt=$((attempt + 1))
done
echo "ERROR: IRIS failed to start within expected time." >&2
return 1
}
# Function to configure IRIS
configure_iris() {
echo "Configuring IRIS for first-time setup..."
# Wait for IRIS to be fully started
sleep 5
wait_for_iris
# Execute the initialization script
iris session IRIS < /iris-init.script

View File

@@ -3,7 +3,7 @@
import type { ReactNode } from 'react'
import Cookies from 'js-cookie'
import { usePathname, useRouter, useSearchParams } from 'next/navigation'
import { parseAsString, useQueryState } from 'nuqs'
import { parseAsBoolean, useQueryState } from 'nuqs'
import { useCallback, useEffect, useState } from 'react'
import {
EDUCATION_VERIFY_URL_SEARCHPARAMS_ACTION,
@@ -28,7 +28,7 @@ export const AppInitializer = ({
const [init, setInit] = useState(false)
const [oauthNewUser, setOauthNewUser] = useQueryState(
'oauth_new_user',
parseAsString.withOptions({ history: 'replace' }),
parseAsBoolean.withOptions({ history: 'replace' }),
)
const isSetupFinished = useCallback(async () => {
@@ -46,7 +46,7 @@ export const AppInitializer = ({
(async () => {
const action = searchParams.get('action')
if (oauthNewUser === 'true') {
if (oauthNewUser) {
let utmInfo = null
const utmInfoStr = Cookies.get('utm_info')
if (utmInfoStr) {

View File

@@ -62,19 +62,19 @@ const AppCard = ({
{app.description}
</div>
</div>
{canCreate && (
{(canCreate || isTrialApp) && (
<div className={cn('absolute bottom-0 left-0 right-0 hidden bg-gradient-to-t from-components-panel-gradient-2 from-[60.27%] to-transparent p-4 pt-8 group-hover:flex')}>
<div className={cn('grid h-8 w-full grid-cols-1 items-center space-x-2', isTrialApp && 'grid-cols-2')}>
<Button variant="primary" onClick={() => onCreate()}>
<PlusIcon className="mr-1 h-4 w-4" />
<span className="text-xs">{t('newApp.useTemplate', { ns: 'app' })}</span>
</Button>
{isTrialApp && (
<Button onClick={showTryAPPPanel(app.app_id)}>
<RiInformation2Line className="mr-1 size-4" />
<span>{t('appCard.try', { ns: 'explore' })}</span>
<div className={cn('grid h-8 w-full grid-cols-1 items-center space-x-2', canCreate && 'grid-cols-2')}>
{canCreate && (
<Button variant="primary" onClick={() => onCreate()}>
<PlusIcon className="mr-1 h-4 w-4" />
<span className="text-xs">{t('newApp.useTemplate', { ns: 'app' })}</span>
</Button>
)}
<Button onClick={showTryAPPPanel(app.app_id)}>
<RiInformation2Line className="mr-1 size-4" />
<span>{t('appCard.try', { ns: 'explore' })}</span>
</Button>
</div>
</div>
)}

View File

@@ -154,7 +154,7 @@ export const GeneralChunkingOptions: FC<GeneralChunkingOptionsProps> = ({
</div>
))}
{
showSummaryIndexSetting && (
showSummaryIndexSetting && IS_CE_EDITION && (
<div className="mt-3">
<SummaryIndexSetting
entry="create-document"

View File

@@ -12,6 +12,7 @@ import Divider from '@/app/components/base/divider'
import { ParentChildChunk } from '@/app/components/base/icons/src/vender/knowledge'
import RadioCard from '@/app/components/base/radio-card'
import SummaryIndexSetting from '@/app/components/datasets/settings/summary-index-setting'
import { IS_CE_EDITION } from '@/config'
import { ChunkingMode } from '@/models/datasets'
import FileList from '../../assets/file-list-3-fill.svg'
import Note from '../../assets/note-mod.svg'
@@ -191,7 +192,7 @@ export const ParentChildOptions: FC<ParentChildOptionsProps> = ({
</div>
))}
{
showSummaryIndexSetting && (
showSummaryIndexSetting && IS_CE_EDITION && (
<div className="mt-3">
<SummaryIndexSetting
entry="create-document"

View File

@@ -26,6 +26,7 @@ import CustomPopover from '@/app/components/base/popover'
import Switch from '@/app/components/base/switch'
import { ToastContext } from '@/app/components/base/toast'
import Tooltip from '@/app/components/base/tooltip'
import { IS_CE_EDITION } from '@/config'
import { DataSourceType, DocumentActionType } from '@/models/datasets'
import {
useDocumentArchive,
@@ -263,10 +264,14 @@ const Operations = ({
<span className={s.actionName}>{t('list.action.sync', { ns: 'datasetDocuments' })}</span>
</div>
)}
<div className={s.actionItem} onClick={() => onOperate('summary')}>
<SearchLinesSparkle className="h-4 w-4 text-text-tertiary" />
<span className={s.actionName}>{t('list.action.summary', { ns: 'datasetDocuments' })}</span>
</div>
{
IS_CE_EDITION && (
<div className={s.actionItem} onClick={() => onOperate('summary')}>
<SearchLinesSparkle className="h-4 w-4 text-text-tertiary" />
<span className={s.actionName}>{t('list.action.summary', { ns: 'datasetDocuments' })}</span>
</div>
)
}
<Divider className="my-1" />
</>
)}

View File

@@ -7,6 +7,7 @@ import Button from '@/app/components/base/button'
import Confirm from '@/app/components/base/confirm'
import Divider from '@/app/components/base/divider'
import { SearchLinesSparkle } from '@/app/components/base/icons/src/vender/knowledge'
import { IS_CE_EDITION } from '@/config'
import { cn } from '@/utils/classnames'
const i18nPrefix = 'batchAction'
@@ -87,7 +88,7 @@ const BatchAction: FC<IBatchActionProps> = ({
<span className="px-0.5">{t('metadata.metadata', { ns: 'dataset' })}</span>
</Button>
)}
{onBatchSummary && (
{onBatchSummary && IS_CE_EDITION && (
<Button
variant="ghost"
className="gap-x-0.5 px-3"

View File

@@ -3,8 +3,6 @@ import type { FC, ReactNode } from 'react'
import type { SliceProps } from './type'
import { autoUpdate, flip, FloatingFocusManager, offset, shift, useDismiss, useFloating, useHover, useInteractions, useRole } from '@floating-ui/react'
import { RiDeleteBinLine } from '@remixicon/react'
// @ts-expect-error no types available
import lineClamp from 'line-clamp'
import { useState } from 'react'
import ActionButton, { ActionButtonState } from '@/app/components/base/action-button'
import { cn } from '@/utils/classnames'
@@ -58,12 +56,8 @@ export const EditSlice: FC<EditSliceProps> = (props) => {
<>
<SliceContainer
{...rest}
className={cn('mr-0 block', className)}
ref={(ref) => {
refs.setReference(ref)
if (ref)
lineClamp(ref, 4)
}}
className={cn('mr-0 line-clamp-4 block', className)}
ref={refs.setReference}
{...getReferenceProps()}
>
<SliceLabel

View File

@@ -21,6 +21,7 @@ import RetrievalMethodConfig from '@/app/components/datasets/common/retrieval-me
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelList } from '@/app/components/header/account-setting/model-provider-page/hooks'
import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector'
import { IS_CE_EDITION } from '@/config'
import { useSelector as useAppContextWithSelector } from '@/context/app-context'
import { useDatasetDetailContextWithSelector } from '@/context/dataset-detail'
import { useDocLink } from '@/context/i18n'
@@ -359,7 +360,7 @@ const Form = () => {
{
indexMethod === IndexingType.QUALIFIED
&& [ChunkingMode.text, ChunkingMode.parentChild].includes(currentDataset?.doc_form as ChunkingMode)
&& (
&& IS_CE_EDITION && (
<>
<Divider
type="horizontal"

View File

@@ -74,11 +74,15 @@ const AppCard = ({
</div>
{isExplore && (canCreate || isTrialApp) && (
<div className={cn('absolute bottom-0 left-0 right-0 hidden bg-gradient-to-t from-components-panel-gradient-2 from-[60.27%] to-transparent p-4 pt-8 group-hover:flex')}>
<div className={cn('grid h-8 w-full grid-cols-2 space-x-2')}>
<Button variant="primary" className="h-7" onClick={() => onCreate()}>
<PlusIcon className="mr-1 h-4 w-4" />
<span className="text-xs">{t('appCard.addToWorkspace', { ns: 'explore' })}</span>
</Button>
<div className={cn('grid h-8 w-full grid-cols-1 space-x-2', canCreate && 'grid-cols-2')}>
{
canCreate && (
<Button variant="primary" className="h-7" onClick={() => onCreate()}>
<PlusIcon className="mr-1 h-4 w-4" />
<span className="text-xs">{t('appCard.addToWorkspace', { ns: 'explore' })}</span>
</Button>
)
}
<Button className="h-7" onClick={showTryAPPPanel(app.app_id)}>
<RiInformation2Line className="mr-1 size-4" />
<span>{t('appCard.try', { ns: 'explore' })}</span>

View File

@@ -16,6 +16,14 @@ vi.mock('react-i18next', () => ({
}),
}))
vi.mock('@/config', async (importOriginal) => {
const actual = await importOriginal() as object
return {
...actual,
IS_CLOUD_EDITION: true,
}
})
const mockUseGetTryAppInfo = vi.fn()
vi.mock('@/service/use-try-app', () => ({

View File

@@ -14,6 +14,14 @@ vi.mock('react-i18next', () => ({
}),
}))
vi.mock('@/config', async (importOriginal) => {
const actual = await importOriginal() as object
return {
...actual,
IS_CLOUD_EDITION: true,
}
})
describe('Tab', () => {
afterEach(() => {
cleanup()

View File

@@ -1,5 +1,5 @@
import type { TriggerSubscriptionBuilder } from '@/app/components/workflow/block-selector/types'
import { fireEvent, render, screen, waitFor } from '@testing-library/react'
import { act, fireEvent, render, screen, waitFor } from '@testing-library/react'
import * as React from 'react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Import after mocks
@@ -821,6 +821,9 @@ describe('CommonCreateModal', () => {
expect(mockCreateBuilder).toHaveBeenCalled()
})
// Flush pending state updates from createBuilder promise resolution
await act(async () => {})
const input = screen.getByTestId('form-field-webhook_url')
fireEvent.change(input, { target: { value: 'https://example.com/webhook' } })

View File

@@ -1,5 +1,5 @@
import type { PropsWithChildren } from 'react'
import { cleanup, fireEvent, render, screen, waitFor } from '@testing-library/react'
import { act, cleanup, fireEvent, render, screen, waitFor } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { DSLImportStatus } from '@/models/app'
import UpdateDSLModal from './update-dsl-modal'
@@ -140,13 +140,13 @@ class MockFileReader {
onload: ((e: { target: { result: string | null } }) => void) | null = null
readAsText(_file: File) {
// Simulate async file reading
setTimeout(() => {
// Simulate async file reading using queueMicrotask for more reliable async behavior
queueMicrotask(() => {
this.result = 'test file content'
if (this.onload) {
this.onload({ target: { result: this.result } })
}
}, 0)
})
}
}
@@ -174,6 +174,7 @@ describe('UpdateDSLModal', () => {
status: DSLImportStatus.COMPLETED,
pipeline_id: 'test-pipeline-id',
})
mockHandleCheckPluginDependencies.mockResolvedValue(undefined)
// Mock FileReader
originalFileReader = globalThis.FileReader
@@ -472,14 +473,14 @@ describe('UpdateDSLModal', () => {
await waitFor(() => {
const importButton = screen.getByText('common.overwriteAndImport')
expect(importButton).not.toBeDisabled()
})
}, { timeout: 1000 })
const importButton = screen.getByText('common.overwriteAndImport')
fireEvent.click(importButton)
await waitFor(() => {
expect(mockOnImport).toHaveBeenCalled()
})
}, { timeout: 1000 })
})
it('should show warning notification on import with warnings', async () => {
@@ -647,6 +648,8 @@ describe('UpdateDSLModal', () => {
})
it('should show error modal when import status is PENDING', async () => {
vi.useFakeTimers({ shouldAdvanceTime: true })
mockImportDSL.mockResolvedValue({
id: 'import-id',
status: DSLImportStatus.PENDING,
@@ -659,20 +662,29 @@ describe('UpdateDSLModal', () => {
const fileInput = screen.getByTestId('file-input')
const file = new File(['test content'], 'test.pipeline', { type: 'text/yaml' })
fireEvent.change(fileInput, { target: { files: [file] } })
await waitFor(() => {
const importButton = screen.getByText('common.overwriteAndImport')
expect(importButton).not.toBeDisabled()
await act(async () => {
fireEvent.change(fileInput, { target: { files: [file] } })
// Flush microtasks scheduled by the FileReader mock (which uses queueMicrotask)
await new Promise<void>(resolve => queueMicrotask(resolve))
})
const importButton = screen.getByText('common.overwriteAndImport')
fireEvent.click(importButton)
expect(importButton).not.toBeDisabled()
await act(async () => {
fireEvent.click(importButton)
// Flush the promise resolution from mockImportDSL
await Promise.resolve()
// Advance past the 300ms setTimeout in the component
await vi.advanceTimersByTimeAsync(350)
})
// Wait for the error modal to be shown after setTimeout
await waitFor(() => {
expect(screen.getByText('newApp.appCreateDSLErrorTitle')).toBeInTheDocument()
}, { timeout: 500 })
})
vi.useRealTimers()
})
it('should show version info in error modal', async () => {

View File

@@ -61,6 +61,12 @@ vi.mock('@/service/use-pipeline', () => ({
}),
}))
// Mock download utility
const mockDownloadBlob = vi.fn()
vi.mock('@/utils/download', () => ({
downloadBlob: (...args: unknown[]) => mockDownloadBlob(...args),
}))
// Mock workflow service
const mockFetchWorkflowDraft = vi.fn()
vi.mock('@/service/workflow', () => ({
@@ -77,33 +83,9 @@ vi.mock('@/app/components/workflow/constants', () => ({
// ============================================================================
describe('useDSL', () => {
let mockLink: { href: string, download: string, click: ReturnType<typeof vi.fn> }
let originalCreateElement: typeof document.createElement
let mockCreateObjectURL: ReturnType<typeof vi.spyOn>
let mockRevokeObjectURL: ReturnType<typeof vi.spyOn>
beforeEach(() => {
vi.clearAllMocks()
// Create a proper mock link element
mockLink = {
href: '',
download: '',
click: vi.fn(),
}
// Save original and mock selectively - only intercept 'a' elements
originalCreateElement = document.createElement.bind(document)
document.createElement = vi.fn((tagName: string) => {
if (tagName === 'a') {
return mockLink as unknown as HTMLElement
}
return originalCreateElement(tagName)
}) as typeof document.createElement
mockCreateObjectURL = vi.spyOn(URL, 'createObjectURL').mockReturnValue('blob:test-url')
mockRevokeObjectURL = vi.spyOn(URL, 'revokeObjectURL').mockImplementation(() => {})
// Default store state
mockWorkflowStoreGetState.mockReturnValue({
pipelineId: 'test-pipeline-id',
@@ -118,9 +100,6 @@ describe('useDSL', () => {
})
afterEach(() => {
document.createElement = originalCreateElement
mockCreateObjectURL.mockRestore()
mockRevokeObjectURL.mockRestore()
vi.clearAllMocks()
})
@@ -187,9 +166,7 @@ describe('useDSL', () => {
await result.current.handleExportDSL()
})
expect(document.createElement).toHaveBeenCalledWith('a')
expect(mockCreateObjectURL).toHaveBeenCalled()
expect(mockRevokeObjectURL).toHaveBeenCalledWith('blob:test-url')
expect(mockDownloadBlob).toHaveBeenCalled()
})
it('should use correct file extension for download', async () => {
@@ -199,17 +176,25 @@ describe('useDSL', () => {
await result.current.handleExportDSL()
})
expect(mockLink.download).toBe('Test Knowledge Base.pipeline')
expect(mockDownloadBlob).toHaveBeenCalledWith(
expect.objectContaining({
fileName: 'Test Knowledge Base.pipeline',
}),
)
})
-  it('should trigger download click', async () => {
+  it('should pass blob data to downloadBlob', async () => {
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.handleExportDSL()
})
-    expect(mockLink.click).toHaveBeenCalled()
+    expect(mockDownloadBlob).toHaveBeenCalledWith(
+      expect.objectContaining({
+        data: expect.any(Blob),
+      }),
+    )
})
it('should show error notification on export failure', async () => {

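The suite now asserts against a single downloadBlob call instead of DOM plumbing. The real '@/utils/download' implementation is not part of this compare; a hypothetical sketch consistent with the mocked call shape (data plus fileName) might look like:

// Hypothetical helper; the actual '@/utils/download' module is not shown here.
export const downloadBlob = ({ data, fileName }: { data: Blob, fileName: string }) => {
  const url = URL.createObjectURL(data)  // temporary object URL for the blob
  const link = document.createElement('a')
  link.href = url
  link.download = fileName               // e.g. 'Test Knowledge Base.pipeline'
  link.click()
  URL.revokeObjectURL(url)               // release the object URL after the click
}

Centralizing the anchor-element dance behind one utility is what lets the tests drop the createElement and createObjectURL spies entirely.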
View File

@@ -172,6 +172,9 @@ describe('EditCustomCollectionModal', () => {
expect(parseParamsSchemaMock).toHaveBeenCalledWith('{}')
})
+  // Flush pending state updates from parseParamsSchema promise resolution
+  await act(async () => {})
await act(async () => {
fireEvent.click(screen.getByText('common.operation.save'))
})
@@ -184,6 +187,10 @@ describe('EditCustomCollectionModal', () => {
credentials: {
auth_type: 'none',
},
+      icon: {
+        content: '🕵️',
+        background: '#FEF7C3',
+      },
labels: [],
}))
expect(toastNotifySpy).not.toHaveBeenCalled()

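The empty act() added here is the standard trick for draining React state updates queued by an already-resolved promise before firing the next event; in isolation:

// A resolved promise's .then callbacks (and the setState calls inside them)
// run as microtasks; an empty async act() lets React process them all.
await act(async () => {})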
View File

@@ -1,705 +0,0 @@
/**
* Test Suite for useNodesSyncDraft Hook
*
* PURPOSE:
* This hook handles syncing workflow draft to the server. The key fix being tested
* is the error handling behavior when `draft_workflow_not_sync` error occurs.
*
* MULTI-TAB PROBLEM SCENARIO:
* 1. User opens the same workflow in Tab A and Tab B (both have hash: v1)
* 2. Tab A saves successfully, server returns new hash: v2
* 3. Tab B tries to save with old hash: v1, server returns 400 error with code
* 'draft_workflow_not_sync'
* 4. BEFORE FIX: handleRefreshWorkflowDraft() was called without args, which fetched
* draft AND overwrote canvas - user lost unsaved changes in Tab B
* 5. AFTER FIX: handleRefreshWorkflowDraft(true) is called, which fetches draft but
* only updates hash (notUpdateCanvas=true), preserving user's canvas changes
*
* TESTING STRATEGY:
* We don't simulate actual tab switching UI behavior. Instead, we mock the API to
* return `draft_workflow_not_sync` error and verify:
* - The hook calls handleRefreshWorkflowDraft(true) - not handleRefreshWorkflowDraft()
* - This ensures canvas data is preserved while hash is updated for retry
*
* This is behavior-driven testing - we verify "what the code does when receiving
* specific API errors" rather than simulating complete user interaction flows.
* True multi-tab integration testing would require E2E frameworks like Playwright.
*/
import { act, renderHook, waitFor } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { useNodesSyncDraft } from './use-nodes-sync-draft'
// Mock reactflow store
const mockGetNodes = vi.fn()
type MockEdge = {
id: string
source: string
target: string
data: Record<string, unknown>
}
const mockStoreState: {
getNodes: ReturnType<typeof vi.fn>
edges: MockEdge[]
transform: number[]
} = {
getNodes: mockGetNodes,
edges: [],
transform: [0, 0, 1],
}
vi.mock('reactflow', () => ({
useStoreApi: () => ({
getState: () => mockStoreState,
}),
}))
// Mock features store
const mockFeaturesState = {
features: {
opening: { enabled: false, opening_statement: '', suggested_questions: [] },
suggested: {},
text2speech: {},
speech2text: {},
citation: {},
moderation: {},
file: {},
},
}
vi.mock('@/app/components/base/features/hooks', () => ({
useFeaturesStore: () => ({
getState: () => mockFeaturesState,
}),
}))
// Mock workflow service
const mockSyncWorkflowDraft = vi.fn()
vi.mock('@/service/workflow', () => ({
syncWorkflowDraft: (...args: unknown[]) => mockSyncWorkflowDraft(...args),
}))
// Mock useNodesReadOnly
const mockGetNodesReadOnly = vi.fn()
vi.mock('@/app/components/workflow/hooks/use-workflow', () => ({
useNodesReadOnly: () => ({
getNodesReadOnly: mockGetNodesReadOnly,
}),
}))
// Mock useSerialAsyncCallback - pass through the callback
vi.mock('@/app/components/workflow/hooks/use-serial-async-callback', () => ({
useSerialAsyncCallback: (callback: (...args: unknown[]) => unknown) => callback,
}))
// Mock workflow store
const mockSetSyncWorkflowDraftHash = vi.fn()
const mockSetDraftUpdatedAt = vi.fn()
const createMockWorkflowStoreState = (overrides = {}) => ({
appId: 'test-app-id',
conversationVariables: [],
environmentVariables: [],
syncWorkflowDraftHash: 'current-hash-123',
isWorkflowDataLoaded: true,
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
setDraftUpdatedAt: mockSetDraftUpdatedAt,
...overrides,
})
const mockWorkflowStoreGetState = vi.fn()
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: mockWorkflowStoreGetState,
}),
}))
// Mock useWorkflowRefreshDraft (THE KEY DEPENDENCY FOR THIS TEST)
const mockHandleRefreshWorkflowDraft = vi.fn()
vi.mock('.', () => ({
useWorkflowRefreshDraft: () => ({
handleRefreshWorkflowDraft: mockHandleRefreshWorkflowDraft,
}),
}))
// Mock API_PREFIX
vi.mock('@/config', () => ({
API_PREFIX: '/api',
}))
// Create a mock error response that mimics the actual API error
const createMockErrorResponse = (code: string) => {
const errorBody = { code, message: 'Draft not in sync' }
let bodyUsed = false
return {
json: vi.fn().mockImplementation(() => {
bodyUsed = true
return Promise.resolve(errorBody)
}),
get bodyUsed() {
return bodyUsed
},
}
}
describe('useNodesSyncDraft', () => {
beforeEach(() => {
vi.clearAllMocks()
mockGetNodesReadOnly.mockReturnValue(false)
mockGetNodes.mockReturnValue([
{ id: 'node-1', type: 'start', data: { type: 'start' } },
{ id: 'node-2', type: 'llm', data: { type: 'llm' } },
])
mockStoreState.edges = [
{ id: 'edge-1', source: 'node-1', target: 'node-2', data: {} },
]
mockWorkflowStoreGetState.mockReturnValue(createMockWorkflowStoreState())
mockSyncWorkflowDraft.mockResolvedValue({
hash: 'new-hash-456',
updated_at: Date.now(),
})
})
afterEach(() => {
vi.resetAllMocks()
})
describe('doSyncWorkflowDraft function', () => {
it('should return doSyncWorkflowDraft function', () => {
const { result } = renderHook(() => useNodesSyncDraft())
expect(result.current.doSyncWorkflowDraft).toBeDefined()
expect(typeof result.current.doSyncWorkflowDraft).toBe('function')
})
it('should return syncWorkflowDraftWhenPageClose function', () => {
const { result } = renderHook(() => useNodesSyncDraft())
expect(result.current.syncWorkflowDraftWhenPageClose).toBeDefined()
expect(typeof result.current.syncWorkflowDraftWhenPageClose).toBe('function')
})
})
describe('successful sync', () => {
it('should call syncWorkflowDraft service on successful sync', async () => {
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith({
url: '/apps/test-app-id/workflows/draft',
params: expect.objectContaining({
hash: 'current-hash-123',
graph: expect.objectContaining({
nodes: expect.any(Array),
edges: expect.any(Array),
viewport: expect.any(Object),
}),
}),
})
})
it('should update syncWorkflowDraftHash on success', async () => {
mockSyncWorkflowDraft.mockResolvedValue({
hash: 'new-hash-789',
updated_at: 1234567890,
})
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-789')
})
it('should update draftUpdatedAt on success', async () => {
const updatedAt = 1234567890
mockSyncWorkflowDraft.mockResolvedValue({
hash: 'new-hash',
updated_at: updatedAt,
})
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSetDraftUpdatedAt).toHaveBeenCalledWith(updatedAt)
})
it('should call onSuccess callback on success', async () => {
const onSuccess = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSuccess })
})
expect(onSuccess).toHaveBeenCalled()
})
it('should call onSettled callback after success', async () => {
const onSettled = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSettled })
})
expect(onSettled).toHaveBeenCalled()
})
})
describe('sync error handling - draft_workflow_not_sync (THE KEY FIX)', () => {
/**
* This is THE KEY TEST for the bug fix.
*
* SCENARIO: Multi-tab editing
* 1. User opens workflow in Tab A and Tab B
* 2. Tab A saves draft successfully, gets new hash
* 3. Tab B tries to save with old hash
* 4. Server returns 400 with code 'draft_workflow_not_sync'
*
* BEFORE FIX:
* - handleRefreshWorkflowDraft() was called without arguments
* - This would fetch draft AND overwrite the canvas
* - User loses their unsaved changes in Tab B
*
* AFTER FIX:
* - handleRefreshWorkflowDraft(true) is called
* - This fetches draft but DOES NOT overwrite canvas
* - Only hash is updated for the next sync attempt
* - User's unsaved changes are preserved
*/
it('should call handleRefreshWorkflowDraft with notUpdateCanvas=true when draft_workflow_not_sync error occurs', async () => {
const mockError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// THE KEY ASSERTION: handleRefreshWorkflowDraft must be called with true
await waitFor(() => {
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledWith(true)
})
})
it('should NOT call handleRefreshWorkflowDraft when notRefreshWhenSyncError is true', async () => {
const mockError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
// First parameter is notRefreshWhenSyncError
await result.current.doSyncWorkflowDraft(true)
})
// Wait a bit for async operations
await new Promise(resolve => setTimeout(resolve, 100))
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
})
it('should call onError callback when draft_workflow_not_sync error occurs', async () => {
const mockError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const onError = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onError })
})
expect(onError).toHaveBeenCalled()
})
it('should call onSettled callback after error', async () => {
const mockError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const onSettled = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSettled })
})
expect(onSettled).toHaveBeenCalled()
})
})
describe('other error handling', () => {
it('should NOT call handleRefreshWorkflowDraft for non-draft_workflow_not_sync errors', async () => {
const mockError = createMockErrorResponse('some_other_error')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// Wait a bit for async operations
await new Promise(resolve => setTimeout(resolve, 100))
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
})
it('should handle error without json method', async () => {
const mockError = new Error('Network error')
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const { result } = renderHook(() => useNodesSyncDraft())
const onError = vi.fn()
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onError })
})
expect(onError).toHaveBeenCalled()
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
})
it('should handle error with bodyUsed already true', async () => {
const mockError = {
json: vi.fn(),
bodyUsed: true,
}
mockSyncWorkflowDraft.mockRejectedValue(mockError)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// Should not call json() when bodyUsed is true
expect(mockError.json).not.toHaveBeenCalled()
expect(mockHandleRefreshWorkflowDraft).not.toHaveBeenCalled()
})
})
describe('read-only mode', () => {
it('should not sync when nodes are read-only', async () => {
mockGetNodesReadOnly.mockReturnValue(true)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
})
it('should not sync on page close when nodes are read-only', () => {
mockGetNodesReadOnly.mockReturnValue(true)
// Mock sendBeacon
const mockSendBeacon = vi.fn()
Object.defineProperty(navigator, 'sendBeacon', {
value: mockSendBeacon,
writable: true,
})
const { result } = renderHook(() => useNodesSyncDraft())
act(() => {
result.current.syncWorkflowDraftWhenPageClose()
})
expect(mockSendBeacon).not.toHaveBeenCalled()
})
})
describe('workflow data not loaded', () => {
it('should not sync when workflow data is not loaded', async () => {
mockWorkflowStoreGetState.mockReturnValue(
createMockWorkflowStoreState({ isWorkflowDataLoaded: false }),
)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
})
})
describe('no appId', () => {
it('should not sync when appId is not set', async () => {
mockWorkflowStoreGetState.mockReturnValue(
createMockWorkflowStoreState({ appId: null }),
)
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).not.toHaveBeenCalled()
})
})
describe('node filtering', () => {
it('should filter out temp nodes', async () => {
mockGetNodes.mockReturnValue([
{ id: 'node-1', type: 'start', data: { type: 'start' } },
{ id: 'node-temp', type: 'custom', data: { type: 'custom', _isTempNode: true } },
{ id: 'node-2', type: 'llm', data: { type: 'llm' } },
])
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
expect.objectContaining({
params: expect.objectContaining({
graph: expect.objectContaining({
nodes: expect.not.arrayContaining([
expect.objectContaining({ id: 'node-temp' }),
]),
}),
}),
}),
)
})
it('should remove internal underscore properties from nodes', async () => {
mockGetNodes.mockReturnValue([
{
id: 'node-1',
type: 'start',
data: {
type: 'start',
_internalProp: 'should be removed',
_anotherInternal: true,
publicProp: 'should remain',
},
},
])
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
const sentNode = callArgs.params.graph.nodes[0]
expect(sentNode.data).not.toHaveProperty('_internalProp')
expect(sentNode.data).not.toHaveProperty('_anotherInternal')
expect(sentNode.data).toHaveProperty('publicProp', 'should remain')
})
})
describe('edge filtering', () => {
it('should filter out temp edges', async () => {
mockStoreState.edges = [
{ id: 'edge-1', source: 'node-1', target: 'node-2', data: {} },
{ id: 'edge-temp', source: 'node-1', target: 'node-3', data: { _isTemp: true } },
]
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
const sentEdges = callArgs.params.graph.edges
expect(sentEdges).toHaveLength(1)
expect(sentEdges[0].id).toBe('edge-1')
})
it('should remove internal underscore properties from edges', async () => {
mockStoreState.edges = [
{
id: 'edge-1',
source: 'node-1',
target: 'node-2',
data: {
_internalEdgeProp: 'should be removed',
publicEdgeProp: 'should remain',
},
},
]
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
const callArgs = mockSyncWorkflowDraft.mock.calls[0][0]
const sentEdge = callArgs.params.graph.edges[0]
expect(sentEdge.data).not.toHaveProperty('_internalEdgeProp')
expect(sentEdge.data).toHaveProperty('publicEdgeProp', 'should remain')
})
})
describe('viewport handling', () => {
it('should send current viewport from transform', async () => {
mockStoreState.transform = [100, 200, 1.5]
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
expect.objectContaining({
params: expect.objectContaining({
graph: expect.objectContaining({
viewport: { x: 100, y: 200, zoom: 1.5 },
}),
}),
}),
)
})
})
describe('multi-tab concurrent editing scenario (END-TO-END TEST)', () => {
/**
* Simulates the complete multi-tab scenario to verify the fix works correctly.
*
* Scenario:
* 1. Tab A and Tab B both have the workflow open with hash 'hash-v1'
* 2. Tab A saves successfully, server returns 'hash-v2'
* 3. Tab B tries to save with 'hash-v1', gets 'draft_workflow_not_sync' error
* 4. Tab B should only update hash to 'hash-v2', not overwrite canvas
* 5. Tab B can now retry save with correct hash
*/
it('should preserve canvas data during hash conflict resolution', async () => {
// Initial state: both tabs have hash-v1
mockWorkflowStoreGetState.mockReturnValue(
createMockWorkflowStoreState({ syncWorkflowDraftHash: 'hash-v1' }),
)
// Tab B tries to save with old hash, server returns error
const syncError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(syncError)
const { result } = renderHook(() => useNodesSyncDraft())
// Tab B attempts to sync
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// Verify the sync was attempted with old hash
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith(
expect.objectContaining({
params: expect.objectContaining({
hash: 'hash-v1',
}),
}),
)
// Verify handleRefreshWorkflowDraft was called with true (not overwrite canvas)
await waitFor(() => {
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledWith(true)
})
      // The key assertions: the refresh was called exactly once, with true as its only argument
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(1)
expect(mockHandleRefreshWorkflowDraft.mock.calls[0]).toEqual([true])
})
it('should handle multiple consecutive sync failures gracefully', async () => {
// Create fresh error for each call to avoid bodyUsed issue
mockSyncWorkflowDraft
.mockRejectedValueOnce(createMockErrorResponse('draft_workflow_not_sync'))
.mockRejectedValueOnce(createMockErrorResponse('draft_workflow_not_sync'))
const { result } = renderHook(() => useNodesSyncDraft())
// First sync attempt
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// Wait for first refresh call
await waitFor(() => {
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(1)
})
// Second sync attempt
await act(async () => {
await result.current.doSyncWorkflowDraft()
})
// Both should call handleRefreshWorkflowDraft with true
await waitFor(() => {
expect(mockHandleRefreshWorkflowDraft).toHaveBeenCalledTimes(2)
})
mockHandleRefreshWorkflowDraft.mock.calls.forEach((call) => {
expect(call).toEqual([true])
})
})
})
describe('callbacks behavior', () => {
it('should not call onSuccess when sync fails', async () => {
const syncError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(syncError)
const onSuccess = vi.fn()
const onError = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSuccess, onError })
})
expect(onSuccess).not.toHaveBeenCalled()
expect(onError).toHaveBeenCalled()
})
it('should always call onSettled regardless of success or failure', async () => {
const onSettled = vi.fn()
const { result } = renderHook(() => useNodesSyncDraft())
// Test success case
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSettled })
})
expect(onSettled).toHaveBeenCalledTimes(1)
// Reset
onSettled.mockClear()
// Test failure case
const syncError = createMockErrorResponse('draft_workflow_not_sync')
mockSyncWorkflowDraft.mockRejectedValue(syncError)
await act(async () => {
await result.current.doSyncWorkflowDraft(false, { onSettled })
})
expect(onSettled).toHaveBeenCalledTimes(1)
})
})
})

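The deleted suite also covered a page-close path built on navigator.sendBeacon, whose implementation is not part of this compare. A hypothetical sketch of what those tests imply, with buildDraftPayload, appId, and getNodesReadOnly standing in for the hook's real internals:

import { API_PREFIX } from '@/config'

// Stand-ins for the hook's internals (not shown in this compare).
declare const getNodesReadOnly: () => boolean
declare const buildDraftPayload: () => unknown
declare const appId: string

const syncWorkflowDraftWhenPageClose = () => {
  // Matches the spec: read-only mode must not send a beacon.
  if (getNodesReadOnly())
    return
  // sendBeacon survives page unload, where a normal fetch may be cancelled.
  const body = new Blob([JSON.stringify(buildDraftPayload())], { type: 'application/json' })
  navigator.sendBeacon(`${API_PREFIX}/apps/${appId}/workflows/draft`, body)
}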
View File

@@ -115,7 +115,7 @@ export const useNodesSyncDraft = () => {
if (error && error.json && !error.bodyUsed) {
error.json().then((err: any) => {
if (err.code === 'draft_workflow_not_sync' && !notRefreshWhenSyncError)
-              handleRefreshWorkflowDraft(true)
+              handleRefreshWorkflowDraft()
})
}
callback?.onError?.()

View File

@@ -1,556 +0,0 @@
/**
* Test Suite for useWorkflowRefreshDraft Hook
*
* PURPOSE:
* This hook is responsible for refreshing workflow draft data from the server.
* The key fix being tested is the `notUpdateCanvas` parameter behavior.
*
* MULTI-TAB PROBLEM SCENARIO:
* 1. User opens the same workflow in Tab A and Tab B (both have hash: v1)
* 2. Tab A saves successfully, server returns new hash: v2
* 3. Tab B tries to save with old hash: v1, server returns 400 error (draft_workflow_not_sync)
* 4. BEFORE FIX: handleRefreshWorkflowDraft() was called without args, which fetched
* draft AND overwrote canvas - user lost unsaved changes in Tab B
* 5. AFTER FIX: handleRefreshWorkflowDraft(true) is called, which fetches draft but
* only updates hash, preserving user's canvas changes
*
* TESTING STRATEGY:
* We don't simulate actual tab switching UI behavior. Instead, we test the hook's
* response to specific inputs:
* - When notUpdateCanvas=true: should NOT call handleUpdateWorkflowCanvas
* - When notUpdateCanvas=false/undefined: should call handleUpdateWorkflowCanvas
*
* This is behavior-driven testing - we verify "what the code does when given specific
* inputs" rather than simulating complete user interaction flows.
*/
import { act, renderHook, waitFor } from '@testing-library/react'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { useWorkflowRefreshDraft } from './use-workflow-refresh-draft'
// Mock the workflow service
const mockFetchWorkflowDraft = vi.fn()
vi.mock('@/service/workflow', () => ({
fetchWorkflowDraft: (...args: unknown[]) => mockFetchWorkflowDraft(...args),
}))
// Mock the workflow update hook
const mockHandleUpdateWorkflowCanvas = vi.fn()
vi.mock('@/app/components/workflow/hooks', () => ({
useWorkflowUpdate: () => ({
handleUpdateWorkflowCanvas: mockHandleUpdateWorkflowCanvas,
}),
}))
// Mock store state
const mockSetSyncWorkflowDraftHash = vi.fn()
const mockSetIsSyncingWorkflowDraft = vi.fn()
const mockSetEnvironmentVariables = vi.fn()
const mockSetEnvSecrets = vi.fn()
const mockSetConversationVariables = vi.fn()
const mockSetIsWorkflowDataLoaded = vi.fn()
const mockCancelDebouncedSync = vi.fn()
const createMockStoreState = (overrides = {}) => ({
appId: 'test-app-id',
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
setIsSyncingWorkflowDraft: mockSetIsSyncingWorkflowDraft,
setEnvironmentVariables: mockSetEnvironmentVariables,
setEnvSecrets: mockSetEnvSecrets,
setConversationVariables: mockSetConversationVariables,
setIsWorkflowDataLoaded: mockSetIsWorkflowDataLoaded,
isWorkflowDataLoaded: true,
debouncedSyncWorkflowDraft: {
cancel: mockCancelDebouncedSync,
},
...overrides,
})
const mockWorkflowStoreGetState = vi.fn()
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: mockWorkflowStoreGetState,
}),
}))
// Default mock response from fetchWorkflowDraft
const createMockDraftResponse = (overrides = {}) => ({
hash: 'new-hash-12345',
graph: {
nodes: [{ id: 'node-1', type: 'start', data: {} }],
edges: [{ id: 'edge-1', source: 'node-1', target: 'node-2' }],
viewport: { x: 100, y: 200, zoom: 1.5 },
},
environment_variables: [
{ id: 'env-1', name: 'API_KEY', value: 'secret-key', value_type: 'secret' },
{ id: 'env-2', name: 'BASE_URL', value: 'https://api.example.com', value_type: 'string' },
],
conversation_variables: [
{ id: 'conv-1', name: 'user_input', value: 'test' },
],
...overrides,
})
describe('useWorkflowRefreshDraft', () => {
beforeEach(() => {
vi.clearAllMocks()
mockWorkflowStoreGetState.mockReturnValue(createMockStoreState())
mockFetchWorkflowDraft.mockResolvedValue(createMockDraftResponse())
})
afterEach(() => {
vi.resetAllMocks()
})
describe('handleRefreshWorkflowDraft function', () => {
it('should return handleRefreshWorkflowDraft function', () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
expect(result.current.handleRefreshWorkflowDraft).toBeDefined()
expect(typeof result.current.handleRefreshWorkflowDraft).toBe('function')
})
})
describe('notUpdateCanvas parameter behavior (THE KEY FIX)', () => {
it('should NOT call handleUpdateWorkflowCanvas when notUpdateCanvas is true', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockFetchWorkflowDraft).toHaveBeenCalledWith('/apps/test-app-id/workflows/draft')
})
await waitFor(() => {
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
})
// THE KEY ASSERTION: Canvas should NOT be updated when notUpdateCanvas is true
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
})
it('should call handleUpdateWorkflowCanvas when notUpdateCanvas is false', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(false)
})
await waitFor(() => {
expect(mockFetchWorkflowDraft).toHaveBeenCalledWith('/apps/test-app-id/workflows/draft')
})
await waitFor(() => {
// Canvas SHOULD be updated when notUpdateCanvas is false
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [{ id: 'node-1', type: 'start', data: {} }],
edges: [{ id: 'edge-1', source: 'node-1', target: 'node-2' }],
viewport: { x: 100, y: 200, zoom: 1.5 },
})
})
await waitFor(() => {
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
})
})
it('should call handleUpdateWorkflowCanvas when notUpdateCanvas is undefined (default)', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
})
await waitFor(() => {
// Canvas SHOULD be updated when notUpdateCanvas is undefined
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalled()
})
})
it('should still update hash even when notUpdateCanvas is true', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
})
// Verify canvas was NOT updated
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
})
it('should still update environment variables when notUpdateCanvas is true', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([
{ id: 'env-1', name: 'API_KEY', value: '[__HIDDEN__]', value_type: 'secret' },
{ id: 'env-2', name: 'BASE_URL', value: 'https://api.example.com', value_type: 'string' },
])
})
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
})
it('should still update env secrets when notUpdateCanvas is true', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetEnvSecrets).toHaveBeenCalledWith({
'env-1': 'secret-key',
})
})
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
})
it('should still update conversation variables when notUpdateCanvas is true', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetConversationVariables).toHaveBeenCalledWith([
{ id: 'conv-1', name: 'user_input', value: 'test' },
])
})
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
})
})
describe('syncing state management', () => {
it('should set isSyncingWorkflowDraft to true before fetch', () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(true)
})
it('should set isSyncingWorkflowDraft to false after fetch completes', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(false)
})
})
it('should set isSyncingWorkflowDraft to false even when fetch fails', async () => {
mockFetchWorkflowDraft.mockRejectedValue(new Error('Network error'))
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(false)
})
})
})
describe('isWorkflowDataLoaded flag management', () => {
it('should set isWorkflowDataLoaded to false before fetch when it was true', () => {
mockWorkflowStoreGetState.mockReturnValue(
createMockStoreState({ isWorkflowDataLoaded: true }),
)
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
expect(mockSetIsWorkflowDataLoaded).toHaveBeenCalledWith(false)
})
it('should set isWorkflowDataLoaded to true after fetch succeeds', async () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockSetIsWorkflowDataLoaded).toHaveBeenCalledWith(true)
})
})
it('should restore isWorkflowDataLoaded when fetch fails and it was previously loaded', async () => {
mockWorkflowStoreGetState.mockReturnValue(
createMockStoreState({ isWorkflowDataLoaded: true }),
)
mockFetchWorkflowDraft.mockRejectedValue(new Error('Network error'))
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
// Should restore to true because wasLoaded was true
expect(mockSetIsWorkflowDataLoaded).toHaveBeenLastCalledWith(true)
})
})
})
describe('debounced sync cancellation', () => {
it('should cancel debounced sync before fetching draft', () => {
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
expect(mockCancelDebouncedSync).toHaveBeenCalled()
})
it('should handle case when debouncedSyncWorkflowDraft has no cancel method', () => {
mockWorkflowStoreGetState.mockReturnValue(
createMockStoreState({ debouncedSyncWorkflowDraft: {} }),
)
const { result } = renderHook(() => useWorkflowRefreshDraft())
// Should not throw
expect(() => {
act(() => {
result.current.handleRefreshWorkflowDraft()
})
}).not.toThrow()
})
})
describe('edge cases', () => {
it('should handle empty graph in response', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-empty',
graph: null,
environment_variables: [],
conversation_variables: [],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(false)
})
await waitFor(() => {
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [],
edges: [],
viewport: { x: 0, y: 0, zoom: 1 },
})
})
})
it('should handle missing viewport in response', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-no-viewport',
graph: {
nodes: [{ id: 'node-1' }],
edges: [],
viewport: null,
},
environment_variables: [],
conversation_variables: [],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(false)
})
await waitFor(() => {
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [{ id: 'node-1' }],
edges: [],
viewport: { x: 0, y: 0, zoom: 1 },
})
})
})
it('should handle missing environment_variables in response', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-no-env',
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
environment_variables: undefined,
conversation_variables: [],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([])
expect(mockSetEnvSecrets).toHaveBeenCalledWith({})
})
})
it('should handle missing conversation_variables in response', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-no-conv',
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
environment_variables: [],
conversation_variables: undefined,
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetConversationVariables).toHaveBeenCalledWith([])
})
})
it('should filter only secret type for envSecrets', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-mixed-env',
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
environment_variables: [
{ id: 'env-1', name: 'SECRET_KEY', value: 'secret-value', value_type: 'secret' },
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
{ id: 'env-3', name: 'ANOTHER_SECRET', value: 'another-secret', value_type: 'secret' },
],
conversation_variables: [],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetEnvSecrets).toHaveBeenCalledWith({
'env-1': 'secret-value',
'env-3': 'another-secret',
})
})
})
it('should hide secret values in environment variables', async () => {
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'hash-secrets',
graph: { nodes: [], edges: [], viewport: { x: 0, y: 0, zoom: 1 } },
environment_variables: [
{ id: 'env-1', name: 'SECRET_KEY', value: 'super-secret', value_type: 'secret' },
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
],
conversation_variables: [],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([
{ id: 'env-1', name: 'SECRET_KEY', value: '[__HIDDEN__]', value_type: 'secret' },
{ id: 'env-2', name: 'PUBLIC_URL', value: 'https://example.com', value_type: 'string' },
])
})
})
})
describe('multi-tab scenario simulation (THE BUG FIX VERIFICATION)', () => {
/**
* This test verifies the fix for the multi-tab scenario:
* 1. User opens workflow in Tab A and Tab B
* 2. Tab A saves draft successfully
* 3. Tab B tries to save but gets 'draft_workflow_not_sync' error (hash mismatch)
* 4. BEFORE FIX: Tab B would fetch draft and overwrite canvas with old data
* 5. AFTER FIX: Tab B only updates hash, preserving user's canvas changes
*/
it('should only update hash when called with notUpdateCanvas=true (simulating sync error recovery)', async () => {
const mockResponse = createMockDraftResponse()
mockFetchWorkflowDraft.mockResolvedValue(mockResponse)
const { result } = renderHook(() => useWorkflowRefreshDraft())
// Simulate the sync error recovery scenario where notUpdateCanvas is true
act(() => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
})
await waitFor(() => {
// Hash should be updated for next sync attempt
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
})
// Canvas should NOT be updated - user's changes are preserved
expect(mockHandleUpdateWorkflowCanvas).not.toHaveBeenCalled()
// Other states should still be updated
expect(mockSetEnvironmentVariables).toHaveBeenCalled()
expect(mockSetConversationVariables).toHaveBeenCalled()
})
it('should update canvas when called with notUpdateCanvas=false (normal refresh)', async () => {
const mockResponse = createMockDraftResponse()
mockFetchWorkflowDraft.mockResolvedValue(mockResponse)
const { result } = renderHook(() => useWorkflowRefreshDraft())
// Simulate normal refresh scenario
act(() => {
result.current.handleRefreshWorkflowDraft(false)
})
await waitFor(() => {
expect(mockFetchWorkflowDraft).toHaveBeenCalled()
})
await waitFor(() => {
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new-hash-12345')
})
// Canvas SHOULD be updated in normal refresh
await waitFor(() => {
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalled()
})
})
})
})

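Two expectations recur throughout this deleted suite: raw secret values are mirrored into an id-to-value map, and the visible variable list carries a '[__HIDDEN__]' placeholder instead. A condensed sketch of that transformation (type and function names are illustrative):

type EnvVar = { id: string, name: string, value: string, value_type: 'secret' | 'string' }

// Illustrative split: raw secret values go to envSecrets, while the variable
// list shown to the UI is masked with the '[__HIDDEN__]' placeholder.
const splitEnvironmentVariables = (envs: EnvVar[] = []) => {
  const envSecrets = envs
    .filter(env => env.value_type === 'secret')
    .reduce<Record<string, string>>((acc, env) => {
      acc[env.id] = env.value
      return acc
    }, {})
  const environmentVariables = envs.map(env =>
    env.value_type === 'secret' ? { ...env, value: '[__HIDDEN__]' } : env)
  return { envSecrets, environmentVariables }
}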
View File

@@ -8,7 +8,7 @@ export const useWorkflowRefreshDraft = () => {
const workflowStore = useWorkflowStore()
const { handleUpdateWorkflowCanvas } = useWorkflowUpdate()
-  const handleRefreshWorkflowDraft = useCallback((notUpdateCanvas?: boolean) => {
+  const handleRefreshWorkflowDraft = useCallback(() => {
const {
appId,
setSyncWorkflowDraftHash,
@@ -31,14 +31,12 @@ export const useWorkflowRefreshDraft = () => {
fetchWorkflowDraft(`/apps/${appId}/workflows/draft`)
.then((response) => {
// Ensure we have a valid workflow structure with viewport
-        if (!notUpdateCanvas) {
-          const workflowData: WorkflowDataUpdater = {
-            nodes: response.graph?.nodes || [],
-            edges: response.graph?.edges || [],
-            viewport: response.graph?.viewport || { x: 0, y: 0, zoom: 1 },
-          }
-          handleUpdateWorkflowCanvas(workflowData)
-        }
+        const workflowData: WorkflowDataUpdater = {
+          nodes: response.graph?.nodes || [],
+          edges: response.graph?.edges || [],
+          viewport: response.graph?.viewport || { x: 0, y: 0, zoom: 1 },
+        }
+        handleUpdateWorkflowCanvas(workflowData)
setSyncWorkflowDraftHash(response.hash)
setEnvSecrets((response.environment_variables || []).filter(env => env.value_type === 'secret').reduce((acc, env) => {
acc[env.id] = env.value

View File

@@ -18,6 +18,7 @@ import {
Group,
} from '@/app/components/workflow/nodes/_base/components/layout'
import VarReferencePicker from '@/app/components/workflow/nodes/_base/components/variable/var-reference-picker'
+import { IS_CE_EDITION } from '@/config'
import Split from '../_base/components/split'
import ChunkStructure from './components/chunk-structure'
import EmbeddingModel from './components/embedding-model'
@@ -172,7 +173,7 @@ const Panel: FC<NodePanelProps<KnowledgeBaseNodeType>> = ({
{
data.indexing_technique === IndexMethodEnum.QUALIFIED
&& [ChunkStructureEnum.general, ChunkStructureEnum.parent_child].includes(data.chunk_structure)
-          && (
+          && IS_CE_EDITION && (
<>
<SummaryIndexSetting
summaryIndexSetting={data.summary_index_setting}

View File

@@ -1,7 +1,7 @@
{
"name": "dify-web",
"type": "module",
"version": "1.11.4",
"version": "1.12.1",
"private": true,
"packageManager": "pnpm@10.27.0+sha512.72d699da16b1179c14ba9e64dc71c9a40988cbdc65c264cb0e489db7de917f20dcf4d64d8723625f2969ba52d4b7e2a1170682d9ac2a5dcaeaab732b7e16f04a",
"imports": {
@@ -117,7 +117,6 @@
"ky": "1.12.0",
"lamejs": "1.2.1",
"lexical": "0.38.2",
"line-clamp": "1.0.0",
"mermaid": "11.11.0",
"mime": "4.1.0",
"mitt": "3.0.1",

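Dropping the line-clamp dependency points at native CSS clamping; a sketch of the standard replacement, assuming React inline styles (the helper name is illustrative):

import type { CSSProperties } from 'react'

// Native multi-line truncation: the -webkit-box pattern is the widely
// supported way to clamp text to a fixed number of lines in plain CSS.
const lineClamp = (lines: number): CSSProperties => ({
  display: '-webkit-box',
  WebkitLineClamp: lines,
  WebkitBoxOrient: 'vertical',
  overflow: 'hidden',
})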
web/pnpm-lock.yaml (generated)
View File

@@ -233,9 +233,6 @@ importers:
lexical:
specifier: 0.38.2
version: 0.38.2
-      line-clamp:
-        specifier: 1.0.0
-        version: 1.0.0
mermaid:
specifier: 11.11.0
version: 11.11.0
@@ -5403,9 +5400,6 @@ packages:
resolution: {integrity: sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==}
engines: {node: '>=14'}
-  line-clamp@1.0.0:
-    resolution: {integrity: sha512-dCDlvMj572RIRBQ3x9aIX0DTdt2St1bMdpi64jVTAi5vqBck7wf+J97//+J7+pS80rFJaYa8HiyXCTp0flpnBA==}
lines-and-columns@1.2.4:
resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
@@ -12913,8 +12907,6 @@ snapshots:
lilconfig@3.1.3: {}
-  line-clamp@1.0.0: {}
lines-and-columns@1.2.4: {}
lint-staged@15.5.2:

View File

@@ -0,0 +1,80 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const loadGetBaseURL = async (isClientValue: boolean) => {
vi.resetModules()
vi.doMock('@/utils/client', () => ({ isClient: isClientValue }))
const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
// eslint-disable-next-line next/no-assign-module-variable
const module = await import('./client')
warnSpy.mockClear()
return { getBaseURL: module.getBaseURL, warnSpy }
}
// Scenario: base URL selection and warnings.
describe('getBaseURL', () => {
beforeEach(() => {
vi.clearAllMocks()
})
afterEach(() => {
vi.restoreAllMocks()
})
// Scenario: client environment uses window origin.
it('should use window origin when running on the client', async () => {
// Arrange
const { origin } = window.location
const { getBaseURL, warnSpy } = await loadGetBaseURL(true)
// Act
const url = getBaseURL('/api')
// Assert
expect(url.href).toBe(`${origin}/api`)
expect(warnSpy).not.toHaveBeenCalled()
})
// Scenario: server environment falls back to localhost with warning.
it('should fall back to localhost and warn on the server', async () => {
// Arrange
const { getBaseURL, warnSpy } = await loadGetBaseURL(false)
// Act
const url = getBaseURL('/api')
// Assert
expect(url.href).toBe('http://localhost/api')
expect(warnSpy).toHaveBeenCalledTimes(1)
expect(warnSpy).toHaveBeenCalledWith('Using localhost as base URL in server environment, please configure accordingly.')
})
// Scenario: non-http protocols surface warnings.
it('should warn when protocol is not http or https', async () => {
// Arrange
const { getBaseURL, warnSpy } = await loadGetBaseURL(true)
// Act
const url = getBaseURL('localhost:5001/console/api')
// Assert
expect(url.protocol).toBe('localhost:')
expect(url.href).toBe('localhost:5001/console/api')
expect(warnSpy).toHaveBeenCalledTimes(1)
expect(warnSpy).toHaveBeenCalledWith(
'Unexpected protocol for API requests, expected http or https. Current protocol: localhost:. Please configure accordingly.',
)
})
// Scenario: absolute http URLs are preserved.
it('should keep absolute http URLs intact', async () => {
// Arrange
const { getBaseURL, warnSpy } = await loadGetBaseURL(true)
// Act
const url = getBaseURL('https://api.example.com/console/api')
// Assert
expect(url.href).toBe('https://api.example.com/console/api')
expect(warnSpy).not.toHaveBeenCalled()
})
})

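The loadGetBaseURL helper above shows the reset-then-reimport pattern for testing module-level branches; a generic version of the same idea:

import { vi } from 'vitest'

// Re-import a module after swapping one of its dependencies: resetModules
// clears the module cache, and doMock applies to imports made after the call.
const loadWithMock = async <T>(modulePath: string, depPath: string, dep: Record<string, unknown>) => {
  vi.resetModules()
  vi.doMock(depPath, () => dep)
  return await import(modulePath) as T
}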
View File

@@ -13,12 +13,38 @@ import {
consoleRouterContract,
marketplaceRouterContract,
} from '@/contract/router'
+import { isClient } from '@/utils/client'
import { request } from './base'
const getMarketplaceHeaders = () => new Headers({
'X-Dify-Version': !IS_MARKETPLACE ? APP_VERSION : '999.0.0',
})
+function isURL(path: string) {
+  try {
+    // eslint-disable-next-line no-new
+    new URL(path)
+    return true
+  }
+  catch {
+    return false
+  }
+}
+
+export function getBaseURL(path: string) {
+  const url = new URL(path, isURL(path) ? undefined : isClient ? window.location.origin : 'http://localhost')
+  if (!isClient && !isURL(path)) {
+    console.warn('Using localhost as base URL in server environment, please configure accordingly.')
+  }
+  if (url.protocol !== 'http:' && url.protocol !== 'https:') {
+    console.warn(`Unexpected protocol for API requests, expected http or https. Current protocol: ${url.protocol}. Please configure accordingly.`)
+  }
+  return url
+}
const marketplaceLink = new OpenAPILink(marketplaceRouterContract, {
url: MARKETPLACE_API_PREFIX,
headers: () => (getMarketplaceHeaders()),
@@ -39,7 +65,7 @@ export const marketplaceClient: JsonifiedClient<ContractRouterClient<typeof mark
export const marketplaceQuery = createTanstackQueryUtils(marketplaceClient, { path: ['marketplace'] })
const consoleLink = new OpenAPILink(consoleRouterContract, {
-  url: API_PREFIX,
+  url: getBaseURL(API_PREFIX),
fetch: (input, init) => {
return request(
input.url,