Compare commits

..

21 Commits

Author SHA1 Message Date
Yanli 盐粒
b7a5ed6c0b test(api): cover remaining workflow typing branches 2026-03-25 19:47:41 +08:00
盐粒 Yanli
e819a9a5f7 Apply suggestions from code review
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-25 19:45:36 +08:00
盐粒 Yanli
bc82676d93 Update api/dify_graph/nodes/loop/loop_node.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-25 19:29:05 +08:00
Yanli 盐粒
7b76fdc1d3 test(api): cover workflow typing paths 2026-03-25 19:06:23 +08:00
Yanli 盐粒
82acddddb4 Merge remote-tracking branch 'origin/main' into yanli/phase3-code-scope 2026-03-25 18:13:16 +08:00
Yanli 盐粒
710ac3b90a fix(api): preserve typed loop array constants 2026-03-18 22:05:16 +08:00
Yanli 盐粒
8548498f25 fix(api): restore advanced chat refresh_model contract 2026-03-18 19:41:00 +08:00
Yanli 盐粒
d014f0b91a fix(api): address typing review feedback 2026-03-18 19:16:48 +08:00
Yanli 盐粒
cc5aac268a fix(api): support tool typed dicts on py311 2026-03-18 18:59:49 +08:00
Yanli 盐粒
4c1d27431b fix(api): restore workflow node compatibility 2026-03-18 18:43:35 +08:00
Yanli 盐粒
9a86f280eb fix(api): avoid recursive loop type adapters 2026-03-18 18:20:43 +08:00
Yanli 盐粒
c5920fb28a Merge remote-tracking branch 'origin/main' into yanli/phase3-code-scope 2026-03-18 17:52:03 +08:00
Yanli 盐粒
2f81d5dfdf fix(api): restore typedict py311 compatibility 2026-03-17 20:30:18 +08:00
Yanli 盐粒
7639d8e43f fix(api): reuse advanced chat refresh session 2026-03-17 20:18:21 +08:00
Yanli 盐粒
1dce81c604 refactor(api): type single node workflow helpers 2026-03-17 20:16:14 +08:00
Yanli 盐粒
f874ca183e chore(api): remove phase 3 pyrefly excludes 2026-03-17 20:04:55 +08:00
Yanli 盐粒
0d805e624e Type phase 3 loop values 2026-03-17 19:39:54 +08:00
Yanli 盐粒
61196180b8 Type phase 3 tool inputs 2026-03-17 19:31:00 +08:00
Yanli 盐粒
79433b0091 Refine phase 3 typing boundaries 2026-03-17 19:13:12 +08:00
Yanli 盐粒
c4aeaa35d4 Type phase 3 schema contracts 2026-03-17 18:56:22 +08:00
Yanli 盐粒
9f0d79b8b0 Tighten phase 3 runtime typing 2026-03-17 18:49:14 +08:00
96 changed files with 2300 additions and 6365 deletions

View File

@@ -16,14 +16,12 @@ api = ExternalApi(
inner_api_ns = Namespace("inner_api", description="Internal API operations", path="/")
from . import mail as _mail
from .app import dsl as _app_dsl
from .plugin import plugin as _plugin
from .workspace import workspace as _workspace
api.add_namespace(inner_api_ns)
__all__ = [
"_app_dsl",
"_mail",
"_plugin",
"_workspace",

View File

@@ -1 +0,0 @@

View File

@@ -1,110 +0,0 @@
"""Inner API endpoints for app DSL import/export.
Called by the enterprise admin-api service. Import requires ``creator_email``
to attribute the created app; workspace/membership validation is done by the
Go admin-api caller.
"""
from flask import request
from flask_restx import Resource
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from controllers.common.schema import register_schema_model
from controllers.console.wraps import setup_required
from controllers.inner_api import inner_api_ns
from controllers.inner_api.wraps import enterprise_inner_api_only
from extensions.ext_database import db
from models import Account, App
from models.account import AccountStatus
from services.app_dsl_service import AppDslService, ImportMode, ImportStatus
class InnerAppDSLImportPayload(BaseModel):
"""Request body for the enterprise app-DSL import endpoint."""
yaml_content: str = Field(description="YAML DSL content")
creator_email: str = Field(description="Email of the workspace member who will own the imported app")
# Optional overrides applied on top of the values parsed from the DSL itself.
name: str | None = Field(default=None, description="Override app name from DSL")
description: str | None = Field(default=None, description="Override app description from DSL")
# Register the model with the namespace so flask-restx expect()/doc() can reference it by name.
register_schema_model(inner_api_ns, InnerAppDSLImportPayload)
@inner_api_ns.route("/enterprise/workspaces/<string:workspace_id>/dsl/import")
class EnterpriseAppDSLImport(Resource):
    """Inner-API endpoint: import a DSL into a workspace for a given creator."""

    @setup_required
    @enterprise_inner_api_only
    @inner_api_ns.doc("enterprise_app_dsl_import")
    @inner_api_ns.expect(inner_api_ns.models[InnerAppDSLImportPayload.__name__])
    @inner_api_ns.doc(
        responses={
            200: "Import completed",
            202: "Import pending (DSL version mismatch requires confirmation)",
            400: "Import failed (business error)",
            404: "Creator account not found or inactive",
        }
    )
    def post(self, workspace_id: str):
        """Import a DSL into a workspace on behalf of a specified creator."""
        payload = InnerAppDSLImportPayload.model_validate(inner_api_ns.payload or {})

        # Attribute the new app to the requested creator; membership checks
        # are the Go admin-api caller's responsibility (see module docstring).
        creator = _get_active_account(payload.creator_email)
        if creator is None:
            return {"message": f"account '{payload.creator_email}' not found or inactive"}, 404
        creator.set_tenant_id(workspace_id)

        with Session(db.engine) as session:
            result = AppDslService(session).import_app(
                account=creator,
                import_mode=ImportMode.YAML_CONTENT,
                yaml_content=payload.yaml_content,
                name=payload.name,
                description=payload.description,
            )
            session.commit()

        # Map the service-level import status onto an HTTP status code;
        # anything not listed (i.e. success) is a plain 200.
        status_to_http = {ImportStatus.FAILED: 400, ImportStatus.PENDING: 202}
        return result.model_dump(mode="json"), status_to_http.get(result.status, 200)
@inner_api_ns.route("/enterprise/apps/<string:app_id>/dsl")
class EnterpriseAppDSLExport(Resource):
    """Inner-API endpoint: export an app's DSL as YAML."""

    @setup_required
    @enterprise_inner_api_only
    @inner_api_ns.doc(
        "enterprise_app_dsl_export",
        responses={
            200: "Export successful",
            404: "App not found",
        },
    )
    def get(self, app_id: str):
        """Export an app's DSL as YAML."""
        app_model = db.session.query(App).filter_by(id=app_id).first()
        if app_model is None:
            return {"message": "app not found"}, 404

        # Secrets are only included when the caller explicitly opts in.
        with_secrets = request.args.get("include_secret", "false").lower() == "true"
        dsl_yaml = AppDslService.export_dsl(
            app_model=app_model,
            include_secret=with_secrets,
        )
        return {"data": dsl_yaml}, 200
def _get_active_account(email: str) -> Account | None:
    """Look up an active account by email.

    Returns the account only when it exists and is ACTIVE; otherwise None.
    Workspace membership is already validated by the Go admin-api caller.
    """
    candidate = db.session.query(Account).filter_by(email=email).first()
    if candidate is not None and candidate.status == AccountStatus.ACTIVE:
        return candidate
    return None

View File

@@ -5,7 +5,7 @@ import logging
import threading
import uuid
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, overload
from typing import TYPE_CHECKING, Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@@ -47,7 +47,6 @@ from extensions.ext_database import db
from factories import file_factory
from libs.flask_utils import preserve_flask_contexts
from models import Account, App, Conversation, EndUser, Message, Workflow, WorkflowNodeExecutionTriggeredFrom
from models.base import Base
from models.enums import WorkflowRunTriggeredFrom
from services.conversation_service import ConversationService
from services.workflow_draft_variable_service import (
@@ -522,8 +521,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
# release database connection, because the following new thread operations may take a long time
with Session(bind=db.engine, expire_on_commit=False) as session:
workflow = _refresh_model(session, workflow)
message = _refresh_model(session, message)
workflow = _refresh_model(session=session, model=workflow)
message = _refresh_model(session=session, model=message)
if message is None:
raise RuntimeError("Failed to refresh Message; _refresh_model returned None.")
# workflow_ = session.get(Workflow, workflow.id)
# assert workflow_ is not None
# workflow = workflow_
@@ -690,11 +691,21 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
raise e
_T = TypeVar("_T", bound=Base)
@overload
def _refresh_model(*, session: Session | None = None, model: Workflow) -> Workflow: ...
def _refresh_model(session, model: _T) -> _T:
with Session(bind=db.engine, expire_on_commit=False) as session:
detach_model = session.get(type(model), model.id)
assert detach_model is not None
return detach_model
@overload
def _refresh_model(*, session: Session | None = None, model: Message) -> Message: ...
def _refresh_model(*, session: Session | None = None, model: Any) -> Any:
if session is not None:
detached_model = session.get(type(model), model.id)
assert detached_model is not None
return detached_model
with Session(bind=db.engine, expire_on_commit=False) as refresh_session:
detached_model = refresh_session.get(type(model), model.id)
assert detached_model is not None
return detached_model

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import Any, cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -56,8 +56,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, Any, None]:
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
:param stream_response: stream response
@@ -87,8 +87,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, Any, None]:
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.
:param stream_response: stream response

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -55,7 +55,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
@@ -86,7 +86,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.

View File

@@ -1,7 +1,7 @@
import logging
from abc import ABC, abstractmethod
from collections.abc import Generator, Mapping
from typing import Any, Union
from collections.abc import Generator, Iterator, Mapping
from typing import Any
from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.entities.task_entities import AppBlockingResponse, AppStreamResponse
@@ -16,24 +16,26 @@ class AppGenerateResponseConverter(ABC):
@classmethod
def convert(
cls, response: Union[AppBlockingResponse, Generator[AppStreamResponse, Any, None]], invoke_from: InvokeFrom
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]:
cls, response: AppBlockingResponse | Iterator[AppStreamResponse], invoke_from: InvokeFrom
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], None, None]:
if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}:
if isinstance(response, AppBlockingResponse):
return cls.convert_blocking_full_response(response)
else:
stream_response = response
def _generate_full_response() -> Generator[dict | str, Any, None]:
yield from cls.convert_stream_full_response(response)
def _generate_full_response() -> Generator[dict[str, Any] | str, None, None]:
yield from cls.convert_stream_full_response(stream_response)
return _generate_full_response()
else:
if isinstance(response, AppBlockingResponse):
return cls.convert_blocking_simple_response(response)
else:
stream_response = response
def _generate_simple_response() -> Generator[dict | str, Any, None]:
yield from cls.convert_stream_simple_response(response)
def _generate_simple_response() -> Generator[dict[str, Any] | str, None, None]:
yield from cls.convert_stream_simple_response(stream_response)
return _generate_simple_response()
@@ -50,14 +52,14 @@ class AppGenerateResponseConverter(ABC):
@classmethod
@abstractmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
raise NotImplementedError
@classmethod
@abstractmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
raise NotImplementedError

View File

@@ -224,6 +224,7 @@ class BaseAppGenerator:
def _get_draft_var_saver_factory(invoke_from: InvokeFrom, account: Account | EndUser) -> DraftVariableSaverFactory:
if invoke_from == InvokeFrom.DEBUGGER:
assert isinstance(account, Account)
debug_account = account
def draft_var_saver_factory(
session: Session,
@@ -240,7 +241,7 @@ class BaseAppGenerator:
node_type=node_type,
node_execution_id=node_execution_id,
enclosing_node_id=enclosing_node_id,
user=account,
user=debug_account,
)
else:

View File

@@ -166,15 +166,19 @@ class ChatAppGenerator(MessageBasedAppGenerator):
# init generate records
(conversation, message) = self._init_generate_records(application_generate_entity, conversation)
if conversation is None or message is None:
raise RuntimeError("_init_generate_records() returned None for conversation or message")
generated_conversation_id = str(conversation.id)
generated_message_id = str(message.id)
# init queue manager
queue_manager = MessageBasedAppQueueManager(
task_id=application_generate_entity.task_id,
user_id=application_generate_entity.user_id,
invoke_from=application_generate_entity.invoke_from,
conversation_id=conversation.id,
conversation_id=generated_conversation_id,
app_mode=conversation.mode,
message_id=message.id,
message_id=generated_message_id,
)
# new thread with request context
@@ -184,8 +188,8 @@ class ChatAppGenerator(MessageBasedAppGenerator):
flask_app=current_app._get_current_object(), # type: ignore
application_generate_entity=application_generate_entity,
queue_manager=queue_manager,
conversation_id=conversation.id,
message_id=message.id,
conversation_id=generated_conversation_id,
message_id=generated_message_id,
)
worker_thread = threading.Thread(target=worker_with_context)

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -55,7 +55,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
@@ -86,7 +86,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.

View File

@@ -149,6 +149,11 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
# init generate records
(conversation, message) = self._init_generate_records(application_generate_entity)
if conversation is None or message is None:
raise RuntimeError(
"_init_generate_records() returned None for conversation or message, "
"which is required to proceed with generation."
)
# init queue manager
queue_manager = MessageBasedAppQueueManager(
@@ -312,15 +317,19 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
# init generate records
(conversation, message) = self._init_generate_records(application_generate_entity)
assert conversation is not None
assert message is not None
conversation_id = str(conversation.id)
message_id = str(message.id)
# init queue manager
queue_manager = MessageBasedAppQueueManager(
task_id=application_generate_entity.task_id,
user_id=application_generate_entity.user_id,
invoke_from=application_generate_entity.invoke_from,
conversation_id=conversation.id,
conversation_id=conversation_id,
app_mode=conversation.mode,
message_id=message.id,
message_id=message_id,
)
# new thread with request context
@@ -330,7 +339,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
flask_app=current_app._get_current_object(), # type: ignore
application_generate_entity=application_generate_entity,
queue_manager=queue_manager,
message_id=message.id,
message_id=message_id,
)
worker_thread = threading.Thread(target=worker_with_context)

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -54,7 +54,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
@@ -84,7 +84,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -36,7 +36,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
@@ -65,7 +65,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.

View File

@@ -1,4 +1,4 @@
from collections.abc import Generator
from collections.abc import Generator, Iterator
from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
@@ -36,7 +36,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream full response.
@@ -65,7 +65,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod
def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None]
cls, stream_response: Iterator[AppStreamResponse]
) -> Generator[dict | str, None, None]:
"""
Convert stream simple response.

View File

@@ -1,13 +1,17 @@
import logging
import time
from collections.abc import Mapping, Sequence
from typing import Any, cast
from typing import Protocol, TypeAlias
from pydantic import ValidationError
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
from core.app.entities.agent_strategy import AgentStrategyInfo
from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom, build_dify_run_context
from core.app.entities.app_invoke_entities import (
InvokeFrom,
UserFrom,
build_dify_run_context,
)
from core.app.entities.queue_entities import (
AppQueueEvent,
QueueAgentLogEvent,
@@ -36,7 +40,7 @@ from core.rag.entities.citation_metadata import RetrievalSourceMetadata
from core.workflow.node_factory import DifyNodeFactory, get_default_root_node_id, resolve_workflow_node_class
from core.workflow.workflow_entry import WorkflowEntry
from dify_graph.entities import GraphInitParams
from dify_graph.entities.graph_config import NodeConfigDictAdapter
from dify_graph.entities.graph_config import NodeConfigDict, NodeConfigDictAdapter
from dify_graph.entities.pause_reason import HumanInputRequired
from dify_graph.graph import Graph
from dify_graph.graph_engine.layers.base import GraphEngineLayer
@@ -75,6 +79,14 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task
logger = logging.getLogger(__name__)
GraphConfigObject: TypeAlias = dict[str, object]
GraphConfigMapping: TypeAlias = Mapping[str, object]
class SingleNodeRunEntity(Protocol):
"""Structural type for single-node (iteration/loop) run request entities.

Any object exposing a node id plus a mapping of user inputs satisfies it;
used to type ``single_iteration_run`` / ``single_loop_run`` parameters.
"""
node_id: str
inputs: Mapping[str, object]
class WorkflowBasedAppRunner:
def __init__(
@@ -98,7 +110,7 @@ class WorkflowBasedAppRunner:
def _init_graph(
self,
graph_config: Mapping[str, Any],
graph_config: GraphConfigMapping,
graph_runtime_state: GraphRuntimeState,
user_from: UserFrom,
invoke_from: InvokeFrom,
@@ -154,8 +166,8 @@ class WorkflowBasedAppRunner:
def _prepare_single_node_execution(
self,
workflow: Workflow,
single_iteration_run: Any | None = None,
single_loop_run: Any | None = None,
single_iteration_run: SingleNodeRunEntity | None = None,
single_loop_run: SingleNodeRunEntity | None = None,
) -> tuple[Graph, VariablePool, GraphRuntimeState]:
"""
Prepare graph, variable pool, and runtime state for single node execution
@@ -208,11 +220,88 @@ class WorkflowBasedAppRunner:
# This ensures all nodes in the graph reference the same GraphRuntimeState instance
return graph, variable_pool, graph_runtime_state
@staticmethod
def _get_graph_items(graph_config: GraphConfigMapping) -> tuple[list[GraphConfigMapping], list[GraphConfigMapping]]:
    """Return the validated ``(nodes, edges)`` lists from a workflow graph config.

    Raises:
        ValueError: if either key is absent/not a list, or any entry is not a mapping.
    """
    raw_nodes = graph_config.get("nodes")
    raw_edges = graph_config.get("edges")
    if not isinstance(raw_nodes, list):
        raise ValueError("nodes in workflow graph must be a list")
    if not isinstance(raw_edges, list):
        raise ValueError("edges in workflow graph must be a list")

    # Reject any entry that is not a mapping before handing the lists back;
    # nodes are checked first, matching the error-reporting order callers see.
    for entry in raw_nodes:
        if not isinstance(entry, Mapping):
            raise ValueError("nodes in workflow graph must be mappings")
    for entry in raw_edges:
        if not isinstance(entry, Mapping):
            raise ValueError("edges in workflow graph must be mappings")

    return list(raw_nodes), list(raw_edges)
@staticmethod
def _extract_start_node_id(node_config: GraphConfigMapping | None) -> str | None:
    """Return ``data.start_node_id`` from a node config when it is a string.

    Yields None for a missing config, a non-mapping ``data`` payload, or a
    non-string ``start_node_id`` value.
    """
    if node_config is None:
        return None
    data = node_config.get("data")
    if isinstance(data, Mapping):
        candidate = data.get("start_node_id")
        if isinstance(candidate, str):
            return candidate
    return None
@classmethod
def _build_single_node_graph_config(
    cls,
    *,
    graph_config: GraphConfigMapping,
    node_id: str,
    node_type_filter_key: str,
) -> tuple[GraphConfigObject, NodeConfigDict]:
    """Build a reduced graph config containing only ``node_id`` and related nodes.

    Keeps the target node, any node whose ``data[node_type_filter_key]`` points
    at it (iteration/loop children), and the node's declared start node; edges
    are kept only when both endpoints survive the filter (or are absent).

    Returns:
        A ``(graph_config, node_config)`` pair where the node config has been
        validated through ``NodeConfigDictAdapter``.

    Raises:
        ValueError: when ``node_id`` is not present in the graph.
    """
    node_configs, edge_configs = cls._get_graph_items(graph_config)

    main_node_config = None
    for candidate in node_configs:
        if candidate.get("id") == node_id:
            main_node_config = candidate
            break
    start_node_id = cls._extract_start_node_id(main_node_config)

    def _keep_node(node: GraphConfigMapping) -> bool:
        # Target node itself, its iteration/loop children, or the start node.
        if node.get("id") == node_id:
            return True
        data = node.get("data")
        if isinstance(data, Mapping) and data.get(node_type_filter_key) == node_id:
            return True
        return bool(start_node_id) and node.get("id") == start_node_id

    filtered_node_configs = [dict(node) for node in node_configs if _keep_node(node)]
    if not filtered_node_configs:
        raise ValueError(f"node id {node_id} not found in workflow graph")

    surviving_ids: set[str] = set()
    for node in filtered_node_configs:
        raw_id = node.get("id")
        if isinstance(raw_id, str):
            surviving_ids.add(str(raw_id))

    def _keep_edge(edge: GraphConfigMapping) -> bool:
        source = edge.get("source")
        target = edge.get("target")
        return (source is None or source in surviving_ids) and (target is None or target in surviving_ids)

    filtered_edge_configs = [dict(edge) for edge in edge_configs if _keep_edge(edge)]

    target_node_config = None
    for node in filtered_node_configs:
        if node.get("id") == node_id:
            target_node_config = node
            break
    if target_node_config is None:
        raise ValueError(f"node id {node_id} not found in workflow graph")

    return (
        {
            "nodes": filtered_node_configs,
            "edges": filtered_edge_configs,
        },
        NodeConfigDictAdapter.validate_python(target_node_config),
    )
def _get_graph_and_variable_pool_for_single_node_run(
self,
workflow: Workflow,
node_id: str,
user_inputs: dict[str, Any],
user_inputs: Mapping[str, object],
graph_runtime_state: GraphRuntimeState,
node_type_filter_key: str, # 'iteration_id' or 'loop_id'
node_type_label: str = "node", # 'iteration' or 'loop' for error messages
@@ -236,41 +325,14 @@ class WorkflowBasedAppRunner:
if not graph_config:
raise ValueError("workflow graph not found")
graph_config = cast(dict[str, Any], graph_config)
if "nodes" not in graph_config or "edges" not in graph_config:
raise ValueError("nodes or edges not found in workflow graph")
if not isinstance(graph_config.get("nodes"), list):
raise ValueError("nodes in workflow graph must be a list")
if not isinstance(graph_config.get("edges"), list):
raise ValueError("edges in workflow graph must be a list")
# filter nodes only in the specified node type (iteration or loop)
main_node_config = next((n for n in graph_config.get("nodes", []) if n.get("id") == node_id), None)
start_node_id = main_node_config.get("data", {}).get("start_node_id") if main_node_config else None
node_configs = [
node
for node in graph_config.get("nodes", [])
if node.get("id") == node_id
or node.get("data", {}).get(node_type_filter_key, "") == node_id
or (start_node_id and node.get("id") == start_node_id)
]
graph_config["nodes"] = node_configs
node_ids = [node.get("id") for node in node_configs]
# filter edges only in the specified node type
edge_configs = [
edge
for edge in graph_config.get("edges", [])
if (edge.get("source") is None or edge.get("source") in node_ids)
and (edge.get("target") is None or edge.get("target") in node_ids)
]
graph_config["edges"] = edge_configs
graph_config, target_node_config = self._build_single_node_graph_config(
graph_config=graph_config,
node_id=node_id,
node_type_filter_key=node_type_filter_key,
)
# Create required parameters for Graph.init
graph_init_params = GraphInitParams(
@@ -299,18 +361,6 @@ class WorkflowBasedAppRunner:
if not graph:
raise ValueError("graph not found in workflow")
# fetch node config from node id
target_node_config = None
for node in node_configs:
if node.get("id") == node_id:
target_node_config = node
break
if not target_node_config:
raise ValueError(f"{node_type_label} node id not found in workflow graph")
target_node_config = NodeConfigDictAdapter.validate_python(target_node_config)
# Get node class
node_type = target_node_config["data"].type
node_version = str(target_node_config["data"].version)

View File

@@ -213,7 +213,7 @@ class AdvancedChatAppGenerateEntity(ConversationAppGenerateEntity):
"""
node_id: str
inputs: Mapping
inputs: Mapping[str, object]
single_iteration_run: SingleIterationRunEntity | None = None
@@ -223,7 +223,7 @@ class AdvancedChatAppGenerateEntity(ConversationAppGenerateEntity):
"""
node_id: str
inputs: Mapping
inputs: Mapping[str, object]
single_loop_run: SingleLoopRunEntity | None = None
@@ -243,7 +243,7 @@ class WorkflowAppGenerateEntity(AppGenerateEntity):
"""
node_id: str
inputs: dict
inputs: Mapping[str, object]
single_iteration_run: SingleIterationRunEntity | None = None
@@ -253,7 +253,7 @@ class WorkflowAppGenerateEntity(AppGenerateEntity):
"""
node_id: str
inputs: dict
inputs: Mapping[str, object]
single_loop_run: SingleLoopRunEntity | None = None

View File

@@ -1045,9 +1045,10 @@ class ToolManager:
continue
tool_input = ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {}))
if tool_input.type == "variable":
variable = variable_pool.get(tool_input.value)
variable_selector = tool_input.require_variable_selector()
variable = variable_pool.get(variable_selector)
if variable is None:
raise ToolParameterError(f"Variable {tool_input.value} does not exist")
raise ToolParameterError(f"Variable {variable_selector} does not exist")
parameter_value = variable.value
elif tool_input.type == "constant":
parameter_value = tool_input.value

View File

@@ -1,13 +1,24 @@
from enum import IntEnum, StrEnum, auto
from typing import Any, Literal, Union
from __future__ import annotations
from pydantic import BaseModel
from enum import IntEnum, StrEnum, auto
from typing import Literal, TypeAlias
from pydantic import BaseModel, TypeAdapter, field_validator
from pydantic_core.core_schema import ValidationInfo
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from core.tools.entities.tool_entities import ToolSelector
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
AgentInputConstantValue: TypeAlias = (
list[ToolSelector] | str | int | float | bool | dict[str, object] | list[object] | None
)
VariableSelector: TypeAlias = list[str]
_AGENT_INPUT_VALUE_ADAPTER: TypeAdapter[AgentInputConstantValue] = TypeAdapter(AgentInputConstantValue)
_AGENT_VARIABLE_SELECTOR_ADAPTER: TypeAdapter[VariableSelector] = TypeAdapter(VariableSelector)
class AgentNodeData(BaseNodeData):
type: NodeType = BuiltinNodeTypes.AGENT
@@ -21,8 +32,20 @@ class AgentNodeData(BaseNodeData):
tool_node_version: str | None = None
class AgentInput(BaseModel):
value: Union[list[str], list[ToolSelector], Any]
type: Literal["mixed", "variable", "constant"]
value: AgentInputConstantValue | VariableSelector
@field_validator("value", mode="before")
@classmethod
def validate_value(
cls, value: object, validation_info: ValidationInfo
) -> AgentInputConstantValue | VariableSelector:
input_type = validation_info.data.get("type")
if input_type == "variable":
return _AGENT_VARIABLE_SELECTOR_ADAPTER.validate_python(value)
if input_type in {"mixed", "constant"}:
return _AGENT_INPUT_VALUE_ADAPTER.validate_python(value)
raise ValueError(f"Unknown agent input type: {input_type}")
agent_parameters: dict[str, AgentInput]

View File

@@ -1,16 +1,17 @@
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import Any, cast
from collections.abc import Mapping, Sequence
from typing import TypeAlias
from packaging.version import Version
from pydantic import ValidationError
from pydantic import TypeAdapter, ValidationError
from sqlalchemy import select
from sqlalchemy.orm import Session
from core.agent.entities import AgentToolEntity
from core.agent.plugin_entities import AgentStrategyParameter
from core.app.entities.app_invoke_entities import InvokeFrom
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.plugin.entities.request import InvokeCredentials
@@ -28,6 +29,14 @@ from .entities import AgentNodeData, AgentOldVersionModelFeatures, ParamsAutoGen
from .exceptions import AgentInputTypeError, AgentVariableNotFoundError
from .strategy_protocols import ResolvedAgentStrategy
JsonObject: TypeAlias = dict[str, object]
JsonObjectList: TypeAlias = list[JsonObject]
VariableSelector: TypeAlias = list[str]
_JSON_OBJECT_ADAPTER = TypeAdapter(JsonObject)
_JSON_OBJECT_LIST_ADAPTER = TypeAdapter(JsonObjectList)
_VARIABLE_SELECTOR_ADAPTER = TypeAdapter(VariableSelector)
class AgentRuntimeSupport:
def build_parameters(
@@ -39,12 +48,12 @@ class AgentRuntimeSupport:
strategy: ResolvedAgentStrategy,
tenant_id: str,
app_id: str,
invoke_from: Any,
invoke_from: InvokeFrom,
for_log: bool = False,
) -> dict[str, Any]:
) -> dict[str, object]:
agent_parameters_dictionary = {parameter.name: parameter for parameter in agent_parameters}
result: dict[str, Any] = {}
result: dict[str, object] = {}
for parameter_name in node_data.agent_parameters:
parameter = agent_parameters_dictionary.get(parameter_name)
if not parameter:
@@ -54,9 +63,10 @@ class AgentRuntimeSupport:
agent_input = node_data.agent_parameters[parameter_name]
match agent_input.type:
case "variable":
variable = variable_pool.get(agent_input.value) # type: ignore[arg-type]
variable_selector = _VARIABLE_SELECTOR_ADAPTER.validate_python(agent_input.value)
variable = variable_pool.get(variable_selector)
if variable is None:
raise AgentVariableNotFoundError(str(agent_input.value))
raise AgentVariableNotFoundError(str(variable_selector))
parameter_value = variable.value
case "mixed" | "constant":
try:
@@ -79,60 +89,38 @@ class AgentRuntimeSupport:
value = parameter_value
if parameter.type == "array[tools]":
value = cast(list[dict[str, Any]], value)
value = [tool for tool in value if tool.get("enabled", False)]
value = self._filter_mcp_type_tool(strategy, value)
for tool in value:
if "schemas" in tool:
tool.pop("schemas")
parameters = tool.get("parameters", {})
if all(isinstance(v, dict) for _, v in parameters.items()):
params = {}
for key, param in parameters.items():
if param.get("auto", ParamsAutoGenerated.OPEN) in (
ParamsAutoGenerated.CLOSE,
0,
):
value_param = param.get("value", {})
if value_param and value_param.get("type", "") == "variable":
variable_selector = value_param.get("value")
if not variable_selector:
raise ValueError("Variable selector is missing for a variable-type parameter.")
variable = variable_pool.get(variable_selector)
if variable is None:
raise AgentVariableNotFoundError(str(variable_selector))
params[key] = variable.value
else:
params[key] = value_param.get("value", "") if value_param is not None else None
else:
params[key] = None
parameters = params
tool["settings"] = {k: v.get("value", None) for k, v in tool.get("settings", {}).items()}
tool["parameters"] = parameters
tool_payloads = _JSON_OBJECT_LIST_ADAPTER.validate_python(value)
value = self._normalize_tool_payloads(
strategy=strategy,
tools=tool_payloads,
variable_pool=variable_pool,
)
if not for_log:
if parameter.type == "array[tools]":
value = cast(list[dict[str, Any]], value)
value = _JSON_OBJECT_LIST_ADAPTER.validate_python(value)
tool_value = []
for tool in value:
provider_type = ToolProviderType(tool.get("type", ToolProviderType.BUILT_IN))
setting_params = tool.get("settings", {})
parameters = tool.get("parameters", {})
provider_type = self._coerce_tool_provider_type(tool.get("type"))
setting_params = self._coerce_json_object(tool.get("settings")) or {}
parameters = self._coerce_json_object(tool.get("parameters")) or {}
manual_input_params = [key for key, value in parameters.items() if value is not None]
parameters = {**parameters, **setting_params}
provider_id = self._coerce_optional_string(tool.get("provider_name")) or ""
tool_name = self._coerce_optional_string(tool.get("tool_name")) or ""
plugin_unique_identifier = self._coerce_optional_string(tool.get("plugin_unique_identifier"))
credential_id = self._coerce_optional_string(tool.get("credential_id"))
entity = AgentToolEntity(
provider_id=tool.get("provider_name", ""),
provider_id=provider_id,
provider_type=provider_type,
tool_name=tool.get("tool_name", ""),
tool_name=tool_name,
tool_parameters=parameters,
plugin_unique_identifier=tool.get("plugin_unique_identifier", None),
credential_id=tool.get("credential_id", None),
plugin_unique_identifier=plugin_unique_identifier,
credential_id=credential_id,
)
extra = tool.get("extra", {})
extra = self._coerce_json_object(tool.get("extra")) or {}
runtime_variable_pool: VariablePool | None = None
if node_data.version != "1" or node_data.tool_node_version is not None:
@@ -145,8 +133,9 @@ class AgentRuntimeSupport:
runtime_variable_pool,
)
if tool_runtime.entity.description:
description_override = self._coerce_optional_string(extra.get("description"))
tool_runtime.entity.description.llm = (
extra.get("description", "") or tool_runtime.entity.description.llm
description_override or tool_runtime.entity.description.llm
)
for tool_runtime_params in tool_runtime.entity.parameters:
tool_runtime_params.form = (
@@ -167,13 +156,13 @@ class AgentRuntimeSupport:
{
**tool_runtime.entity.model_dump(mode="json"),
"runtime_parameters": runtime_parameters,
"credential_id": tool.get("credential_id", None),
"credential_id": credential_id,
"provider_type": provider_type.value,
}
)
value = tool_value
if parameter.type == AgentStrategyParameter.AgentStrategyParameterType.MODEL_SELECTOR:
value = cast(dict[str, Any], value)
value = _JSON_OBJECT_ADAPTER.validate_python(value)
model_instance, model_schema = self.fetch_model(tenant_id=tenant_id, value=value)
history_prompt_messages = []
if node_data.memory:
@@ -199,17 +188,27 @@ class AgentRuntimeSupport:
return result
def build_credentials(self, *, parameters: dict[str, Any]) -> InvokeCredentials:
def build_credentials(self, *, parameters: Mapping[str, object]) -> InvokeCredentials:
credentials = InvokeCredentials()
credentials.tool_credentials = {}
for tool in parameters.get("tools", []):
tools = parameters.get("tools")
if not isinstance(tools, list):
return credentials
for raw_tool in tools:
tool = self._coerce_json_object(raw_tool)
if tool is None:
continue
if not tool.get("credential_id"):
continue
try:
identity = ToolIdentity.model_validate(tool.get("identity", {}))
except ValidationError:
continue
credentials.tool_credentials[identity.provider] = tool.get("credential_id", None)
credential_id = self._coerce_optional_string(tool.get("credential_id"))
if credential_id is None:
continue
credentials.tool_credentials[identity.provider] = credential_id
return credentials
def fetch_memory(
@@ -232,14 +231,14 @@ class AgentRuntimeSupport:
return TokenBufferMemory(conversation=conversation, model_instance=model_instance)
def fetch_model(self, *, tenant_id: str, value: dict[str, Any]) -> tuple[ModelInstance, AIModelEntity | None]:
def fetch_model(self, *, tenant_id: str, value: Mapping[str, object]) -> tuple[ModelInstance, AIModelEntity | None]:
provider_manager = ProviderManager()
provider_model_bundle = provider_manager.get_provider_model_bundle(
tenant_id=tenant_id,
provider=value.get("provider", ""),
provider=str(value.get("provider", "")),
model_type=ModelType.LLM,
)
model_name = value.get("model", "")
model_name = str(value.get("model", ""))
model_credentials = provider_model_bundle.configuration.get_current_credentials(
model_type=ModelType.LLM,
model=model_name,
@@ -249,7 +248,7 @@ class AgentRuntimeSupport:
model_instance = ModelManager().get_model_instance(
tenant_id=tenant_id,
provider=provider_name,
model_type=ModelType(value.get("model_type", "")),
model_type=ModelType(str(value.get("model_type", ""))),
model=model_name,
)
model_schema = model_type_instance.get_model_schema(model_name, model_credentials)
@@ -268,9 +267,88 @@ class AgentRuntimeSupport:
@staticmethod
def _filter_mcp_type_tool(
strategy: ResolvedAgentStrategy,
tools: list[dict[str, Any]],
) -> list[dict[str, Any]]:
tools: JsonObjectList,
) -> JsonObjectList:
meta_version = strategy.meta_version
if meta_version and Version(meta_version) > Version("0.0.1"):
return tools
return [tool for tool in tools if tool.get("type") != ToolProviderType.MCP]
def _normalize_tool_payloads(
self,
*,
strategy: ResolvedAgentStrategy,
tools: JsonObjectList,
variable_pool: VariablePool,
) -> JsonObjectList:
enabled_tools = [dict(tool) for tool in tools if bool(tool.get("enabled", False))]
normalized_tools = self._filter_mcp_type_tool(strategy, enabled_tools)
for tool in normalized_tools:
tool.pop("schemas", None)
tool["parameters"] = self._resolve_tool_parameters(tool=tool, variable_pool=variable_pool)
tool["settings"] = self._resolve_tool_settings(tool)
return normalized_tools
def _resolve_tool_parameters(self, *, tool: Mapping[str, object], variable_pool: VariablePool) -> JsonObject:
parameter_configs = self._coerce_named_json_objects(tool.get("parameters"))
if parameter_configs is None:
raw_parameters = self._coerce_json_object(tool.get("parameters"))
return raw_parameters or {}
resolved_parameters: JsonObject = {}
for key, parameter_config in parameter_configs.items():
if parameter_config.get("auto", ParamsAutoGenerated.OPEN) in (ParamsAutoGenerated.CLOSE, 0):
value_param = self._coerce_json_object(parameter_config.get("value"))
if value_param and value_param.get("type") == "variable":
variable_selector = _VARIABLE_SELECTOR_ADAPTER.validate_python(value_param.get("value"))
variable = variable_pool.get(variable_selector)
if variable is None:
raise AgentVariableNotFoundError(str(variable_selector))
resolved_parameters[key] = variable.value
else:
resolved_parameters[key] = value_param.get("value", "") if value_param is not None else None
else:
resolved_parameters[key] = None
return resolved_parameters
@staticmethod
def _resolve_tool_settings(tool: Mapping[str, object]) -> JsonObject:
settings = AgentRuntimeSupport._coerce_named_json_objects(tool.get("settings"))
if settings is None:
return {}
return {key: setting.get("value") for key, setting in settings.items()}
@staticmethod
def _coerce_json_object(value: object) -> JsonObject | None:
try:
return _JSON_OBJECT_ADAPTER.validate_python(value)
except ValidationError:
return None
@staticmethod
def _coerce_optional_string(value: object) -> str | None:
return value if isinstance(value, str) else None
@staticmethod
def _coerce_tool_provider_type(value: object) -> ToolProviderType:
if isinstance(value, ToolProviderType):
return value
if isinstance(value, str):
return ToolProviderType(value)
return ToolProviderType.BUILT_IN
@classmethod
def _coerce_named_json_objects(cls, value: object) -> dict[str, JsonObject] | None:
if not isinstance(value, dict):
return None
coerced: dict[str, JsonObject] = {}
for key, item in value.items():
if not isinstance(key, str):
return None
json_object = cls._coerce_json_object(item)
if json_object is None:
return None
coerced[key] = json_object
return coerced

View File

@@ -1,7 +1,7 @@
import logging
import time
from collections.abc import Generator, Mapping, Sequence
from typing import Any, cast
from typing import Any, TypeAlias, cast
from configs import dify_config
from core.app.apps.exc import GenerateTaskStoppedError
@@ -32,6 +32,13 @@ from models.workflow import Workflow
logger = logging.getLogger(__name__)
SpecialValueScalar: TypeAlias = str | int | float | bool | None
SpecialValue: TypeAlias = SpecialValueScalar | File | Mapping[str, "SpecialValue"] | list["SpecialValue"]
SerializedSpecialValue: TypeAlias = (
SpecialValueScalar | dict[str, "SerializedSpecialValue"] | list["SerializedSpecialValue"]
)
SingleNodeGraphConfig: TypeAlias = dict[str, list[dict[str, object]]]
class _WorkflowChildEngineBuilder:
@staticmethod
@@ -276,10 +283,10 @@ class WorkflowEntry:
@staticmethod
def _create_single_node_graph(
node_id: str,
node_data: dict[str, Any],
node_data: Mapping[str, object],
node_width: int = 114,
node_height: int = 514,
) -> dict[str, Any]:
) -> SingleNodeGraphConfig:
"""
Create a minimal graph structure for testing a single node in isolation.
@@ -289,14 +296,14 @@ class WorkflowEntry:
:param node_height: height for UI layout (default: 100)
:return: graph dictionary with start node and target node
"""
node_config = {
node_config: dict[str, object] = {
"id": node_id,
"width": node_width,
"height": node_height,
"type": "custom",
"data": node_data,
"data": dict(node_data),
}
start_node_config = {
start_node_config: dict[str, object] = {
"id": "start",
"width": node_width,
"height": node_height,
@@ -321,7 +328,12 @@ class WorkflowEntry:
@classmethod
def run_free_node(
cls, node_data: dict[str, Any], node_id: str, tenant_id: str, user_id: str, user_inputs: dict[str, Any]
cls,
node_data: Mapping[str, object],
node_id: str,
tenant_id: str,
user_id: str,
user_inputs: Mapping[str, object],
) -> tuple[Node, Generator[GraphNodeEventBase, None, None]]:
"""
Run free node
@@ -339,6 +351,8 @@ class WorkflowEntry:
graph_dict = cls._create_single_node_graph(node_id, node_data)
node_type = node_data.get("type", "")
if not isinstance(node_type, str):
raise ValueError("Node type must be a string")
if node_type not in {BuiltinNodeTypes.PARAMETER_EXTRACTOR, BuiltinNodeTypes.QUESTION_CLASSIFIER}:
raise ValueError(f"Node type {node_type} not supported")
@@ -369,7 +383,7 @@ class WorkflowEntry:
graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter())
# init workflow run state
node_config = NodeConfigDictAdapter.validate_python({"id": node_id, "data": node_data})
node_config = NodeConfigDictAdapter.validate_python({"id": node_id, "data": dict(node_data)})
node_factory = DifyNodeFactory(
graph_init_params=graph_init_params,
graph_runtime_state=graph_runtime_state,
@@ -405,30 +419,34 @@ class WorkflowEntry:
raise WorkflowNodeRunFailedError(node=node, err_msg=str(e))
@staticmethod
def handle_special_values(value: Mapping[str, Any] | None) -> Mapping[str, Any] | None:
def handle_special_values(value: Mapping[str, SpecialValue] | None) -> dict[str, SerializedSpecialValue] | None:
# NOTE(QuantumGhost): Avoid using this function in new code.
# Keep values structured as long as possible and only convert to dict
# immediately before serialization (e.g., JSON serialization) to maintain
# data integrity and type information.
result = WorkflowEntry._handle_special_values(value)
return result if isinstance(result, Mapping) or result is None else dict(result)
if result is None:
return None
if isinstance(result, dict):
return result
raise TypeError("handle_special_values expects a mapping input")
@staticmethod
def _handle_special_values(value: Any):
def _handle_special_values(value: SpecialValue) -> SerializedSpecialValue:
if value is None:
return value
if isinstance(value, dict):
res = {}
if isinstance(value, Mapping):
res: dict[str, SerializedSpecialValue] = {}
for k, v in value.items():
res[k] = WorkflowEntry._handle_special_values(v)
return res
if isinstance(value, list):
res_list = []
res_list: list[SerializedSpecialValue] = []
for item in value:
res_list.append(WorkflowEntry._handle_special_values(item))
return res_list
if isinstance(value, File):
return value.to_dict()
return dict(value.to_dict())
return value
@classmethod

View File

@@ -112,6 +112,8 @@ def _get_encoded_string(f: File, /) -> str:
data = _download_file_content(f.storage_key)
case FileTransferMethod.DATASOURCE_FILE:
data = _download_file_content(f.storage_key)
case _:
raise ValueError(f"Unsupported transfer method: {f.transfer_method}")
return base64.b64encode(data).decode("utf-8")

View File

@@ -133,6 +133,8 @@ class ExecutionLimitsLayer(GraphEngineLayer):
elif limit_type == LimitType.TIME_LIMIT:
elapsed_time = time.time() - self.start_time if self.start_time else 0
reason = f"Maximum execution time exceeded: {elapsed_time:.2f}s > {self.max_time}s"
else:
return
self.logger.warning("Execution limit exceeded: %s", reason)

View File

@@ -336,12 +336,7 @@ class Node(Generic[NodeDataT]):
def _restore_execution_id_from_runtime_state(self) -> str | None:
graph_execution = self.graph_runtime_state.graph_execution
try:
node_executions = graph_execution.node_executions
except AttributeError:
return None
if not isinstance(node_executions, dict):
return None
node_executions = graph_execution.node_executions
node_execution = node_executions.get(self._node_id)
if node_execution is None:
return None
@@ -395,8 +390,7 @@ class Node(Generic[NodeDataT]):
if isinstance(event, NodeEventBase): # pyright: ignore[reportUnnecessaryIsInstance]
yield self._dispatch(event)
elif isinstance(event, GraphNodeEventBase) and not event.in_iteration_id and not event.in_loop_id: # pyright: ignore[reportUnnecessaryIsInstance]
event.id = self.execution_id
yield event
yield event.model_copy(update={"id": self.execution_id})
else:
yield event
except Exception as e:

View File

@@ -443,7 +443,10 @@ def _extract_text_from_docx(file_content: bytes) -> str:
# Keep track of paragraph and table positions
content_items: list[tuple[int, str, Table | Paragraph]] = []
it = iter(doc.element.body)
doc_body = getattr(doc.element, "body", None)
if doc_body is None:
raise TextExtractionError("DOCX body not found")
it = iter(doc_body)
part = next(it, None)
i = 0
while part is not None:

View File

@@ -1,7 +1,8 @@
from collections.abc import Mapping, Sequence
from typing import Any, Literal
from typing import Literal, NotRequired
from pydantic import BaseModel, Field, field_validator
from typing_extensions import TypedDict
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from dify_graph.entities.base_node_data import BaseNodeData
@@ -10,11 +11,17 @@ from dify_graph.model_runtime.entities import ImagePromptMessageContent, LLMMode
from dify_graph.nodes.base.entities import VariableSelector
class StructuredOutputConfig(TypedDict):
schema: Mapping[str, object]
name: NotRequired[str]
description: NotRequired[str]
class ModelConfig(BaseModel):
provider: str
name: str
mode: LLMMode
completion_params: dict[str, Any] = Field(default_factory=dict)
completion_params: dict[str, object] = Field(default_factory=dict)
class ContextConfig(BaseModel):
@@ -33,7 +40,7 @@ class VisionConfig(BaseModel):
@field_validator("configs", mode="before")
@classmethod
def convert_none_configs(cls, v: Any):
def convert_none_configs(cls, v: object):
if v is None:
return VisionConfigOptions()
return v
@@ -44,7 +51,7 @@ class PromptConfig(BaseModel):
@field_validator("jinja2_variables", mode="before")
@classmethod
def convert_none_jinja2_variables(cls, v: Any):
def convert_none_jinja2_variables(cls, v: object):
if v is None:
return []
return v
@@ -67,7 +74,7 @@ class LLMNodeData(BaseNodeData):
memory: MemoryConfig | None = None
context: ContextConfig
vision: VisionConfig = Field(default_factory=VisionConfig)
structured_output: Mapping[str, Any] | None = None
structured_output: StructuredOutputConfig | None = None
# We used 'structured_output_enabled' in the past, but it's not a good name.
structured_output_switch_on: bool = Field(False, alias="structured_output_enabled")
reasoning_format: Literal["separated", "tagged"] = Field(
@@ -90,11 +97,30 @@ class LLMNodeData(BaseNodeData):
@field_validator("prompt_config", mode="before")
@classmethod
def convert_none_prompt_config(cls, v: Any):
def convert_none_prompt_config(cls, v: object):
if v is None:
return PromptConfig()
return v
@field_validator("structured_output", mode="before")
@classmethod
def convert_legacy_structured_output(cls, v: object) -> StructuredOutputConfig | None | object:
if not isinstance(v, Mapping):
return v
schema = v.get("schema")
if schema is None:
return None
normalized: StructuredOutputConfig = {"schema": schema}
name = v.get("name")
description = v.get("description")
if isinstance(name, str):
normalized["name"] = name
if isinstance(description, str):
normalized["description"] = description
return normalized
@property
def structured_output_enabled(self) -> bool:
return self.structured_output_switch_on and self.structured_output is not None

View File

@@ -9,6 +9,7 @@ import time
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Literal
from pydantic import TypeAdapter
from sqlalchemy import select
from core.llm_generator.output_parser.errors import OutputParserError
@@ -74,6 +75,7 @@ from .entities import (
LLMNodeChatModelMessage,
LLMNodeCompletionModelPromptTemplate,
LLMNodeData,
StructuredOutputConfig,
)
from .exc import (
InvalidContextStructureError,
@@ -88,6 +90,7 @@ if TYPE_CHECKING:
from dify_graph.runtime import GraphRuntimeState
logger = logging.getLogger(__name__)
_JSON_OBJECT_ADAPTER = TypeAdapter(dict[str, object])
class LLMNode(Node[LLMNodeData]):
@@ -358,7 +361,7 @@ class LLMNode(Node[LLMNodeData]):
stop: Sequence[str] | None = None,
user_id: str,
structured_output_enabled: bool,
structured_output: Mapping[str, Any] | None = None,
structured_output: StructuredOutputConfig | None = None,
file_saver: LLMFileSaver,
file_outputs: list[File],
node_id: str,
@@ -371,8 +374,10 @@ class LLMNode(Node[LLMNodeData]):
model_schema = llm_utils.fetch_model_schema(model_instance=model_instance)
if structured_output_enabled:
if structured_output is None:
raise LLMNodeError("Please provide a valid structured output schema")
output_schema = LLMNode.fetch_structured_output_schema(
structured_output=structured_output or {},
structured_output=structured_output,
)
request_start_time = time.perf_counter()
@@ -924,6 +929,12 @@ class LLMNode(Node[LLMNodeData]):
# Extract clean text and reasoning from <think> tags
clean_text, reasoning_content = LLMNode._split_reasoning(full_text, reasoning_format)
structured_output = (
dict(invoke_result.structured_output)
if isinstance(invoke_result, LLMResultWithStructuredOutput) and invoke_result.structured_output is not None
else None
)
event = ModelInvokeCompletedEvent(
# Use clean_text for separated mode, full_text for tagged mode
text=clean_text if reasoning_format == "separated" else full_text,
@@ -932,7 +943,7 @@ class LLMNode(Node[LLMNodeData]):
# Reasoning content for workflow variables and downstream nodes
reasoning_content=reasoning_content,
# Pass structured output if enabled
structured_output=getattr(invoke_result, "structured_output", None),
structured_output=structured_output,
)
if request_latency is not None:
event.usage.latency = round(request_latency, 3)
@@ -966,27 +977,18 @@ class LLMNode(Node[LLMNodeData]):
@staticmethod
def fetch_structured_output_schema(
*,
structured_output: Mapping[str, Any],
) -> dict[str, Any]:
structured_output: StructuredOutputConfig,
) -> dict[str, object]:
"""
Fetch the structured output schema from the node data.
Returns:
dict[str, Any]: The structured output schema
dict[str, object]: The structured output schema
"""
if not structured_output:
schema = structured_output.get("schema")
if not schema:
raise LLMNodeError("Please provide a valid structured output schema")
structured_output_schema = json.dumps(structured_output.get("schema", {}), ensure_ascii=False)
if not structured_output_schema:
raise LLMNodeError("Please provide a valid structured output schema")
try:
schema = json.loads(structured_output_schema)
if not isinstance(schema, dict):
raise LLMNodeError("structured_output_schema must be a JSON object")
return schema
except json.JSONDecodeError:
raise LLMNodeError("structured_output_schema is not valid JSON format")
return _JSON_OBJECT_ADAPTER.validate_python(schema)
@staticmethod
def _save_multimodal_output_and_convert_result_to_markdown(

View File

@@ -1,7 +1,10 @@
from enum import StrEnum
from typing import Annotated, Any, Literal
from __future__ import annotations
from pydantic import AfterValidator, BaseModel, Field, field_validator
from enum import StrEnum
from typing import Annotated, Any, Literal, TypeAlias, cast
from pydantic import AfterValidator, BaseModel, Field, TypeAdapter, field_validator
from pydantic_core.core_schema import ValidationInfo
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
@@ -9,6 +12,12 @@ from dify_graph.nodes.base import BaseLoopNodeData, BaseLoopState
from dify_graph.utils.condition.entities import Condition
from dify_graph.variables.types import SegmentType
LoopValue: TypeAlias = str | int | float | bool | None | dict[str, Any] | list[Any]
LoopValueMapping: TypeAlias = dict[str, LoopValue]
VariableSelector: TypeAlias = list[str]
_VARIABLE_SELECTOR_ADAPTER: TypeAdapter[VariableSelector] = TypeAdapter(VariableSelector)
_VALID_VAR_TYPE = frozenset(
[
SegmentType.STRING,
@@ -29,6 +38,36 @@ def _is_valid_var_type(seg_type: SegmentType) -> SegmentType:
return seg_type
def _validate_loop_value(value: object) -> LoopValue:
if value is None or isinstance(value, (str, int, float, bool)):
return cast(LoopValue, value)
if isinstance(value, list):
return [_validate_loop_value(item) for item in value]
if isinstance(value, dict):
normalized: dict[str, LoopValue] = {}
for key, item in value.items():
if not isinstance(key, str):
raise TypeError("Loop values only support string object keys")
normalized[key] = _validate_loop_value(item)
return normalized
raise TypeError("Loop values must be JSON-like primitives, arrays, or objects")
def _validate_loop_value_mapping(value: object) -> LoopValueMapping:
if not isinstance(value, dict):
raise TypeError("Loop outputs must be an object")
normalized: LoopValueMapping = {}
for key, item in value.items():
if not isinstance(key, str):
raise TypeError("Loop output keys must be strings")
normalized[key] = _validate_loop_value(item)
return normalized
class LoopVariableData(BaseModel):
"""
Loop Variable Data.
@@ -37,7 +76,29 @@ class LoopVariableData(BaseModel):
label: str
var_type: Annotated[SegmentType, AfterValidator(_is_valid_var_type)]
value_type: Literal["variable", "constant"]
value: Any | list[str] | None = None
value: LoopValue | VariableSelector | None = None
@field_validator("value", mode="before")
@classmethod
def validate_value(cls, value: object, validation_info: ValidationInfo) -> LoopValue | VariableSelector | None:
value_type = validation_info.data.get("value_type")
if value_type == "variable":
if value is None:
raise ValueError("Variable loop inputs require a selector")
return _VARIABLE_SELECTOR_ADAPTER.validate_python(value)
if value_type == "constant":
return _validate_loop_value(value)
raise ValueError(f"Unknown loop variable value type: {value_type}")
def require_variable_selector(self) -> VariableSelector:
if self.value_type != "variable":
raise ValueError(f"Expected variable loop input, got {self.value_type}")
return _VARIABLE_SELECTOR_ADAPTER.validate_python(self.value)
def require_constant_value(self) -> LoopValue:
if self.value_type != "constant":
raise ValueError(f"Expected constant loop input, got {self.value_type}")
return _validate_loop_value(self.value)
class LoopNodeData(BaseLoopNodeData):
@@ -46,14 +107,14 @@ class LoopNodeData(BaseLoopNodeData):
break_conditions: list[Condition] # Conditions to break the loop
logical_operator: Literal["and", "or"]
loop_variables: list[LoopVariableData] | None = Field(default_factory=list[LoopVariableData])
outputs: dict[str, Any] = Field(default_factory=dict)
outputs: LoopValueMapping = Field(default_factory=dict)
@field_validator("outputs", mode="before")
@classmethod
def validate_outputs(cls, v):
if v is None:
def validate_outputs(cls, value: object) -> LoopValueMapping:
if value is None:
return {}
return v
return _validate_loop_value_mapping(value)
class LoopStartNodeData(BaseNodeData):
@@ -77,8 +138,8 @@ class LoopState(BaseLoopState):
Loop State.
"""
outputs: list[Any] = Field(default_factory=list)
current_output: Any = None
outputs: list[LoopValue] = Field(default_factory=list)
current_output: LoopValue | None = None
class MetaData(BaseLoopState.MetaData):
"""
@@ -87,7 +148,7 @@ class LoopState(BaseLoopState):
loop_length: int
def get_last_output(self) -> Any:
def get_last_output(self) -> LoopValue | None:
"""
Get last output.
"""
@@ -95,7 +156,7 @@ class LoopState(BaseLoopState):
return self.outputs[-1]
return None
def get_current_output(self) -> Any:
def get_current_output(self) -> LoopValue | None:
"""
Get current output.
"""

View File

@@ -3,7 +3,7 @@ import json
import logging
from collections.abc import Callable, Generator, Mapping, Sequence
from datetime import datetime
from typing import TYPE_CHECKING, Any, Literal, cast
from typing import TYPE_CHECKING, Literal, cast
from dify_graph.entities.graph_config import NodeConfigDictAdapter
from dify_graph.enums import (
@@ -29,7 +29,7 @@ from dify_graph.node_events import (
)
from dify_graph.nodes.base import LLMUsageTrackingMixin
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.loop.entities import LoopCompletedReason, LoopNodeData, LoopVariableData
from dify_graph.nodes.loop.entities import LoopCompletedReason, LoopNodeData, LoopValue, LoopVariableData
from dify_graph.utils.condition.processor import ConditionProcessor
from dify_graph.variables import Segment, SegmentType
from factories.variable_factory import TypeMismatchError, build_segment_with_type, segment_to_variable
@@ -60,7 +60,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
break_conditions = self.node_data.break_conditions
logical_operator = self.node_data.logical_operator
inputs = {"loop_count": loop_count}
inputs: dict[str, object] = {"loop_count": loop_count}
if not self.node_data.start_node_id:
raise ValueError(f"field start_node_id in loop {self._node_id} not found")
@@ -68,12 +68,14 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
root_node_id = self.node_data.start_node_id
# Initialize loop variables in the original variable pool
loop_variable_selectors = {}
loop_variable_selectors: dict[str, list[str]] = {}
if self.node_data.loop_variables:
value_processor: dict[Literal["constant", "variable"], Callable[[LoopVariableData], Segment | None]] = {
"constant": lambda var: self._get_segment_for_constant(var.var_type, var.value),
"constant": lambda var: self._get_segment_for_constant(var.var_type, var.require_constant_value()),
"variable": lambda var: (
self.graph_runtime_state.variable_pool.get(var.value) if isinstance(var.value, list) else None
self.graph_runtime_state.variable_pool.get(var.require_variable_selector())
if var.value is not None
else None
),
}
for loop_variable in self.node_data.loop_variables:
@@ -95,7 +97,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
condition_processor = ConditionProcessor()
loop_duration_map: dict[str, float] = {}
single_loop_variable_map: dict[str, dict[str, Any]] = {} # single loop variable output
single_loop_variable_map: dict[str, dict[str, LoopValue]] = {} # single loop variable output
loop_usage = LLMUsage.empty_usage()
loop_node_ids = self._extract_loop_node_ids_from_config(self.graph_config, self._node_id)
@@ -146,7 +148,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
loop_usage = self._merge_usage(loop_usage, graph_engine.graph_runtime_state.llm_usage)
# Collect loop variable values after iteration
single_loop_variable = {}
single_loop_variable: dict[str, LoopValue] = {}
for key, selector in loop_variable_selectors.items():
segment = self.graph_runtime_state.variable_pool.get(selector)
single_loop_variable[key] = segment.value if segment else None
@@ -297,20 +299,29 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
def _extract_variable_selector_to_variable_mapping(
cls,
*,
graph_config: Mapping[str, Any],
graph_config: Mapping[str, object],
node_id: str,
node_data: LoopNodeData,
) -> Mapping[str, Sequence[str]]:
variable_mapping = {}
variable_mapping: dict[str, Sequence[str]] = {}
# Extract loop node IDs statically from graph_config
loop_node_ids = cls._extract_loop_node_ids_from_config(graph_config, node_id)
# Get node configs from graph_config
node_configs = {node["id"]: node for node in graph_config.get("nodes", []) if "id" in node}
raw_nodes = graph_config.get("nodes")
node_configs: dict[str, Mapping[str, object]] = {}
if isinstance(raw_nodes, list):
for raw_node in raw_nodes:
if not isinstance(raw_node, dict):
continue
raw_node_id = raw_node.get("id")
if isinstance(raw_node_id, str):
node_configs[raw_node_id] = raw_node
for sub_node_id, sub_node_config in node_configs.items():
if sub_node_config.get("data", {}).get("loop_id") != node_id:
sub_node_data = sub_node_config.get("data")
if not isinstance(sub_node_data, dict) or sub_node_data.get("loop_id") != node_id:
continue
# variable selector to variable mapping
@@ -341,9 +352,8 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
for loop_variable in node_data.loop_variables or []:
if loop_variable.value_type == "variable":
assert loop_variable.value is not None, "Loop variable value must be provided for variable type"
# add loop variable to variable mapping
selector = loop_variable.value
selector = loop_variable.require_variable_selector()
variable_mapping[f"{node_id}.{loop_variable.label}"] = selector
# remove variable out from loop
@@ -352,7 +362,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
return variable_mapping
@classmethod
def _extract_loop_node_ids_from_config(cls, graph_config: Mapping[str, Any], loop_node_id: str) -> set[str]:
def _extract_loop_node_ids_from_config(cls, graph_config: Mapping[str, object], loop_node_id: str) -> set[str]:
"""
Extract node IDs that belong to a specific loop from graph configuration.
@@ -363,12 +373,19 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
:param loop_node_id: the ID of the loop node
:return: set of node IDs that belong to the loop
"""
loop_node_ids = set()
loop_node_ids: set[str] = set()
# Find all nodes that belong to this loop
nodes = graph_config.get("nodes", [])
for node in nodes:
node_data = node.get("data", {})
raw_nodes = graph_config.get("nodes")
if not isinstance(raw_nodes, list):
return loop_node_ids
for node in raw_nodes:
if not isinstance(node, dict):
continue
node_data = node.get("data")
if not isinstance(node_data, dict):
continue
if node_data.get("loop_id") == loop_node_id:
node_id = node.get("id")
if node_id:
@@ -377,7 +394,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
return loop_node_ids
@staticmethod
def _get_segment_for_constant(var_type: SegmentType, original_value: Any) -> Segment:
def _get_segment_for_constant(var_type: SegmentType, original_value: LoopValue | None) -> Segment:
"""Get the appropriate segment type for a constant value."""
# TODO: Refactor for maintainability:
# 1. Ensure type handling logic stays synchronized with _VALID_VAR_TYPE (entities.py)
@@ -389,11 +406,15 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]):
SegmentType.ARRAY_OBJECT,
SegmentType.ARRAY_STRING,
]:
if original_value and isinstance(original_value, str):
value = json.loads(original_value)
else:
logger.warning("unexpected value for LoopNode, value_type=%s, value=%s", original_value, var_type)
# New typed payloads may already provide native lists, while legacy
# configs still serialize array constants as JSON strings.
if isinstance(original_value, str):
value = json.loads(original_value) if original_value else []
elif original_value is None:
# Preserve legacy behavior: treat missing/empty array constants as [].
value = []
else:
value = original_value
else:
raise AssertionError("this statement should be unreachable.")
try:

View File

@@ -1,4 +1,4 @@
from typing import Annotated, Any, Literal
from typing import Annotated, Literal
from pydantic import (
BaseModel,
@@ -6,6 +6,7 @@ from pydantic import (
Field,
field_validator,
)
from typing_extensions import TypedDict
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from dify_graph.entities.base_node_data import BaseNodeData
@@ -55,7 +56,7 @@ class ParameterConfig(BaseModel):
@field_validator("name", mode="before")
@classmethod
def validate_name(cls, value) -> str:
def validate_name(cls, value: object) -> str:
if not value:
raise ValueError("Parameter name is required")
if value in {"__reason", "__is_success"}:
@@ -79,6 +80,23 @@ class ParameterConfig(BaseModel):
return element_type
class JsonSchemaArrayItems(TypedDict):
    """JSON Schema ``items`` entry describing an array property's element type."""

    # JSON Schema type name of the array elements, e.g. "string" or "number".
    type: str
class ParameterJsonSchemaProperty(TypedDict, total=False):
    """JSON Schema for one extracted parameter (``total=False``: every key optional)."""

    # Human-readable description of the parameter.
    description: str
    # JSON Schema type name ("string", "number", "array", ...).
    type: str
    # Element schema, present only when ``type`` is "array".
    items: JsonSchemaArrayItems
    # Allowed values, present only when the parameter declares options.
    enum: list[str]
class ParameterJsonSchema(TypedDict):
    """Top-level object schema built from node parameters (used as tool parameters)."""

    # Always the literal "object" for the parameter container.
    type: Literal["object"]
    # Per-parameter schema keyed by parameter name.
    properties: dict[str, ParameterJsonSchemaProperty]
    # Names of parameters that must be present.
    required: list[str]
class ParameterExtractorNodeData(BaseNodeData):
"""
Parameter Extractor Node Data.
@@ -95,19 +113,19 @@ class ParameterExtractorNodeData(BaseNodeData):
@field_validator("reasoning_mode", mode="before")
@classmethod
def set_reasoning_mode(cls, v) -> str:
return v or "function_call"
def set_reasoning_mode(cls, v: object) -> str:
return str(v) if v else "function_call"
def get_parameter_json_schema(self):
def get_parameter_json_schema(self) -> ParameterJsonSchema:
"""
Get parameter json schema.
:return: parameter json schema
"""
parameters: dict[str, Any] = {"type": "object", "properties": {}, "required": []}
parameters: ParameterJsonSchema = {"type": "object", "properties": {}, "required": []}
for parameter in self.parameters:
parameter_schema: dict[str, Any] = {"description": parameter.description}
parameter_schema: ParameterJsonSchemaProperty = {"description": parameter.description}
if parameter.type == SegmentType.STRING:
parameter_schema["type"] = "string"
@@ -118,7 +136,7 @@ class ParameterExtractorNodeData(BaseNodeData):
raise AssertionError("element type should not be None.")
parameter_schema["items"] = {"type": element_type.value}
else:
parameter_schema["type"] = parameter.type
parameter_schema["type"] = parameter.type.value
if parameter.options:
parameter_schema["enum"] = parameter.options

View File

@@ -5,6 +5,8 @@ import uuid
from collections.abc import Mapping, Sequence
from typing import TYPE_CHECKING, Any, cast
from pydantic import TypeAdapter
from core.model_manager import ModelInstance
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate
@@ -63,6 +65,7 @@ from .prompts import (
)
logger = logging.getLogger(__name__)
_JSON_OBJECT_ADAPTER = TypeAdapter(dict[str, object])
if TYPE_CHECKING:
from dify_graph.entities import GraphInitParams
@@ -70,7 +73,7 @@ if TYPE_CHECKING:
from dify_graph.runtime import GraphRuntimeState
def extract_json(text):
def extract_json(text: str) -> str | None:
"""
From a given JSON started from '{' or '[' extract the complete JSON object.
"""
@@ -396,10 +399,15 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
)
# generate tool
parameter_schema = node_data.get_parameter_json_schema()
tool = PromptMessageTool(
name=FUNCTION_CALLING_EXTRACTOR_NAME,
description="Extract parameters from the natural language text",
parameters=node_data.get_parameter_json_schema(),
parameters={
"type": parameter_schema["type"],
"properties": dict(parameter_schema["properties"]),
"required": list(parameter_schema["required"]),
},
)
return prompt_messages, [tool]
@@ -606,19 +614,21 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
else:
return None
def _transform_result(self, data: ParameterExtractorNodeData, result: dict):
def _transform_result(self, data: ParameterExtractorNodeData, result: Mapping[str, object]) -> dict[str, object]:
"""
Transform result into standard format.
"""
transformed_result: dict[str, Any] = {}
transformed_result: dict[str, object] = {}
for parameter in data.parameters:
if parameter.name in result:
param_value = result[parameter.name]
# transform value
if parameter.type == SegmentType.NUMBER:
transformed = self._transform_number(param_value)
if transformed is not None:
transformed_result[parameter.name] = transformed
if isinstance(param_value, (bool, int, float, str)):
numeric_value: bool | int | float | str = param_value
transformed = self._transform_number(numeric_value)
if transformed is not None:
transformed_result[parameter.name] = transformed
elif parameter.type == SegmentType.BOOLEAN:
if isinstance(result[parameter.name], (bool, int)):
transformed_result[parameter.name] = bool(result[parameter.name])
@@ -665,7 +675,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
return transformed_result
def _extract_complete_json_response(self, result: str) -> dict | None:
def _extract_complete_json_response(self, result: str) -> dict[str, object] | None:
"""
Extract complete json response.
"""
@@ -676,11 +686,11 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
json_str = extract_json(result[idx:])
if json_str:
with contextlib.suppress(Exception):
return cast(dict, json.loads(json_str))
return _JSON_OBJECT_ADAPTER.validate_python(json.loads(json_str))
logger.info("extra error: %s", result)
return None
def _extract_json_from_tool_call(self, tool_call: AssistantPromptMessage.ToolCall) -> dict | None:
def _extract_json_from_tool_call(self, tool_call: AssistantPromptMessage.ToolCall) -> dict[str, object] | None:
"""
Extract json from tool call.
"""
@@ -694,16 +704,16 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]):
json_str = extract_json(result[idx:])
if json_str:
with contextlib.suppress(Exception):
return cast(dict, json.loads(json_str))
return _JSON_OBJECT_ADAPTER.validate_python(json.loads(json_str))
logger.info("extra error: %s", result)
return None
def _generate_default_result(self, data: ParameterExtractorNodeData):
def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict[str, object]:
"""
Generate default result.
"""
result: dict[str, Any] = {}
result: dict[str, object] = {}
for parameter in data.parameters:
if parameter.type == "number":
result[parameter.name] = 0

View File

@@ -1,12 +1,66 @@
from typing import Any, Literal, Union
from __future__ import annotations
from pydantic import BaseModel, field_validator
from typing import Literal, TypeAlias, cast
from pydantic import BaseModel, TypeAdapter, field_validator
from pydantic_core.core_schema import ValidationInfo
from typing_extensions import TypedDict
from core.tools.entities.tool_entities import ToolProviderType
from dify_graph.entities.base_node_data import BaseNodeData
from dify_graph.enums import BuiltinNodeTypes, NodeType
# Primitive scalar values allowed for plain tool configuration entries.
ToolConfigurationValue: TypeAlias = str | int | float | bool
# Constant tool-input values may additionally be JSON objects/arrays or null.
ToolInputConstantValue: TypeAlias = str | int | float | bool | dict[str, object] | list[object] | None
# Selector path used to look a value up in the variable pool.
VariableSelector: TypeAlias = list[str]

# TypeAdapters are built once at module load and reused for every validation.
# "mixed" inputs must be strings; see ToolInputPayload.validate_value.
_TOOL_INPUT_MIXED_ADAPTER: TypeAdapter[str] = TypeAdapter(str)
_TOOL_INPUT_CONSTANT_ADAPTER: TypeAdapter[ToolInputConstantValue] = TypeAdapter(ToolInputConstantValue)
_VARIABLE_SELECTOR_ADAPTER: TypeAdapter[VariableSelector] = TypeAdapter(VariableSelector)
class WorkflowToolInputValue(TypedDict):
    """Serialized (dict) form of a workflow tool input; mirrors ToolInputPayload's fields."""

    # Discriminator: "mixed" (template string), "variable" (selector), or "constant".
    type: Literal["mixed", "variable", "constant"]
    # Constant payload or a variable-selector path, depending on ``type``.
    value: ToolInputConstantValue | VariableSelector
# A configuration entry is either a bare primitive or a serialized workflow tool input.
ToolConfigurationEntry: TypeAlias = ToolConfigurationValue | WorkflowToolInputValue
# Full tool configuration mapping, keyed by configuration name.
ToolConfigurations: TypeAlias = dict[str, ToolConfigurationEntry]
class ToolInputPayload(BaseModel):
type: Literal["mixed", "variable", "constant"]
value: ToolInputConstantValue | VariableSelector
@field_validator("value", mode="before")
@classmethod
def validate_value(
cls, value: object, validation_info: ValidationInfo
) -> ToolInputConstantValue | VariableSelector:
input_type = validation_info.data.get("type")
if input_type == "mixed":
return _TOOL_INPUT_MIXED_ADAPTER.validate_python(value)
if input_type == "variable":
return _VARIABLE_SELECTOR_ADAPTER.validate_python(value)
if input_type == "constant":
return _TOOL_INPUT_CONSTANT_ADAPTER.validate_python(value)
raise ValueError(f"Unknown tool input type: {input_type}")
def require_variable_selector(self) -> VariableSelector:
if self.type != "variable":
raise ValueError(f"Expected variable tool input, got {self.type}")
return _VARIABLE_SELECTOR_ADAPTER.validate_python(self.value)
def _validate_tool_configuration_entry(value: object) -> ToolConfigurationEntry:
if isinstance(value, (str, int, float, bool)):
return cast(ToolConfigurationEntry, value)
if isinstance(value, dict):
return cast(ToolConfigurationEntry, ToolInputPayload.model_validate(value).model_dump())
raise TypeError("Tool configuration values must be primitives or workflow tool input objects")
class ToolEntity(BaseModel):
provider_id: str
@@ -14,52 +68,29 @@ class ToolEntity(BaseModel):
provider_name: str # redundancy
tool_name: str
tool_label: str # redundancy
tool_configurations: dict[str, Any]
tool_configurations: ToolConfigurations
credential_id: str | None = None
plugin_unique_identifier: str | None = None # redundancy
@field_validator("tool_configurations", mode="before")
@classmethod
def validate_tool_configurations(cls, value, values: ValidationInfo):
def validate_tool_configurations(cls, value: object, _validation_info: ValidationInfo) -> ToolConfigurations:
if not isinstance(value, dict):
raise ValueError("tool_configurations must be a dictionary")
raise TypeError("tool_configurations must be a dictionary")
for key in values.data.get("tool_configurations", {}):
value = values.data.get("tool_configurations", {}).get(key)
if not isinstance(value, str | int | float | bool):
raise ValueError(f"{key} must be a string")
return value
normalized: ToolConfigurations = {}
for key, item in value.items():
if not isinstance(key, str):
raise TypeError("tool_configurations keys must be strings")
normalized[key] = _validate_tool_configuration_entry(item)
return normalized
class ToolNodeData(BaseNodeData, ToolEntity):
type: NodeType = BuiltinNodeTypes.TOOL
class ToolInput(BaseModel):
# TODO: check this type
value: Union[Any, list[str]]
type: Literal["mixed", "variable", "constant"]
@field_validator("type", mode="before")
@classmethod
def check_type(cls, value, validation_info: ValidationInfo):
typ = value
value = validation_info.data.get("value")
if value is None:
return typ
if typ == "mixed" and not isinstance(value, str):
raise ValueError("value must be a string")
elif typ == "variable":
if not isinstance(value, list):
raise ValueError("value must be a list")
for val in value:
if not isinstance(val, str):
raise ValueError("value must be a list of strings")
elif typ == "constant" and not isinstance(value, (allowed_types := (str, int, float, bool, dict, list))):
raise ValueError(f"value must be one of: {', '.join(t.__name__ for t in allowed_types)}")
return typ
class ToolInput(ToolInputPayload):
pass
tool_parameters: dict[str, ToolInput]
# The version of the tool parameter.
@@ -69,7 +100,7 @@ class ToolNodeData(BaseNodeData, ToolEntity):
@field_validator("tool_parameters", mode="before")
@classmethod
def filter_none_tool_inputs(cls, value):
def filter_none_tool_inputs(cls, value: object) -> object:
if not isinstance(value, dict):
return value
@@ -80,8 +111,10 @@ class ToolNodeData(BaseNodeData, ToolEntity):
}
@staticmethod
def _has_valid_value(tool_input):
def _has_valid_value(tool_input: object) -> bool:
"""Check if the value is valid"""
if isinstance(tool_input, dict):
return tool_input.get("value") is not None
return getattr(tool_input, "value", None) is not None
if isinstance(tool_input, ToolNodeData.ToolInput):
return tool_input.value is not None
return False

View File

@@ -225,10 +225,11 @@ class ToolNode(Node[ToolNodeData]):
continue
tool_input = node_data.tool_parameters[parameter_name]
if tool_input.type == "variable":
variable = variable_pool.get(tool_input.value)
variable_selector = tool_input.require_variable_selector()
variable = variable_pool.get(variable_selector)
if variable is None:
if parameter.required:
raise ToolParameterError(f"Variable {tool_input.value} does not exist")
raise ToolParameterError(f"Variable {variable_selector} does not exist")
continue
parameter_value = variable.value
elif tool_input.type in {"mixed", "constant"}:
@@ -510,8 +511,9 @@ class ToolNode(Node[ToolNodeData]):
for selector in selectors:
result[selector.variable] = selector.value_selector
case "variable":
selector_key = ".".join(input.value)
result[f"#{selector_key}#"] = input.value
variable_selector = input.require_variable_selector()
selector_key = ".".join(variable_selector)
result[f"#{selector_key}#"] = variable_selector
case "constant":
pass

View File

@@ -9,7 +9,7 @@ from dify_graph.node_events import NodeRunResult
from dify_graph.nodes.base.node import Node
from dify_graph.nodes.variable_assigner.common import helpers as common_helpers
from dify_graph.nodes.variable_assigner.common.exc import VariableOperatorNodeError
from dify_graph.variables import SegmentType, VariableBase
from dify_graph.variables import Segment, SegmentType, VariableBase
from .node_data import VariableAssignerData, WriteMode
@@ -74,23 +74,29 @@ class VariableAssignerNode(Node[VariableAssignerData]):
if not isinstance(original_variable, VariableBase):
raise VariableOperatorNodeError("assigned variable not found")
income_value: Segment
updated_variable: VariableBase
match self.node_data.write_mode:
case WriteMode.OVER_WRITE:
income_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
if not income_value:
input_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
if input_value is None:
raise VariableOperatorNodeError("input value not found")
income_value = input_value
updated_variable = original_variable.model_copy(update={"value": income_value.value})
case WriteMode.APPEND:
income_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
if not income_value:
input_value = self.graph_runtime_state.variable_pool.get(self.node_data.input_variable_selector)
if input_value is None:
raise VariableOperatorNodeError("input value not found")
income_value = input_value
updated_value = original_variable.value + [income_value.value]
updated_variable = original_variable.model_copy(update={"value": updated_value})
case WriteMode.CLEAR:
income_value = SegmentType.get_zero_value(original_variable.value_type)
updated_variable = original_variable.model_copy(update={"value": income_value.to_object()})
case _:
raise VariableOperatorNodeError(f"unsupported write mode: {self.node_data.write_mode}")
# Over write the variable.
self.graph_runtime_state.variable_pool.add(assigned_variable_selector, updated_variable)

View File

@@ -66,6 +66,11 @@ class GraphExecutionProtocol(Protocol):
exceptions_count: int
pause_reasons: list[PauseReason]
@property
def node_executions(self) -> Mapping[str, NodeExecutionProtocol]:
"""Return node execution state keyed by node id for resume support."""
...
def start(self) -> None:
"""Transition execution into the running state."""
...
@@ -91,6 +96,12 @@ class GraphExecutionProtocol(Protocol):
...
class NodeExecutionProtocol(Protocol):
"""Structural interface for per-node execution state used during resume."""
execution_id: str | None
class ResponseStreamCoordinatorProtocol(Protocol):
"""Structural interface for response stream coordinator."""

View File

@@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.13.3"
version = "1.13.2"
requires-python = ">=3.11,<3.13"
dependencies = [

View File

@@ -13,21 +13,6 @@ controllers/console/workspace/trigger_providers.py
controllers/service_api/app/annotation.py
controllers/web/workflow_events.py
core/agent/fc_agent_runner.py
core/app/apps/advanced_chat/app_generator.py
core/app/apps/advanced_chat/app_runner.py
core/app/apps/advanced_chat/generate_task_pipeline.py
core/app/apps/agent_chat/app_generator.py
core/app/apps/base_app_generate_response_converter.py
core/app/apps/base_app_generator.py
core/app/apps/chat/app_generator.py
core/app/apps/common/workflow_response_converter.py
core/app/apps/completion/app_generator.py
core/app/apps/pipeline/pipeline_generator.py
core/app/apps/pipeline/pipeline_runner.py
core/app/apps/workflow/app_generator.py
core/app/apps/workflow/app_runner.py
core/app/apps/workflow/generate_task_pipeline.py
core/app/apps/workflow_app_runner.py
core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
core/datasource/datasource_manager.py
core/external_data_tool/api/api.py
@@ -108,35 +93,6 @@ core/tools/workflow_as_tool/provider.py
core/trigger/debug/event_selectors.py
core/trigger/entities/entities.py
core/trigger/provider.py
core/workflow/workflow_entry.py
dify_graph/entities/workflow_execution.py
dify_graph/file/file_manager.py
dify_graph/graph_engine/error_handler.py
dify_graph/graph_engine/layers/execution_limits.py
dify_graph/nodes/agent/agent_node.py
dify_graph/nodes/base/node.py
dify_graph/nodes/code/code_node.py
dify_graph/nodes/datasource/datasource_node.py
dify_graph/nodes/document_extractor/node.py
dify_graph/nodes/human_input/human_input_node.py
dify_graph/nodes/if_else/if_else_node.py
dify_graph/nodes/iteration/iteration_node.py
dify_graph/nodes/knowledge_index/knowledge_index_node.py
core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py
dify_graph/nodes/list_operator/node.py
dify_graph/nodes/llm/node.py
dify_graph/nodes/loop/loop_node.py
dify_graph/nodes/parameter_extractor/parameter_extractor_node.py
dify_graph/nodes/question_classifier/question_classifier_node.py
dify_graph/nodes/start/start_node.py
dify_graph/nodes/template_transform/template_transform_node.py
dify_graph/nodes/tool/tool_node.py
dify_graph/nodes/trigger_plugin/trigger_event_node.py
dify_graph/nodes/trigger_schedule/trigger_schedule_node.py
dify_graph/nodes/trigger_webhook/node.py
dify_graph/nodes/variable_aggregator/variable_aggregator_node.py
dify_graph/nodes/variable_assigner/v1/node.py
dify_graph/nodes/variable_assigner/v2/node.py
extensions/logstore/repositories/logstore_api_workflow_run_repository.py
extensions/otel/instrumentation.py
extensions/otel/runtime.py

View File

@@ -163,9 +163,11 @@ class DifyTestContainers:
wait_for_logs(self.redis, "Ready to accept connections", timeout=30)
logger.info("Redis container is ready and accepting connections")
# Start Dify Sandbox container for code execution environment.
# Start Dify Sandbox container for code execution environment
# Dify Sandbox provides a secure environment for executing user code
# Use pinned version 0.2.12 to match production docker-compose configuration
logger.info("Initializing Dify Sandbox container...")
self.dify_sandbox = DockerContainer(image="langgenius/dify-sandbox:0.2.14").with_network(self.network)
self.dify_sandbox = DockerContainer(image="langgenius/dify-sandbox:0.2.12").with_network(self.network)
self.dify_sandbox.with_exposed_ports(8194)
self.dify_sandbox.env = {
"API_KEY": "test_api_key",
@@ -185,7 +187,7 @@ class DifyTestContainers:
# Start Dify Plugin Daemon container for plugin management
# Dify Plugin Daemon provides plugin lifecycle management and execution
logger.info("Initializing Dify Plugin Daemon container...")
self.dify_plugin_daemon = DockerContainer(image="langgenius/dify-plugin-daemon:0.5.3-local").with_network(
self.dify_plugin_daemon = DockerContainer(image="langgenius/dify-plugin-daemon:0.5.4-local").with_network(
self.network
)
self.dify_plugin_daemon.with_exposed_ports(5002)

View File

@@ -1,245 +0,0 @@
"""Unit tests for inner_api app DSL import/export endpoints.
Tests Pydantic model validation, endpoint handler logic, and the
_get_active_account helper. Auth/setup decorators are tested separately
in test_auth_wraps.py; handler tests use inspect.unwrap() to bypass them.
"""
import inspect
from unittest.mock import MagicMock, patch
import pytest
from flask import Flask
from pydantic import ValidationError
from controllers.inner_api.app.dsl import (
EnterpriseAppDSLExport,
EnterpriseAppDSLImport,
InnerAppDSLImportPayload,
_get_active_account,
)
from services.app_dsl_service import ImportStatus
class TestInnerAppDSLImportPayload:
    """Validation tests for the InnerAppDSLImportPayload Pydantic model."""

    def test_valid_payload_all_fields(self):
        raw = {
            "yaml_content": "version: 0.6.0\nkind: app\n",
            "creator_email": "user@example.com",
            "name": "My App",
            "description": "A test app",
        }
        parsed = InnerAppDSLImportPayload.model_validate(raw)
        assert parsed.yaml_content == raw["yaml_content"]
        assert parsed.creator_email == "user@example.com"
        assert parsed.name == "My App"
        assert parsed.description == "A test app"

    def test_valid_payload_optional_fields_omitted(self):
        # name/description default to None when not supplied.
        parsed = InnerAppDSLImportPayload.model_validate(
            {
                "yaml_content": "version: 0.6.0\n",
                "creator_email": "user@example.com",
            }
        )
        assert parsed.name is None
        assert parsed.description is None

    def test_missing_yaml_content_fails(self):
        with pytest.raises(ValidationError) as exc_info:
            InnerAppDSLImportPayload.model_validate({"creator_email": "a@b.com"})
        assert "yaml_content" in str(exc_info.value)

    def test_missing_creator_email_fails(self):
        with pytest.raises(ValidationError) as exc_info:
            InnerAppDSLImportPayload.model_validate({"yaml_content": "test"})
        assert "creator_email" in str(exc_info.value)
class TestGetActiveAccount:
    """Behavioral tests for the _get_active_account helper."""

    @patch("controllers.inner_api.app.dsl.db")
    def test_returns_active_account(self, mock_db):
        account = MagicMock()
        account.status = "active"
        filter_by = mock_db.session.query.return_value.filter_by
        filter_by.return_value.first.return_value = account

        assert _get_active_account("user@example.com") is account
        # The lookup must be keyed by the given email.
        filter_by.assert_called_once_with(email="user@example.com")

    @patch("controllers.inner_api.app.dsl.db")
    def test_returns_none_for_inactive_account(self, mock_db):
        account = MagicMock()
        account.status = "banned"
        mock_db.session.query.return_value.filter_by.return_value.first.return_value = account

        assert _get_active_account("banned@example.com") is None

    @patch("controllers.inner_api.app.dsl.db")
    def test_returns_none_for_nonexistent_email(self, mock_db):
        mock_db.session.query.return_value.filter_by.return_value.first.return_value = None

        assert _get_active_account("missing@example.com") is None
class TestEnterpriseAppDSLImport:
    """Test EnterpriseAppDSLImport endpoint handler logic.

    Uses inspect.unwrap() to bypass auth/setup decorators.
    """

    @pytest.fixture
    def api_instance(self):
        # Fresh resource instance per test.
        return EnterpriseAppDSLImport()

    @pytest.fixture
    def _mock_import_deps(self):
        """Patch db, Session, and AppDslService for import handler tests."""
        with (
            patch("controllers.inner_api.app.dsl.db"),
            patch("controllers.inner_api.app.dsl.Session") as mock_session,
            patch("controllers.inner_api.app.dsl.AppDslService") as mock_dsl_cls,
        ):
            # Make Session() usable as a context manager inside the handler.
            mock_session.return_value.__enter__ = MagicMock(return_value=MagicMock())
            mock_session.return_value.__exit__ = MagicMock(return_value=False)
            # Expose the service mock to the test via an instance attribute.
            self._mock_dsl = MagicMock()
            mock_dsl_cls.return_value = self._mock_dsl
            yield

    def _make_import_result(self, status: ImportStatus, **kwargs) -> "Import":
        # NOTE(review): local import — presumably to avoid import-time side
        # effects/cycles at collection; confirm before moving to module level.
        from services.app_dsl_service import Import

        result = Import(
            id="import-id",
            status=status,
            app_id=kwargs.get("app_id", "app-123"),
            app_mode=kwargs.get("app_mode", "workflow"),
        )
        return result

    @pytest.mark.usefixtures("_mock_import_deps")
    @patch("controllers.inner_api.app.dsl._get_active_account")
    def test_import_success_returns_200(self, mock_get_account, api_instance, app: Flask):
        mock_account = MagicMock()
        mock_get_account.return_value = mock_account
        self._mock_dsl.import_app.return_value = self._make_import_result(ImportStatus.COMPLETED)
        # Bypass the auth/setup decorators and call the raw handler.
        unwrapped = inspect.unwrap(api_instance.post)
        with app.test_request_context():
            with patch("controllers.inner_api.app.dsl.inner_api_ns") as mock_ns:
                mock_ns.payload = {
                    "yaml_content": "version: 0.6.0\n",
                    "creator_email": "user@example.com",
                }
                result = unwrapped(api_instance, workspace_id="ws-123")
        body, status_code = result
        assert status_code == 200
        assert body["status"] == "completed"
        # The handler must bind the resolved account to the target workspace.
        mock_account.set_tenant_id.assert_called_once_with("ws-123")

    @pytest.mark.usefixtures("_mock_import_deps")
    @patch("controllers.inner_api.app.dsl._get_active_account")
    def test_import_pending_returns_202(self, mock_get_account, api_instance, app: Flask):
        mock_get_account.return_value = MagicMock()
        self._mock_dsl.import_app.return_value = self._make_import_result(ImportStatus.PENDING)
        unwrapped = inspect.unwrap(api_instance.post)
        with app.test_request_context():
            with patch("controllers.inner_api.app.dsl.inner_api_ns") as mock_ns:
                mock_ns.payload = {"yaml_content": "test", "creator_email": "u@e.com"}
                body, status_code = unwrapped(api_instance, workspace_id="ws-123")
        # Pending imports are accepted but not complete.
        assert status_code == 202
        assert body["status"] == "pending"

    @pytest.mark.usefixtures("_mock_import_deps")
    @patch("controllers.inner_api.app.dsl._get_active_account")
    def test_import_failed_returns_400(self, mock_get_account, api_instance, app: Flask):
        mock_get_account.return_value = MagicMock()
        self._mock_dsl.import_app.return_value = self._make_import_result(ImportStatus.FAILED)
        unwrapped = inspect.unwrap(api_instance.post)
        with app.test_request_context():
            with patch("controllers.inner_api.app.dsl.inner_api_ns") as mock_ns:
                mock_ns.payload = {"yaml_content": "test", "creator_email": "u@e.com"}
                body, status_code = unwrapped(api_instance, workspace_id="ws-123")
        assert status_code == 400
        assert body["status"] == "failed"

    @patch("controllers.inner_api.app.dsl._get_active_account")
    def test_import_account_not_found_returns_404(self, mock_get_account, api_instance, app: Flask):
        # No _mock_import_deps here: the handler should fail before using them.
        mock_get_account.return_value = None
        unwrapped = inspect.unwrap(api_instance.post)
        with app.test_request_context():
            with patch("controllers.inner_api.app.dsl.inner_api_ns") as mock_ns:
                mock_ns.payload = {"yaml_content": "test", "creator_email": "missing@e.com"}
                result = unwrapped(api_instance, workspace_id="ws-123")
        body, status_code = result
        assert status_code == 404
        # The error message should name the missing account's email.
        assert "missing@e.com" in body["message"]
class TestEnterpriseAppDSLExport:
    """Test EnterpriseAppDSLExport endpoint handler logic.

    Uses inspect.unwrap() to bypass auth/setup decorators.
    """

    @pytest.fixture
    def api_instance(self):
        return EnterpriseAppDSLExport()

    # Stacked @patch decorators inject mocks bottom-up: db first, then AppDslService.
    @patch("controllers.inner_api.app.dsl.AppDslService")
    @patch("controllers.inner_api.app.dsl.db")
    def test_export_success_returns_200(self, mock_db, mock_dsl_cls, api_instance, app: Flask):
        mock_app = MagicMock()
        mock_db.session.query.return_value.filter_by.return_value.first.return_value = mock_app
        mock_dsl_cls.export_dsl.return_value = "version: 0.6.0\nkind: app\n"
        # Bypass the auth/setup decorators and call the raw handler.
        unwrapped = inspect.unwrap(api_instance.get)
        with app.test_request_context("?include_secret=false"):
            result = unwrapped(api_instance, app_id="app-123")
        body, status_code = result
        assert status_code == 200
        assert body["data"] == "version: 0.6.0\nkind: app\n"
        mock_dsl_cls.export_dsl.assert_called_once_with(app_model=mock_app, include_secret=False)

    @patch("controllers.inner_api.app.dsl.AppDslService")
    @patch("controllers.inner_api.app.dsl.db")
    def test_export_with_secret(self, mock_db, mock_dsl_cls, api_instance, app: Flask):
        mock_app = MagicMock()
        mock_db.session.query.return_value.filter_by.return_value.first.return_value = mock_app
        mock_dsl_cls.export_dsl.return_value = "yaml-data"
        unwrapped = inspect.unwrap(api_instance.get)
        # include_secret=true must be forwarded to the service layer.
        with app.test_request_context("?include_secret=true"):
            result = unwrapped(api_instance, app_id="app-123")
        body, status_code = result
        assert status_code == 200
        mock_dsl_cls.export_dsl.assert_called_once_with(app_model=mock_app, include_secret=True)

    @patch("controllers.inner_api.app.dsl.db")
    def test_export_app_not_found_returns_404(self, mock_db, api_instance, app: Flask):
        # No app row in the database -> handler should 404.
        mock_db.session.query.return_value.filter_by.return_value.first.return_value = None
        unwrapped = inspect.unwrap(api_instance.get)
        with app.test_request_context("?include_secret=false"):
            result = unwrapped(api_instance, app_id="nonexistent")
        body, status_code = result
        assert status_code == 404
        assert "app not found" in body["message"]

View File

@@ -1013,7 +1013,7 @@ class TestAdvancedChatAppGeneratorInternals:
monkeypatch.setattr("core.app.apps.advanced_chat.app_generator.Session", _Session)
monkeypatch.setattr("core.app.apps.advanced_chat.app_generator.db", SimpleNamespace(engine=object()))
refreshed = _refresh_model(session=SimpleNamespace(), model=source_model)
refreshed = _refresh_model(session=None, model=source_model)
assert refreshed is detached_model

View File

@@ -0,0 +1,110 @@
from collections.abc import Iterator
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.entities.task_entities import AppBlockingResponse
from core.errors.error import QuotaExceededError
class DummyResponseConverter(AppGenerateResponseConverter):
    """Minimal concrete converter used to exercise the base class's routing."""

    _blocking_response_type = AppBlockingResponse

    @classmethod
    def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, str]:
        # Tag the payload so tests can tell which branch convert() picked.
        payload = {"mode": "blocking-full"}
        payload["task_id"] = blocking_response.task_id
        return payload

    @classmethod
    def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, str]:
        payload = {"mode": "blocking-simple"}
        payload["task_id"] = blocking_response.task_id
        return payload

    @classmethod
    def convert_stream_full_response(cls, stream_response: Iterator[object]):
        # Emit one marker dict per upstream event.
        yield from ({"mode": "stream-full"} for _ in stream_response)

    @classmethod
    def convert_stream_simple_response(cls, stream_response: Iterator[object]):
        yield from ({"mode": "stream-simple"} for _ in stream_response)
def test_convert_routes_to_full_or_simple_modes() -> None:
blocking = AppBlockingResponse(task_id="task-1")
assert DummyResponseConverter.convert(blocking, InvokeFrom.DEBUGGER) == {
"mode": "blocking-full",
"task_id": "task-1",
}
assert DummyResponseConverter.convert(blocking, InvokeFrom.WEB_APP) == {
"mode": "blocking-simple",
"task_id": "task-1",
}
assert list(DummyResponseConverter.convert(iter([object()]), InvokeFrom.SERVICE_API)) == [{"mode": "stream-full"}]
assert list(DummyResponseConverter.convert(iter([object()]), InvokeFrom.WEB_APP)) == [{"mode": "stream-simple"}]
def test_get_simple_metadata_preserves_new_retriever_fields() -> None:
metadata = {
"retriever_resources": [
{
"dataset_id": "dataset-1",
"dataset_name": "Dataset",
"document_id": "document-1",
"segment_id": "segment-1",
"position": 1,
"data_source_type": "upload_file",
"document_name": "Document",
"score": 0.9,
"hit_count": 2,
"word_count": 128,
"segment_position": 3,
"index_node_hash": "hash",
"content": "content",
"page": 5,
"title": "Title",
"files": [{"id": "file-1"}],
"summary": "summary",
}
],
"annotation_reply": "hidden",
"usage": {"latency": 0.1},
}
result = DummyResponseConverter._get_simple_metadata(metadata)
assert result == {
"retriever_resources": [
{
"dataset_id": "dataset-1",
"dataset_name": "Dataset",
"document_id": "document-1",
"segment_id": "segment-1",
"position": 1,
"data_source_type": "upload_file",
"document_name": "Document",
"score": 0.9,
"hit_count": 2,
"word_count": 128,
"segment_position": 3,
"index_node_hash": "hash",
"content": "content",
"page": 5,
"title": "Title",
"files": [{"id": "file-1"}],
"summary": "summary",
}
]
}
def test_error_to_stream_response_uses_specific_and_fallback_mappings() -> None:
quota_response = DummyResponseConverter._error_to_stream_response(QuotaExceededError())
fallback_response = DummyResponseConverter._error_to_stream_response(RuntimeError("boom"))
assert quota_response["code"] == "provider_quota_exceeded"
assert quota_response["status"] == 400
assert fallback_response == {
"code": "internal_server_error",
"message": "Internal Server Error, please contact support.",
"status": 500,
}

View File

@@ -33,6 +33,79 @@ from dify_graph.system_variable import SystemVariable
class TestWorkflowBasedAppRunner:
def test_get_graph_items_rejects_non_mapping_entries(self):
with pytest.raises(ValueError, match="nodes in workflow graph must be mappings"):
WorkflowBasedAppRunner._get_graph_items({"nodes": ["bad"], "edges": []})
with pytest.raises(ValueError, match="edges in workflow graph must be mappings"):
WorkflowBasedAppRunner._get_graph_items({"nodes": [], "edges": ["bad"]})
def test_extract_start_node_id_handles_missing_and_invalid_values(self):
assert WorkflowBasedAppRunner._extract_start_node_id(None) is None
assert WorkflowBasedAppRunner._extract_start_node_id({"data": "invalid"}) is None
assert WorkflowBasedAppRunner._extract_start_node_id({"data": {"start_node_id": 123}}) is None
assert WorkflowBasedAppRunner._extract_start_node_id({"data": {"start_node_id": "start-node"}}) == "start-node"
def test_build_single_node_graph_config_keeps_target_related_and_start_nodes(self):
graph_config, target_node_config = WorkflowBasedAppRunner._build_single_node_graph_config(
graph_config={
"nodes": [
{"id": "start-node", "data": {"type": "start", "version": "1"}},
{
"id": "loop-node",
"data": {"type": "loop", "version": "1", "start_node_id": "start-node"},
},
{
"id": "loop-child",
"data": {"type": "answer", "version": "1", "loop_id": "loop-node"},
},
{"id": "outside-node", "data": {"type": "answer", "version": "1"}},
],
"edges": [
{"source": "start-node", "target": "loop-node"},
{"source": "loop-node", "target": "loop-child"},
{"source": "loop-node", "target": "outside-node"},
],
},
node_id="loop-node",
node_type_filter_key="loop_id",
)
assert [node["id"] for node in graph_config["nodes"]] == ["start-node", "loop-node", "loop-child"]
assert graph_config["edges"] == [
{"source": "start-node", "target": "loop-node"},
{"source": "loop-node", "target": "loop-child"},
]
assert target_node_config["id"] == "loop-node"
def test_build_agent_strategy_info_validates_payload(self):
event = NodeRunStartedEvent(
id="exec",
node_id="node",
node_type=BuiltinNodeTypes.START,
node_title="Start",
start_at=datetime.utcnow(),
extras={"agent_strategy": {"name": "planner", "icon": "robot"}},
)
strategy = WorkflowBasedAppRunner._build_agent_strategy_info(event)
assert strategy is not None
assert strategy.name == "planner"
assert strategy.icon == "robot"
def test_build_agent_strategy_info_returns_none_for_invalid_payload(self):
event = NodeRunStartedEvent(
id="exec",
node_id="node",
node_type=BuiltinNodeTypes.START,
node_title="Start",
start_at=datetime.utcnow(),
extras={"agent_strategy": {"name": "planner", "extra": "ignored"}},
)
assert WorkflowBasedAppRunner._build_agent_strategy_info(event) is None
def test_resolve_user_from(self):
runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app")
@@ -174,6 +247,34 @@ class TestWorkflowBasedAppRunner:
assert paused_event.paused_nodes == ["node-1"]
assert emails
def test_enqueue_human_input_notifications_skips_invalid_reasons_and_logs_failures(self, monkeypatch):
runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app")
seen_calls: list[tuple[dict[str, object], str]] = []
class _Dispatch:
def apply_async(self, *, kwargs, queue):
seen_calls.append((kwargs, queue))
raise RuntimeError("boom")
logged: list[str] = []
monkeypatch.setattr("core.app.apps.workflow_app_runner.dispatch_human_input_email_task", _Dispatch())
monkeypatch.setattr(
"core.app.apps.workflow_app_runner.logger",
SimpleNamespace(exception=lambda message, form_id: logged.append(f"{message}:{form_id}")),
)
runner._enqueue_human_input_notifications(
[
object(),
HumanInputRequired(form_id="", form_content="content", node_id="node", node_title="Node"),
HumanInputRequired(form_id="form-1", form_content="content", node_id="node", node_title="Node"),
]
)
assert seen_calls == [({"form_id": "form-1", "node_title": "Node"}, "mail")]
assert logged == ["Failed to enqueue human input email task for form %s:form-1"]
def test_handle_node_events_publishes_queue_events(self):
published: list[object] = []

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from typing import Any
from copy import deepcopy
from unittest.mock import MagicMock, patch
import pytest
@@ -33,8 +33,8 @@ def _make_graph_state():
],
)
def test_run_uses_single_node_execution_branch(
single_iteration_run: Any,
single_loop_run: Any,
single_iteration_run: WorkflowAppGenerateEntity.SingleIterationRunEntity | None,
single_loop_run: WorkflowAppGenerateEntity.SingleLoopRunEntity | None,
) -> None:
app_config = MagicMock()
app_config.app_id = "app"
@@ -130,10 +130,23 @@ def test_single_node_run_validates_target_node_config(monkeypatch) -> None:
"break_conditions": [],
"logical_operator": "and",
},
},
{
"id": "other-node",
"data": {
"type": "answer",
"title": "Answer",
},
},
],
"edges": [
{
"source": "other-node",
"target": "loop-node",
}
],
"edges": [],
}
original_graph_dict = deepcopy(workflow.graph_dict)
_, _, graph_runtime_state = _make_graph_state()
seen_configs: list[object] = []
@@ -143,13 +156,19 @@ def test_single_node_run_validates_target_node_config(monkeypatch) -> None:
seen_configs.append(value)
return original_validate_python(value)
class FakeNodeClass:
@staticmethod
def extract_variable_selector_to_variable_mapping(**_kwargs):
return {}
monkeypatch.setattr(NodeConfigDictAdapter, "validate_python", record_validate_python)
with (
patch("core.app.apps.workflow_app_runner.DifyNodeFactory"),
patch("core.app.apps.workflow_app_runner.Graph.init", return_value=MagicMock()),
patch("core.app.apps.workflow_app_runner.Graph.init", return_value=MagicMock()) as graph_init,
patch("core.app.apps.workflow_app_runner.load_into_variable_pool"),
patch("core.app.apps.workflow_app_runner.WorkflowEntry.mapping_user_inputs_to_variable_pool"),
patch("core.app.apps.workflow_app_runner.resolve_workflow_node_class", return_value=FakeNodeClass),
):
runner._get_graph_and_variable_pool_for_single_node_run(
workflow=workflow,
@@ -161,3 +180,8 @@ def test_single_node_run_validates_target_node_config(monkeypatch) -> None:
)
assert seen_configs == [workflow.graph_dict["nodes"][0]]
assert workflow.graph_dict == original_graph_dict
graph_config = graph_init.call_args.kwargs["graph_config"]
assert graph_config is not workflow.graph_dict
assert graph_config["nodes"] == [workflow.graph_dict["nodes"][0]]
assert graph_config["edges"] == []

View File

@@ -0,0 +1,46 @@
import pytest
from pydantic import ValidationError
from core.workflow.nodes.agent.entities import AgentNodeData
def test_agent_input_accepts_variable_selector_and_mixed_values() -> None:
node_data = AgentNodeData.model_validate(
{
"title": "Agent",
"agent_strategy_provider_name": "provider",
"agent_strategy_name": "strategy",
"agent_strategy_label": "Strategy",
"agent_parameters": {
"query": {"type": "variable", "value": ["start", "query"]},
"tools": {"type": "mixed", "value": [{"provider": "builtin", "name": "search"}]},
},
}
)
assert node_data.agent_parameters["query"].value == ["start", "query"]
assert node_data.agent_parameters["tools"].value == [{"provider": "builtin", "name": "search"}]
def test_agent_input_rejects_invalid_variable_selector_and_unknown_type() -> None:
with pytest.raises(ValidationError):
AgentNodeData.model_validate(
{
"title": "Agent",
"agent_strategy_provider_name": "provider",
"agent_strategy_name": "strategy",
"agent_strategy_label": "Strategy",
"agent_parameters": {"query": {"type": "variable", "value": "start.query"}},
}
)
with pytest.raises(ValidationError, match="Unknown agent input type"):
AgentNodeData.model_validate(
{
"title": "Agent",
"agent_strategy_provider_name": "provider",
"agent_strategy_name": "strategy",
"agent_strategy_label": "Strategy",
"agent_parameters": {"query": {"type": "unsupported", "value": "hello"}},
}
)

View File

@@ -0,0 +1,125 @@
from types import SimpleNamespace
import pytest
from core.tools.entities.tool_entities import ToolProviderType
from core.workflow.nodes.agent.exceptions import AgentVariableNotFoundError
from core.workflow.nodes.agent.runtime_support import AgentRuntimeSupport
def test_filter_mcp_type_tool_depends_on_strategy_meta_version() -> None:
runtime_support = AgentRuntimeSupport()
tools = [
{"type": ToolProviderType.BUILT_IN, "tool_name": "search"},
{"type": ToolProviderType.MCP, "tool_name": "mcp-tool"},
]
filtered_tools = runtime_support._filter_mcp_type_tool(SimpleNamespace(meta_version="0.0.1"), tools)
preserved_tools = runtime_support._filter_mcp_type_tool(SimpleNamespace(meta_version="0.0.2"), tools)
assert filtered_tools == [{"type": ToolProviderType.BUILT_IN, "tool_name": "search"}]
assert preserved_tools == tools
def test_normalize_tool_payloads_keeps_enabled_tools_and_resolves_values() -> None:
runtime_support = AgentRuntimeSupport()
variable_pool = SimpleNamespace(get=lambda selector: SimpleNamespace(value=f"resolved:{'.'.join(selector)}"))
normalized_tools = runtime_support._normalize_tool_payloads(
strategy=SimpleNamespace(meta_version="0.0.2"),
tools=[
{
"enabled": True,
"tool_name": "search",
"schemas": {"ignored": True},
"parameters": {
"query": {
"auto": 0,
"value": {"type": "variable", "value": ["start", "query"]},
},
"top_k": {
"auto": 0,
"value": {"type": "constant", "value": 3},
},
"optional": {"auto": 1, "value": {"type": "constant", "value": "skip"}},
},
"settings": {
"region": {"value": "us"},
"safe": {"value": True},
},
},
{"enabled": False, "tool_name": "disabled"},
],
variable_pool=variable_pool,
)
assert normalized_tools == [
{
"enabled": True,
"tool_name": "search",
"parameters": {"query": "resolved:start.query", "top_k": 3, "optional": None},
"settings": {"region": "us", "safe": True},
}
]
def test_resolve_tool_parameters_raises_for_missing_variable() -> None:
runtime_support = AgentRuntimeSupport()
variable_pool = SimpleNamespace(get=lambda _selector: None)
with pytest.raises(AgentVariableNotFoundError, match=r"\['start', 'query'\]"):
runtime_support._resolve_tool_parameters(
tool={
"parameters": {
"query": {
"auto": 0,
"value": {"type": "variable", "value": ["start", "query"]},
}
}
},
variable_pool=variable_pool,
)
def test_build_credentials_collects_valid_tool_credentials_only() -> None:
runtime_support = AgentRuntimeSupport()
credentials = runtime_support.build_credentials(
parameters={
"tools": [
{
"credential_id": "cred-1",
"identity": {
"author": "author",
"name": "tool",
"label": {"en_US": "Tool"},
"provider": "provider-a",
},
},
{
"credential_id": "cred-2",
"identity": {"author": "author"},
},
{
"credential_id": None,
"identity": {
"author": "author",
"name": "tool",
"label": {"en_US": "Tool"},
"provider": "provider-b",
},
},
"invalid",
]
}
)
assert credentials.tool_credentials == {"provider-a": "cred-1"}
def test_coerce_named_json_objects_requires_string_keys_and_json_object_values() -> None:
runtime_support = AgentRuntimeSupport()
assert runtime_support._coerce_named_json_objects({"valid": {"value": 1}}) == {"valid": {"value": 1}}
assert runtime_support._coerce_named_json_objects({1: {"value": 1}}) is None
assert runtime_support._coerce_named_json_objects({"invalid": object()}) is None

View File

@@ -13,7 +13,9 @@ from core.model_manager import ModelInstance
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
from dify_graph.entities import GraphInitParams
from dify_graph.file import File, FileTransferMethod, FileType
from dify_graph.model_runtime.entities import LLMMode
from dify_graph.model_runtime.entities.common_entities import I18nObject
from dify_graph.model_runtime.entities.llm_entities import LLMResult, LLMResultWithStructuredOutput, LLMUsage
from dify_graph.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
@@ -55,6 +57,118 @@ class MockTokenBufferMemory:
return self.history_messages
def test_llm_node_data_normalizes_optional_configs_and_legacy_structured_output() -> None:
node_data = LLMNodeData.model_validate(
{
"title": "Test LLM",
"model": {"provider": "openai", "name": "gpt-4o-mini", "mode": LLMMode.CHAT, "completion_params": {}},
"prompt_template": [],
"prompt_config": None,
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": True, "configs": None},
"structured_output": {
"schema": {"type": "object"},
"name": "Response",
"description": "Structured",
},
"structured_output_enabled": True,
}
)
assert node_data.prompt_config.jinja2_variables == []
assert node_data.vision.configs.variable_selector == ["sys", "files"]
assert node_data.structured_output == {
"schema": {"type": "object"},
"name": "Response",
"description": "Structured",
}
assert node_data.structured_output_enabled is True
def test_llm_node_data_discards_legacy_structured_output_without_schema() -> None:
node_data = LLMNodeData.model_validate(
{
"title": "Test LLM",
"model": {"provider": "openai", "name": "gpt-4o-mini", "mode": LLMMode.CHAT, "completion_params": {}},
"prompt_template": [],
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": False},
"structured_output": {"name": "Missing schema"},
"structured_output_enabled": True,
}
)
assert node_data.structured_output is None
assert node_data.structured_output_enabled is False
def test_prompt_config_converts_none_jinja_variables() -> None:
prompt_config = LLMNodeData.model_validate(
{
"title": "Test LLM",
"model": {"provider": "openai", "name": "gpt-4o-mini", "mode": LLMMode.CHAT, "completion_params": {}},
"prompt_template": [],
"prompt_config": None,
"memory": None,
"context": {"enabled": False},
"vision": {"enabled": False},
"structured_output_enabled": False,
}
).prompt_config
assert prompt_config.jinja2_variables == []
def test_fetch_structured_output_schema_validates_required_object_shape() -> None:
assert LLMNode.fetch_structured_output_schema(structured_output={"schema": {"type": "object", "a": 1}}) == {
"type": "object",
"a": 1,
}
with pytest.raises(Exception, match="valid structured output schema"):
LLMNode.fetch_structured_output_schema(structured_output={"schema": None})
def test_handle_blocking_result_separates_reasoning_and_structured_output() -> None:
saver = mock.MagicMock(spec=LLMFileSaver)
event = LLMNode.handle_blocking_result(
invoke_result=LLMResultWithStructuredOutput(
model="gpt",
message=AssistantPromptMessage(content="<think>reasoning</think>answer"),
usage=LLMUsage.empty_usage(),
structured_output={"answer": "done"},
),
saver=saver,
file_outputs=[],
reasoning_format="separated",
request_latency=1.2345,
)
assert event.text == "answer"
assert event.reasoning_content == "reasoning"
assert event.structured_output == {"answer": "done"}
assert event.usage.latency == 1.234
def test_handle_blocking_result_keeps_tagged_text_without_structured_output() -> None:
saver = mock.MagicMock(spec=LLMFileSaver)
event = LLMNode.handle_blocking_result(
invoke_result=LLMResult(
model="gpt",
message=AssistantPromptMessage(content="plain text"),
usage=LLMUsage.empty_usage(),
),
saver=saver,
file_outputs=[],
)
assert event.text == "plain text"
assert event.reasoning_content == ""
assert event.structured_output is None
@pytest.fixture
def llm_node_data() -> LLMNodeData:
return LLMNodeData(

View File

@@ -1,6 +1,12 @@
from types import SimpleNamespace
import pytest
from pydantic import ValidationError
from dify_graph.entities.graph_config import NodeConfigDictAdapter
from dify_graph.nodes.loop.entities import LoopNodeData
from dify_graph.nodes.loop.entities import LoopNodeData, LoopValue
from dify_graph.nodes.loop.loop_node import LoopNode
from dify_graph.variables.types import SegmentType
def test_extract_variable_selector_to_variable_mapping_validates_child_node_configs(monkeypatch) -> None:
@@ -50,3 +56,104 @@ def test_extract_variable_selector_to_variable_mapping_validates_child_node_conf
)
assert seen_configs == [child_node_config]
@pytest.mark.parametrize(
("var_type", "original_value", "expected_value"),
[
(SegmentType.ARRAY_STRING, ["alpha", "beta"], ["alpha", "beta"]),
(SegmentType.ARRAY_NUMBER, [1, 2.5], [1, 2.5]),
(SegmentType.ARRAY_OBJECT, [{"name": "item"}], [{"name": "item"}]),
(SegmentType.ARRAY_STRING, '["legacy", "json"]', ["legacy", "json"]),
],
)
def test_get_segment_for_constant_accepts_native_array_values(
var_type: SegmentType, original_value: LoopValue, expected_value: LoopValue
) -> None:
segment = LoopNode._get_segment_for_constant(var_type, original_value)
assert segment.value_type == var_type
assert segment.value == expected_value
def test_loop_variable_data_validates_variable_selector_and_constant_value() -> None:
variable_input = LoopNodeData(
title="Loop",
loop_count=1,
break_conditions=[],
logical_operator="and",
loop_variables=[
{
"label": "question",
"var_type": SegmentType.STRING,
"value_type": "variable",
"value": ["start", "question"],
},
{
"label": "payload",
"var_type": SegmentType.OBJECT,
"value_type": "constant",
"value": {"count": 1, "items": ["a", 2]},
},
],
)
assert variable_input.loop_variables[0].require_variable_selector() == ["start", "question"]
assert variable_input.loop_variables[1].require_constant_value() == {"count": 1, "items": ["a", 2]}
def test_loop_variable_data_rejects_missing_variable_selector() -> None:
with pytest.raises(ValidationError, match="Variable loop inputs require a selector"):
LoopNodeData(
title="Loop",
loop_count=1,
break_conditions=[],
logical_operator="and",
loop_variables=[
{
"label": "question",
"var_type": SegmentType.STRING,
"value_type": "variable",
"value": None,
}
],
)
def test_loop_node_data_outputs_default_to_empty_mapping_for_none() -> None:
node_data = LoopNodeData(
title="Loop",
loop_count=1,
break_conditions=[],
logical_operator="and",
outputs=None,
)
assert node_data.outputs == {}
def test_append_loop_info_to_event_preserves_existing_loop_metadata() -> None:
node = object.__new__(LoopNode)
node._node_id = "loop-node"
event = SimpleNamespace(
node_run_result=SimpleNamespace(metadata={"loop_id": "existing-loop", "other": "value"}),
in_loop_id=None,
)
node._append_loop_info_to_event(event=event, loop_run_index=2)
assert event.in_loop_id == "loop-node"
assert event.node_run_result.metadata == {"loop_id": "existing-loop", "other": "value"}
def test_clear_loop_subgraph_variables_removes_each_loop_node() -> None:
node = object.__new__(LoopNode)
remove_calls: list[list[str]] = []
node.graph_runtime_state = SimpleNamespace(
variable_pool=SimpleNamespace(remove=lambda selector: remove_calls.append(selector))
)
node._clear_loop_subgraph_variables({"child-a", "child-b"})
assert sorted(remove_calls) == [["child-a"], ["child-b"]]

View File

@@ -8,11 +8,13 @@ from unittest.mock import MagicMock, patch
import pytest
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.utils.message_transformer import ToolFileMessageTransformer
from dify_graph.file import File, FileTransferMethod, FileType
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
from dify_graph.node_events import StreamChunkEvent, StreamCompletedEvent
from dify_graph.nodes.tool.entities import ToolEntity as WorkflowToolEntity
from dify_graph.nodes.tool.entities import ToolNodeData
from dify_graph.runtime import GraphRuntimeState, VariablePool
from dify_graph.system_variable import SystemVariable
from dify_graph.variables.segments import ArrayFileSegment
@@ -167,3 +169,119 @@ def test_plain_link_messages_remain_links(tool_node: ToolNode):
files_segment = completed_events[0].node_run_result.outputs["files"]
assert isinstance(files_segment, ArrayFileSegment)
assert files_segment.value == []
def test_workflow_tool_entity_accepts_primitives_and_tool_input_payloads() -> None:
entity = WorkflowToolEntity(
provider_id="provider",
provider_type="builtin",
provider_name="provider",
tool_name="search",
tool_label="Search",
tool_configurations={
"timeout": 30,
"query": {"type": "mixed", "value": "hello {{name}}"},
"selector": {"type": "variable", "value": ["start", "question"]},
},
)
assert entity.tool_configurations == {
"timeout": 30,
"query": {"type": "mixed", "value": "hello {{name}}"},
"selector": {"type": "variable", "value": ["start", "question"]},
}
def test_workflow_tool_entity_rejects_invalid_configuration_entries() -> None:
with pytest.raises(TypeError, match="Tool configuration values must be primitives"):
WorkflowToolEntity(
provider_id="provider",
provider_type="builtin",
provider_name="provider",
tool_name="search",
tool_label="Search",
tool_configurations={"bad": [object()]},
)
def test_tool_node_data_filters_missing_tool_parameter_values() -> None:
node_data = ToolNodeData(
title="Tool",
provider_id="provider",
provider_type="builtin",
provider_name="provider",
tool_name="search",
tool_label="Search",
tool_configurations={},
tool_parameters={
"query": {"type": "mixed", "value": "hello"},
"skip_none": None,
"skip_empty": {"type": "constant", "value": None},
},
)
assert set(node_data.tool_parameters.keys()) == {"query"}
def test_generate_parameters_reads_variables_and_optional_missing_inputs(tool_node: ToolNode) -> None:
variable_pool = MagicMock()
variable_pool.get.side_effect = [MagicMock(value="from-variable"), None]
node_data = ToolNodeData.model_validate(
{
"title": "Tool",
"provider_id": "provider",
"provider_type": "builtin",
"provider_name": "provider",
"tool_name": "tool",
"tool_label": "tool",
"tool_configurations": {},
"tool_parameters": {
"query": {"type": "variable", "value": ["start", "query"]},
"optional": {"type": "variable", "value": ["start", "optional"]},
},
}
)
tool_parameters = [
ToolParameter.get_simple_instance("query", "query", ToolParameter.ToolParameterType.STRING, True),
ToolParameter.get_simple_instance("optional", "optional", ToolParameter.ToolParameterType.STRING, False),
]
result = tool_node._generate_parameters(
tool_parameters=tool_parameters,
variable_pool=variable_pool,
node_data=node_data,
)
assert result == {"query": "from-variable"}
def test_generate_parameters_formats_logs_and_unknown_parameters(tool_node: ToolNode) -> None:
variable_pool = MagicMock()
variable_pool.convert_template.return_value = MagicMock(text="rendered", log="masked")
node_data = ToolNodeData.model_validate(
{
"title": "Tool",
"provider_id": "provider",
"provider_type": "builtin",
"provider_name": "provider",
"tool_name": "tool",
"tool_label": "tool",
"tool_configurations": {},
"tool_parameters": {
"query": {"type": "mixed", "value": "{{ question }}"},
"missing": {"type": "constant", "value": "literal"},
},
}
)
tool_parameters = [
ToolParameter.get_simple_instance("query", "query", ToolParameter.ToolParameterType.STRING, True),
]
result = tool_node._generate_parameters(
tool_parameters=tool_parameters,
variable_pool=variable_pool,
node_data=node_data,
for_log=True,
)
assert result == {"query": "masked", "missing": None}

View File

@@ -97,6 +97,22 @@ class TestWorkflowChildEngineBuilder:
((sentinel.layer_two,), {}),
]
def test_build_child_engine_tolerates_invalid_graph_shape_until_graph_init(self):
builder = workflow_entry._WorkflowChildEngineBuilder()
with (
patch.object(workflow_entry, "DifyNodeFactory", return_value=sentinel.factory),
patch.object(workflow_entry.Graph, "init", side_effect=ValueError("invalid graph")),
):
with pytest.raises(ValueError, match="invalid graph"):
builder.build_child_engine(
workflow_id="workflow-id",
graph_init_params=sentinel.graph_init_params,
graph_runtime_state=sentinel.graph_runtime_state,
graph_config={"nodes": "invalid"},
root_node_id="root",
)
class TestWorkflowEntryInit:
def test_rejects_call_depth_above_limit(self):

2
api/uv.lock generated
View File

@@ -1457,7 +1457,7 @@ wheels = [
[[package]]
name = "dify-api"
version = "1.13.3"
version = "1.13.2"
source = { virtual = "." }
dependencies = [
{ name = "aliyun-log-python-sdk" },

View File

@@ -21,7 +21,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -63,7 +63,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -102,7 +102,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -132,7 +132,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.13.3
image: langgenius/dify-web:1.13.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -245,7 +245,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.14
image: langgenius/dify-sandbox:0.2.12
restart: always
environment:
# The DifySandbox configurations
@@ -269,7 +269,7 @@ services:
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.5.3-local
image: langgenius/dify-plugin-daemon:0.5.4-local
restart: always
environment:
# Use the shared environment variables.

View File

@@ -97,7 +97,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.14
image: langgenius/dify-sandbox:0.2.12
restart: always
env_file:
- ./middleware.env
@@ -123,7 +123,7 @@ services:
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.5.3-local
image: langgenius/dify-plugin-daemon:0.5.4-local
restart: always
env_file:
- ./middleware.env

View File

@@ -731,7 +731,7 @@ services:
# API service
api:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -773,7 +773,7 @@ services:
# worker service
# The Celery worker for processing all queues (dataset, workflow, mail, etc.)
worker:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -812,7 +812,7 @@ services:
# worker_beat service
# Celery beat for scheduling periodic tasks.
worker_beat:
image: langgenius/dify-api:1.13.3
image: langgenius/dify-api:1.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -842,7 +842,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:1.13.3
image: langgenius/dify-web:1.13.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -955,7 +955,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.14
image: langgenius/dify-sandbox:0.2.12
restart: always
environment:
# The DifySandbox configurations
@@ -979,7 +979,7 @@ services:
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.5.3-local
image: langgenius/dify-plugin-daemon:0.5.4-local
restart: always
environment:
# Use the shared environment variables.

View File

@@ -5,8 +5,7 @@ app:
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /opt/python/bin/python3
nodejs_path: /usr/local/bin/node
python_path: /usr/local/bin/python3
enable_network: True # please make sure there is no network risk in your environment
allowed_syscalls: # please leave it empty if you have no idea how seccomp works
proxy:

View File

@@ -5,7 +5,7 @@ app:
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /opt/python/bin/python3
python_path: /usr/local/bin/python3
python_lib_path:
- /usr/local/lib/python3.10
- /usr/lib/python3.10

View File

@@ -1,350 +0,0 @@
import type { ReactNode } from 'react'
import { render, screen, waitFor } from '@testing-library/react'
import WorkflowApp from '../index'
const mockSetTriggerStatuses = vi.fn()
const mockSetInputs = vi.fn()
const mockSetShowInputsPanel = vi.fn()
const mockSetShowDebugAndPreviewPanel = vi.fn()
const mockWorkflowStoreSetState = vi.fn()
const mockDebouncedCancel = vi.fn()
const mockFetchRunDetail = vi.fn()
const mockInitialNodes = vi.fn()
const mockInitialEdges = vi.fn()
const mockGetWorkflowRunAndTraceUrl = vi.fn()
let appStoreState: {
appDetail?: {
id: string
mode: string
}
}
let workflowInitState: {
data: {
graph: {
nodes: Array<Record<string, unknown>>
edges: Array<Record<string, unknown>>
viewport: { x: number, y: number, zoom: number }
}
features: Record<string, unknown>
} | null
isLoading: boolean
fileUploadConfigResponse: Record<string, unknown> | null
}
let appContextState: {
isLoadingCurrentWorkspace: boolean
currentWorkspace: {
id?: string
}
}
let appTriggersState: {
data?: {
data: Array<{
node_id: string
status: string
}>
}
}
let searchParamsValue: string | null = null
const mockWorkflowStore = {
setState: mockWorkflowStoreSetState,
getState: () => ({
setInputs: mockSetInputs,
setShowInputsPanel: mockSetShowInputsPanel,
setShowDebugAndPreviewPanel: mockSetShowDebugAndPreviewPanel,
debouncedSyncWorkflowDraft: {
cancel: mockDebouncedCancel,
},
}),
}
// Module mocks: WorkflowApp renders against these stand-ins so the tests can
// steer app/workflow state without real stores, routing, or network services.
vi.mock('@/app/components/app/store', () => ({
  useStore: <T,>(selector: (state: typeof appStoreState) => T) => selector(appStoreState),
}))
vi.mock('@/app/components/workflow/store', () => ({
  useWorkflowStore: () => mockWorkflowStore,
}))
vi.mock('@/app/components/workflow/store/trigger-status', () => ({
  useTriggerStatusStore: () => ({
    setTriggerStatuses: mockSetTriggerStatuses,
  }),
}))
vi.mock('@/context/app-context', () => ({
  useAppContext: () => appContextState,
}))
// Only the `replayRunId` query param is answered; every other key resolves to null.
vi.mock('@/next/navigation', () => ({
  useSearchParams: () => ({
    get: (key: string) => (key === 'replayRunId' ? searchParamsValue : null),
  }),
}))
vi.mock('@/service/log', () => ({
  fetchRunDetail: (...args: unknown[]) => mockFetchRunDetail(...args),
}))
vi.mock('@/service/use-tools', () => ({
  useAppTriggers: () => appTriggersState,
}))
vi.mock('@/app/components/workflow-app/hooks/use-workflow-init', () => ({
  useWorkflowInit: () => workflowInitState,
}))
vi.mock('@/app/components/workflow-app/hooks/use-get-run-and-trace-url', () => ({
  useGetRunAndTraceUrl: () => ({
    getWorkflowRunAndTraceUrl: mockGetWorkflowRunAndTraceUrl,
  }),
}))
// Keep the real utils module but intercept the graph-initialisation helpers.
vi.mock('@/app/components/workflow/utils', async (importOriginal) => {
  const actual = await importOriginal<typeof import('@/app/components/workflow/utils')>()
  return {
    ...actual,
    initialNodes: (...args: unknown[]) => mockInitialNodes(...args),
    initialEdges: (...args: unknown[]) => mockInitialEdges(...args),
  }
})
vi.mock('@/app/components/base/loading', () => ({
  default: () => <div data-testid="loading">loading</div>,
}))
// FeaturesProvider stub serialises its features prop into a data attribute for assertion.
vi.mock('@/app/components/base/features', () => ({
  FeaturesProvider: ({
    features,
    children,
  }: {
    features: Record<string, unknown>
    children: ReactNode
  }) => (
    <div data-testid="features-provider" data-features={JSON.stringify(features)}>
      {children}
    </div>
  ),
}))
// Workflow default-context stub exposes the nodes/edges it receives as data attributes.
vi.mock('@/app/components/workflow', () => ({
  default: ({
    nodes,
    edges,
    children,
  }: {
    nodes: Array<Record<string, unknown>>
    edges: Array<Record<string, unknown>>
    children: ReactNode
  }) => (
    <div data-testid="workflow-default-context" data-nodes={JSON.stringify(nodes)} data-edges={JSON.stringify(edges)}>
      {children}
    </div>
  ),
}))
vi.mock('@/app/components/workflow/context', () => ({
  WorkflowContextProvider: ({
    children,
  }: {
    injectWorkflowStoreSliceFn: unknown
    children: ReactNode
  }) => (
    <div data-testid="workflow-context-provider">{children}</div>
  ),
}))
// WorkflowMain stub surfaces the forwarded graph props for attribute assertions.
vi.mock('@/app/components/workflow-app/components/workflow-main', () => ({
  default: ({
    nodes,
    edges,
    viewport,
  }: {
    nodes: Array<Record<string, unknown>>
    edges: Array<Record<string, unknown>>
    viewport: Record<string, unknown>
  }) => (
    <div
      data-testid="workflow-app-main"
      data-nodes={JSON.stringify(nodes)}
      data-edges={JSON.stringify(edges)}
      data-viewport={JSON.stringify(viewport)}
    />
  ),
}))
describe('WorkflowApp', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Happy-path defaults: loaded workflow data, resolved workspace, no triggers, no replay.
    appStoreState = {
      appDetail: {
        id: 'app-1',
        mode: 'workflow',
      },
    }
    workflowInitState = {
      data: {
        graph: {
          nodes: [{ id: 'raw-node' }],
          edges: [{ id: 'raw-edge' }],
          viewport: { x: 1, y: 2, zoom: 3 },
        },
        features: {
          file_upload: {
            enabled: true,
          },
        },
      },
      isLoading: false,
      fileUploadConfigResponse: { enabled: true },
    }
    appContextState = {
      isLoadingCurrentWorkspace: false,
      currentWorkspace: { id: 'workspace-1' },
    }
    appTriggersState = {}
    searchParamsValue = null
    mockFetchRunDetail.mockResolvedValue({ inputs: null })
    mockInitialNodes.mockReturnValue([{ id: 'node-1' }])
    mockInitialEdges.mockReturnValue([{ id: 'edge-1' }])
    mockGetWorkflowRunAndTraceUrl.mockReturnValue({ runUrl: '/runs/run-1' })
  })
  it('should render the loading shell while workflow data is still loading', () => {
    workflowInitState = {
      data: null,
      isLoading: true,
      fileUploadConfigResponse: null,
    }
    render(<WorkflowApp />)
    expect(screen.getByTestId('loading')).toBeInTheDocument()
    expect(screen.queryByTestId('workflow-app-main')).not.toBeInTheDocument()
  })
  it('should render the workflow app shell and sync trigger statuses when data is ready', () => {
    appTriggersState = {
      data: {
        data: [
          { node_id: 'trigger-enabled', status: 'enabled' },
          { node_id: 'trigger-disabled', status: 'paused' },
        ],
      },
    }
    render(<WorkflowApp />)
    expect(screen.getByTestId('workflow-context-provider')).toBeInTheDocument()
    // Nodes/edges come from the mocked initialNodes/initialEdges, not the raw graph payload.
    expect(screen.getByTestId('workflow-default-context')).toHaveAttribute('data-nodes', JSON.stringify([{ id: 'node-1' }]))
    expect(screen.getByTestId('workflow-default-context')).toHaveAttribute('data-edges', JSON.stringify([{ id: 'edge-1' }]))
    expect(screen.getByTestId('workflow-app-main')).toHaveAttribute('data-viewport', JSON.stringify({ x: 1, y: 2, zoom: 3 }))
    expect(screen.getByTestId('features-provider')).toBeInTheDocument()
    // A non-'enabled' trigger status (here 'paused') is normalised to 'disabled'.
    expect(mockSetTriggerStatuses).toHaveBeenCalledWith({
      'trigger-enabled': 'enabled',
      'trigger-disabled': 'disabled',
    })
  })
  it('should not sync trigger statuses when trigger data is unavailable', () => {
    render(<WorkflowApp />)
    expect(screen.getByTestId('workflow-app-main')).toBeInTheDocument()
    expect(mockSetTriggerStatuses).not.toHaveBeenCalled()
  })
  it('should replay workflow inputs from replayRunId and clean up workflow state on unmount', async () => {
    searchParamsValue = 'run-1'
    mockFetchRunDetail.mockResolvedValue({
      inputs: '{"sys.query":"hidden","foo":"bar","count":2,"flag":true,"obj":{"nested":true},"nil":null}',
    })
    const { unmount } = render(<WorkflowApp />)
    await waitFor(() => {
      expect(mockFetchRunDetail).toHaveBeenCalledWith('/runs/run-1')
      // sys.* keys are dropped; objects are re-stringified and null becomes an empty string.
      expect(mockSetInputs).toHaveBeenCalledWith({
        foo: 'bar',
        count: 2,
        flag: true,
        obj: '{"nested":true}',
        nil: '',
      })
      expect(mockSetShowInputsPanel).toHaveBeenCalledWith(true)
      expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
    })
    unmount()
    // Unmount must reset the loaded flag and cancel any pending debounced draft sync.
    expect(mockWorkflowStoreSetState).toHaveBeenCalledWith({ isWorkflowDataLoaded: false })
    expect(mockDebouncedCancel).toHaveBeenCalled()
  })
  it('should skip replay lookups when replayRunId is missing', () => {
    render(<WorkflowApp />)
    expect(mockGetWorkflowRunAndTraceUrl).not.toHaveBeenCalled()
    expect(mockFetchRunDetail).not.toHaveBeenCalled()
    expect(mockSetInputs).not.toHaveBeenCalled()
  })
  it('should skip replay fetches when the resolved run url is empty', async () => {
    searchParamsValue = 'run-1'
    mockGetWorkflowRunAndTraceUrl.mockReturnValue({ runUrl: '' })
    render(<WorkflowApp />)
    await waitFor(() => {
      expect(mockGetWorkflowRunAndTraceUrl).toHaveBeenCalledWith('run-1')
    })
    expect(mockFetchRunDetail).not.toHaveBeenCalled()
    expect(mockSetInputs).not.toHaveBeenCalled()
  })
  it('should stop replay recovery when workflow run inputs cannot be parsed', async () => {
    // Silence the expected console.error while still asserting it was emitted.
    const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {})
    searchParamsValue = 'run-1'
    mockFetchRunDetail.mockResolvedValue({
      inputs: '{invalid-json}',
    })
    render(<WorkflowApp />)
    await waitFor(() => {
      expect(mockFetchRunDetail).toHaveBeenCalledWith('/runs/run-1')
    })
    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Failed to parse workflow run inputs',
      expect.any(Error),
    )
    // No partial state updates after a parse failure.
    expect(mockSetInputs).not.toHaveBeenCalled()
    expect(mockSetShowInputsPanel).not.toHaveBeenCalled()
    expect(mockSetShowDebugAndPreviewPanel).not.toHaveBeenCalled()
    consoleErrorSpy.mockRestore()
  })
  it('should ignore replay inputs when they only contain sys variables', async () => {
    searchParamsValue = 'run-1'
    mockFetchRunDetail.mockResolvedValue({
      inputs: '{"sys.query":"hidden","sys.user_id":"u-1"}',
    })
    render(<WorkflowApp />)
    await waitFor(() => {
      expect(mockFetchRunDetail).toHaveBeenCalledWith('/runs/run-1')
    })
    expect(mockSetInputs).not.toHaveBeenCalled()
    expect(mockSetShowInputsPanel).not.toHaveBeenCalled()
    expect(mockSetShowDebugAndPreviewPanel).not.toHaveBeenCalled()
  })
})

View File

@@ -1,90 +0,0 @@
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import { TransferMethod } from '@/types/app'
import {
buildInitialFeatures,
buildTriggerStatusMap,
coerceReplayUserInputs,
} from '../utils'
describe('workflow-app utils', () => {
  it('should map trigger statuses to enabled and disabled states', () => {
    const triggers = [
      { node_id: 'node-1', status: 'enabled' },
      { node_id: 'node-2', status: 'disabled' },
      { node_id: 'node-3', status: 'paused' },
    ]
    // Any status other than 'enabled' (including 'paused') collapses to 'disabled'.
    const statusMap = buildTriggerStatusMap(triggers)
    expect(statusMap).toEqual({
      'node-1': 'enabled',
      'node-2': 'disabled',
      'node-3': 'disabled',
    })
  })
  it('should coerce replay run inputs, omit sys keys, and stringify complex values', () => {
    const rawInputs = {
      'sys.query': 'hidden',
      'query': 'hello',
      'count': 3,
      'enabled': true,
      'nullable': null,
      'metadata': { nested: true },
    }
    // sys.* keys are filtered out; null maps to '' and objects are JSON-encoded.
    const coerced = coerceReplayUserInputs(rawInputs)
    expect(coerced).toEqual({
      query: 'hello',
      count: 3,
      enabled: true,
      nullable: '',
      metadata: '{"nested":true}',
    })
    // Non-object payloads are rejected outright.
    expect(coerceReplayUserInputs('invalid')).toBeNull()
    expect(coerceReplayUserInputs(null)).toBeNull()
  })
  it('should build initial features with file-upload and feature fallbacks', () => {
    const features = buildInitialFeatures({
      file_upload: {
        enabled: true,
        allowed_file_types: [SupportUploadFileTypes.image],
        allowed_file_extensions: ['.png'],
        allowed_file_upload_methods: [TransferMethod.local_file],
        number_limits: 2,
        image: {
          enabled: true,
          number_limits: 5,
          transfer_methods: [TransferMethod.remote_url],
        },
      },
      opening_statement: 'hello',
      suggested_questions: ['Q1'],
      suggested_questions_after_answer: { enabled: true },
      speech_to_text: { enabled: true },
      text_to_speech: { enabled: true },
      retriever_resource: { enabled: true },
      sensitive_word_avoidance: { enabled: true },
    }, { enabled: true } as never)
    // Snake-cased API feature keys are remapped to the store's internal names.
    expect(features).toMatchObject({
      file: {
        enabled: true,
        allowed_file_types: [SupportUploadFileTypes.image],
        allowed_file_extensions: ['.png'],
        allowed_file_upload_methods: [TransferMethod.local_file],
        number_limits: 2,
        fileUploadConfig: { enabled: true },
        image: {
          enabled: true,
          number_limits: 5,
          transfer_methods: [TransferMethod.remote_url],
        },
      },
      opening: {
        enabled: true,
        opening_statement: 'hello',
        suggested_questions: ['Q1'],
      },
      suggested: { enabled: true },
      speech2text: { enabled: true },
      text2speech: { enabled: true },
      citation: { enabled: true },
      moderation: { enabled: true },
    })
  })
})

View File

@@ -1,494 +0,0 @@
import { act, render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import * as React from 'react'
import { DSL_EXPORT_CHECK } from '@/app/components/workflow/constants'
import { BlockEnum } from '@/app/components/workflow/types'
import WorkflowChildren from '../workflow-children'
// Shape of the mocked workflow store consumed by WorkflowChildren.
type WorkflowStoreState = {
  showFeaturesPanel: boolean
  showImportDSLModal: boolean
  setShowImportDSLModal: (show: boolean) => void
  showOnboarding: boolean
  setShowOnboarding: (show: boolean) => void
  setHasSelectedStartNode: (selected: boolean) => void
  setShouldAutoOpenStartNodeSelector: (open: boolean) => void
}
// Config payload passed when a trigger-plugin start node is selected in onboarding.
type TriggerPluginConfig = {
  plugin_id: string
  provider_name: string
  provider_type: string
  event_name: string
  event_label: string
  event_description: string
  output_schema: Record<string, unknown>
  paramSchemas: Array<Record<string, unknown>>
  params: Record<string, unknown>
  subscription_id: string
  plugin_unique_identifier: string
  is_team_authorization: boolean
  meta?: Record<string, unknown>
}
// Spies injected through the mocked store/hooks below; cleared in beforeEach.
const mockSetShowImportDSLModal = vi.fn()
const mockSetShowOnboarding = vi.fn()
const mockSetHasSelectedStartNode = vi.fn()
const mockSetShouldAutoOpenStartNodeSelector = vi.fn()
const mockSetNodes = vi.fn()
const mockSetEdges = vi.fn()
const mockHandleSyncWorkflowDraft = vi.fn()
const mockHandleOnboardingClose = vi.fn()
const mockHandlePaneContextmenuCancel = vi.fn()
const mockHandleExportDSL = vi.fn()
const mockExportCheck = vi.fn()
const mockAutoGenerateWebhookUrl = vi.fn()
let workflowStoreState: WorkflowStoreState
// Captures the event-emitter subscription callback so tests can dispatch events manually.
let eventSubscription: ((value: { type: string, payload: { data: Array<Record<string, unknown>> } }) => void) | null = null
// Records the last argument object passed to the mocked generateNewNode.
let lastGenerateNodeInput: Record<string, unknown> | null = null
// Module mocks for WorkflowChildren: reactflow store, workflow store, event
// emitter, draft-sync hooks, and node metadata.
vi.mock('reactflow', () => ({
  useStoreApi: () => ({
    getState: () => ({
      setNodes: mockSetNodes,
      setEdges: mockSetEdges,
    }),
  }),
}))
vi.mock('@/app/components/workflow/store', () => ({
  useStore: <T,>(selector: (state: WorkflowStoreState) => T) => selector(workflowStoreState),
}))
// Capture the subscription callback so tests can emit events synchronously.
vi.mock('@/context/event-emitter', () => ({
  useEventEmitterContextContext: () => ({
    eventEmitter: {
      useSubscription: (callback: typeof eventSubscription) => {
        eventSubscription = callback
      },
    },
  }),
}))
vi.mock('@/app/components/workflow/hooks', () => ({
  useAutoGenerateWebhookUrl: () => mockAutoGenerateWebhookUrl,
  useDSL: () => ({
    exportCheck: mockExportCheck,
    handleExportDSL: mockHandleExportDSL,
  }),
  usePanelInteractions: () => ({
    handlePaneContextmenuCancel: mockHandlePaneContextmenuCancel,
  }),
}))
vi.mock('@/app/components/workflow/hooks/use-nodes-sync-draft', () => ({
  useNodesSyncDraft: () => ({
    handleSyncWorkflowDraft: mockHandleSyncWorkflowDraft,
  }),
}))
// Keep the real utils module but record the arguments generateNewNode receives
// and hand back a deterministic node id.
vi.mock('@/app/components/workflow/utils', async (importOriginal) => {
  const actual = await importOriginal<typeof import('@/app/components/workflow/utils')>()
  return {
    ...actual,
    generateNewNode: (args: Record<string, unknown>) => {
      lastGenerateNodeInput = args
      return {
        newNode: {
          id: 'new-node-id',
          position: args.position,
          data: args.data,
        },
      }
    },
  }
})
// Default node metadata consulted when merging start-node / trigger-plugin configs.
vi.mock('@/app/components/workflow-app/hooks', () => ({
  useAvailableNodesMetaData: () => ({
    nodesMap: {
      [BlockEnum.Start]: {
        defaultValue: {
          title: 'Start Title',
          desc: 'Start description',
          config: {
            image: false,
          },
        },
      },
      [BlockEnum.TriggerPlugin]: {
        defaultValue: {
          title: 'Plugin title',
          desc: 'Plugin description',
          config: {
            baseConfig: 'base',
          },
        },
      },
    },
  }),
}))
vi.mock('@/app/components/workflow-app/hooks/use-auto-onboarding', () => ({
  useAutoOnboarding: () => ({
    handleOnboardingClose: mockHandleOnboardingClose,
  }),
}))
// Lightweight component stubs so the shell renders without the real chrome.
vi.mock('@/app/components/workflow/plugin-dependency', () => ({
  default: () => <div data-testid="plugin-dependency">plugin-dependency</div>,
}))
vi.mock('@/app/components/workflow-app/components/workflow-header', () => ({
  default: () => <div data-testid="workflow-header">workflow-header</div>,
}))
vi.mock('@/app/components/workflow-app/components/workflow-panel', () => ({
  default: () => <div data-testid="workflow-panel">workflow-panel</div>,
}))
// next/dynamic stub: resolves the loader in an effect so lazy chunks render in tests.
vi.mock('@/next/dynamic', async () => {
  const ReactModule = await import('react')
  return {
    default: (
      loader: () => Promise<{ default: React.ComponentType<Record<string, unknown>> }>,
    ) => {
      const DynamicComponent = (props: Record<string, unknown>) => {
        const [Loaded, setLoaded] = ReactModule.useState<React.ComponentType<Record<string, unknown>> | null>(null)
        ReactModule.useEffect(() => {
          // Guard against setState after unmount while the loader promise is in flight.
          let mounted = true
          loader().then((mod) => {
            if (mounted)
              setLoaded(() => mod.default)
          })
          return () => {
            mounted = false
          }
        }, [])
        return Loaded ? <Loaded {...props} /> : null
      }
      return DynamicComponent
    },
  }
})
vi.mock('@/app/components/workflow/features', () => ({
  default: () => <div data-testid="workflow-features">features</div>,
}))
// Modal stubs expose their callbacks as buttons the tests can click.
vi.mock('@/app/components/workflow/update-dsl-modal', () => ({
  default: ({
    onCancel,
    onBackup,
    onImport,
  }: {
    onCancel: () => void
    onBackup: () => void
    onImport: () => void
  }) => (
    <div data-testid="update-dsl-modal">
      <button type="button" onClick={onCancel}>cancel-import-dsl</button>
      <button type="button" onClick={onBackup}>backup-dsl</button>
      <button type="button" onClick={onImport}>import-dsl</button>
    </div>
  ),
}))
vi.mock('@/app/components/workflow/dsl-export-confirm-modal', () => ({
  default: ({
    envList,
    onConfirm,
    onClose,
  }: {
    envList: Array<Record<string, unknown>>
    onConfirm: () => void
    onClose: () => void
  }) => (
    <div data-testid="dsl-export-confirm-modal" data-env-count={String(envList.length)}>
      <button type="button" onClick={onConfirm}>confirm-export-dsl</button>
      <button type="button" onClick={onClose}>close-export-dsl</button>
    </div>
  ),
}))
// Onboarding modal stub: each button invokes onSelectStartNode with a distinct
// payload shape (no config, inline start config, full trigger plugin, and a
// fallback plugin with empty labels / missing paramSchemas).
vi.mock('@/app/components/workflow-app/components/workflow-onboarding-modal', () => ({
  default: ({
    onClose,
    onSelectStartNode,
  }: {
    isShow: boolean
    onClose: () => void
    onSelectStartNode: (nodeType: BlockEnum, config?: TriggerPluginConfig) => void
  }) => (
    <div data-testid="workflow-onboarding-modal">
      <button type="button" onClick={onClose}>close-onboarding</button>
      <button type="button" onClick={() => onSelectStartNode(BlockEnum.Start)}>select-start-node</button>
      <button
        type="button"
        onClick={() => onSelectStartNode(BlockEnum.Start, {
          title: 'Configured Start Title',
          desc: 'Configured Start Description',
          config: { image: true, custom: 'config' },
          extra: 'field',
        } as never)}
      >
        select-start-node-with-config
      </button>
      <button
        type="button"
        onClick={() => onSelectStartNode(BlockEnum.TriggerPlugin, {
          plugin_id: 'plugin-id',
          provider_name: 'provider-name',
          provider_type: 'tool',
          event_name: 'event-name',
          event_label: 'Event Label',
          event_description: 'Event Description',
          output_schema: { output: true },
          paramSchemas: [{ name: 'api_key' }],
          params: { token: 'abc' },
          subscription_id: 'subscription-id',
          plugin_unique_identifier: 'plugin-unique',
          is_team_authorization: true,
          meta: { source: 'plugin' },
        })}
      >
        select-trigger-plugin
      </button>
      <button
        type="button"
        onClick={() => onSelectStartNode(BlockEnum.TriggerPlugin, {
          plugin_id: 'plugin-id-2',
          provider_name: 'provider-name-2',
          provider_type: 'tool',
          event_name: 'event-name-2',
          event_label: '',
          event_description: '',
          output_schema: {},
          paramSchemas: undefined,
          params: {},
          subscription_id: 'subscription-id-2',
          plugin_unique_identifier: 'plugin-unique-2',
          is_team_authorization: false,
        } as never)}
      >
        select-trigger-plugin-fallback
      </button>
    </div>
  ),
}))
describe('WorkflowChildren', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    workflowStoreState = {
      showFeaturesPanel: false,
      showImportDSLModal: false,
      setShowImportDSLModal: mockSetShowImportDSLModal,
      showOnboarding: false,
      setShowOnboarding: mockSetShowOnboarding,
      setHasSelectedStartNode: mockSetHasSelectedStartNode,
      setShouldAutoOpenStartNodeSelector: mockSetShouldAutoOpenStartNodeSelector,
    }
    eventSubscription = null
    lastGenerateNodeInput = null
    // Draft syncs succeed immediately so post-sync callbacks (webhook generation) fire synchronously.
    mockHandleSyncWorkflowDraft.mockImplementation((_force?: boolean, _notRefresh?: boolean, callback?: { onSuccess?: () => void }) => {
      callback?.onSuccess?.()
    })
  })
  it('should render feature panel, import modal actions, and default workflow chrome', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showFeaturesPanel: true,
      showImportDSLModal: true,
    }
    render(<WorkflowChildren />)
    expect(screen.getByTestId('plugin-dependency')).toBeInTheDocument()
    expect(screen.getByTestId('workflow-header')).toBeInTheDocument()
    expect(screen.getByTestId('workflow-panel')).toBeInTheDocument()
    // Features panel is loaded through the dynamic stub, hence findBy.
    expect(await screen.findByTestId('workflow-features')).toBeInTheDocument()
    expect(screen.getByTestId('update-dsl-modal')).toBeInTheDocument()
    await user.click(screen.getByRole('button', { name: /cancel-import-dsl/i }))
    await user.click(screen.getByRole('button', { name: /backup-dsl/i }))
    await user.click(screen.getByRole('button', { name: /^import-dsl$/i }))
    expect(mockSetShowImportDSLModal).toHaveBeenCalledWith(false)
    expect(mockExportCheck).toHaveBeenCalled()
    expect(mockHandlePaneContextmenuCancel).toHaveBeenCalled()
  })
  it('should react to DSL export check events by showing the confirm modal and closing it', async () => {
    const user = userEvent.setup()
    render(<WorkflowChildren />)
    // Dispatch the captured event-emitter callback directly to simulate the event.
    await act(async () => {
      eventSubscription?.({
        type: DSL_EXPORT_CHECK,
        payload: {
          data: [{ id: 'env-1' }, { id: 'env-2' }],
        },
      })
    })
    expect(await screen.findByTestId('dsl-export-confirm-modal')).toHaveAttribute('data-env-count', '2')
    await user.click(screen.getByRole('button', { name: /confirm-export-dsl/i }))
    await user.click(screen.getByRole('button', { name: /close-export-dsl/i }))
    expect(mockHandleExportDSL).toHaveBeenCalled()
    expect(screen.queryByTestId('dsl-export-confirm-modal')).not.toBeInTheDocument()
  })
  it('should ignore unrelated workflow events when listening for DSL export checks', async () => {
    render(<WorkflowChildren />)
    await act(async () => {
      eventSubscription?.({
        type: 'UNRELATED_EVENT',
        payload: {
          data: [{ id: 'env-1' }],
        },
      })
    })
    expect(screen.queryByTestId('dsl-export-confirm-modal')).not.toBeInTheDocument()
  })
  it('should close onboarding through the onboarding hook callback', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showOnboarding: true,
    }
    render(<WorkflowChildren />)
    expect(await screen.findByTestId('workflow-onboarding-modal')).toBeInTheDocument()
    await user.click(screen.getByRole('button', { name: /close-onboarding/i }))
    expect(mockHandleOnboardingClose).toHaveBeenCalled()
  })
  it('should create a start node, sync draft, and auto-generate webhook url after selecting a start node', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showOnboarding: true,
    }
    render(<WorkflowChildren />)
    await user.click(await screen.findByRole('button', { name: /^select-start-node$/i }))
    // No config supplied, so the node data comes straight from the Start defaults.
    expect(lastGenerateNodeInput).toMatchObject({
      data: {
        title: 'Start Title',
        desc: 'Start description',
        config: {
          image: false,
        },
      },
    })
    expect(mockSetNodes).toHaveBeenCalledWith([expect.objectContaining({ id: 'new-node-id' })])
    expect(mockSetEdges).toHaveBeenCalledWith([])
    expect(mockSetShowOnboarding).toHaveBeenCalledWith(false)
    expect(mockSetHasSelectedStartNode).toHaveBeenCalledWith(true)
    expect(mockSetShouldAutoOpenStartNodeSelector).toHaveBeenCalledWith(true)
    expect(mockHandleSyncWorkflowDraft).toHaveBeenCalledWith(true, false, expect.any(Object))
    // Webhook generation runs in the sync-success callback with the new node id.
    expect(mockAutoGenerateWebhookUrl).toHaveBeenCalledWith('new-node-id')
  })
  it('should merge non-trigger start node config directly into the default node data', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showOnboarding: true,
    }
    render(<WorkflowChildren />)
    await user.click(await screen.findByRole('button', { name: /select-start-node-with-config/i }))
    // Supplied config overrides defaults and extra fields are carried through.
    expect(lastGenerateNodeInput).toMatchObject({
      data: {
        title: 'Configured Start Title',
        desc: 'Configured Start Description',
        config: {
          image: true,
          custom: 'config',
        },
        extra: 'field',
      },
    })
  })
  it('should merge trigger plugin defaults and config before creating the node', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showOnboarding: true,
    }
    render(<WorkflowChildren />)
    await user.click(await screen.findByRole('button', { name: /^select-trigger-plugin$/i }))
    // Plugin params merge into the default config; event label/description become title/desc.
    expect(lastGenerateNodeInput).toMatchObject({
      data: {
        plugin_id: 'plugin-id',
        provider_id: 'provider-name',
        provider_name: 'provider-name',
        provider_type: 'tool',
        event_name: 'event-name',
        event_label: 'Event Label',
        event_description: 'Event Description',
        title: 'Event Label',
        desc: 'Event Description',
        output_schema: { output: true },
        parameters_schema: [{ name: 'api_key' }],
        config: {
          baseConfig: 'base',
          token: 'abc',
        },
        subscription_id: 'subscription-id',
        plugin_unique_identifier: 'plugin-unique',
        is_team_authorization: true,
        meta: { source: 'plugin' },
      },
    })
  })
  it('should fall back to plugin default title and description when trigger labels are missing', async () => {
    const user = userEvent.setup()
    workflowStoreState = {
      ...workflowStoreState,
      showOnboarding: true,
    }
    render(<WorkflowChildren />)
    await user.click(await screen.findByRole('button', { name: /select-trigger-plugin-fallback/i }))
    // Empty labels fall back to plugin defaults; missing paramSchemas become [].
    expect(lastGenerateNodeInput).toMatchObject({
      data: {
        title: 'Plugin title',
        desc: 'Plugin description',
        parameters_schema: [],
        config: {
          baseConfig: 'base',
        },
      },
    })
  })
})

View File

@@ -1,277 +0,0 @@
import type { ReactNode } from 'react'
import type { WorkflowProps } from '@/app/components/workflow'
import { fireEvent, render, screen } from '@testing-library/react'
import WorkflowMain from '../workflow-main'
const mockSetFeatures = vi.fn()
const mockSetConversationVariables = vi.fn()
const mockSetEnvironmentVariables = vi.fn()
// One spy per composed workflow-app hook so the hooksStore wiring can be
// asserted by function identity.
const hookFns = {
  doSyncWorkflowDraft: vi.fn(),
  syncWorkflowDraftWhenPageClose: vi.fn(),
  handleRefreshWorkflowDraft: vi.fn(),
  handleBackupDraft: vi.fn(),
  handleLoadBackupDraft: vi.fn(),
  handleRestoreFromPublishedWorkflow: vi.fn(),
  handleRun: vi.fn(),
  handleStopRun: vi.fn(),
  handleStartWorkflowRun: vi.fn(),
  handleWorkflowStartRunInChatflow: vi.fn(),
  handleWorkflowStartRunInWorkflow: vi.fn(),
  handleWorkflowTriggerScheduleRunInWorkflow: vi.fn(),
  handleWorkflowTriggerWebhookRunInWorkflow: vi.fn(),
  handleWorkflowTriggerPluginRunInWorkflow: vi.fn(),
  handleWorkflowRunAllTriggersInWorkflow: vi.fn(),
  getWorkflowRunAndTraceUrl: vi.fn(),
  exportCheck: vi.fn(),
  handleExportDSL: vi.fn(),
  fetchInspectVars: vi.fn(),
  hasNodeInspectVars: vi.fn(),
  hasSetInspectVar: vi.fn(),
  fetchInspectVarValue: vi.fn(),
  editInspectVarValue: vi.fn(),
  renameInspectVarName: vi.fn(),
  appendNodeInspectVars: vi.fn(),
  deleteInspectVar: vi.fn(),
  deleteNodeInspectorVars: vi.fn(),
  deleteAllInspectorVars: vi.fn(),
  isInspectVarEdited: vi.fn(),
  resetToLastRunVar: vi.fn(),
  invalidateSysVarValues: vi.fn(),
  resetConversationVar: vi.fn(),
  invalidateConversationVarValues: vi.fn(),
}
// Captures the props forwarded to the mocked WorkflowWithInnerContext.
let capturedContextProps: Record<string, unknown> | null = null
type MockWorkflowWithInnerContextProps = Pick<WorkflowProps, 'nodes' | 'edges' | 'viewport' | 'onWorkflowDataUpdate'> & {
  hooksStore?: Record<string, unknown>
  children?: ReactNode
}
vi.mock('@/app/components/base/features/hooks', () => ({
  useFeaturesStore: () => ({
    getState: () => ({
      setFeatures: mockSetFeatures,
    }),
  }),
}))
vi.mock('@/app/components/workflow/store', () => ({
  useWorkflowStore: () => ({
    getState: () => ({
      setConversationVariables: mockSetConversationVariables,
      setEnvironmentVariables: mockSetEnvironmentVariables,
    }),
  }),
}))
// Inner-context stub: records forwarded props and exposes buttons that replay
// onWorkflowDataUpdate with representative payloads.
vi.mock('@/app/components/workflow', () => ({
  WorkflowWithInnerContext: ({
    nodes,
    edges,
    viewport,
    onWorkflowDataUpdate,
    hooksStore,
    children,
  }: MockWorkflowWithInnerContextProps) => {
    capturedContextProps = {
      nodes,
      edges,
      viewport,
      hooksStore,
    }
    return (
      <div data-testid="workflow-inner-context">
        <button
          type="button"
          onClick={() => onWorkflowDataUpdate?.({
            features: { file: { enabled: true } },
            conversation_variables: [{ id: 'conversation-1' }],
            environment_variables: [{ id: 'env-1' }],
          })}
        >
          update-workflow-data
        </button>
        <button
          type="button"
          onClick={() => onWorkflowDataUpdate?.({
            conversation_variables: [{ id: 'conversation-only' }],
          })}
        >
          update-conversation-only
        </button>
        <button
          type="button"
          onClick={() => onWorkflowDataUpdate?.({})}
        >
          update-empty-payload
        </button>
        {children}
      </div>
    )
  },
}))
// Every composed workflow-app hook is replaced by a spy from hookFns.
vi.mock('@/app/components/workflow-app/hooks', () => ({
  useAvailableNodesMetaData: () => ({ nodes: [{ id: 'start' }], nodesMap: { start: { id: 'start' } } }),
  useConfigsMap: () => ({ flowId: 'app-1', flowType: 'app-flow', fileSettings: { enabled: true } }),
  useDSL: () => ({ exportCheck: hookFns.exportCheck, handleExportDSL: hookFns.handleExportDSL }),
  useGetRunAndTraceUrl: () => ({ getWorkflowRunAndTraceUrl: hookFns.getWorkflowRunAndTraceUrl }),
  useInspectVarsCrud: () => ({
    hasNodeInspectVars: hookFns.hasNodeInspectVars,
    hasSetInspectVar: hookFns.hasSetInspectVar,
    fetchInspectVarValue: hookFns.fetchInspectVarValue,
    editInspectVarValue: hookFns.editInspectVarValue,
    renameInspectVarName: hookFns.renameInspectVarName,
    appendNodeInspectVars: hookFns.appendNodeInspectVars,
    deleteInspectVar: hookFns.deleteInspectVar,
    deleteNodeInspectorVars: hookFns.deleteNodeInspectorVars,
    deleteAllInspectorVars: hookFns.deleteAllInspectorVars,
    isInspectVarEdited: hookFns.isInspectVarEdited,
    resetToLastRunVar: hookFns.resetToLastRunVar,
    invalidateSysVarValues: hookFns.invalidateSysVarValues,
    resetConversationVar: hookFns.resetConversationVar,
    invalidateConversationVarValues: hookFns.invalidateConversationVarValues,
  }),
  useNodesSyncDraft: () => ({
    doSyncWorkflowDraft: hookFns.doSyncWorkflowDraft,
    syncWorkflowDraftWhenPageClose: hookFns.syncWorkflowDraftWhenPageClose,
  }),
  useSetWorkflowVarsWithValue: () => ({
    fetchInspectVars: hookFns.fetchInspectVars,
  }),
  useWorkflowRefreshDraft: () => ({ handleRefreshWorkflowDraft: hookFns.handleRefreshWorkflowDraft }),
  useWorkflowRun: () => ({
    handleBackupDraft: hookFns.handleBackupDraft,
    handleLoadBackupDraft: hookFns.handleLoadBackupDraft,
    handleRestoreFromPublishedWorkflow: hookFns.handleRestoreFromPublishedWorkflow,
    handleRun: hookFns.handleRun,
    handleStopRun: hookFns.handleStopRun,
  }),
  useWorkflowStartRun: () => ({
    handleStartWorkflowRun: hookFns.handleStartWorkflowRun,
    handleWorkflowStartRunInChatflow: hookFns.handleWorkflowStartRunInChatflow,
    handleWorkflowStartRunInWorkflow: hookFns.handleWorkflowStartRunInWorkflow,
    handleWorkflowTriggerScheduleRunInWorkflow: hookFns.handleWorkflowTriggerScheduleRunInWorkflow,
    handleWorkflowTriggerWebhookRunInWorkflow: hookFns.handleWorkflowTriggerWebhookRunInWorkflow,
    handleWorkflowTriggerPluginRunInWorkflow: hookFns.handleWorkflowTriggerPluginRunInWorkflow,
    handleWorkflowRunAllTriggersInWorkflow: hookFns.handleWorkflowRunAllTriggersInWorkflow,
  }),
}))
vi.mock('../workflow-children', () => ({
  default: () => <div data-testid="workflow-children">workflow-children</div>,
}))
describe('WorkflowMain', () => {
  // Shared fixture for the tests that do not care about graph contents.
  const renderBareMain = () => render(
    <WorkflowMain
      nodes={[]}
      edges={[]}
      viewport={{ x: 0, y: 0, zoom: 1 }}
    />,
  )
  beforeEach(() => {
    vi.clearAllMocks()
    capturedContextProps = null
  })
  it('should render the inner workflow context with children and forwarded graph props', () => {
    const graphNodes = [{ id: 'node-1' }]
    const graphEdges = [{ id: 'edge-1' }]
    const graphViewport = { x: 1, y: 2, zoom: 1.5 }
    render(
      <WorkflowMain
        nodes={graphNodes as never}
        edges={graphEdges as never}
        viewport={graphViewport}
      />,
    )
    expect(screen.getByTestId('workflow-inner-context')).toBeInTheDocument()
    expect(screen.getByTestId('workflow-children')).toBeInTheDocument()
    // The mocked inner context records exactly what WorkflowMain forwarded.
    expect(capturedContextProps).toMatchObject({
      nodes: graphNodes,
      edges: graphEdges,
      viewport: graphViewport,
    })
  })
  it('should update features and workflow variables when workflow data changes', () => {
    renderBareMain()
    fireEvent.click(screen.getByRole('button', { name: /update-workflow-data/i }))
    expect(mockSetFeatures).toHaveBeenCalledWith({ file: { enabled: true } })
    expect(mockSetConversationVariables).toHaveBeenCalledWith([{ id: 'conversation-1' }])
    expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([{ id: 'env-1' }])
  })
  it('should only update the workflow store slices present in the payload', () => {
    renderBareMain()
    fireEvent.click(screen.getByRole('button', { name: /update-conversation-only/i }))
    expect(mockSetConversationVariables).toHaveBeenCalledWith([{ id: 'conversation-only' }])
    expect(mockSetFeatures).not.toHaveBeenCalled()
    expect(mockSetEnvironmentVariables).not.toHaveBeenCalled()
  })
  it('should ignore empty workflow data updates', () => {
    renderBareMain()
    fireEvent.click(screen.getByRole('button', { name: /update-empty-payload/i }))
    expect(mockSetFeatures).not.toHaveBeenCalled()
    expect(mockSetConversationVariables).not.toHaveBeenCalled()
    expect(mockSetEnvironmentVariables).not.toHaveBeenCalled()
  })
  it('should expose the composed workflow action hooks through hooksStore', () => {
    renderBareMain()
    // Every spy from hookFns must surface on hooksStore by identity.
    expect(capturedContextProps?.hooksStore).toMatchObject({
      syncWorkflowDraftWhenPageClose: hookFns.syncWorkflowDraftWhenPageClose,
      doSyncWorkflowDraft: hookFns.doSyncWorkflowDraft,
      handleRefreshWorkflowDraft: hookFns.handleRefreshWorkflowDraft,
      handleBackupDraft: hookFns.handleBackupDraft,
      handleLoadBackupDraft: hookFns.handleLoadBackupDraft,
      handleRestoreFromPublishedWorkflow: hookFns.handleRestoreFromPublishedWorkflow,
      handleRun: hookFns.handleRun,
      handleStopRun: hookFns.handleStopRun,
      handleStartWorkflowRun: hookFns.handleStartWorkflowRun,
      handleWorkflowStartRunInChatflow: hookFns.handleWorkflowStartRunInChatflow,
      handleWorkflowStartRunInWorkflow: hookFns.handleWorkflowStartRunInWorkflow,
      handleWorkflowTriggerScheduleRunInWorkflow: hookFns.handleWorkflowTriggerScheduleRunInWorkflow,
      handleWorkflowTriggerWebhookRunInWorkflow: hookFns.handleWorkflowTriggerWebhookRunInWorkflow,
      handleWorkflowTriggerPluginRunInWorkflow: hookFns.handleWorkflowTriggerPluginRunInWorkflow,
      handleWorkflowRunAllTriggersInWorkflow: hookFns.handleWorkflowRunAllTriggersInWorkflow,
      availableNodesMetaData: { nodes: [{ id: 'start' }], nodesMap: { start: { id: 'start' } } },
      getWorkflowRunAndTraceUrl: hookFns.getWorkflowRunAndTraceUrl,
      exportCheck: hookFns.exportCheck,
      handleExportDSL: hookFns.handleExportDSL,
      fetchInspectVars: hookFns.fetchInspectVars,
      configsMap: { flowId: 'app-1', flowType: 'app-flow', fileSettings: { enabled: true } },
    })
  })
})

View File

@@ -1,214 +0,0 @@
import type { ReactNode } from 'react'
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import * as React from 'react'
import WorkflowPanel from '../workflow-panel'
// Shape of the mocked app store consumed by WorkflowPanel.
type AppStoreState = {
  appDetail?: {
    id?: string
    workflow?: {
      id?: string
    }
  }
  currentLogItem?: { id: string }
  setCurrentLogItem: (item?: { id: string }) => void
  showMessageLogModal: boolean
  setShowMessageLogModal: (show: boolean) => void
  currentLogModalActiveTab?: string
}
// Shape of the mocked workflow store driving panel visibility.
type WorkflowStoreState = {
  historyWorkflowData?: Record<string, unknown>
  showDebugAndPreviewPanel: boolean
  showChatVariablePanel: boolean
  showGlobalVariablePanel: boolean
}
// Spies and mutable fixtures reset per test.
const mockUseIsChatMode = vi.fn()
const mockSetCurrentLogItem = vi.fn()
const mockSetShowMessageLogModal = vi.fn()
let appStoreState: AppStoreState
let workflowStoreState: WorkflowStoreState
vi.mock('@/app/components/app/store', () => ({
useStore: <T,>(selector: (state: AppStoreState) => T) => selector(appStoreState),
}))
vi.mock('@/app/components/workflow/store', () => ({
useStore: <T,>(selector: (state: WorkflowStoreState) => T) => selector(workflowStoreState),
}))
vi.mock('@/app/components/workflow/panel', () => ({
default: ({
components,
versionHistoryPanelProps,
}: {
components?: {
left?: ReactNode
right?: ReactNode
}
versionHistoryPanelProps?: {
getVersionListUrl: string
deleteVersionUrl: (versionId: string) => string
restoreVersionUrl: (versionId: string) => string
updateVersionUrl: (versionId: string) => string
latestVersionId?: string
}
}) => (
<div
data-testid="panel"
data-version-list-url={versionHistoryPanelProps?.getVersionListUrl ?? ''}
data-delete-version-url={versionHistoryPanelProps?.deleteVersionUrl('version-1') ?? ''}
data-restore-version-url={versionHistoryPanelProps?.restoreVersionUrl('version-1') ?? ''}
data-update-version-url={versionHistoryPanelProps?.updateVersionUrl('version-1') ?? ''}
data-latest-version-id={versionHistoryPanelProps?.latestVersionId ?? ''}
>
<div data-testid="panel-left">{components?.left}</div>
<div data-testid="panel-right">{components?.right}</div>
</div>
),
}))
vi.mock('@/next/dynamic', () => ({
default: (loader: () => Promise<{ default: React.ComponentType<Record<string, unknown>> }>) => {
const LazyComp = React.lazy(loader)
return function DynamicWrapper(props: Record<string, unknown>) {
return React.createElement(
React.Suspense,
{ fallback: null },
React.createElement(LazyComp, props),
)
}
},
}))
vi.mock('@/app/components/base/message-log-modal', () => ({
default: ({
currentLogItem,
defaultTab,
onCancel,
}: {
currentLogItem?: { id: string }
defaultTab?: string
onCancel: () => void
}) => (
<div data-testid="message-log-modal" data-current-log-id={currentLogItem?.id ?? ''} data-default-tab={defaultTab ?? ''}>
<button type="button" onClick={onCancel}>close-message-log</button>
</div>
),
}))
vi.mock('@/app/components/workflow/panel/record', () => ({
default: () => <div data-testid="record-panel">record</div>,
}))
vi.mock('@/app/components/workflow/panel/chat-record', () => ({
default: () => <div data-testid="chat-record-panel">chat-record</div>,
}))
vi.mock('@/app/components/workflow/panel/debug-and-preview', () => ({
default: () => <div data-testid="debug-and-preview-panel">debug</div>,
}))
vi.mock('@/app/components/workflow/panel/workflow-preview', () => ({
default: () => <div data-testid="workflow-preview-panel">preview</div>,
}))
vi.mock('@/app/components/workflow/panel/chat-variable-panel', () => ({
default: () => <div data-testid="chat-variable-panel">chat-variable</div>,
}))
vi.mock('@/app/components/workflow/panel/global-variable-panel', () => ({
default: () => <div data-testid="global-variable-panel">global-variable</div>,
}))
vi.mock('@/app/components/workflow-app/hooks', () => ({
useIsChatMode: () => mockUseIsChatMode(),
}))
describe('WorkflowPanel', () => {
beforeEach(() => {
vi.clearAllMocks()
appStoreState = {
appDetail: {
id: 'app-123',
workflow: {
id: 'workflow-version-id',
},
},
currentLogItem: { id: 'log-1' },
setCurrentLogItem: mockSetCurrentLogItem,
showMessageLogModal: false,
setShowMessageLogModal: mockSetShowMessageLogModal,
currentLogModalActiveTab: 'detail',
}
workflowStoreState = {
historyWorkflowData: undefined,
showDebugAndPreviewPanel: false,
showChatVariablePanel: false,
showGlobalVariablePanel: false,
}
mockUseIsChatMode.mockReturnValue(false)
})
it('should configure workflow version history urls and latest version id for the panel shell', async () => {
render(<WorkflowPanel />)
const panel = await screen.findByTestId('panel')
expect(panel).toHaveAttribute('data-version-list-url', '/apps/app-123/workflows')
expect(panel).toHaveAttribute('data-delete-version-url', '/apps/app-123/workflows/version-1')
expect(panel).toHaveAttribute('data-restore-version-url', '/apps/app-123/workflows/version-1/restore')
expect(panel).toHaveAttribute('data-update-version-url', '/apps/app-123/workflows/version-1')
expect(panel).toHaveAttribute('data-latest-version-id', 'workflow-version-id')
})
it('should render and close the message log modal from the left panel slot', async () => {
const user = userEvent.setup()
appStoreState = {
...appStoreState,
showMessageLogModal: true,
}
render(<WorkflowPanel />)
expect(await screen.findByTestId('message-log-modal')).toHaveAttribute('data-current-log-id', 'log-1')
expect(screen.getByTestId('message-log-modal')).toHaveAttribute('data-default-tab', 'detail')
await user.click(screen.getByRole('button', { name: /close-message-log/i }))
expect(mockSetCurrentLogItem).toHaveBeenCalledWith()
expect(mockSetShowMessageLogModal).toHaveBeenCalledWith(false)
})
it('should switch right-side workflow panels based on chat mode and workflow state', async () => {
workflowStoreState = {
historyWorkflowData: { id: 'history-1' },
showDebugAndPreviewPanel: true,
showChatVariablePanel: true,
showGlobalVariablePanel: true,
}
mockUseIsChatMode.mockReturnValue(true)
const { unmount } = render(<WorkflowPanel />)
expect(await screen.findByTestId('chat-record-panel')).toBeInTheDocument()
expect(screen.getByTestId('debug-and-preview-panel')).toBeInTheDocument()
expect(screen.getByTestId('chat-variable-panel')).toBeInTheDocument()
expect(screen.getByTestId('global-variable-panel')).toBeInTheDocument()
expect(screen.queryByTestId('record-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('workflow-preview-panel')).not.toBeInTheDocument()
unmount()
mockUseIsChatMode.mockReturnValue(false)
render(<WorkflowPanel />)
expect(await screen.findByTestId('record-panel')).toBeInTheDocument()
expect(screen.getByTestId('workflow-preview-panel')).toBeInTheDocument()
expect(screen.getByTestId('global-variable-panel')).toBeInTheDocument()
expect(screen.queryByTestId('chat-record-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('debug-and-preview-panel')).not.toBeInTheDocument()
expect(screen.queryByTestId('chat-variable-panel')).not.toBeInTheDocument()
})
})

View File

@@ -149,7 +149,6 @@ const createProviderContext = ({
const renderWithToast = (ui: ReactElement) => {
return render(
// eslint-disable-next-line react/no-context-provider
<ToastContext.Provider value={{ notify: mockNotify, close: vi.fn() }}>
{ui}
</ToastContext.Provider>,
@@ -446,27 +445,6 @@ describe('FeaturesTrigger', () => {
})
})
it('should skip success side effects when publish mutation returns no workflow version', async () => {
// Arrange
const user = userEvent.setup()
mockPublishWorkflow.mockResolvedValue(null)
renderWithToast(<FeaturesTrigger />)
// Act
await user.click(screen.getByRole('button', { name: 'publisher-publish' }))
// Assert
await waitFor(() => {
expect(mockPublishWorkflow).toHaveBeenCalled()
})
expect(mockNotify).not.toHaveBeenCalledWith({ type: 'success', message: 'common.api.actionSuccess' })
expect(mockUpdatePublishedWorkflow).not.toHaveBeenCalled()
expect(mockInvalidateAppTriggers).not.toHaveBeenCalled()
expect(mockSetPublishedAt).not.toHaveBeenCalled()
expect(mockSetLastPublishedHasUserInput).not.toHaveBeenCalled()
expect(mockResetWorkflowVersionHistory).not.toHaveBeenCalled()
})
it('should log error when app detail refresh fails after publish', async () => {
// Arrange
const user = userEvent.setup()

View File

@@ -1,18 +0,0 @@
// Smoke test for the workflow-app hooks barrel file: asserts every expected
// hook is re-exported as a function, guarding against accidental removal of
// an export during refactors.
import * as hooks from '../index'
describe('workflow-app hooks index', () => {
it('should re-export workflow-app hooks', () => {
expect(hooks.useAvailableNodesMetaData).toBeTypeOf('function')
expect(hooks.useConfigsMap).toBeTypeOf('function')
expect(hooks.useDSL).toBeTypeOf('function')
expect(hooks.useGetRunAndTraceUrl).toBeTypeOf('function')
expect(hooks.useInspectVarsCrud).toBeTypeOf('function')
expect(hooks.useIsChatMode).toBeTypeOf('function')
expect(hooks.useNodesSyncDraft).toBeTypeOf('function')
expect(hooks.useWorkflowInit).toBeTypeOf('function')
expect(hooks.useWorkflowRefreshDraft).toBeTypeOf('function')
expect(hooks.useWorkflowRun).toBeTypeOf('function')
expect(hooks.useWorkflowStartRun).toBeTypeOf('function')
expect(hooks.useWorkflowTemplate).toBeTypeOf('function')
})
})

View File

@@ -1,206 +0,0 @@
// Tests for the useDSL hook: DSL export flow (draft sync -> export -> blob
// download), the secret-env pre-export check, early-return guards, error
// notifications, and re-entrancy protection during an in-flight export.
import { act, renderHook, waitFor } from '@testing-library/react'
import { DSL_EXPORT_CHECK } from '@/app/components/workflow/constants'
import { useDSL } from '../use-DSL'
const mockNotify = vi.fn()
const mockEmit = vi.fn()
const mockDoSyncWorkflowDraft = vi.fn()
const mockExportAppConfig = vi.fn()
const mockFetchWorkflowDraft = vi.fn()
const mockDownloadBlob = vi.fn()
// Mutable app store state read lazily by the useStore mock so individual
// tests can clear appDetail (see the early-return test).
let appStoreState: {
appDetail?: {
id: string
name: string
}
}
vi.mock('@/app/components/base/toast/context', () => ({
useToastContext: () => ({ notify: mockNotify }),
}))
vi.mock('@/context/event-emitter', () => ({
useEventEmitterContextContext: () => ({
eventEmitter: {
emit: mockEmit,
},
}),
}))
vi.mock('@/app/components/app/store', () => ({
useStore: <T>(selector: (state: typeof appStoreState) => T) => selector(appStoreState),
}))
vi.mock('../use-nodes-sync-draft', () => ({
useNodesSyncDraft: () => ({
doSyncWorkflowDraft: mockDoSyncWorkflowDraft,
}),
}))
vi.mock('@/service/apps', () => ({
exportAppConfig: (...args: unknown[]) => mockExportAppConfig(...args),
}))
vi.mock('@/service/workflow', () => ({
fetchWorkflowDraft: (...args: unknown[]) => mockFetchWorkflowDraft(...args),
}))
vi.mock('@/utils/download', () => ({
downloadBlob: (...args: unknown[]) => mockDownloadBlob(...args),
}))
// Minimal deferred helper: lets a test hold an export promise open to
// exercise the "already in progress" guard before resolving it.
const createDeferred = <T>() => {
let resolve!: (value: T) => void
const promise = new Promise<T>((res) => {
resolve = res
})
return { promise, resolve }
}
describe('useDSL', () => {
beforeEach(() => {
vi.clearAllMocks()
// Baseline: app present, draft sync and export succeed, no env vars.
appStoreState = {
appDetail: {
id: 'app-1',
name: 'Workflow App',
},
}
mockDoSyncWorkflowDraft.mockResolvedValue(undefined)
mockExportAppConfig.mockResolvedValue({ data: 'yaml-content' })
mockFetchWorkflowDraft.mockResolvedValue({ environment_variables: [] })
})
it('should export workflow dsl and download the yaml blob when no secret env is present', async () => {
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.exportCheck()
})
expect(mockFetchWorkflowDraft).toHaveBeenCalledWith('/apps/app-1/workflows/draft')
expect(mockDoSyncWorkflowDraft).toHaveBeenCalled()
expect(mockExportAppConfig).toHaveBeenCalledWith({
appID: 'app-1',
include: false,
workflowID: undefined,
})
// The downloaded file name is derived from the app name.
expect(mockDownloadBlob).toHaveBeenCalledWith(expect.objectContaining({
data: expect.any(Blob),
fileName: 'Workflow App.yml',
}))
})
it('should forward include and workflow id arguments when exporting dsl directly', async () => {
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.handleExportDSL(true, 'workflow-1')
})
expect(mockExportAppConfig).toHaveBeenCalledWith({
appID: 'app-1',
include: true,
workflowID: 'workflow-1',
})
})
it('should emit DSL_EXPORT_CHECK when secret environment variables exist', async () => {
// Secrets must trigger the confirmation event instead of exporting directly.
const secretVars = [{ id: 'env-1', value_type: 'secret', value: 'secret-token' }]
mockFetchWorkflowDraft.mockResolvedValue({ environment_variables: secretVars })
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.exportCheck()
})
expect(mockEmit).toHaveBeenCalledWith({
type: DSL_EXPORT_CHECK,
payload: {
data: secretVars,
},
})
expect(mockExportAppConfig).not.toHaveBeenCalled()
})
it('should return early when app detail is unavailable', async () => {
appStoreState = {}
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.exportCheck()
await result.current.handleExportDSL()
})
// Without an app, neither entry point should touch any service.
expect(mockFetchWorkflowDraft).not.toHaveBeenCalled()
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockExportAppConfig).not.toHaveBeenCalled()
expect(mockEmit).not.toHaveBeenCalled()
})
it('should notify when export fails', async () => {
mockExportAppConfig.mockRejectedValue(new Error('export failed'))
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.handleExportDSL()
})
await waitFor(() => {
expect(mockNotify).toHaveBeenCalledWith({
type: 'error',
message: 'app.exportFailed',
})
})
})
it('should notify when exportCheck cannot load the workflow draft', async () => {
mockFetchWorkflowDraft.mockRejectedValue(new Error('draft fetch failed'))
const { result } = renderHook(() => useDSL())
await act(async () => {
await result.current.exportCheck()
})
await waitFor(() => {
expect(mockNotify).toHaveBeenCalledWith({
type: 'error',
message: 'app.exportFailed',
})
})
expect(mockExportAppConfig).not.toHaveBeenCalled()
})
it('should ignore repeated export attempts while an export is already in progress', async () => {
// Keep the first export pending so the second call hits the in-flight guard.
const deferred = createDeferred<{ data: string }>()
mockExportAppConfig.mockReturnValue(deferred.promise)
const { result } = renderHook(() => useDSL())
let firstExportPromise!: Promise<void>
act(() => {
firstExportPromise = result.current.handleExportDSL()
})
await waitFor(() => {
expect(mockDoSyncWorkflowDraft).toHaveBeenCalledTimes(1)
expect(mockExportAppConfig).toHaveBeenCalledTimes(1)
})
act(() => {
void result.current.handleExportDSL()
})
// Second attempt must not start another export while the first is pending.
expect(mockExportAppConfig).toHaveBeenCalledTimes(1)
await act(async () => {
deferred.resolve({ data: 'yaml-content' })
await firstExportPromise
})
})
})

View File

@@ -1,118 +0,0 @@
// Tests for useAutoOnboarding: the delayed auto-open of the onboarding flow
// on an empty canvas, the conditions that suppress it, and the two close
// paths (with and without a start node having been selected).
import { act, renderHook } from '@testing-library/react'
import { useAutoOnboarding } from '../use-auto-onboarding'
const mockGetNodes = vi.fn()
const mockWorkflowStore = {
getState: vi.fn(),
}
const mockSetShowOnboarding = vi.fn()
const mockSetHasShownOnboarding = vi.fn()
const mockSetShouldAutoOpenStartNodeSelector = vi.fn()
const mockSetHasSelectedStartNode = vi.fn()
vi.mock('reactflow', () => ({
useStoreApi: () => ({
getState: () => ({
getNodes: mockGetNodes,
}),
}),
}))
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => mockWorkflowStore,
}))
describe('useAutoOnboarding', () => {
beforeEach(() => {
vi.clearAllMocks()
// Fake timers: the hook defers its empty-canvas check (see the 500ms
// advances below), so tests drive time explicitly.
vi.useFakeTimers()
mockGetNodes.mockReturnValue([])
// Baseline: onboarding never shown, initial workflow, empty canvas.
mockWorkflowStore.getState.mockReturnValue({
showOnboarding: false,
hasShownOnboarding: false,
notInitialWorkflow: false,
setShowOnboarding: mockSetShowOnboarding,
setHasShownOnboarding: mockSetHasShownOnboarding,
setShouldAutoOpenStartNodeSelector: mockSetShouldAutoOpenStartNodeSelector,
hasSelectedStartNode: false,
setHasSelectedStartNode: mockSetHasSelectedStartNode,
})
})
afterEach(() => {
vi.useRealTimers()
})
it('should open onboarding after the delayed empty-canvas check on mount', () => {
renderHook(() => useAutoOnboarding())
act(() => {
vi.advanceTimersByTime(500)
})
expect(mockSetShowOnboarding).toHaveBeenCalledWith(true)
expect(mockSetHasShownOnboarding).toHaveBeenCalledWith(true)
expect(mockSetShouldAutoOpenStartNodeSelector).toHaveBeenCalledWith(true)
})
it('should skip auto onboarding when it is already visible or the workflow is not initial', () => {
mockWorkflowStore.getState.mockReturnValue({
showOnboarding: true,
hasShownOnboarding: false,
notInitialWorkflow: true,
setShowOnboarding: mockSetShowOnboarding,
setHasShownOnboarding: mockSetHasShownOnboarding,
setShouldAutoOpenStartNodeSelector: mockSetShouldAutoOpenStartNodeSelector,
hasSelectedStartNode: false,
setHasSelectedStartNode: mockSetHasSelectedStartNode,
})
renderHook(() => useAutoOnboarding())
act(() => {
vi.advanceTimersByTime(500)
})
expect(mockSetShowOnboarding).not.toHaveBeenCalled()
expect(mockSetHasShownOnboarding).not.toHaveBeenCalled()
expect(mockSetShouldAutoOpenStartNodeSelector).not.toHaveBeenCalled()
})
it('should close onboarding and reset selected start node state when one was chosen', () => {
mockWorkflowStore.getState.mockReturnValue({
showOnboarding: false,
hasShownOnboarding: true,
notInitialWorkflow: false,
setShowOnboarding: mockSetShowOnboarding,
setHasShownOnboarding: mockSetHasShownOnboarding,
setShouldAutoOpenStartNodeSelector: mockSetShouldAutoOpenStartNodeSelector,
hasSelectedStartNode: true,
setHasSelectedStartNode: mockSetHasSelectedStartNode,
})
const { result } = renderHook(() => useAutoOnboarding())
act(() => {
result.current.handleOnboardingClose()
})
// With a start node chosen: close, remember it was shown, reset the flag,
// and leave the auto-open setting untouched.
expect(mockSetShowOnboarding).toHaveBeenCalledWith(false)
expect(mockSetHasShownOnboarding).toHaveBeenCalledWith(true)
expect(mockSetHasSelectedStartNode).toHaveBeenCalledWith(false)
expect(mockSetShouldAutoOpenStartNodeSelector).not.toHaveBeenCalled()
})
it('should close onboarding and disable auto-open when no start node was selected', () => {
const { result } = renderHook(() => useAutoOnboarding())
act(() => {
result.current.handleOnboardingClose()
})
// Without a selection: closing also disables auto-open of the selector.
expect(mockSetShowOnboarding).toHaveBeenCalledWith(false)
expect(mockSetHasShownOnboarding).toHaveBeenCalledWith(true)
expect(mockSetShouldAutoOpenStartNodeSelector).toHaveBeenCalledWith(false)
expect(mockSetHasSelectedStartNode).not.toHaveBeenCalled()
})
})

View File

@@ -1,49 +0,0 @@
// Tests for useAvailableNodesMetaData: verifies the node catalog differs
// between chat mode (Answer node, undeletable Start, no triggers/End) and
// workflow mode (End + trigger nodes, deletable Start).
import { renderHook } from '@testing-library/react'
import { BlockEnum } from '@/app/components/workflow/types'
import { useAvailableNodesMetaData } from '../use-available-nodes-meta-data'
const mockUseIsChatMode = vi.fn()
vi.mock('@/app/components/workflow-app/hooks/use-is-chat-mode', () => ({
useIsChatMode: () => mockUseIsChatMode(),
}))
vi.mock('@/context/i18n', () => ({
// Doc link builder stub: prefixes paths with /docs so help links are assertable.
useDocLink: () => (path: string) => `/docs${path}`,
}))
describe('useAvailableNodesMetaData', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('should include chat-specific nodes and make the start node undeletable in chat mode', () => {
mockUseIsChatMode.mockReturnValue(true)
const { result } = renderHook(() => useAvailableNodesMetaData())
expect(result.current.nodesMap?.[BlockEnum.Start]?.metaData.isUndeletable).toBe(true)
expect(result.current.nodesMap?.[BlockEnum.Answer]).toBeDefined()
expect(result.current.nodesMap?.[BlockEnum.End]).toBeUndefined()
expect(result.current.nodesMap?.[BlockEnum.TriggerWebhook]).toBeUndefined()
// VariableAssigner is expected to alias the exact same entry as VariableAggregator.
expect(result.current.nodesMap?.[BlockEnum.VariableAssigner]).toBe(result.current.nodesMap?.[BlockEnum.VariableAggregator])
expect(result.current.nodesMap?.[BlockEnum.Start]?.metaData.helpLinkUri).toContain('/docs/use-dify/nodes/')
})
it('should include workflow-specific trigger and end nodes outside chat mode', () => {
mockUseIsChatMode.mockReturnValue(false)
const { result } = renderHook(() => useAvailableNodesMetaData())
expect(result.current.nodesMap?.[BlockEnum.Start]?.metaData.isUndeletable).toBe(false)
expect(result.current.nodesMap?.[BlockEnum.End]).toBeDefined()
expect(result.current.nodesMap?.[BlockEnum.TriggerWebhook]).toBeDefined()
expect(result.current.nodesMap?.[BlockEnum.TriggerSchedule]).toBeDefined()
expect(result.current.nodesMap?.[BlockEnum.TriggerPlugin]).toBeDefined()
expect(result.current.nodesMap?.[BlockEnum.Answer]).toBeUndefined()
expect(result.current.nodesMap?.[BlockEnum.Start]?.defaultValue).toMatchObject({
type: BlockEnum.Start,
title: 'workflow.blocks.start',
})
})
})

View File

@@ -1,40 +0,0 @@
// Tests for useConfigsMap: verifies the workflow app id and feature-store
// file settings are mapped into the inspect-var config shape.
import { renderHook } from '@testing-library/react'
import { FlowType } from '@/types/common'
import { useConfigsMap } from '../use-configs-map'
const mockUseFeatures = vi.fn()
vi.mock('@/app/components/base/features/hooks', () => ({
useFeatures: (selector: (state: { features: { file: Record<string, unknown> } }) => unknown) => mockUseFeatures(selector),
}))
vi.mock('@/app/components/workflow/store', () => ({
// Workflow store stub always exposes a fixed app id for assertions.
useStore: <T>(selector: (state: { appId: string }) => T) => selector({ appId: 'app-1' }),
}))
describe('useConfigsMap', () => {
beforeEach(() => {
vi.clearAllMocks()
// Features store stub: runs the real selector against canned file settings.
mockUseFeatures.mockImplementation((selector: (state: { features: { file: Record<string, unknown> } }) => unknown) => selector({
features: {
file: {
enabled: true,
number_limits: 3,
},
},
}))
})
it('should map workflow app id and feature file settings into inspect-var configs', () => {
const { result } = renderHook(() => useConfigsMap())
expect(result.current).toEqual({
flowId: 'app-1',
flowType: FlowType.appFlow,
fileSettings: {
enabled: true,
number_limits: 3,
},
})
})
})

View File

@@ -1,28 +0,0 @@
// Tests for useGetRunAndTraceUrl: verifies run and trace URLs are built from
// the app id held in the workflow store.
import { renderHook } from '@testing-library/react'
import { useGetRunAndTraceUrl } from '../use-get-run-and-trace-url'
const mockWorkflowStore = {
getState: vi.fn(),
}
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => mockWorkflowStore,
}))
describe('useGetRunAndTraceUrl', () => {
beforeEach(() => {
vi.clearAllMocks()
mockWorkflowStore.getState.mockReturnValue({
appId: 'app-123',
})
})
it('should build workflow run and trace urls from the current app id', () => {
const { result } = renderHook(() => useGetRunAndTraceUrl())
// The trace URL is the run URL with a /node-executions suffix.
expect(result.current.getWorkflowRunAndTraceUrl('run-1')).toEqual({
runUrl: '/apps/app-123/workflow-runs/run-1',
traceUrl: '/apps/app-123/workflow-runs/run-1/node-executions',
})
})
})

View File

@@ -1,44 +0,0 @@
// Tests for useInspectVarsCrud: verifies it delegates to the shared
// useInspectVarsCrudCommon hook with the workflow-app configs and passes
// the resulting CRUD API through unchanged.
import { renderHook } from '@testing-library/react'
import { useInspectVarsCrud } from '../use-inspect-vars-crud'
const mockUseInspectVarsCrudCommon = vi.fn()
const mockUseConfigsMap = vi.fn()
vi.mock('@/app/components/workflow/hooks/use-inspect-vars-crud-common', () => ({
useInspectVarsCrudCommon: (...args: unknown[]) => mockUseInspectVarsCrudCommon(...args),
}))
vi.mock('@/app/components/workflow-app/hooks/use-configs-map', () => ({
useConfigsMap: () => mockUseConfigsMap(),
}))
describe('useInspectVarsCrud', () => {
beforeEach(() => {
vi.clearAllMocks()
// Canned configs returned by the mocked useConfigsMap dependency.
mockUseConfigsMap.mockReturnValue({
flowId: 'app-1',
flowType: 'app-flow',
fileSettings: { enabled: true },
})
mockUseInspectVarsCrudCommon.mockReturnValue({
fetchInspectVarValue: vi.fn(),
editInspectVarValue: vi.fn(),
deleteInspectVar: vi.fn(),
})
})
it('should call the shared inspect vars hook with workflow-app configs and return its api', () => {
const { result } = renderHook(() => useInspectVarsCrud())
expect(mockUseInspectVarsCrudCommon).toHaveBeenCalledWith({
flowId: 'app-1',
flowType: 'app-flow',
fileSettings: { enabled: true },
})
// The hook must forward the common API object's functions as-is.
expect(result.current).toEqual({
fetchInspectVarValue: expect.any(Function),
editInspectVarValue: expect.any(Function),
deleteInspectVar: expect.any(Function),
})
})
})

View File

@@ -4,57 +4,42 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useNodesSyncDraft } from '../use-nodes-sync-draft'
const mockGetNodes = vi.fn()
const mockPostWithKeepalive = vi.fn()
const mockSetSyncWorkflowDraftHash = vi.fn()
const mockSetDraftUpdatedAt = vi.fn()
const mockGetNodesReadOnly = vi.fn()
let reactFlowState: {
getNodes: typeof mockGetNodes
edges: Array<Record<string, unknown>>
transform: [number, number, number]
}
let workflowStoreState: {
appId: string
isWorkflowDataLoaded: boolean
syncWorkflowDraftHash: string | null
environmentVariables: Array<Record<string, unknown>>
conversationVariables: Array<Record<string, unknown>>
setSyncWorkflowDraftHash: typeof mockSetSyncWorkflowDraftHash
setDraftUpdatedAt: typeof mockSetDraftUpdatedAt
}
let featuresState: {
features: {
opening: { enabled: boolean, opening_statement: string, suggested_questions: string[] }
suggested: Record<string, unknown>
text2speech: Record<string, unknown>
speech2text: Record<string, unknown>
citation: Record<string, unknown>
moderation: Record<string, unknown>
file: Record<string, unknown>
}
}
vi.mock('reactflow', () => ({
useStoreApi: () => ({ getState: () => reactFlowState }),
useStoreApi: () => ({ getState: () => ({ getNodes: mockGetNodes, edges: [], transform: [0, 0, 1] }) }),
}))
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: () => workflowStoreState,
getState: () => ({
appId: 'app-1',
isWorkflowDataLoaded: true,
syncWorkflowDraftHash: 'hash-123',
environmentVariables: [],
conversationVariables: [],
setSyncWorkflowDraftHash: vi.fn(),
setDraftUpdatedAt: vi.fn(),
}),
}),
}))
vi.mock('@/app/components/base/features/hooks', () => ({
useFeaturesStore: () => ({
getState: () => featuresState,
getState: () => ({
features: {
opening: { enabled: false, opening_statement: '', suggested_questions: [] },
suggested: {},
text2speech: {},
speech2text: {},
citation: {},
moderation: {},
file: {},
},
}),
}),
}))
vi.mock('@/app/components/workflow/hooks/use-workflow', () => ({
useNodesReadOnly: () => ({ getNodesReadOnly: mockGetNodesReadOnly }),
useNodesReadOnly: () => ({ getNodesReadOnly: () => false }),
}))
vi.mock('@/app/components/workflow/hooks/use-serial-async-callback', () => ({
@@ -70,7 +55,7 @@ vi.mock('@/service/workflow', () => ({
syncWorkflowDraft: (p: unknown) => mockSyncWorkflowDraft(p),
}))
vi.mock('@/service/fetch', () => ({ postWithKeepalive: (...args: unknown[]) => mockPostWithKeepalive(...args) }))
vi.mock('@/service/fetch', () => ({ postWithKeepalive: vi.fn() }))
vi.mock('@/config', () => ({ API_PREFIX: '/api' }))
const mockHandleRefreshWorkflowDraft = vi.fn()
@@ -81,32 +66,6 @@ vi.mock('@/app/components/workflow-app/hooks', () => ({
describe('useNodesSyncDraft — handleRefreshWorkflowDraft(true) on 409', () => {
beforeEach(() => {
vi.clearAllMocks()
reactFlowState = {
getNodes: mockGetNodes,
edges: [],
transform: [0, 0, 1],
}
workflowStoreState = {
appId: 'app-1',
isWorkflowDataLoaded: true,
syncWorkflowDraftHash: 'hash-123',
environmentVariables: [],
conversationVariables: [],
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
setDraftUpdatedAt: mockSetDraftUpdatedAt,
}
featuresState = {
features: {
opening: { enabled: false, opening_statement: '', suggested_questions: [] },
suggested: {},
text2speech: {},
speech2text: {},
citation: {},
moderation: {},
file: {},
},
}
mockGetNodesReadOnly.mockReturnValue(false)
mockGetNodes.mockReturnValue([{ id: 'n1', position: { x: 0, y: 0 }, data: { type: 'start' } }])
mockSyncWorkflowDraft.mockResolvedValue({ hash: 'new', updated_at: 1 })
})
@@ -163,102 +122,4 @@ describe('useNodesSyncDraft — handleRefreshWorkflowDraft(true) on 409', () =>
}),
}))
})
it('should strip temp entities and private data, use the latest hash, and invoke success callbacks', async () => {
reactFlowState = {
...reactFlowState,
edges: [
{ id: 'edge-1', source: 'n1', target: 'n2', data: { _isTemp: false, _private: 'drop', stable: 'keep' } },
{ id: 'temp-edge', source: 'n2', target: 'n3', data: { _isTemp: true } },
],
transform: [10, 20, 1.5],
}
mockGetNodes.mockReturnValue([
{ id: 'n1', position: { x: 0, y: 0 }, data: { type: 'start', _tempField: 'drop', label: 'Start' } },
{ id: 'temp-node', position: { x: 1, y: 1 }, data: { type: 'answer', _isTempNode: true } },
])
workflowStoreState = {
...workflowStoreState,
syncWorkflowDraftHash: 'latest-hash',
environmentVariables: [{ id: 'env-1', value: 'env' }],
conversationVariables: [{ id: 'conversation-1', value: 'conversation' }],
}
featuresState = {
features: {
opening: { enabled: true, opening_statement: 'Hello', suggested_questions: ['Q1'] },
suggested: { enabled: true },
text2speech: { enabled: true },
speech2text: { enabled: true },
citation: { enabled: true },
moderation: { enabled: false },
file: { enabled: true },
},
}
const callbacks = {
onSuccess: vi.fn(),
onError: vi.fn(),
onSettled: vi.fn(),
}
const { result } = renderHook(() => useNodesSyncDraft())
await act(async () => {
await result.current.doSyncWorkflowDraft(false, callbacks)
})
expect(mockSyncWorkflowDraft).toHaveBeenCalledWith({
url: '/apps/app-1/workflows/draft',
params: {
graph: {
nodes: [{ id: 'n1', position: { x: 0, y: 0 }, data: { type: 'start', label: 'Start' } }],
edges: [{ id: 'edge-1', source: 'n1', target: 'n2', data: { stable: 'keep' } }],
viewport: { x: 10, y: 20, zoom: 1.5 },
},
features: {
opening_statement: 'Hello',
suggested_questions: ['Q1'],
suggested_questions_after_answer: { enabled: true },
text_to_speech: { enabled: true },
speech_to_text: { enabled: true },
retriever_resource: { enabled: true },
sensitive_word_avoidance: { enabled: false },
file_upload: { enabled: true },
},
environment_variables: [{ id: 'env-1', value: 'env' }],
conversation_variables: [{ id: 'conversation-1', value: 'conversation' }],
hash: 'latest-hash',
},
})
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('new')
expect(mockSetDraftUpdatedAt).toHaveBeenCalledWith(1)
expect(callbacks.onSuccess).toHaveBeenCalled()
expect(callbacks.onError).not.toHaveBeenCalled()
expect(callbacks.onSettled).toHaveBeenCalled()
})
it('should post workflow draft with keepalive when the page closes', () => {
reactFlowState = {
...reactFlowState,
transform: [1, 2, 3],
}
workflowStoreState = {
...workflowStoreState,
environmentVariables: [{ id: 'env-1' }],
conversationVariables: [{ id: 'conversation-1' }],
}
const { result } = renderHook(() => useNodesSyncDraft())
act(() => {
result.current.syncWorkflowDraftWhenPageClose()
})
expect(mockPostWithKeepalive).toHaveBeenCalledWith('/api/apps/app-1/workflows/draft', expect.objectContaining({
graph: expect.objectContaining({
viewport: { x: 1, y: 2, zoom: 3 },
}),
hash: 'hash-123',
}))
})
})

View File

@@ -1,6 +1,5 @@
import { renderHook, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockEnum } from '@/app/components/workflow/types'
import { useWorkflowInit } from '../use-workflow-init'
@@ -12,21 +11,6 @@ const mockSetLastPublishedHasUserInput = vi.fn()
const mockSetFileUploadConfig = vi.fn()
const mockWorkflowStoreSetState = vi.fn()
const mockWorkflowStoreGetState = vi.fn()
const mockFetchNodesDefaultConfigs = vi.fn()
const mockFetchPublishedWorkflow = vi.fn()
let appStoreState: {
appDetail: {
id: string
name: string
mode: string
}
}
let workflowConfigState: {
data: Record<string, unknown> | null
isLoading: boolean
}
vi.mock('@/app/components/workflow/store', () => ({
useStore: <T>(selector: (state: { setSyncWorkflowDraftHash: ReturnType<typeof vi.fn> }) => T): T =>
@@ -38,8 +22,8 @@ vi.mock('@/app/components/workflow/store', () => ({
}))
vi.mock('@/app/components/app/store', () => ({
useStore: <T>(selector: (state: typeof appStoreState) => T): T =>
selector(appStoreState),
useStore: <T>(selector: (state: { appDetail: { id: string, name: string, mode: string } }) => T): T =>
selector({ appDetail: { id: 'app-1', name: 'Test', mode: 'workflow' } }),
}))
vi.mock('../use-workflow-template', () => ({
@@ -47,11 +31,7 @@ vi.mock('../use-workflow-template', () => ({
}))
vi.mock('@/service/use-workflow', () => ({
useWorkflowConfig: (_url: string, onSuccess: (config: Record<string, unknown>) => void) => {
if (workflowConfigState.data)
onSuccess(workflowConfigState.data)
return workflowConfigState
},
useWorkflowConfig: () => ({ data: null, isLoading: false }),
}))
const mockFetchWorkflowDraft = vi.fn()
@@ -60,8 +40,8 @@ const mockSyncWorkflowDraft = vi.fn()
vi.mock('@/service/workflow', () => ({
fetchWorkflowDraft: (...args: unknown[]) => mockFetchWorkflowDraft(...args),
syncWorkflowDraft: (...args: unknown[]) => mockSyncWorkflowDraft(...args),
fetchNodesDefaultConfigs: (...args: unknown[]) => mockFetchNodesDefaultConfigs(...args),
fetchPublishedWorkflow: (...args: unknown[]) => mockFetchPublishedWorkflow(...args),
fetchNodesDefaultConfigs: () => Promise.resolve([]),
fetchPublishedWorkflow: () => Promise.resolve({ created_at: 0, graph: { nodes: [], edges: [] } }),
}))
const notExistError = () => ({
@@ -88,10 +68,6 @@ const draftResponse = {
describe('useWorkflowInit — hash fix (draft_workflow_not_exist)', () => {
beforeEach(() => {
vi.clearAllMocks()
appStoreState = {
appDetail: { id: 'app-1', name: 'Test', mode: 'workflow' },
}
workflowConfigState = { data: null, isLoading: false }
mockWorkflowStoreGetState.mockReturnValue({
setDraftUpdatedAt: mockSetDraftUpdatedAt,
setToolPublished: mockSetToolPublished,
@@ -99,8 +75,6 @@ describe('useWorkflowInit — hash fix (draft_workflow_not_exist)', () => {
setLastPublishedHasUserInput: mockSetLastPublishedHasUserInput,
setFileUploadConfig: mockSetFileUploadConfig,
})
mockFetchNodesDefaultConfigs.mockResolvedValue([])
mockFetchPublishedWorkflow.mockResolvedValue({ created_at: 0, graph: { nodes: [], edges: [] } })
mockFetchWorkflowDraft
.mockRejectedValueOnce(notExistError())
.mockResolvedValueOnce(draftResponse)
@@ -130,77 +104,4 @@ describe('useWorkflowInit — hash fix (draft_workflow_not_exist)', () => {
expect(order).toContain('hash:new-hash')
expect(order.indexOf('hash:new-hash')).toBeLessThan(order.indexOf('fetch:2'))
})
it('should hydrate draft state, preload defaults, and derive published workflow metadata on success', async () => {
workflowConfigState = {
data: { enabled: true, sizeLimit: 20 },
isLoading: false,
}
mockFetchWorkflowDraft.mockReset().mockResolvedValue({
...draftResponse,
updated_at: 9,
tool_published: true,
environment_variables: [
{ id: 'env-secret', value_type: 'secret', value: 'top-secret', name: 'SECRET' },
{ id: 'env-plain', value_type: 'text', value: 'visible', name: 'PLAIN' },
],
conversation_variables: [{ id: 'conversation-1' }],
})
mockFetchNodesDefaultConfigs.mockResolvedValue([
{ type: 'start', config: { title: 'Start Config' } },
{ type: 'start', config: { title: 'Ignored Duplicate' } },
])
mockFetchPublishedWorkflow.mockResolvedValue({
created_at: 99,
graph: {
nodes: [{ id: 'start', data: { type: BlockEnum.Start } }],
edges: [{ source: 'start', target: 'end' }],
},
})
const { result } = renderHook(() => useWorkflowInit())
await waitFor(() => {
expect(result.current.data?.hash).toBe('server-hash')
})
expect(mockWorkflowStoreSetState).toHaveBeenCalledWith({ appId: 'app-1', appName: 'Test' })
expect(mockWorkflowStoreSetState).toHaveBeenCalledWith(expect.objectContaining({
envSecrets: { 'env-secret': 'top-secret' },
environmentVariables: [
{ id: 'env-secret', value_type: 'secret', value: '[__HIDDEN__]', name: 'SECRET' },
{ id: 'env-plain', value_type: 'text', value: 'visible', name: 'PLAIN' },
],
conversationVariables: [{ id: 'conversation-1' }],
isWorkflowDataLoaded: true,
}))
expect(mockWorkflowStoreSetState).toHaveBeenCalledWith({
nodesDefaultConfigs: {
start: { title: 'Start Config' },
},
})
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('server-hash')
expect(mockSetDraftUpdatedAt).toHaveBeenCalledWith(9)
expect(mockSetToolPublished).toHaveBeenCalledWith(true)
expect(mockSetPublishedAt).toHaveBeenCalledWith(99)
expect(mockSetLastPublishedHasUserInput).toHaveBeenCalledWith(true)
expect(mockSetFileUploadConfig).toHaveBeenCalledWith({ enabled: true, sizeLimit: 20 })
expect(result.current.fileUploadConfigResponse).toEqual({ enabled: true, sizeLimit: 20 })
expect(result.current.isLoading).toBe(false)
})
it('should fall back to no published user input when preload requests fail', async () => {
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined)
mockFetchWorkflowDraft.mockReset().mockResolvedValue(draftResponse)
mockFetchNodesDefaultConfigs.mockRejectedValue(new Error('preload failed'))
renderHook(() => useWorkflowInit())
await waitFor(() => {
expect(mockSetLastPublishedHasUserInput).toHaveBeenCalledWith(false)
})
expect(consoleErrorSpy).toHaveBeenCalled()
consoleErrorSpy.mockRestore()
})
})

View File

@@ -1,32 +1,24 @@
import { act, renderHook, waitFor } from '@testing-library/react'
import { act, renderHook } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useWorkflowRefreshDraft } from '../use-workflow-refresh-draft'
const mockHandleUpdateWorkflowCanvas = vi.fn()
const mockSetSyncWorkflowDraftHash = vi.fn()
const mockSetIsSyncingWorkflowDraft = vi.fn()
const mockSetEnvironmentVariables = vi.fn()
const mockSetEnvSecrets = vi.fn()
const mockSetConversationVariables = vi.fn()
const mockSetIsWorkflowDataLoaded = vi.fn()
const mockCancel = vi.fn()
let workflowStoreState: {
appId: string
isWorkflowDataLoaded: boolean
debouncedSyncWorkflowDraft?: { cancel: () => void }
setSyncWorkflowDraftHash: typeof mockSetSyncWorkflowDraftHash
setIsSyncingWorkflowDraft: typeof mockSetIsSyncingWorkflowDraft
setEnvironmentVariables: typeof mockSetEnvironmentVariables
setEnvSecrets: typeof mockSetEnvSecrets
setConversationVariables: typeof mockSetConversationVariables
setIsWorkflowDataLoaded: typeof mockSetIsWorkflowDataLoaded
}
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: () => workflowStoreState,
getState: () => ({
appId: 'app-1',
isWorkflowDataLoaded: true,
debouncedSyncWorkflowDraft: undefined,
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
setIsSyncingWorkflowDraft: vi.fn(),
setEnvironmentVariables: vi.fn(),
setEnvSecrets: vi.fn(),
setConversationVariables: vi.fn(),
setIsWorkflowDataLoaded: vi.fn(),
}),
}),
}))
@@ -49,17 +41,6 @@ const draftResponse = {
describe('useWorkflowRefreshDraft — notUpdateCanvas parameter', () => {
beforeEach(() => {
vi.clearAllMocks()
workflowStoreState = {
appId: 'app-1',
isWorkflowDataLoaded: true,
debouncedSyncWorkflowDraft: undefined,
setSyncWorkflowDraftHash: mockSetSyncWorkflowDraftHash,
setIsSyncingWorkflowDraft: mockSetIsSyncingWorkflowDraft,
setEnvironmentVariables: mockSetEnvironmentVariables,
setEnvSecrets: mockSetEnvSecrets,
setConversationVariables: mockSetConversationVariables,
setIsWorkflowDataLoaded: mockSetIsWorkflowDataLoaded,
}
mockFetchWorkflowDraft.mockResolvedValue(draftResponse)
})
@@ -94,67 +75,6 @@ describe('useWorkflowRefreshDraft — notUpdateCanvas parameter', () => {
await act(async () => {
result.current.handleRefreshWorkflowDraft(true)
})
await waitFor(() => {
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('server-hash')
})
})
it('should cancel pending draft sync, use fallback viewport, and persist masked secrets', async () => {
workflowStoreState = {
...workflowStoreState,
debouncedSyncWorkflowDraft: { cancel: mockCancel },
}
mockFetchWorkflowDraft.mockResolvedValue({
hash: 'server-hash',
graph: {
nodes: [{ id: 'n1' }],
edges: [],
},
environment_variables: [
{ id: 'env-secret', value_type: 'secret', value: 'top-secret', name: 'SECRET' },
{ id: 'env-plain', value_type: 'text', value: 'visible', name: 'PLAIN' },
],
conversation_variables: [{ id: 'conversation-1' }],
})
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockCancel).toHaveBeenCalled()
expect(mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [{ id: 'n1' }],
edges: [],
viewport: { x: 0, y: 0, zoom: 1 },
})
expect(mockSetEnvSecrets).toHaveBeenCalledWith({
'env-secret': 'top-secret',
})
expect(mockSetEnvironmentVariables).toHaveBeenCalledWith([
{ id: 'env-secret', value_type: 'secret', value: '[__HIDDEN__]', name: 'SECRET' },
{ id: 'env-plain', value_type: 'text', value: 'visible', name: 'PLAIN' },
])
expect(mockSetConversationVariables).toHaveBeenCalledWith([{ id: 'conversation-1' }])
})
})
it('should restore loaded state when refresh fails after workflow data was already loaded', async () => {
mockFetchWorkflowDraft.mockRejectedValue(new Error('refresh failed'))
const { result } = renderHook(() => useWorkflowRefreshDraft())
act(() => {
result.current.handleRefreshWorkflowDraft()
})
await waitFor(() => {
expect(mockSetIsWorkflowDataLoaded).toHaveBeenNthCalledWith(1, false)
expect(mockSetIsWorkflowDataLoaded).toHaveBeenNthCalledWith(2, true)
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenCalledWith(true)
expect(mockSetIsSyncingWorkflowDraft).toHaveBeenLastCalledWith(false)
})
expect(mockSetSyncWorkflowDraftHash).toHaveBeenCalledWith('server-hash')
})
})

View File

@@ -1,451 +0,0 @@
import type AudioPlayer from '@/app/components/base/audio-btn/audio'
import { createBaseWorkflowRunCallbacks, createFinalWorkflowRunCallbacks } from '../use-workflow-run-callbacks'

// Hoisted so the mock fns exist before the vi.mock factories below run
// (vi.mock calls are hoisted to the top of the module by Vitest).
const {
  mockSseGet,
  mockResetMsgId,
} = vi.hoisted(() => ({
  mockSseGet: vi.fn(),
  mockResetMsgId: vi.fn(),
}))

// Intercept the SSE transport so pause-continuation calls can be asserted.
vi.mock('@/service/base', () => ({
  sseGet: mockSseGet,
}))

// Stub the singleton audio-player manager; tests only assert resetMsgId calls.
vi.mock('@/app/components/base/audio-btn/audio.player.manager', () => ({
  AudioPlayerManager: {
    getInstance: () => ({
      resetMsgId: mockResetMsgId,
    }),
  },
}))
// Builds a fresh set of workflow-event handler spies for each test so that
// call counts never leak between cases. Shape mirrors the handlers bag that
// createBaseWorkflowRunCallbacks / createFinalWorkflowRunCallbacks consume.
const createHandlers = () => {
  const spy = () => vi.fn()
  return {
    handleWorkflowStarted: spy(),
    handleWorkflowFinished: spy(),
    handleWorkflowFailed: spy(),
    handleWorkflowNodeStarted: spy(),
    handleWorkflowNodeFinished: spy(),
    handleWorkflowNodeHumanInputRequired: spy(),
    handleWorkflowNodeHumanInputFormFilled: spy(),
    handleWorkflowNodeHumanInputFormTimeout: spy(),
    handleWorkflowNodeIterationStarted: spy(),
    handleWorkflowNodeIterationNext: spy(),
    handleWorkflowNodeIterationFinished: spy(),
    handleWorkflowNodeLoopStarted: spy(),
    handleWorkflowNodeLoopNext: spy(),
    handleWorkflowNodeLoopFinished: spy(),
    handleWorkflowNodeRetry: spy(),
    handleWorkflowAgentLog: spy(),
    handleWorkflowTextChunk: spy(),
    handleWorkflowTextReplace: spy(),
    handleWorkflowPaused: spy(),
  }
}
// Builds a fresh set of user-facing callback spies (the `callbacks` option
// a caller passes into the run hooks), one vi.fn() per supported event.
const createUserCallbacks = () => {
  const spy = () => vi.fn()
  return {
    onWorkflowStarted: spy(),
    onWorkflowFinished: spy(),
    onNodeStarted: spy(),
    onNodeFinished: spy(),
    onIterationStart: spy(),
    onIterationNext: spy(),
    onIterationFinish: spy(),
    onLoopStart: spy(),
    onLoopNext: spy(),
    onLoopFinish: spy(),
    onNodeRetry: spy(),
    onAgentLog: spy(),
    onError: spy(),
    onWorkflowPaused: spy(),
    onHumanInputRequired: spy(),
    onHumanInputFormFilled: spy(),
    onHumanInputFormTimeout: spy(),
    onCompleted: spy(),
  }
}
// Verifies the two callback-factory helpers: the base SSE callbacks and the
// "final" callbacks that layer rest-callback overrides and abort wiring on top.
describe('useWorkflowRun callbacks helpers', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('should create base callbacks that wrap workflow events, errors, pause continuation, and lazy tts playback', () => {
    const handlers = createHandlers()
    const clearAbortController = vi.fn()
    const clearListeningState = vi.fn()
    const invalidateRunHistory = vi.fn()
    const fetchInspectVars = vi.fn()
    const invalidAllLastRun = vi.fn()
    const trackWorkflowRunFailed = vi.fn()
    const userOnWorkflowFinished = vi.fn()
    const userOnError = vi.fn()
    const userOnWorkflowPaused = vi.fn()
    const player = {
      playAudioWithAudio: vi.fn(),
    } as unknown as AudioPlayer
    // Lazy player factory: only invoked when a TTS chunk actually arrives.
    const getOrCreatePlayer = vi.fn<() => AudioPlayer | null>(() => player)
    const callbacks = createBaseWorkflowRunCallbacks({
      clientWidth: 320,
      clientHeight: 240,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: true,
      fetchInspectVars,
      invalidAllLastRun,
      invalidateRunHistory,
      clearAbortController,
      clearListeningState,
      trackWorkflowRunFailed,
      handlers,
      callbacks: {
        onWorkflowFinished: userOnWorkflowFinished,
        onError: userOnError,
        onWorkflowPaused: userOnWorkflowPaused,
      },
      restCallback: {},
      getOrCreatePlayer,
    })
    // Finish event: clears listening, notifies handler + user callback,
    // refreshes run history, and (debug mode) reloads inspect vars.
    callbacks.onWorkflowFinished?.({ workflow_run_id: 'run-1' } as never)
    expect(clearListeningState).toHaveBeenCalled()
    expect(handlers.handleWorkflowFinished).toHaveBeenCalled()
    expect(invalidateRunHistory).toHaveBeenCalledWith('/apps/app-1/workflow-runs')
    expect(userOnWorkflowFinished).toHaveBeenCalled()
    expect(fetchInspectVars).toHaveBeenCalledWith({})
    expect(invalidAllLastRun).toHaveBeenCalled()
    // Error event: aborts, notifies, and tracks the failure payload.
    callbacks.onError?.({ error: 'failed', node_type: 'llm' } as never)
    expect(clearAbortController).toHaveBeenCalled()
    expect(handlers.handleWorkflowFailed).toHaveBeenCalled()
    expect(userOnError).toHaveBeenCalled()
    expect(trackWorkflowRunFailed).toHaveBeenCalledWith({ error: 'failed', node_type: 'llm' })
    // TTS chunk: player is created lazily, plays the chunk, resets message id.
    callbacks.onTTSChunk?.('message-1', 'audio-chunk')
    expect(getOrCreatePlayer).toHaveBeenCalled()
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-chunk', true)
    expect(mockResetMsgId).toHaveBeenCalledWith('message-1')
    // Pause event: re-subscribes to the run's event stream with the SAME
    // callback object so continuation keeps identical behavior.
    callbacks.onWorkflowPaused?.({ workflow_run_id: 'run-2' } as never)
    expect(handlers.handleWorkflowPaused).toHaveBeenCalled()
    expect(userOnWorkflowPaused).toHaveBeenCalled()
    expect(mockSseGet).toHaveBeenCalledWith('/workflow/run-2/events', {}, callbacks)
  })
  it('should create final callbacks that preserve rest callback override order and eager abort-controller wiring', () => {
    const handlers = createHandlers()
    const restOnNodeStarted = vi.fn()
    const setAbortController = vi.fn()
    const player = {
      playAudioWithAudio: vi.fn(),
    } as unknown as AudioPlayer
    const baseSseOptions = createBaseWorkflowRunCallbacks({
      clientWidth: 320,
      clientHeight: 240,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: false,
      fetchInspectVars: vi.fn(),
      invalidAllLastRun: vi.fn(),
      invalidateRunHistory: vi.fn(),
      clearAbortController: vi.fn(),
      clearListeningState: vi.fn(),
      trackWorkflowRunFailed: vi.fn(),
      handlers,
      callbacks: {},
      restCallback: {},
      getOrCreatePlayer: vi.fn<() => AudioPlayer | null>(() => player),
    })
    const finalCallbacks = createFinalWorkflowRunCallbacks({
      clientWidth: 320,
      clientHeight: 240,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: false,
      fetchInspectVars: vi.fn(),
      invalidAllLastRun: vi.fn(),
      invalidateRunHistory: vi.fn(),
      clearAbortController: vi.fn(),
      clearListeningState: vi.fn(),
      trackWorkflowRunFailed: vi.fn(),
      handlers,
      callbacks: {},
      restCallback: {
        onNodeStarted: restOnNodeStarted,
      },
      baseSseOptions,
      player,
      setAbortController,
    })
    const controller = new AbortController()
    finalCallbacks.getAbortController?.(controller)
    expect(setAbortController).toHaveBeenCalledWith(controller)
    // restCallback entries must WIN over the default handler wiring.
    finalCallbacks.onNodeStarted?.({ node_id: 'node-1' } as never)
    expect(restOnNodeStarted).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeStarted).not.toHaveBeenCalled()
    // Final callbacks use the eagerly-provided player (no lazy factory).
    finalCallbacks.onTTSChunk?.('message-2', 'audio-chunk')
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-chunk', true)
    expect(mockResetMsgId).toHaveBeenCalledWith('message-2')
  })
  it('should route base workflow events through handlers, user callbacks, and pause continuation with the same callback object', async () => {
    const handlers = createHandlers()
    const userCallbacks = createUserCallbacks()
    const clearAbortController = vi.fn()
    const clearListeningState = vi.fn()
    const invalidateRunHistory = vi.fn()
    const fetchInspectVars = vi.fn()
    const invalidAllLastRun = vi.fn()
    const trackWorkflowRunFailed = vi.fn()
    const player = {
      playAudioWithAudio: vi.fn(),
    } as unknown as AudioPlayer
    const callbacks = createBaseWorkflowRunCallbacks({
      clientWidth: 640,
      clientHeight: 360,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: true,
      fetchInspectVars,
      invalidAllLastRun,
      invalidateRunHistory,
      clearAbortController,
      clearListeningState,
      trackWorkflowRunFailed,
      handlers,
      callbacks: userCallbacks,
      restCallback: {},
      getOrCreatePlayer: vi.fn<() => AudioPlayer | null>(() => player),
    })
    // Fire every supported event once, then assert the full fan-out below.
    callbacks.onWorkflowStarted?.({ workflow_run_id: 'run-1' } as never)
    callbacks.onNodeStarted?.({ node_id: 'node-1' } as never)
    callbacks.onNodeFinished?.({ node_id: 'node-1' } as never)
    callbacks.onIterationStart?.({ node_id: 'node-1' } as never)
    callbacks.onIterationNext?.({ node_id: 'node-1' } as never)
    callbacks.onIterationFinish?.({ node_id: 'node-1' } as never)
    callbacks.onLoopStart?.({ node_id: 'node-1' } as never)
    callbacks.onLoopNext?.({ node_id: 'node-1' } as never)
    callbacks.onLoopFinish?.({ node_id: 'node-1' } as never)
    callbacks.onNodeRetry?.({ node_id: 'node-1' } as never)
    callbacks.onAgentLog?.({ node_id: 'node-1' } as never)
    callbacks.onTextChunk?.({ data: 'chunk' } as never)
    callbacks.onTextReplace?.({ text: 'replacement' } as never)
    callbacks.onHumanInputRequired?.({ node_id: 'node-1' } as never)
    callbacks.onHumanInputFormFilled?.({ node_id: 'node-1' } as never)
    callbacks.onHumanInputFormTimeout?.({ node_id: 'node-1' } as never)
    callbacks.onWorkflowFinished?.({ workflow_run_id: 'run-1' } as never)
    await callbacks.onCompleted?.(false, '')
    callbacks.onTTSChunk?.('message-1', 'audio-chunk')
    callbacks.onTTSEnd?.('message-1', 'audio-finished')
    callbacks.onWorkflowPaused?.({ workflow_run_id: 'run-2' } as never)
    callbacks.onError?.({ error: 'failed', node_type: 'llm' } as never, '500')
    expect(handlers.handleWorkflowStarted).toHaveBeenCalled()
    expect(userCallbacks.onWorkflowStarted).toHaveBeenCalled()
    // Node/iteration/loop "started" handlers receive the viewport dimensions.
    expect(handlers.handleWorkflowNodeStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 640, clientHeight: 360 },
    )
    expect(userCallbacks.onNodeStarted).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeFinished).toHaveBeenCalled()
    expect(userCallbacks.onNodeFinished).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeIterationStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 640, clientHeight: 360 },
    )
    expect(userCallbacks.onIterationStart).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeIterationNext).toHaveBeenCalled()
    expect(userCallbacks.onIterationNext).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeIterationFinished).toHaveBeenCalled()
    expect(userCallbacks.onIterationFinish).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeLoopStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 640, clientHeight: 360 },
    )
    expect(userCallbacks.onLoopStart).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeLoopNext).toHaveBeenCalled()
    expect(userCallbacks.onLoopNext).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeLoopFinished).toHaveBeenCalled()
    expect(userCallbacks.onLoopFinish).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeRetry).toHaveBeenCalled()
    expect(userCallbacks.onNodeRetry).toHaveBeenCalled()
    expect(handlers.handleWorkflowAgentLog).toHaveBeenCalled()
    expect(userCallbacks.onAgentLog).toHaveBeenCalled()
    expect(handlers.handleWorkflowTextChunk).toHaveBeenCalled()
    expect(handlers.handleWorkflowTextReplace).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputRequired).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputRequired).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputFormFilled).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputFormFilled).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputFormTimeout).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputFormTimeout).toHaveBeenCalled()
    expect(clearListeningState).toHaveBeenCalled()
    expect(handlers.handleWorkflowFinished).toHaveBeenCalled()
    expect(userCallbacks.onWorkflowFinished).toHaveBeenCalled()
    expect(fetchInspectVars).toHaveBeenCalledWith({})
    expect(invalidAllLastRun).toHaveBeenCalled()
    expect(userCallbacks.onCompleted).toHaveBeenCalledWith(false, '')
    // TTS: chunk plays as streaming (true), end plays as final (false).
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-chunk', true)
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-finished', false)
    expect(mockResetMsgId).toHaveBeenCalledWith('message-1')
    expect(handlers.handleWorkflowPaused).toHaveBeenCalled()
    expect(userCallbacks.onWorkflowPaused).toHaveBeenCalled()
    expect(mockSseGet).toHaveBeenCalledWith('/workflow/run-2/events', {}, callbacks)
    expect(clearAbortController).toHaveBeenCalled()
    expect(handlers.handleWorkflowFailed).toHaveBeenCalled()
    expect(userCallbacks.onError).toHaveBeenCalledWith({ error: 'failed', node_type: 'llm' }, '500')
    expect(trackWorkflowRunFailed).toHaveBeenCalledWith({ error: 'failed', node_type: 'llm' })
    expect(invalidateRunHistory).toHaveBeenCalledWith('/apps/app-1/workflow-runs')
  })
  it('should skip base debug-only side effects and audio playback when debug mode is off or audio is empty', () => {
    const handlers = createHandlers()
    const fetchInspectVars = vi.fn()
    const invalidAllLastRun = vi.fn()
    // Factory returns null: playback paths must tolerate a missing player.
    const getOrCreatePlayer = vi.fn<() => AudioPlayer | null>(() => null)
    const callbacks = createBaseWorkflowRunCallbacks({
      clientWidth: 320,
      clientHeight: 240,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: false,
      fetchInspectVars,
      invalidAllLastRun,
      invalidateRunHistory: vi.fn(),
      clearAbortController: vi.fn(),
      clearListeningState: vi.fn(),
      trackWorkflowRunFailed: vi.fn(),
      handlers,
      callbacks: {},
      restCallback: {},
      getOrCreatePlayer,
    })
    callbacks.onWorkflowFinished?.({ workflow_run_id: 'run-1' } as never)
    callbacks.onTTSChunk?.('message-1', '')
    callbacks.onTTSEnd?.('message-1', 'audio-finished')
    // Debug-only side effects stay silent outside workflow debug mode.
    expect(fetchInspectVars).not.toHaveBeenCalled()
    expect(invalidAllLastRun).not.toHaveBeenCalled()
    // Empty audio chunk short-circuits before creating the player.
    expect(getOrCreatePlayer).toHaveBeenCalledTimes(1)
    expect(mockResetMsgId).not.toHaveBeenCalled()
  })
  it('should route final workflow events through handlers and continue paused runs with final callbacks', async () => {
    const handlers = createHandlers()
    const userCallbacks = createUserCallbacks()
    const fetchInspectVars = vi.fn()
    const invalidAllLastRun = vi.fn()
    const invalidateRunHistory = vi.fn()
    const setAbortController = vi.fn()
    const player = {
      playAudioWithAudio: vi.fn(),
    } as unknown as AudioPlayer
    const baseSseOptions = createBaseWorkflowRunCallbacks({
      clientWidth: 480,
      clientHeight: 320,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: false,
      fetchInspectVars: vi.fn(),
      invalidAllLastRun: vi.fn(),
      invalidateRunHistory: vi.fn(),
      clearAbortController: vi.fn(),
      clearListeningState: vi.fn(),
      trackWorkflowRunFailed: vi.fn(),
      handlers,
      callbacks: {},
      restCallback: {},
      getOrCreatePlayer: vi.fn<() => AudioPlayer | null>(() => player),
    })
    const finalCallbacks = createFinalWorkflowRunCallbacks({
      clientWidth: 480,
      clientHeight: 320,
      runHistoryUrl: '/apps/app-1/workflow-runs',
      isInWorkflowDebug: true,
      fetchInspectVars,
      invalidAllLastRun,
      invalidateRunHistory,
      clearAbortController: vi.fn(),
      clearListeningState: vi.fn(),
      trackWorkflowRunFailed: vi.fn(),
      handlers,
      callbacks: userCallbacks,
      restCallback: {},
      baseSseOptions,
      player,
      setAbortController,
    })
    finalCallbacks.getAbortController?.(new AbortController())
    finalCallbacks.onWorkflowFinished?.({ workflow_run_id: 'run-1' } as never)
    finalCallbacks.onNodeStarted?.({ node_id: 'node-1' } as never)
    finalCallbacks.onNodeFinished?.({ node_id: 'node-1' } as never)
    finalCallbacks.onIterationStart?.({ node_id: 'node-1' } as never)
    finalCallbacks.onIterationNext?.({ node_id: 'node-1' } as never)
    finalCallbacks.onIterationFinish?.({ node_id: 'node-1' } as never)
    finalCallbacks.onLoopStart?.({ node_id: 'node-1' } as never)
    finalCallbacks.onLoopNext?.({ node_id: 'node-1' } as never)
    finalCallbacks.onLoopFinish?.({ node_id: 'node-1' } as never)
    finalCallbacks.onNodeRetry?.({ node_id: 'node-1' } as never)
    finalCallbacks.onAgentLog?.({ node_id: 'node-1' } as never)
    finalCallbacks.onTextChunk?.({ data: 'chunk' } as never)
    finalCallbacks.onTextReplace?.({ text: 'replacement' } as never)
    finalCallbacks.onHumanInputRequired?.({ node_id: 'node-1' } as never)
    finalCallbacks.onHumanInputFormFilled?.({ node_id: 'node-1' } as never)
    finalCallbacks.onHumanInputFormTimeout?.({ node_id: 'node-1' } as never)
    finalCallbacks.onWorkflowPaused?.({ workflow_run_id: 'run-2' } as never)
    finalCallbacks.onTTSChunk?.('message-2', 'audio-chunk')
    finalCallbacks.onTTSEnd?.('message-2', 'audio-finished')
    await finalCallbacks.onCompleted?.(true, 'done')
    finalCallbacks.onError?.({ error: 'failed' } as never, '500')
    expect(setAbortController).toHaveBeenCalled()
    expect(handlers.handleWorkflowFinished).toHaveBeenCalled()
    expect(userCallbacks.onWorkflowFinished).toHaveBeenCalled()
    expect(fetchInspectVars).toHaveBeenCalledWith({})
    expect(invalidAllLastRun).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 480, clientHeight: 320 },
    )
    expect(handlers.handleWorkflowNodeIterationStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 480, clientHeight: 320 },
    )
    expect(handlers.handleWorkflowNodeLoopStarted).toHaveBeenCalledWith(
      { node_id: 'node-1' },
      { clientWidth: 480, clientHeight: 320 },
    )
    expect(userCallbacks.onNodeStarted).toHaveBeenCalled()
    expect(userCallbacks.onNodeFinished).toHaveBeenCalled()
    expect(userCallbacks.onIterationStart).toHaveBeenCalled()
    expect(userCallbacks.onIterationNext).toHaveBeenCalled()
    expect(userCallbacks.onIterationFinish).toHaveBeenCalled()
    expect(userCallbacks.onLoopStart).toHaveBeenCalled()
    expect(userCallbacks.onLoopNext).toHaveBeenCalled()
    expect(userCallbacks.onLoopFinish).toHaveBeenCalled()
    expect(userCallbacks.onNodeRetry).toHaveBeenCalled()
    expect(userCallbacks.onAgentLog).toHaveBeenCalled()
    expect(handlers.handleWorkflowTextChunk).toHaveBeenCalled()
    expect(handlers.handleWorkflowTextReplace).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputRequired).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputRequired).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputFormFilled).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputFormFilled).toHaveBeenCalled()
    expect(handlers.handleWorkflowNodeHumanInputFormTimeout).toHaveBeenCalled()
    expect(userCallbacks.onHumanInputFormTimeout).toHaveBeenCalled()
    expect(handlers.handleWorkflowPaused).toHaveBeenCalled()
    expect(userCallbacks.onWorkflowPaused).toHaveBeenCalled()
    // Pause continuation must resubscribe with the FINAL callbacks object.
    expect(mockSseGet).toHaveBeenCalledWith('/workflow/run-2/events', {}, finalCallbacks)
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-chunk', true)
    expect(player.playAudioWithAudio).toHaveBeenCalledWith('audio-finished', false)
    expect(handlers.handleWorkflowFailed).toHaveBeenCalled()
    expect(userCallbacks.onError).toHaveBeenCalledWith({ error: 'failed' }, '500')
    expect(invalidateRunHistory).toHaveBeenCalledWith('/apps/app-1/workflow-runs')
  })
})

View File

@@ -1,431 +0,0 @@
import { TriggerType } from '@/app/components/workflow/header/test-run-menu'
import { WorkflowRunningStatus } from '@/app/components/workflow/types'
import { AppModeEnum } from '@/types/app'
import {
applyRunningStateForMode,
applyStoppedState,
buildListeningTriggerNodeIds,
buildRunHistoryUrl,
buildTTSConfig,
buildWorkflowRunRequestBody,
clearListeningState,
clearWindowDebugControllers,
createFailedWorkflowState,
createRunningWorkflowState,
createStoppedWorkflowState,
mapPublishedWorkflowFeatures,
normalizePublishedWorkflowNodes,
resolveWorkflowRunUrl,
runTriggerDebug,
validateWorkflowRunRequest,
} from '../use-workflow-run-utils'
const {
mockPost,
mockHandleStream,
mockToastError,
} = vi.hoisted(() => ({
mockPost: vi.fn(),
mockHandleStream: vi.fn(),
mockToastError: vi.fn(),
}))
vi.mock('@/service/base', () => ({
post: mockPost,
handleStream: mockHandleStream,
}))
vi.mock('@/app/components/base/ui/toast', () => ({
toast: {
error: mockToastError,
},
}))
// Builds a fresh bag of listening-state store-action spies per test so
// assertions on one test's actions never see calls from another.
const createListeningActions = () => {
  const spy = () => vi.fn()
  return {
    setWorkflowRunningData: spy(),
    setIsListening: spy(),
    setShowVariableInspectPanel: spy(),
    setListeningTriggerType: spy(),
    setListeningTriggerNodeIds: spy(),
    setListeningTriggerIsAll: spy(),
    setListeningTriggerNodeId: spy(),
  }
}
describe('useWorkflowRun utils', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('should resolve run history urls and run endpoints for workflow modes', () => {
expect(buildRunHistoryUrl({ id: 'app-1', mode: AppModeEnum.WORKFLOW })).toBe('/apps/app-1/workflow-runs')
expect(buildRunHistoryUrl({ id: 'app-1', mode: AppModeEnum.ADVANCED_CHAT })).toBe('/apps/app-1/advanced-chat/workflow-runs')
expect(resolveWorkflowRunUrl({ id: 'app-1', mode: AppModeEnum.WORKFLOW }, TriggerType.UserInput, true)).toBe('/apps/app-1/workflows/draft/run')
expect(resolveWorkflowRunUrl({ id: 'app-1', mode: AppModeEnum.ADVANCED_CHAT }, TriggerType.UserInput, false)).toBe('/apps/app-1/advanced-chat/workflows/draft/run')
expect(resolveWorkflowRunUrl({ id: 'app-1', mode: AppModeEnum.WORKFLOW }, TriggerType.Schedule, true)).toBe('/apps/app-1/workflows/draft/trigger/run')
expect(resolveWorkflowRunUrl({ id: 'app-1', mode: AppModeEnum.WORKFLOW }, TriggerType.All, true)).toBe('/apps/app-1/workflows/draft/trigger/run-all')
})
it('should build request bodies and validation errors for trigger runs', () => {
expect(buildWorkflowRunRequestBody(TriggerType.Schedule, {}, { scheduleNodeId: 'schedule-1' })).toEqual({ node_id: 'schedule-1' })
expect(buildWorkflowRunRequestBody(TriggerType.Webhook, {}, { webhookNodeId: 'webhook-1' })).toEqual({ node_id: 'webhook-1' })
expect(buildWorkflowRunRequestBody(TriggerType.Plugin, {}, { pluginNodeId: 'plugin-1' })).toEqual({ node_id: 'plugin-1' })
expect(buildWorkflowRunRequestBody(TriggerType.All, {}, { allNodeIds: ['trigger-1', 'trigger-2'] })).toEqual({ node_ids: ['trigger-1', 'trigger-2'] })
expect(buildWorkflowRunRequestBody(TriggerType.UserInput, { inputs: { query: 'hello' } })).toEqual({ inputs: { query: 'hello' } })
expect(validateWorkflowRunRequest(TriggerType.Schedule)).toBe('handleRun: schedule trigger run requires node id')
expect(validateWorkflowRunRequest(TriggerType.Webhook)).toBe('handleRun: webhook trigger run requires node id')
expect(validateWorkflowRunRequest(TriggerType.Plugin)).toBe('handleRun: plugin trigger run requires node id')
expect(validateWorkflowRunRequest(TriggerType.All)).toBe('')
expect(validateWorkflowRunRequest(TriggerType.All, { allNodeIds: [] })).toBe('')
})
it('should return empty trigger urls when app id is missing and keep user-input urls empty outside workflow debug', () => {
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {})
expect(resolveWorkflowRunUrl(undefined, TriggerType.Plugin, true)).toBe('')
expect(resolveWorkflowRunUrl(undefined, TriggerType.All, true)).toBe('')
expect(resolveWorkflowRunUrl({ id: 'app-1', mode: AppModeEnum.WORKFLOW }, TriggerType.UserInput, false)).toBe('')
expect(consoleErrorSpy).toHaveBeenCalledWith('handleRun: missing app id for trigger plugin run')
expect(consoleErrorSpy).toHaveBeenCalledWith('handleRun: missing app id for trigger run all')
consoleErrorSpy.mockRestore()
})
it('should configure listening state for trigger and non-trigger modes', () => {
const triggerActions = createListeningActions()
applyRunningStateForMode(triggerActions, TriggerType.All, { allNodeIds: ['trigger-1', 'trigger-2'] })
expect(triggerActions.setIsListening).toHaveBeenCalledWith(true)
expect(triggerActions.setShowVariableInspectPanel).toHaveBeenCalledWith(true)
expect(triggerActions.setListeningTriggerIsAll).toHaveBeenCalledWith(true)
expect(triggerActions.setListeningTriggerNodeIds).toHaveBeenCalledWith(['trigger-1', 'trigger-2'])
expect(triggerActions.setWorkflowRunningData).toHaveBeenCalledWith(createRunningWorkflowState())
const normalActions = createListeningActions()
applyRunningStateForMode(normalActions, TriggerType.UserInput)
expect(normalActions.setIsListening).toHaveBeenCalledWith(false)
expect(normalActions.setListeningTriggerType).toHaveBeenCalledWith(null)
expect(normalActions.setListeningTriggerNodeId).toHaveBeenCalledWith(null)
expect(normalActions.setListeningTriggerNodeIds).toHaveBeenCalledWith([])
expect(normalActions.setListeningTriggerIsAll).toHaveBeenCalledWith(false)
expect(normalActions.setWorkflowRunningData).toHaveBeenCalledWith(createRunningWorkflowState())
})
it('should clear listening state, stop state, and remove debug controllers', () => {
const listeningActions = createListeningActions()
clearListeningState(listeningActions)
expect(listeningActions.setIsListening).toHaveBeenCalledWith(false)
expect(listeningActions.setListeningTriggerType).toHaveBeenCalledWith(null)
expect(listeningActions.setListeningTriggerNodeId).toHaveBeenCalledWith(null)
expect(listeningActions.setListeningTriggerNodeIds).toHaveBeenCalledWith([])
expect(listeningActions.setListeningTriggerIsAll).toHaveBeenCalledWith(false)
const stoppedActions = createListeningActions()
applyStoppedState(stoppedActions)
expect(stoppedActions.setWorkflowRunningData).toHaveBeenCalledWith(createStoppedWorkflowState())
expect(stoppedActions.setShowVariableInspectPanel).toHaveBeenCalledWith(true)
const controllerTarget = {
__webhookDebugAbortController: { abort: vi.fn() },
__pluginDebugAbortController: { abort: vi.fn() },
__scheduleDebugAbortController: { abort: vi.fn() },
__allTriggersDebugAbortController: { abort: vi.fn() },
}
clearWindowDebugControllers(controllerTarget)
expect(controllerTarget).toEqual({})
})
it('should derive listening node ids, tts config, and published workflow mappings', () => {
  // Each trigger kind resolves its listening node-id list from a different option key.
  expect(buildListeningTriggerNodeIds(TriggerType.Webhook, { webhookNodeId: 'webhook-1' })).toEqual(['webhook-1'])
  expect(buildListeningTriggerNodeIds(TriggerType.Schedule, { scheduleNodeId: 'schedule-1' })).toEqual(['schedule-1'])
  expect(buildListeningTriggerNodeIds(TriggerType.Plugin, { pluginNodeId: 'plugin-1' })).toEqual(['plugin-1'])
  expect(buildListeningTriggerNodeIds(TriggerType.All, { allNodeIds: ['trigger-1', 'trigger-2'] })).toEqual(['trigger-1', 'trigger-2'])
  // TTS url resolution: a token yields the public endpoint, an installed-app
  // path the installed-apps endpoint, and an app path the app-scoped endpoint.
  const publicTTS = { ttsUrl: '/text-to-audio', ttsIsPublic: true }
  expect(buildTTSConfig({ token: 'public-token' }, '/apps/app-1')).toEqual(publicTTS)
  const installedTTS = { ttsUrl: '/installed-apps/app-1/text-to-audio', ttsIsPublic: false }
  expect(buildTTSConfig({ appId: 'app-1' }, '/explore/installed/app-1')).toEqual(installedTTS)
  const privateTTS = { ttsUrl: '/apps/app-1/text-to-audio', ttsIsPublic: false }
  expect(buildTTSConfig({ appId: 'app-1' }, '/apps/app-1/workflow')).toEqual(privateTTS)
  const publishedFixture = {
    graph: {
      nodes: [{ id: 'node-1', selected: true, data: { selected: true, title: 'Start' } }],
      edges: [],
      viewport: { x: 0, y: 0, zoom: 1 },
    },
    features: {
      opening_statement: 'hello',
      suggested_questions: ['Q1'],
      suggested_questions_after_answer: { enabled: true },
      text_to_speech: { enabled: true },
      speech_to_text: { enabled: true },
      retriever_resource: { enabled: true },
      sensitive_word_avoidance: { enabled: true },
      file_upload: { enabled: true },
    },
  } as never
  // Normalization clears selection flags on both the node and its data payload.
  expect(normalizePublishedWorkflowNodes(publishedFixture)).toEqual([
    { id: 'node-1', selected: false, data: { selected: false, title: 'Start' } },
  ])
  // Published feature keys are remapped to the runtime feature-store names.
  expect(mapPublishedWorkflowFeatures(publishedFixture)).toMatchObject({
    opening: {
      enabled: true,
      opening_statement: 'hello',
      suggested_questions: ['Q1'],
    },
    suggested: { enabled: true },
    text2speech: { enabled: true },
    speech2text: { enabled: true },
    citation: { enabled: true },
    moderation: { enabled: true },
    file: { enabled: true },
  })
})
// A null POST result and an unparseable JSON body are both reported as
// "<Type> debug request failed"; both clear the abort controller, but only the
// parse-error path (which got a real response) clears the listening state.
it('should handle trigger debug null and invalid json responses as request failures', async () => {
const clearAbortController = vi.fn()
const clearListeningStateSpy = vi.fn()
const setAbortController = vi.fn()
const setWorkflowRunningData = vi.fn()
const controllerTarget: Record<string, unknown> = {}
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {})
// First request: the transport resolves to null.
mockPost.mockResolvedValueOnce(null)
await runTriggerDebug({
debugType: TriggerType.Webhook,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'webhook-1' },
baseSseOptions: {},
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(mockToastError).toHaveBeenCalledWith('Webhook debug request failed')
expect(clearAbortController).toHaveBeenCalledTimes(1)
expect(clearListeningStateSpy).not.toHaveBeenCalled()
// Second request: a JSON content-type whose body is not valid JSON.
mockPost.mockResolvedValueOnce(new Response('{invalid-json}', {
headers: { 'content-type': 'application/json' },
}))
await runTriggerDebug({
debugType: TriggerType.Schedule,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'schedule-1' },
baseSseOptions: {},
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(consoleErrorSpy).toHaveBeenCalledWith(
'handleRun: schedule debug response parse error',
expect.any(Error),
)
expect(mockToastError).toHaveBeenCalledWith('Schedule debug request failed')
// Cumulative across both runs in this test.
expect(clearAbortController).toHaveBeenCalledTimes(2)
expect(clearListeningStateSpy).toHaveBeenCalledTimes(1)
expect(setWorkflowRunningData).not.toHaveBeenCalled()
consoleErrorSpy.mockRestore()
})
// A JSON error payload surfaces its message as a toast plus a failed workflow
// state; a text/event-stream response is instead handed to handleStream.
it('should handle trigger debug json failures and stream responses', async () => {
const clearAbortController = vi.fn()
const clearListeningStateSpy = vi.fn()
const setAbortController = vi.fn()
const setWorkflowRunningData = vi.fn()
const controllerTarget: Record<string, unknown> = {}
const baseSseOptions = {
onData: vi.fn(),
onCompleted: vi.fn(),
}
// JSON response carrying a server-provided failure message.
mockPost.mockResolvedValueOnce(new Response(JSON.stringify({ message: 'Webhook failed' }), {
headers: { 'content-type': 'application/json' },
}))
await runTriggerDebug({
debugType: TriggerType.Webhook,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'webhook-1' },
baseSseOptions,
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(setAbortController).toHaveBeenCalledTimes(1)
expect(mockToastError).toHaveBeenCalledWith('Webhook failed')
expect(clearAbortController).toHaveBeenCalled()
expect(clearListeningStateSpy).toHaveBeenCalled()
expect(setWorkflowRunningData).toHaveBeenCalledWith(createFailedWorkflowState('Webhook failed'))
// Event-stream response: the debug run switches to stream handling.
mockPost.mockResolvedValueOnce(new Response('data: ok', {
headers: { 'content-type': 'text/event-stream' },
}))
await runTriggerDebug({
debugType: TriggerType.Plugin,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'plugin-1' },
baseSseOptions,
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(clearListeningStateSpy).toHaveBeenCalledTimes(2)
expect(mockHandleStream).toHaveBeenCalledTimes(1)
})
// A JSON { status: 'waiting', retry_in } response re-issues the POST after the
// given delay (driven here with fake timers) until a stream is returned.
it('should retry waiting trigger debug responses until a stream is returned', async () => {
vi.useFakeTimers()
const clearAbortController = vi.fn()
const clearListeningStateSpy = vi.fn()
const setAbortController = vi.fn()
const setWorkflowRunningData = vi.fn()
const controllerTarget: Record<string, unknown> = {}
const baseSseOptions = {
onData: vi.fn(),
onCompleted: vi.fn(),
}
// First call says "wait 1ms", second call delivers the stream.
mockPost
.mockResolvedValueOnce(new Response(JSON.stringify({ status: 'waiting', retry_in: 1 }), {
headers: { 'content-type': 'application/json' },
}))
.mockResolvedValueOnce(new Response('data: ok', {
headers: { 'content-type': 'text/event-stream' },
}))
const runPromise = runTriggerDebug({
debugType: TriggerType.All,
url: '/apps/app-1/workflows/draft/trigger/run-all',
requestBody: { node_ids: ['trigger-1'] },
baseSseOptions,
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
// Advance past retry_in so the queued retry fires.
await vi.advanceTimersByTimeAsync(1)
await runPromise
expect(mockPost).toHaveBeenCalledTimes(2)
expect(clearListeningStateSpy).toHaveBeenCalledTimes(1)
expect(mockHandleStream).toHaveBeenCalledTimes(1)
vi.useRealTimers()
})
// Aborting the controller as soon as it is handed out makes the runner drop
// the response entirely: no stream handling, no toasts, no cleanup callbacks.
it('should stop trigger debug processing when the controller aborts before handling the response', async () => {
const clearAbortController = vi.fn()
const clearListeningStateSpy = vi.fn()
const setWorkflowRunningData = vi.fn()
const controllerTarget: Record<string, unknown> = {}
mockPost.mockResolvedValueOnce(new Response('data: ok', {
headers: { 'content-type': 'text/event-stream' },
}))
await runTriggerDebug({
debugType: TriggerType.Plugin,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'plugin-1' },
baseSseOptions: {},
controllerTarget,
// Abort immediately when the runner exposes its controller.
setAbortController: (controller) => {
controller?.abort()
},
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(mockHandleStream).not.toHaveBeenCalled()
expect(mockToastError).not.toHaveBeenCalled()
expect(clearAbortController).not.toHaveBeenCalled()
expect(clearListeningStateSpy).not.toHaveBeenCalled()
expect(setWorkflowRunningData).not.toHaveBeenCalled()
})
// A rejected Response is parsed for its error message (toast + failed state);
// a plain Error rejection clears listening state but, per the assertions
// below, does not trigger another clearAbortController/setWorkflowRunningData.
it('should handle Response and non-Response trigger debug exceptions correctly', async () => {
const clearAbortController = vi.fn()
const clearListeningStateSpy = vi.fn()
const setAbortController = vi.fn()
const setWorkflowRunningData = vi.fn()
const controllerTarget: Record<string, unknown> = {}
// Rejection value is itself a Response with a JSON error payload.
mockPost.mockRejectedValueOnce(new Response(JSON.stringify({ error: 'Plugin failed' }), {
headers: { 'content-type': 'application/json' },
}))
await runTriggerDebug({
debugType: TriggerType.Plugin,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'plugin-1' },
baseSseOptions: {},
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
expect(mockToastError).toHaveBeenCalledWith('Plugin failed')
expect(clearAbortController).toHaveBeenCalledTimes(1)
expect(setWorkflowRunningData).toHaveBeenCalledWith(createFailedWorkflowState('Plugin failed'))
expect(clearListeningStateSpy).toHaveBeenCalledTimes(1)
// Rejection value is an ordinary Error (e.g. a network failure).
mockPost.mockRejectedValueOnce(new Error('network failed'))
await runTriggerDebug({
debugType: TriggerType.Plugin,
url: '/apps/app-1/workflows/draft/trigger/run',
requestBody: { node_id: 'plugin-1' },
baseSseOptions: {},
controllerTarget,
setAbortController,
clearAbortController,
clearListeningState: clearListeningStateSpy,
setWorkflowRunningData,
})
// Counts are cumulative: still 1 means the Error path did not call them again.
expect(clearAbortController).toHaveBeenCalledTimes(1)
expect(setWorkflowRunningData).toHaveBeenCalledTimes(1)
expect(clearListeningStateSpy).toHaveBeenCalledTimes(2)
})
it('should expose the canonical workflow state factories', () => {
  // Each factory stamps its state with the matching running status.
  const statusOf = (state: { result: { status: WorkflowRunningStatus } }) => state.result.status
  expect(statusOf(createRunningWorkflowState())).toBe(WorkflowRunningStatus.Running)
  expect(statusOf(createStoppedWorkflowState())).toBe(WorkflowRunningStatus.Stopped)
  expect(statusOf(createFailedWorkflowState('failed'))).toBe(WorkflowRunningStatus.Failed)
})
})

View File

@@ -1,592 +0,0 @@
import { act, renderHook } from '@testing-library/react'
import { TriggerType } from '@/app/components/workflow/header/test-run-menu'
import { WorkflowRunningStatus } from '@/app/components/workflow/types'
import { useWorkflowRun } from '../use-workflow-run'
// Minimal shape of the abort controllers the hook stores on window.
type DebugAbortControllerRef = {
abort: () => void
}
// window augmented with the per-trigger debug abort controller globals
// that handleStopRun aborts (see the stop-run test below).
type DebugControllerWindow = Window & {
__webhookDebugAbortController?: DebugAbortControllerRef
__pluginDebugAbortController?: DebugAbortControllerRef
__scheduleDebugAbortController?: DebugAbortControllerRef
__allTriggersDebugAbortController?: DebugAbortControllerRef
}
// Loose view of the mocked workflow store; all fields optional because the
// state object is rebuilt per test via createWorkflowStoreState().
type WorkflowStoreState = {
backupDraft?: unknown
environmentVariables?: unknown
setBackupDraft?: (value: unknown) => void
setEnvironmentVariables?: (value: unknown) => void
setWorkflowRunningData?: (value: unknown) => void
setIsListening?: (value: boolean) => void
setShowVariableInspectPanel?: (value: boolean) => void
setListeningTriggerType?: (value: unknown) => void
setListeningTriggerNodeIds?: (value: string[]) => void
setListeningTriggerIsAll?: (value: boolean) => void
setListeningTriggerNodeId?: (value: string | null) => void
}
// All shared mock state lives in vi.hoisted() so the hoisted vi.mock factory
// functions below may safely reference it.
const mocks = vi.hoisted(() => {
const appStoreState = {
appDetail: {
id: 'app-1',
mode: 'workflow',
name: 'Workflow App',
},
}
const reactFlowStoreState = {
edges: [{ id: 'edge-1' }],
getNodes: vi.fn(),
setNodes: vi.fn(),
}
// Mutable store object; workflowStoreSetState merges partial updates into it.
const workflowStoreState: WorkflowStoreState = {}
const workflowStoreSetState = vi.fn((partial: Record<string, unknown>) => {
Object.assign(workflowStoreState, partial)
})
const featuresStoreState = {
features: {
file: {
enabled: true,
},
},
}
const featuresStoreSetState = vi.fn((partial: Record<string, unknown>) => {
Object.assign(featuresStoreState, partial)
})
return {
appStoreState,
reactFlowStoreState,
workflowStoreState,
workflowStoreSetState,
featuresStoreState,
featuresStoreSetState,
mockGetViewport: vi.fn(),
mockDoSyncWorkflowDraft: vi.fn(),
mockHandleUpdateWorkflowCanvas: vi.fn(),
mockFetchInspectVars: vi.fn(),
mockInvalidateAllLastRun: vi.fn(),
mockInvalidateRunHistory: vi.fn(),
mockSsePost: vi.fn(),
mockSseGet: vi.fn(),
mockHandleStream: vi.fn(),
mockPost: vi.fn(),
mockStopWorkflowRun: vi.fn(),
mockTrackEvent: vi.fn(),
mockGetAudioPlayer: vi.fn(),
mockResetMsgId: vi.fn(),
mockCreateBaseWorkflowRunCallbacks: vi.fn(),
mockCreateFinalWorkflowRunCallbacks: vi.fn(),
// One spy per workflow run event handler exposed by useWorkflowRunEvent.
runEventHandlers: {
handleWorkflowStarted: vi.fn(),
handleWorkflowFinished: vi.fn(),
handleWorkflowFailed: vi.fn(),
handleWorkflowNodeStarted: vi.fn(),
handleWorkflowNodeFinished: vi.fn(),
handleWorkflowNodeHumanInputRequired: vi.fn(),
handleWorkflowNodeHumanInputFormFilled: vi.fn(),
handleWorkflowNodeHumanInputFormTimeout: vi.fn(),
handleWorkflowNodeIterationStarted: vi.fn(),
handleWorkflowNodeIterationNext: vi.fn(),
handleWorkflowNodeIterationFinished: vi.fn(),
handleWorkflowNodeLoopStarted: vi.fn(),
handleWorkflowNodeLoopNext: vi.fn(),
handleWorkflowNodeLoopFinished: vi.fn(),
handleWorkflowNodeRetry: vi.fn(),
handleWorkflowAgentLog: vi.fn(),
handleWorkflowTextChunk: vi.fn(),
handleWorkflowTextReplace: vi.fn(),
handleWorkflowPaused: vi.fn(),
},
}
})
// Module mocks. Vitest hoists these above the imports, which is why every
// factory reads from the vi.hoisted `mocks` object rather than local consts.
vi.mock('reactflow', () => ({
useStoreApi: () => ({
getState: () => mocks.reactFlowStoreState,
}),
useReactFlow: () => ({
getViewport: mocks.mockGetViewport,
}),
}))
vi.mock('@/app/components/app/store', () => {
const useStore = Object.assign(vi.fn(), {
getState: () => mocks.appStoreState,
})
return {
useStore,
}
})
vi.mock('@/app/components/base/amplitude', () => ({
trackEvent: mocks.mockTrackEvent,
}))
vi.mock('@/app/components/base/audio-btn/audio.player.manager', () => ({
AudioPlayerManager: {
getInstance: () => ({
getAudioPlayer: mocks.mockGetAudioPlayer,
resetMsgId: mocks.mockResetMsgId,
}),
},
}))
vi.mock('@/app/components/base/features/hooks', () => ({
useFeaturesStore: () => ({
getState: () => mocks.featuresStoreState,
setState: mocks.featuresStoreSetState,
}),
}))
vi.mock('@/app/components/workflow/hooks/use-workflow-interactions', () => ({
useWorkflowUpdate: () => ({
handleUpdateWorkflowCanvas: mocks.mockHandleUpdateWorkflowCanvas,
}),
}))
vi.mock('@/app/components/workflow/hooks/use-workflow-run-event/use-workflow-run-event', () => ({
useWorkflowRunEvent: () => mocks.runEventHandlers,
}))
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: () => mocks.workflowStoreState,
setState: mocks.workflowStoreSetState,
}),
}))
vi.mock('@/next/navigation', () => ({
usePathname: () => '/apps/app-1/workflow',
}))
vi.mock('@/service/base', () => ({
ssePost: mocks.mockSsePost,
sseGet: mocks.mockSseGet,
post: mocks.mockPost,
handleStream: mocks.mockHandleStream,
}))
vi.mock('@/service/use-workflow', () => ({
useInvalidAllLastRun: () => mocks.mockInvalidateAllLastRun,
useInvalidateWorkflowRunHistory: () => mocks.mockInvalidateRunHistory,
useInvalidateConversationVarValues: () => vi.fn(),
useInvalidateSysVarValues: () => vi.fn(),
}))
vi.mock('@/service/workflow', () => ({
stopWorkflowRun: mocks.mockStopWorkflowRun,
}))
vi.mock('@/app/components/workflow/hooks/use-fetch-workflow-inspect-vars', () => ({
useSetWorkflowVarsWithValue: () => ({
fetchInspectVars: mocks.mockFetchInspectVars,
}),
}))
vi.mock('../use-configs-map', () => ({
useConfigsMap: () => ({
flowId: 'flow-1',
flowType: 'workflow',
}),
}))
vi.mock('../use-nodes-sync-draft', () => ({
useNodesSyncDraft: () => ({
doSyncWorkflowDraft: mocks.mockDoSyncWorkflowDraft,
}),
}))
// Partial mock: the real callback factories run, but each invocation is also
// recorded so tests can capture the factory context (see the tracker tests).
vi.mock('../use-workflow-run-callbacks', async (importOriginal) => {
const actual = await importOriginal<typeof import('../use-workflow-run-callbacks')>()
return {
...actual,
createBaseWorkflowRunCallbacks: vi.fn((params) => {
mocks.mockCreateBaseWorkflowRunCallbacks(params)
return actual.createBaseWorkflowRunCallbacks(params)
}),
createFinalWorkflowRunCallbacks: vi.fn((params) => {
mocks.mockCreateFinalWorkflowRunCallbacks(params)
return actual.createFinalWorkflowRunCallbacks(params)
}),
}
})
// Builds a fresh per-test workflow store snapshot. The two writer mocks mirror
// their writes back into the shared hoisted store state so later reads see them.
const createWorkflowStoreState = () => {
  const setBackupDraft = vi.fn((next: unknown) => {
    mocks.workflowStoreState.backupDraft = next
  })
  const setEnvironmentVariables = vi.fn((next: unknown) => {
    mocks.workflowStoreState.environmentVariables = next
  })
  return {
    backupDraft: undefined,
    environmentVariables: [{ id: 'env-current', value: 'secret' }],
    setBackupDraft,
    setEnvironmentVariables,
    setWorkflowRunningData: vi.fn(),
    setIsListening: vi.fn(),
    setShowVariableInspectPanel: vi.fn(),
    setListeningTriggerType: vi.fn(),
    setListeningTriggerNodeIds: vi.fn(),
    setListeningTriggerIsAll: vi.fn(),
    setListeningTriggerNodeId: vi.fn(),
  }
}
describe('useWorkflowRun', () => {
// Per-test fixture reset: fresh DOM container, fresh store/mock state, and a
// default event-stream POST response.
beforeEach(() => {
vi.clearAllMocks()
document.body.innerHTML = '<div id="workflow-container"></div>'
const workflowContainer = document.getElementById('workflow-container')!
// jsdom reports zero layout sizes by default, so pin a usable container size.
Object.defineProperty(workflowContainer, 'clientWidth', { value: 960, configurable: true })
Object.defineProperty(workflowContainer, 'clientHeight', { value: 540, configurable: true })
mocks.reactFlowStoreState.getNodes.mockReturnValue([
{ id: 'node-1', data: { selected: true, _runningStatus: 'running' } },
])
mocks.mockGetViewport.mockReturnValue({ x: 1, y: 2, zoom: 1.5 })
mocks.mockDoSyncWorkflowDraft.mockResolvedValue(undefined)
mocks.mockPost.mockResolvedValue(new Response('data: ok', {
headers: { 'content-type': 'text/event-stream' },
}))
mocks.mockGetAudioPlayer.mockReturnValue({
playAudioWithAudio: vi.fn(),
})
// createWorkflowStoreState() already carries backupDraft: undefined, so the
// previously separate reset assignment was redundant and has been dropped.
Object.assign(mocks.workflowStoreState, createWorkflowStoreState())
mocks.workflowStoreSetState.mockImplementation((partial: Record<string, unknown>) => {
Object.assign(mocks.workflowStoreState, partial)
})
mocks.featuresStoreState.features = {
file: {
enabled: true,
},
}
})
// The first backup snapshots canvas + features + env vars and syncs the
// draft; a second call while a backup exists is a no-op.
it('should backup the current draft once and skip subsequent backups until it is cleared', () => {
const { result } = renderHook(() => useWorkflowRun())
act(() => {
result.current.handleBackupDraft()
result.current.handleBackupDraft()
})
expect(mocks.workflowStoreState.setBackupDraft).toHaveBeenCalledTimes(1)
expect(mocks.workflowStoreState.setBackupDraft).toHaveBeenCalledWith({
nodes: [{ id: 'node-1', data: { selected: true, _runningStatus: 'running' } }],
edges: [{ id: 'edge-1' }],
viewport: { x: 1, y: 2, zoom: 1.5 },
features: { file: { enabled: true } },
environmentVariables: [{ id: 'env-current', value: 'secret' }],
})
expect(mocks.mockDoSyncWorkflowDraft).toHaveBeenCalledTimes(1)
})
// Loading a backup restores canvas, env vars, and features, then clears the
// stored backup so the next handleBackupDraft can snapshot again.
it('should load a backup draft into canvas, environment variables, and features state', () => {
mocks.workflowStoreState.backupDraft = {
nodes: [{ id: 'backup-node' }],
edges: [{ id: 'backup-edge' }],
viewport: { x: 0, y: 0, zoom: 2 },
features: { opening: { enabled: true } },
environmentVariables: [{ id: 'env-backup', value: 'value' }],
}
const { result } = renderHook(() => useWorkflowRun())
act(() => {
result.current.handleLoadBackupDraft()
})
expect(mocks.mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [{ id: 'backup-node' }],
edges: [{ id: 'backup-edge' }],
viewport: { x: 0, y: 0, zoom: 2 },
})
expect(mocks.workflowStoreState.setEnvironmentVariables).toHaveBeenCalledWith([{ id: 'env-backup', value: 'value' }])
expect(mocks.featuresStoreSetState).toHaveBeenCalledWith({
features: { opening: { enabled: true } },
})
expect(mocks.workflowStoreState.setBackupDraft).toHaveBeenCalledWith(undefined)
})
// A plain (non-trigger) run deselects nodes, clears listening state, marks the
// run as Running, and dispatches through ssePost with an abort-controller hook.
it('should prepare the graph and dispatch a workflow run through ssePost for user-input mode', async () => {
const { result } = renderHook(() => useWorkflowRun())
await act(async () => {
await result.current.handleRun({ inputs: { query: 'hello' } })
})
// Node selection and transient running status are stripped before the run.
expect(mocks.reactFlowStoreState.setNodes).toHaveBeenCalledWith([
{ id: 'node-1', data: { selected: false, _runningStatus: undefined } },
])
expect(mocks.mockDoSyncWorkflowDraft).toHaveBeenCalled()
expect(mocks.workflowStoreSetState).toHaveBeenCalledWith({ historyWorkflowData: undefined })
expect(mocks.workflowStoreState.setIsListening).toHaveBeenCalledWith(false)
expect(mocks.workflowStoreState.setListeningTriggerType).toHaveBeenCalledWith(null)
expect(mocks.workflowStoreState.setListeningTriggerNodeId).toHaveBeenCalledWith(null)
expect(mocks.workflowStoreState.setListeningTriggerNodeIds).toHaveBeenCalledWith([])
expect(mocks.workflowStoreState.setListeningTriggerIsAll).toHaveBeenCalledWith(false)
expect(mocks.workflowStoreState.setWorkflowRunningData).toHaveBeenCalledWith(expect.objectContaining({
result: expect.objectContaining({
status: WorkflowRunningStatus.Running,
}),
}))
expect(mocks.mockSsePost).toHaveBeenCalledWith(
'/apps/app-1/workflows/draft/run',
{ body: { inputs: { query: 'hello' } } },
expect.objectContaining({
getAbortController: expect.any(Function),
}),
)
})
// Table-driven check that each trigger mode routes through the debug POST
// runner (not ssePost) with the right url/body and listening-state updates.
it.each([
{
title: 'schedule',
params: {},
options: { mode: TriggerType.Schedule, scheduleNodeId: 'schedule-1' },
expectedUrl: '/apps/app-1/workflows/draft/trigger/run',
expectedBody: { node_id: 'schedule-1' },
expectedNodeIds: ['schedule-1'],
expectedIsAll: false,
},
{
title: 'webhook',
params: { node_id: 'webhook-1' },
options: { mode: TriggerType.Webhook, webhookNodeId: 'webhook-1' },
expectedUrl: '/apps/app-1/workflows/draft/trigger/run',
expectedBody: { node_id: 'webhook-1' },
expectedNodeIds: ['webhook-1'],
expectedIsAll: false,
},
{
title: 'plugin',
params: { node_id: 'plugin-1' },
options: { mode: TriggerType.Plugin, pluginNodeId: 'plugin-1' },
expectedUrl: '/apps/app-1/workflows/draft/trigger/run',
expectedBody: { node_id: 'plugin-1' },
expectedNodeIds: ['plugin-1'],
expectedIsAll: false,
},
{
// "all" uses the run-all endpoint and flags listening as all-triggers.
title: 'all',
params: { node_ids: ['trigger-1', 'trigger-2'] },
options: { mode: TriggerType.All, allNodeIds: ['trigger-1', 'trigger-2'] },
expectedUrl: '/apps/app-1/workflows/draft/trigger/run-all',
expectedBody: { node_ids: ['trigger-1', 'trigger-2'] },
expectedNodeIds: ['trigger-1', 'trigger-2'],
expectedIsAll: true,
},
])('should dispatch $title trigger runs through the debug runner integration', async ({
params,
options,
expectedUrl,
expectedBody,
expectedNodeIds,
expectedIsAll,
}) => {
const { result } = renderHook(() => useWorkflowRun())
await act(async () => {
await result.current.handleRun(params, undefined, options)
})
expect(mocks.mockPost).toHaveBeenCalledWith(
expectedUrl,
expect.objectContaining({
body: expectedBody,
signal: expect.any(AbortSignal),
}),
{ needAllResponseContent: true },
)
expect(mocks.workflowStoreState.setIsListening).toHaveBeenCalledWith(true)
expect(mocks.workflowStoreState.setListeningTriggerNodeIds).toHaveBeenCalledWith(expectedNodeIds)
expect(mocks.workflowStoreState.setListeningTriggerIsAll).toHaveBeenCalledWith(expectedIsAll)
expect(mocks.mockSsePost).not.toHaveBeenCalled()
})
// Captures the context passed to the (spied) base callback factory and checks
// its trackWorkflowRunFailed hook forwards to analytics with the flow id.
it('should expose the workflow-failed tracker through the callback factory context', async () => {
const { result } = renderHook(() => useWorkflowRun())
await act(async () => {
await result.current.handleRun({ inputs: { query: 'hello' } })
})
const baseCallbackFactoryContext = mocks.mockCreateBaseWorkflowRunCallbacks.mock.calls.at(-1)?.[0] as {
trackWorkflowRunFailed: (params: { error?: string, node_type?: string }) => void
}
baseCallbackFactoryContext.trackWorkflowRunFailed({ error: 'failed', node_type: 'llm' })
expect(mocks.mockTrackEvent).toHaveBeenCalledWith('workflow_run_failed', {
workflow_id: 'flow-1',
reason: 'failed',
node_type: 'llm',
})
})
// getOrCreatePlayer is lazy: the audio player is only built when invoked, and
// the url/public flag depend on whether the run carried a public token.
it('should lazily create audio players with the correct public and private tts urls', async () => {
const { result } = renderHook(() => useWorkflowRun())
await act(async () => {
await result.current.handleRun({ token: 'public-token' })
})
const publicBaseCallbackFactoryContext = mocks.mockCreateBaseWorkflowRunCallbacks.mock.calls.at(-1)?.[0] as {
getOrCreatePlayer: () => unknown
}
publicBaseCallbackFactoryContext.getOrCreatePlayer()
expect(mocks.mockGetAudioPlayer).toHaveBeenCalledWith(
'/text-to-audio',
true,
expect.any(String),
'none',
'none',
expect.any(Function),
)
mocks.mockSsePost.mockClear()
mocks.mockGetAudioPlayer.mockClear()
// Second run with an appId instead of a token: private, app-scoped url.
await act(async () => {
await result.current.handleRun({ appId: 'app-2' })
})
const privateBaseCallbackFactoryContext = mocks.mockCreateBaseWorkflowRunCallbacks.mock.calls.at(-1)?.[0] as {
getOrCreatePlayer: () => unknown
}
privateBaseCallbackFactoryContext.getOrCreatePlayer()
expect(mocks.mockGetAudioPlayer).toHaveBeenCalledWith(
'/apps/app-2/text-to-audio',
false,
expect.any(String),
'none',
'none',
expect.any(Function),
)
})
// With a task id, stop hits the stop endpoint and marks the run Stopped; with
// an empty id, stop aborts every window-stored debug controller plus the
// controller captured from the ssePost options.
it('should stop workflow runs by task id or by aborting active debug controllers', async () => {
const { result } = renderHook(() => useWorkflowRun())
await act(async () => {
await result.current.handleRun({ inputs: { query: 'hello' } })
})
act(() => {
result.current.handleStopRun('task-1')
})
expect(mocks.mockStopWorkflowRun).toHaveBeenCalledWith('/apps/app-1/workflow-runs/tasks/task-1/stop')
expect(mocks.workflowStoreState.setWorkflowRunningData).toHaveBeenCalledWith(expect.objectContaining({
result: expect.objectContaining({
status: WorkflowRunningStatus.Stopped,
}),
}))
const webhookAbort = vi.fn()
const pluginAbort = vi.fn()
const scheduleAbort = vi.fn()
const allTriggersAbort = vi.fn()
const windowWithDebugControllers = window as DebugControllerWindow
windowWithDebugControllers.__webhookDebugAbortController = { abort: webhookAbort }
windowWithDebugControllers.__pluginDebugAbortController = { abort: pluginAbort }
windowWithDebugControllers.__scheduleDebugAbortController = { abort: scheduleAbort }
windowWithDebugControllers.__allTriggersDebugAbortController = { abort: allTriggersAbort }
const refController = new AbortController()
const refAbortSpy = vi.spyOn(refController, 'abort')
// Hand the hook a real controller through the captured ssePost option.
const { getAbortController } = mocks.mockSsePost.mock.calls.at(-1)?.[2] as {
getAbortController?: (controller: AbortController) => void
}
getAbortController?.(refController)
act(() => {
result.current.handleStopRun('')
})
expect(webhookAbort).toHaveBeenCalled()
expect(pluginAbort).toHaveBeenCalled()
expect(scheduleAbort).toHaveBeenCalled()
expect(allTriggersAbort).toHaveBeenCalled()
expect(refAbortSpy).toHaveBeenCalled()
})
// Restoring a published workflow normalizes node selection, remaps published
// feature keys to the runtime feature store, and sets the env vars.
it('should restore published workflow graph, features, and environment variables', () => {
const { result } = renderHook(() => useWorkflowRun())
act(() => {
result.current.handleRestoreFromPublishedWorkflow({
graph: {
nodes: [{ id: 'published-node', selected: true, data: { selected: true, label: 'Published' } }],
edges: [{ id: 'published-edge' }],
viewport: { x: 10, y: 20, zoom: 0.8 },
},
features: {
opening_statement: 'hello',
suggested_questions: ['Q1'],
suggested_questions_after_answer: { enabled: true },
text_to_speech: { enabled: true },
speech_to_text: { enabled: true },
retriever_resource: { enabled: true },
sensitive_word_avoidance: { enabled: true },
file_upload: { enabled: true },
},
environment_variables: [{ id: 'env-published', value: 'value' }],
} as never)
})
expect(mocks.mockHandleUpdateWorkflowCanvas).toHaveBeenCalledWith({
nodes: [{ id: 'published-node', selected: false, data: { selected: false, label: 'Published' } }],
edges: [{ id: 'published-edge' }],
viewport: { x: 10, y: 20, zoom: 0.8 },
})
expect(mocks.featuresStoreSetState).toHaveBeenCalledWith({
features: expect.objectContaining({
opening: expect.objectContaining({
enabled: true,
opening_statement: 'hello',
}),
file: { enabled: true },
}),
})
expect(mocks.workflowStoreState.setEnvironmentVariables).toHaveBeenCalledWith([{ id: 'env-published', value: 'value' }])
})
// A published workflow with no environment_variables field restores with an
// empty env var list instead of leaving the previous value in place.
it('should restore published workflows with empty environment variables as an empty list', () => {
const { result } = renderHook(() => useWorkflowRun())
act(() => {
result.current.handleRestoreFromPublishedWorkflow({
graph: {
nodes: [{ id: 'published-node', selected: true, data: { selected: true, label: 'Published' } }],
edges: [],
viewport: { x: 0, y: 0, zoom: 1 },
},
features: {
opening_statement: '',
suggested_questions: [],
suggested_questions_after_answer: { enabled: false },
text_to_speech: { enabled: false },
speech_to_text: { enabled: false },
retriever_resource: { enabled: false },
sensitive_word_avoidance: { enabled: false },
file_upload: { enabled: false },
},
} as never)
})
expect(mocks.featuresStoreSetState).toHaveBeenCalledWith({
features: expect.objectContaining({
opening: expect.objectContaining({ enabled: false }),
file: { enabled: false },
}),
})
expect(mocks.workflowStoreState.setEnvironmentVariables).toHaveBeenCalledWith([])
})
})

View File

@@ -1,391 +0,0 @@
import { act, renderHook } from '@testing-library/react'
import { TriggerType } from '@/app/components/workflow/header/test-run-menu'
import {
BlockEnum,
WorkflowRunningStatus,
} from '@/app/components/workflow/types'
import { useWorkflowStartRun } from '../use-workflow-start-run'
// Module-level spies shared by the vi.mock factories below and reset in
// beforeEach via vi.clearAllMocks().
const mockGetNodes = vi.fn()
const mockGetFeaturesState = vi.fn()
const mockHandleCancelDebugAndPreviewPanel = vi.fn()
const mockHandleRun = vi.fn()
const mockDoSyncWorkflowDraft = vi.fn()
const mockUseIsChatMode = vi.fn()
const mockSetShowDebugAndPreviewPanel = vi.fn()
const mockSetShowInputsPanel = vi.fn()
const mockSetShowEnvPanel = vi.fn()
const mockSetShowGlobalVariablePanel = vi.fn()
const mockSetShowChatVariablePanel = vi.fn()
const mockSetListeningTriggerType = vi.fn()
const mockSetListeningTriggerNodeId = vi.fn()
const mockSetListeningTriggerNodeIds = vi.fn()
const mockSetListeningTriggerIsAll = vi.fn()
const mockSetHistoryWorkflowData = vi.fn()
// Rebuilt per test by createWorkflowStoreState(); the store mock reads it lazily.
let workflowStoreState: Record<string, unknown>
// Module mocks for every hook dependency of useWorkflowStartRun. The store
// mock closes over the mutable `workflowStoreState` so tests can swap it.
vi.mock('reactflow', () => ({
useStoreApi: () => ({
getState: () => ({
getNodes: mockGetNodes,
}),
}),
}))
vi.mock('@/app/components/base/features/hooks', () => ({
useFeaturesStore: () => ({
getState: mockGetFeaturesState,
}),
}))
vi.mock('@/app/components/workflow/hooks', () => ({
useWorkflowInteractions: () => ({
handleCancelDebugAndPreviewPanel: mockHandleCancelDebugAndPreviewPanel,
}),
}))
vi.mock('@/app/components/workflow/store', () => ({
useWorkflowStore: () => ({
getState: () => workflowStoreState,
}),
}))
vi.mock('@/app/components/workflow-app/hooks', () => ({
useIsChatMode: () => mockUseIsChatMode(),
useNodesSyncDraft: () => ({
doSyncWorkflowDraft: mockDoSyncWorkflowDraft,
}),
useWorkflowRun: () => ({
handleRun: mockHandleRun,
}),
}))
// Baseline workflow store state for these tests; individual fields can be
// overridden per test (e.g. an already-open debug panel or a running workflow).
const createWorkflowStoreState = (overrides: Record<string, unknown> = {}) => {
  const baseState = {
    workflowRunningData: undefined,
    showDebugAndPreviewPanel: false,
    setShowDebugAndPreviewPanel: mockSetShowDebugAndPreviewPanel,
    setShowInputsPanel: mockSetShowInputsPanel,
    setShowEnvPanel: mockSetShowEnvPanel,
    setShowGlobalVariablePanel: mockSetShowGlobalVariablePanel,
    setShowChatVariablePanel: mockSetShowChatVariablePanel,
    setListeningTriggerType: mockSetListeningTriggerType,
    setListeningTriggerNodeId: mockSetListeningTriggerNodeId,
    setListeningTriggerNodeIds: mockSetListeningTriggerNodeIds,
    setListeningTriggerIsAll: mockSetListeningTriggerIsAll,
    setHistoryWorkflowData: mockSetHistoryWorkflowData,
  }
  return { ...baseState, ...overrides }
}
describe('useWorkflowStartRun', () => {
// Default fixture: a single start node with no variables, image upload
// disabled, draft sync resolving, and non-chat mode.
beforeEach(() => {
vi.clearAllMocks()
workflowStoreState = createWorkflowStoreState()
mockGetNodes.mockReturnValue([
{ id: 'start-1', data: { type: BlockEnum.Start, variables: [] } },
])
mockGetFeaturesState.mockReturnValue({
features: {
file: {
image: {
enabled: false,
},
},
},
})
mockDoSyncWorkflowDraft.mockResolvedValue(undefined)
mockUseIsChatMode.mockReturnValue(false)
})
it('should run the workflow immediately when there are no start variables and no image upload input', async () => {
  const hook = renderHook(() => useWorkflowStartRun())
  await act(async () => {
    await hook.result.current.handleWorkflowStartRunInWorkflow()
  })
  // Side panels close, the draft syncs, and the run starts with empty inputs
  // rather than opening the inputs panel.
  expect(mockSetShowEnvPanel).toHaveBeenCalledWith(false)
  expect(mockSetShowGlobalVariablePanel).toHaveBeenCalledWith(false)
  expect(mockDoSyncWorkflowDraft).toHaveBeenCalled()
  expect(mockHandleRun).toHaveBeenCalledWith({ inputs: {}, files: [] })
  expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
  expect(mockSetShowInputsPanel).toHaveBeenCalledWith(false)
})
// Start variables force the inputs panel open instead of running directly.
it('should open the input panel instead of running immediately when start inputs are required', async () => {
mockGetNodes.mockReturnValue([
{ id: 'start-1', data: { type: BlockEnum.Start, variables: [{ name: 'query' }] } },
])
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowStartRunInWorkflow()
})
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockSetShowInputsPanel).toHaveBeenCalledWith(true)
})
// Enabled image upload counts as a required input even with no variables.
it('should open the input panel when image upload is enabled even without start variables', async () => {
mockGetFeaturesState.mockReturnValue({
features: {
file: {
image: {
enabled: true,
},
},
},
})
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowStartRunInWorkflow()
})
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockSetShowInputsPanel).toHaveBeenCalledWith(true)
})
// An already-open debug panel toggles it closed instead of starting a run.
it('should cancel the current debug panel instead of starting another workflow when one is already open', async () => {
workflowStoreState = createWorkflowStoreState({
showDebugAndPreviewPanel: true,
})
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowStartRunInWorkflow()
})
expect(mockHandleCancelDebugAndPreviewPanel).toHaveBeenCalled()
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
})
// A run already in Running status short-circuits before any panel changes.
it('should short-circuit workflow start when a run is already in progress', async () => {
workflowStoreState = createWorkflowStoreState({
workflowRunningData: {
result: {
status: WorkflowRunningStatus.Running,
},
},
})
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowStartRunInWorkflow()
})
expect(mockSetShowEnvPanel).not.toHaveBeenCalled()
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
})
// Happy path for the schedule trigger: listener state is populated for the
// single node and handleRun receives schedule-mode options.
it('should configure schedule trigger runs and execute the workflow with schedule options', async () => {
mockGetNodes.mockReturnValue([
{ id: 'schedule-1', data: { type: BlockEnum.TriggerSchedule } },
])
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowTriggerScheduleRunInWorkflow('schedule-1')
})
expect(mockSetShowEnvPanel).toHaveBeenCalledWith(false)
expect(mockSetShowGlobalVariablePanel).toHaveBeenCalledWith(false)
expect(mockSetListeningTriggerType).toHaveBeenCalledWith(BlockEnum.TriggerSchedule)
expect(mockSetListeningTriggerNodeId).toHaveBeenCalledWith('schedule-1')
expect(mockSetListeningTriggerNodeIds).toHaveBeenCalledWith(['schedule-1'])
expect(mockSetListeningTriggerIsAll).toHaveBeenCalledWith(false)
expect(mockDoSyncWorkflowDraft).toHaveBeenCalled()
expect(mockHandleRun).toHaveBeenCalledWith(
{},
undefined,
{
mode: TriggerType.Schedule,
scheduleNodeId: 'schedule-1',
},
)
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockSetShowInputsPanel).toHaveBeenCalledWith(false)
})
// Schedule trigger obeys the same cancel-on-open-panel rule as a normal run.
it('should cancel schedule trigger execution when the debug panel is already open', async () => {
workflowStoreState = createWorkflowStoreState({
showDebugAndPreviewPanel: true,
})
mockGetNodes.mockReturnValue([
{ id: 'schedule-1', data: { type: BlockEnum.TriggerSchedule } },
])
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowTriggerScheduleRunInWorkflow('schedule-1')
})
expect(mockHandleCancelDebugAndPreviewPanel).toHaveBeenCalled()
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
})
// Empty / missing node ids are silently ignored for all three trigger kinds.
it.each([
{
title: 'schedule',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerScheduleRunInWorkflow(undefined),
},
{
title: 'webhook',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerWebhookRunInWorkflow({ nodeId: '' }),
},
{
title: 'plugin',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerPluginRunInWorkflow(''),
},
])('should ignore $title trigger execution when the node id is empty', async ({ invoke }) => {
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await invoke(result.current)
})
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
})
// A node id that does not exist in the graph warns (with the id appended)
// and aborts before any draft sync or run.
it.each([
{
title: 'schedule',
warnMessage: 'handleWorkflowTriggerScheduleRunInWorkflow: schedule node not found',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerScheduleRunInWorkflow('schedule-missing'),
},
{
title: 'webhook',
warnMessage: 'handleWorkflowTriggerWebhookRunInWorkflow: webhook node not found',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerWebhookRunInWorkflow({ nodeId: 'webhook-missing' }),
},
{
title: 'plugin',
warnMessage: 'handleWorkflowTriggerPluginRunInWorkflow: plugin node not found',
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerPluginRunInWorkflow('plugin-missing'),
},
])('should warn when the $title trigger node cannot be found', async ({ warnMessage, invoke }) => {
const consoleWarnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
mockGetNodes.mockReturnValue([{ id: 'other-node', data: { type: BlockEnum.Start } }])
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await invoke(result.current)
})
expect(consoleWarnSpy).toHaveBeenCalledWith(warnMessage, expect.stringContaining('missing'))
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
consoleWarnSpy.mockRestore()
})
// Webhook and plugin triggers carry node-specific params/options to handleRun.
it.each([
{
title: 'webhook',
nodeId: 'webhook-1',
nodeType: BlockEnum.TriggerWebhook,
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerWebhookRunInWorkflow({ nodeId: 'webhook-1' }),
expectedParams: { node_id: 'webhook-1' },
expectedOptions: { mode: TriggerType.Webhook, webhookNodeId: 'webhook-1' },
},
{
title: 'plugin',
nodeId: 'plugin-1',
nodeType: BlockEnum.TriggerPlugin,
invoke: (hook: ReturnType<typeof useWorkflowStartRun>) => hook.handleWorkflowTriggerPluginRunInWorkflow('plugin-1'),
expectedParams: { node_id: 'plugin-1' },
expectedOptions: { mode: TriggerType.Plugin, pluginNodeId: 'plugin-1' },
},
])('should configure $title trigger runs with node-specific options', async ({ nodeId, nodeType, invoke, expectedParams, expectedOptions }) => {
mockGetNodes.mockReturnValue([
{ id: nodeId, data: { type: nodeType } },
])
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await invoke(result.current)
})
expect(mockSetShowEnvPanel).toHaveBeenCalledWith(false)
expect(mockSetShowGlobalVariablePanel).toHaveBeenCalledWith(false)
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockSetShowInputsPanel).toHaveBeenCalledWith(false)
expect(mockSetListeningTriggerType).toHaveBeenCalledWith(nodeType)
expect(mockSetListeningTriggerNodeId).toHaveBeenCalledWith(nodeId)
expect(mockSetListeningTriggerNodeIds).toHaveBeenCalledWith([nodeId])
expect(mockSetListeningTriggerIsAll).toHaveBeenCalledWith(false)
expect(mockDoSyncWorkflowDraft).toHaveBeenCalled()
expect(mockHandleRun).toHaveBeenCalledWith(expectedParams, undefined, expectedOptions)
})
// Run-all: listener state is marked global (isAll=true, nodeId=null) and
// handleRun receives the full node id list in both params and options.
it('should run all triggers and mark the listener state as global', async () => {
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowRunAllTriggersInWorkflow(['trigger-1', 'trigger-2'])
})
expect(mockSetShowEnvPanel).toHaveBeenCalledWith(false)
expect(mockSetShowGlobalVariablePanel).toHaveBeenCalledWith(false)
expect(mockSetShowInputsPanel).toHaveBeenCalledWith(false)
expect(mockSetListeningTriggerIsAll).toHaveBeenCalledWith(true)
expect(mockSetListeningTriggerNodeIds).toHaveBeenCalledWith(['trigger-1', 'trigger-2'])
expect(mockSetListeningTriggerNodeId).toHaveBeenCalledWith(null)
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockDoSyncWorkflowDraft).toHaveBeenCalled()
expect(mockHandleRun).toHaveBeenCalledWith(
{ node_ids: ['trigger-1', 'trigger-2'] },
undefined,
{
mode: TriggerType.All,
allNodeIds: ['trigger-1', 'trigger-2'],
},
)
})
// An empty id list is a no-op: no listener state change, no sync, no run.
it('should ignore run-all requests when there are no trigger nodes', async () => {
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
await result.current.handleWorkflowRunAllTriggersInWorkflow([])
})
expect(mockSetListeningTriggerIsAll).not.toHaveBeenCalled()
expect(mockDoSyncWorkflowDraft).not.toHaveBeenCalled()
expect(mockHandleRun).not.toHaveBeenCalled()
})
// Chat mode routes to the chatflow path: panels are prepared and history is
// cleared, but no workflow run is started.
it('should route handleStartWorkflowRun to the chatflow path when chat mode is enabled', async () => {
mockUseIsChatMode.mockReturnValue(true)
const { result } = renderHook(() => useWorkflowStartRun())
await act(async () => {
result.current.handleStartWorkflowRun()
})
expect(mockSetShowEnvPanel).toHaveBeenCalledWith(false)
expect(mockSetShowChatVariablePanel).toHaveBeenCalledWith(false)
expect(mockSetShowGlobalVariablePanel).toHaveBeenCalledWith(false)
expect(mockSetShowDebugAndPreviewPanel).toHaveBeenCalledWith(true)
expect(mockSetHistoryWorkflowData).toHaveBeenCalledWith(undefined)
expect(mockHandleRun).not.toHaveBeenCalled()
})
})

View File

@@ -1,82 +0,0 @@
import { renderHook } from '@testing-library/react'
import { useWorkflowTemplate } from '../use-workflow-template'
// Controls chat-mode branching inside the hook under test.
const mockUseIsChatMode = vi.fn()
// Records every generateNewNode invocation so assertions can inspect the
// node payloads the hook builds.
let generateNewNodeCalls: Array<Record<string, unknown>> = []
vi.mock('@/app/components/workflow-app/hooks/use-is-chat-mode', () => ({
useIsChatMode: () => mockUseIsChatMode(),
}))
vi.mock('@/app/components/workflow/utils', async (importOriginal) => {
const actual = await importOriginal<typeof import('@/app/components/workflow/utils')>()
return {
...actual,
// Deterministic stand-in: ids fall back to 'generated-<n>' keyed on call count.
generateNewNode: (args: { id?: string, data: Record<string, unknown>, position: Record<string, unknown> }) => {
generateNewNodeCalls.push(args)
return {
newNode: {
id: args.id ?? `generated-${generateNewNodeCalls.length}`,
data: args.data,
position: args.position,
},
}
},
}
})
describe('useWorkflowTemplate', () => {
beforeEach(() => {
vi.clearAllMocks()
generateNewNodeCalls = []
})
// Workflow (non-chat) mode: only the start node is generated, no edges.
it('should return only the start node template in workflow mode', () => {
mockUseIsChatMode.mockReturnValue(false)
const { result } = renderHook(() => useWorkflowTemplate())
expect(result.current.nodes).toHaveLength(1)
expect(result.current.edges).toEqual([])
expect(generateNewNodeCalls).toHaveLength(1)
})
// Chat mode: start -> llm -> answer chain with two connecting edges, and the
// answer node references the llm output variable.
it('should build start, llm, and answer templates with linked edges in chat mode', () => {
mockUseIsChatMode.mockReturnValue(true)
const { result } = renderHook(() => useWorkflowTemplate())
expect(result.current.nodes).toHaveLength(3)
expect(result.current.nodes.map(node => node.id)).toEqual(['generated-1', 'llm', 'answer'])
expect(result.current.edges).toEqual([
{
id: 'generated-1-llm',
source: 'generated-1',
sourceHandle: 'source',
target: 'llm',
targetHandle: 'target',
},
{
id: 'llm-answer',
source: 'llm',
sourceHandle: 'source',
target: 'answer',
targetHandle: 'target',
},
])
expect(generateNewNodeCalls).toHaveLength(3)
expect(generateNewNodeCalls[0].data).toMatchObject({
type: 'start',
title: 'workflow.blocks.start',
})
expect(generateNewNodeCalls[1].data).toMatchObject({
type: 'llm',
title: 'workflow.blocks.llm',
})
expect(generateNewNodeCalls[2].data).toMatchObject({
type: 'answer',
title: 'workflow.blocks.answer',
answer: '{{#llm.text#}}',
})
})
})

View File

@@ -1,470 +0,0 @@
import type AudioPlayer from '@/app/components/base/audio-btn/audio'
import type { IOtherOptions } from '@/service/base'
import { AudioPlayerManager } from '@/app/components/base/audio-btn/audio.player.manager'
import { sseGet } from '@/service/base'
// Dimensions of the workflow canvas container, forwarded to node-started /
// iteration-started / loop-started handlers so they can position panels.
type ContainerSize = {
clientWidth: number
clientHeight: number
}
// Internal store-side event handlers for a workflow run. Types are derived
// from IOtherOptions so they stay in lockstep with the SSE layer, except the
// "started" handlers which additionally receive the container size.
type WorkflowRunEventHandlers = {
handleWorkflowStarted: NonNullable<IOtherOptions['onWorkflowStarted']>
handleWorkflowFinished: NonNullable<IOtherOptions['onWorkflowFinished']>
handleWorkflowFailed: () => void
handleWorkflowNodeStarted: (params: Parameters<NonNullable<IOtherOptions['onNodeStarted']>>[0], containerParams: ContainerSize) => void
handleWorkflowNodeFinished: NonNullable<IOtherOptions['onNodeFinished']>
handleWorkflowNodeHumanInputRequired: NonNullable<IOtherOptions['onHumanInputRequired']>
handleWorkflowNodeHumanInputFormFilled: NonNullable<IOtherOptions['onHumanInputFormFilled']>
handleWorkflowNodeHumanInputFormTimeout: NonNullable<IOtherOptions['onHumanInputFormTimeout']>
handleWorkflowNodeIterationStarted: (params: Parameters<NonNullable<IOtherOptions['onIterationStart']>>[0], containerParams: ContainerSize) => void
handleWorkflowNodeIterationNext: NonNullable<IOtherOptions['onIterationNext']>
handleWorkflowNodeIterationFinished: NonNullable<IOtherOptions['onIterationFinish']>
handleWorkflowNodeLoopStarted: (params: Parameters<NonNullable<IOtherOptions['onLoopStart']>>[0], containerParams: ContainerSize) => void
handleWorkflowNodeLoopNext: NonNullable<IOtherOptions['onLoopNext']>
handleWorkflowNodeLoopFinished: NonNullable<IOtherOptions['onLoopFinish']>
handleWorkflowNodeRetry: NonNullable<IOtherOptions['onNodeRetry']>
handleWorkflowAgentLog: NonNullable<IOtherOptions['onAgentLog']>
handleWorkflowTextChunk: NonNullable<IOtherOptions['onTextChunk']>
handleWorkflowTextReplace: NonNullable<IOtherOptions['onTextReplace']>
handleWorkflowPaused: () => void
}
// Optional caller-supplied callbacks that are invoked after the internal
// handlers for the matching event.
type UserCallbackHandlers = {
onWorkflowStarted?: IOtherOptions['onWorkflowStarted']
onWorkflowFinished?: IOtherOptions['onWorkflowFinished']
onNodeStarted?: IOtherOptions['onNodeStarted']
onNodeFinished?: IOtherOptions['onNodeFinished']
onIterationStart?: IOtherOptions['onIterationStart']
onIterationNext?: IOtherOptions['onIterationNext']
onIterationFinish?: IOtherOptions['onIterationFinish']
onLoopStart?: IOtherOptions['onLoopStart']
onLoopNext?: IOtherOptions['onLoopNext']
onLoopFinish?: IOtherOptions['onLoopFinish']
onNodeRetry?: IOtherOptions['onNodeRetry']
onAgentLog?: IOtherOptions['onAgentLog']
onError?: IOtherOptions['onError']
onWorkflowPaused?: IOtherOptions['onWorkflowPaused']
onHumanInputRequired?: IOtherOptions['onHumanInputRequired']
onHumanInputFormFilled?: IOtherOptions['onHumanInputFormFilled']
onHumanInputFormTimeout?: IOtherOptions['onHumanInputFormTimeout']
onCompleted?: IOtherOptions['onCompleted']
}
// Shared context for both callback builders below.
type CallbackContext = {
clientWidth: number
clientHeight: number
runHistoryUrl: string
isInWorkflowDebug: boolean
fetchInspectVars: (params: Record<string, never>) => void
invalidAllLastRun: () => void
invalidateRunHistory: (url: string) => void
clearAbortController: () => void
clearListeningState: () => void
trackWorkflowRunFailed: (params: unknown) => void
handlers: WorkflowRunEventHandlers
callbacks: UserCallbackHandlers
restCallback: IOtherOptions
}
// Base builder additionally lazily acquires an audio player for TTS events.
type BaseCallbacksContext = CallbackContext & {
getOrCreatePlayer: () => AudioPlayer | null
}
// Final builder reuses the base options, a pre-resolved player, and wires the
// run's AbortController into the store.
type FinalCallbacksContext = CallbackContext & {
baseSseOptions: IOtherOptions
player: AudioPlayer | null
setAbortController: (controller: AbortController) => void
}
/**
 * Build the base SSE option set for a workflow run.
 *
 * Each event wraps the internal store handler first, then the optional
 * caller-supplied callback. `restCallback` is spread FIRST, so the wrappers
 * defined here always win over caller-provided raw handlers.
 *
 * On pause the run is resumed by re-subscribing to the run's event stream
 * with these same options (see onWorkflowPaused).
 */
export const createBaseWorkflowRunCallbacks = ({
clientWidth,
clientHeight,
runHistoryUrl,
isInWorkflowDebug,
fetchInspectVars,
invalidAllLastRun,
invalidateRunHistory,
clearAbortController,
clearListeningState,
trackWorkflowRunFailed,
handlers,
callbacks,
restCallback,
getOrCreatePlayer,
}: BaseCallbacksContext): IOtherOptions => {
const {
handleWorkflowStarted,
handleWorkflowFinished,
handleWorkflowFailed,
handleWorkflowNodeStarted,
handleWorkflowNodeFinished,
handleWorkflowNodeHumanInputRequired,
handleWorkflowNodeHumanInputFormFilled,
handleWorkflowNodeHumanInputFormTimeout,
handleWorkflowNodeIterationStarted,
handleWorkflowNodeIterationNext,
handleWorkflowNodeIterationFinished,
handleWorkflowNodeLoopStarted,
handleWorkflowNodeLoopNext,
handleWorkflowNodeLoopFinished,
handleWorkflowNodeRetry,
handleWorkflowAgentLog,
handleWorkflowTextChunk,
handleWorkflowTextReplace,
handleWorkflowPaused,
} = handlers
const {
onWorkflowStarted,
onWorkflowFinished,
onNodeStarted,
onNodeFinished,
onIterationStart,
onIterationNext,
onIterationFinish,
onLoopStart,
onLoopNext,
onLoopFinish,
onNodeRetry,
onAgentLog,
onError,
onWorkflowPaused,
onHumanInputRequired,
onHumanInputFormFilled,
onHumanInputFormTimeout,
onCompleted,
} = callbacks
// Failure path: release the abort controller, mark the run failed, refresh
// history, stop trigger listening, then report to the caller and telemetry.
const wrappedOnError: IOtherOptions['onError'] = (params, code) => {
clearAbortController()
handleWorkflowFailed()
invalidateRunHistory(runHistoryUrl)
clearListeningState()
if (onError)
onError(params, code)
trackWorkflowRunFailed(params)
}
const wrappedOnCompleted: IOtherOptions['onCompleted'] = async (hasError, errorMessage) => {
clearAbortController()
clearListeningState()
if (onCompleted)
onCompleted(hasError, errorMessage)
}
const baseSseOptions: IOtherOptions = {
...restCallback,
onWorkflowStarted: (params) => {
handleWorkflowStarted(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowStarted)
onWorkflowStarted(params)
},
onWorkflowFinished: (params) => {
clearListeningState()
handleWorkflowFinished(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowFinished)
onWorkflowFinished(params)
// Debug mode additionally refreshes the variable inspector state.
if (isInWorkflowDebug) {
fetchInspectVars({})
invalidAllLastRun()
}
},
onNodeStarted: (params) => {
handleWorkflowNodeStarted(params, { clientWidth, clientHeight })
if (onNodeStarted)
onNodeStarted(params)
},
onNodeFinished: (params) => {
handleWorkflowNodeFinished(params)
if (onNodeFinished)
onNodeFinished(params)
},
onIterationStart: (params) => {
handleWorkflowNodeIterationStarted(params, { clientWidth, clientHeight })
if (onIterationStart)
onIterationStart(params)
},
onIterationNext: (params) => {
handleWorkflowNodeIterationNext(params)
if (onIterationNext)
onIterationNext(params)
},
onIterationFinish: (params) => {
handleWorkflowNodeIterationFinished(params)
if (onIterationFinish)
onIterationFinish(params)
},
onLoopStart: (params) => {
handleWorkflowNodeLoopStarted(params, { clientWidth, clientHeight })
if (onLoopStart)
onLoopStart(params)
},
onLoopNext: (params) => {
handleWorkflowNodeLoopNext(params)
if (onLoopNext)
onLoopNext(params)
},
onLoopFinish: (params) => {
handleWorkflowNodeLoopFinished(params)
if (onLoopFinish)
onLoopFinish(params)
},
onNodeRetry: (params) => {
handleWorkflowNodeRetry(params)
if (onNodeRetry)
onNodeRetry(params)
},
onAgentLog: (params) => {
handleWorkflowAgentLog(params)
if (onAgentLog)
onAgentLog(params)
},
onTextChunk: (params) => {
handleWorkflowTextChunk(params)
},
onTextReplace: (params) => {
handleWorkflowTextReplace(params)
},
// TTS: the player is created lazily on the first non-empty chunk.
onTTSChunk: (messageId: string, audio: string) => {
if (!audio || audio === '')
return
const audioPlayer = getOrCreatePlayer()
if (audioPlayer) {
audioPlayer.playAudioWithAudio(audio, true)
AudioPlayerManager.getInstance().resetMsgId(messageId)
}
},
onTTSEnd: (_messageId: string, audio: string) => {
const audioPlayer = getOrCreatePlayer()
if (audioPlayer)
audioPlayer.playAudioWithAudio(audio, false)
},
// Pause: re-subscribe to the run's event stream with these same options so
// the run resumes streaming when it continues (self-referencing closure is
// resolved at call time, after baseSseOptions is initialized).
onWorkflowPaused: (params) => {
handleWorkflowPaused()
invalidateRunHistory(runHistoryUrl)
if (onWorkflowPaused)
onWorkflowPaused(params)
const url = `/workflow/${params.workflow_run_id}/events`
sseGet(url, {}, baseSseOptions)
},
onHumanInputRequired: (params) => {
handleWorkflowNodeHumanInputRequired(params)
if (onHumanInputRequired)
onHumanInputRequired(params)
},
onHumanInputFormFilled: (params) => {
handleWorkflowNodeHumanInputFormFilled(params)
if (onHumanInputFormFilled)
onHumanInputFormFilled(params)
},
onHumanInputFormTimeout: (params) => {
handleWorkflowNodeHumanInputFormTimeout(params)
if (onHumanInputFormTimeout)
onHumanInputFormTimeout(params)
},
onError: wrappedOnError,
onCompleted: wrappedOnCompleted,
}
return baseSseOptions
}
/**
 * Build the final SSE option set used for the actual run request.
 *
 * Starts from `baseSseOptions`, re-wraps the per-event handlers (without the
 * abort-controller/listening cleanup the base error path does), captures the
 * run's AbortController via getAbortController, and uses the pre-resolved
 * `player` for TTS instead of lazy creation.
 */
export const createFinalWorkflowRunCallbacks = ({
clientWidth,
clientHeight,
runHistoryUrl,
isInWorkflowDebug,
fetchInspectVars,
invalidAllLastRun,
invalidateRunHistory,
clearAbortController: _clearAbortController,
clearListeningState: _clearListeningState,
trackWorkflowRunFailed: _trackWorkflowRunFailed,
handlers,
callbacks,
restCallback,
baseSseOptions,
player,
setAbortController,
}: FinalCallbacksContext): IOtherOptions => {
const {
handleWorkflowFinished,
handleWorkflowFailed,
handleWorkflowNodeStarted,
handleWorkflowNodeFinished,
handleWorkflowNodeHumanInputRequired,
handleWorkflowNodeHumanInputFormFilled,
handleWorkflowNodeHumanInputFormTimeout,
handleWorkflowNodeIterationStarted,
handleWorkflowNodeIterationNext,
handleWorkflowNodeIterationFinished,
handleWorkflowNodeLoopStarted,
handleWorkflowNodeLoopNext,
handleWorkflowNodeLoopFinished,
handleWorkflowNodeRetry,
handleWorkflowAgentLog,
handleWorkflowTextChunk,
handleWorkflowTextReplace,
handleWorkflowPaused,
} = handlers
const {
onWorkflowFinished,
onNodeStarted,
onNodeFinished,
onIterationStart,
onIterationNext,
onIterationFinish,
onLoopStart,
onLoopNext,
onLoopFinish,
onNodeRetry,
onAgentLog,
onError,
onWorkflowPaused,
onHumanInputRequired,
onHumanInputFormFilled,
onHumanInputFormTimeout,
} = callbacks
const finalCallbacks: IOtherOptions = {
...baseSseOptions,
// Expose the run's AbortController so the caller can stop the run.
getAbortController: (controller: AbortController) => {
setAbortController(controller)
},
onWorkflowFinished: (params) => {
handleWorkflowFinished(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowFinished)
onWorkflowFinished(params)
if (isInWorkflowDebug) {
fetchInspectVars({})
invalidAllLastRun()
}
},
onError: (params, code) => {
handleWorkflowFailed()
invalidateRunHistory(runHistoryUrl)
if (onError)
onError(params, code)
},
onNodeStarted: (params) => {
handleWorkflowNodeStarted(params, { clientWidth, clientHeight })
if (onNodeStarted)
onNodeStarted(params)
},
onNodeFinished: (params) => {
handleWorkflowNodeFinished(params)
if (onNodeFinished)
onNodeFinished(params)
},
onIterationStart: (params) => {
handleWorkflowNodeIterationStarted(params, { clientWidth, clientHeight })
if (onIterationStart)
onIterationStart(params)
},
onIterationNext: (params) => {
handleWorkflowNodeIterationNext(params)
if (onIterationNext)
onIterationNext(params)
},
onIterationFinish: (params) => {
handleWorkflowNodeIterationFinished(params)
if (onIterationFinish)
onIterationFinish(params)
},
onLoopStart: (params) => {
handleWorkflowNodeLoopStarted(params, { clientWidth, clientHeight })
if (onLoopStart)
onLoopStart(params)
},
onLoopNext: (params) => {
handleWorkflowNodeLoopNext(params)
if (onLoopNext)
onLoopNext(params)
},
onLoopFinish: (params) => {
handleWorkflowNodeLoopFinished(params)
if (onLoopFinish)
onLoopFinish(params)
},
onNodeRetry: (params) => {
handleWorkflowNodeRetry(params)
if (onNodeRetry)
onNodeRetry(params)
},
onAgentLog: (params) => {
handleWorkflowAgentLog(params)
if (onAgentLog)
onAgentLog(params)
},
onTextChunk: (params) => {
handleWorkflowTextChunk(params)
},
onTextReplace: (params) => {
handleWorkflowTextReplace(params)
},
onTTSChunk: (messageId: string, audio: string) => {
if (!audio || audio === '')
return
player?.playAudioWithAudio(audio, true)
AudioPlayerManager.getInstance().resetMsgId(messageId)
},
onTTSEnd: (_messageId: string, audio: string) => {
player?.playAudioWithAudio(audio, false)
},
// Pause resumes streaming against these final callbacks.
onWorkflowPaused: (params) => {
handleWorkflowPaused()
invalidateRunHistory(runHistoryUrl)
if (onWorkflowPaused)
onWorkflowPaused(params)
const url = `/workflow/${params.workflow_run_id}/events`
sseGet(url, {}, finalCallbacks)
},
onHumanInputRequired: (params) => {
handleWorkflowNodeHumanInputRequired(params)
if (onHumanInputRequired)
onHumanInputRequired(params)
},
onHumanInputFormFilled: (params) => {
handleWorkflowNodeHumanInputFormFilled(params)
if (onHumanInputFormFilled)
onHumanInputFormFilled(params)
},
// NOTE(review): restCallback is spread LAST here (the base builder spreads
// it FIRST), so any handler present on restCallback overrides the wrappers
// above — confirm this asymmetry is intentional.
...restCallback,
}
return finalCallbacks
}

View File

@@ -1,443 +0,0 @@
import type { Features as FeaturesData } from '@/app/components/base/features/types'
import type { TriggerNodeType } from '@/app/components/workflow/types'
import type { IOtherOptions } from '@/service/base'
import type { VersionHistory } from '@/types/workflow'
import { noop } from 'es-toolkit/function'
import { toast } from '@/app/components/base/ui/toast'
import { TriggerType } from '@/app/components/workflow/header/test-run-menu'
import { WorkflowRunningStatus } from '@/app/components/workflow/types'
import { handleStream, post } from '@/service/base'
import { ContentType } from '@/service/fetch'
import { AppModeEnum } from '@/types/app'
// The run mode is the trigger type that initiated the run.
export type HandleRunMode = TriggerType
// Per-mode node-id payloads; only the field matching `mode` is consulted.
export type HandleRunOptions = {
mode?: HandleRunMode
scheduleNodeId?: string
webhookNodeId?: string
pluginNodeId?: string
allNodeIds?: string[]
}
// Trigger types that support listen-and-debug runs (everything but UserInput).
export type DebuggableTriggerType = Exclude<TriggerType, TriggerType.UserInput>
// Minimal app shape used for URL building; full app detail is not required.
type AppDetailLike = {
id?: string
mode?: AppModeEnum
}
// token => public web-app TTS endpoint; appId => console TTS endpoint.
type TTSParamsLike = {
token?: string
appId?: string
}
// Store setters that manage run/listening UI state.
type ListeningStateActions = {
setWorkflowRunningData: (data: ReturnType<typeof createRunningWorkflowState> | ReturnType<typeof createFailedWorkflowState> | ReturnType<typeof createStoppedWorkflowState>) => void
setIsListening: (value: boolean) => void
setShowVariableInspectPanel: (value: boolean) => void
setListeningTriggerType: (value: TriggerNodeType | null) => void
setListeningTriggerNodeIds: (value: string[]) => void
setListeningTriggerIsAll: (value: boolean) => void
setListeningTriggerNodeId: (value: string | null) => void
}
// Everything runTriggerDebug needs to poll a trigger run and stream results.
type TriggerDebugRunnerOptions = {
debugType: DebuggableTriggerType
url: string
requestBody: unknown
baseSseOptions: IOtherOptions
controllerTarget: Record<string, unknown>
setAbortController: (controller: AbortController | null) => void
clearAbortController: () => void
clearListeningState: () => void
setWorkflowRunningData: ListeningStateActions['setWorkflowRunningData']
}
// Key under which each trigger type stashes its AbortController on the
// controller target (typically window), so stale debugs can be cancelled.
export const controllerKeyMap: Record<DebuggableTriggerType, string> = {
[TriggerType.Webhook]: '__webhookDebugAbortController',
[TriggerType.Plugin]: '__pluginDebugAbortController',
[TriggerType.All]: '__allTriggersDebugAbortController',
[TriggerType.Schedule]: '__scheduleDebugAbortController',
}
// Human-readable label per trigger type, used in toast/error messages.
export const debugLabelMap: Record<DebuggableTriggerType, string> = {
[TriggerType.Webhook]: 'Webhook',
[TriggerType.Plugin]: 'Plugin',
[TriggerType.All]: 'All',
[TriggerType.Schedule]: 'Schedule',
}
/**
 * Build the store payload for a freshly started workflow run:
 * Running status, cleared truncation flags, empty tracing and result text.
 */
export const createRunningWorkflowState = () => ({
  result: {
    status: WorkflowRunningStatus.Running,
    inputs_truncated: false,
    process_data_truncated: false,
    outputs_truncated: false,
  },
  tracing: [],
  resultText: '',
})
/**
 * Build the store payload for a manually stopped workflow run:
 * Stopped status, cleared truncation flags, empty tracing and result text.
 */
export const createStoppedWorkflowState = () => ({
  result: {
    status: WorkflowRunningStatus.Stopped,
    inputs_truncated: false,
    process_data_truncated: false,
    outputs_truncated: false,
  },
  tracing: [],
  resultText: '',
})
/**
 * Build the store payload for a failed workflow run.
 * Unlike the running/stopped states this carries the error message and has
 * no resultText field.
 *
 * @param error - message shown for the failed run
 */
export const createFailedWorkflowState = (error: string) => ({
  result: {
    status: WorkflowRunningStatus.Failed,
    error,
    inputs_truncated: false,
    process_data_truncated: false,
    outputs_truncated: false,
  },
  tracing: [],
})
/**
 * Resolve the run-history listing endpoint for an app.
 * Advanced-chat apps use their dedicated route; everything else uses the
 * generic workflow-runs route.
 * NOTE(review): with an undefined appDetail this yields "/apps/undefined/…" —
 * callers appear to guard this upstream; confirm.
 */
export const buildRunHistoryUrl = (appDetail?: AppDetailLike) => {
  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT)
    return `/apps/${appDetail.id}/advanced-chat/workflow-runs`
  return `/apps/${appDetail?.id}/workflow-runs`
}
/**
 * Resolve the endpoint used to start a workflow run for the given mode.
 *
 * Single-trigger and run-all modes require an app id and log an error (and
 * return '') when it is missing. Advanced-chat apps use their dedicated
 * draft-run route; otherwise a URL is only produced in workflow debug mode.
 */
export const resolveWorkflowRunUrl = (
  appDetail: AppDetailLike | undefined,
  runMode: HandleRunMode,
  isInWorkflowDebug: boolean,
) => {
  const appId = appDetail?.id
  const isSingleTrigger = runMode === TriggerType.Plugin
    || runMode === TriggerType.Webhook
    || runMode === TriggerType.Schedule
  if (isSingleTrigger) {
    if (!appId) {
      console.error('handleRun: missing app id for trigger plugin run')
      return ''
    }
    return `/apps/${appId}/workflows/draft/trigger/run`
  }
  if (runMode === TriggerType.All) {
    if (!appId) {
      console.error('handleRun: missing app id for trigger run all')
      return ''
    }
    return `/apps/${appId}/workflows/draft/trigger/run-all`
  }
  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT)
    return `/apps/${appDetail.id}/advanced-chat/workflows/draft/run`
  return (isInWorkflowDebug && appId) ? `/apps/${appId}/workflows/draft/run` : ''
}
/**
 * Build the POST body for a run request.
 * Trigger modes send only the relevant node id(s); user-input runs forward
 * the resolved input params unchanged.
 */
export const buildWorkflowRunRequestBody = (
  runMode: HandleRunMode,
  resolvedParams: Record<string, unknown>,
  options?: HandleRunOptions,
) => {
  switch (runMode) {
    case TriggerType.Schedule:
      return { node_id: options?.scheduleNodeId }
    case TriggerType.Webhook:
      return { node_id: options?.webhookNodeId }
    case TriggerType.Plugin:
      return { node_id: options?.pluginNodeId }
    case TriggerType.All:
      return { node_ids: options?.allNodeIds }
    default:
      return resolvedParams
  }
}
/**
 * Validate that the options carry the node id(s) the run mode requires.
 *
 * @returns an error message string, or '' when the request is valid.
 *
 * Bug fix: the "All" check previously used
 * `!options?.allNodeIds && options?.allNodeIds?.length === 0`, which is a
 * contradiction and always false (undefined ids fail the `=== 0` test, an
 * empty array fails the `!` test) — so run-all requests with no node ids
 * were never rejected. The intended condition is missing OR empty.
 */
export const validateWorkflowRunRequest = (
  runMode: HandleRunMode,
  options?: HandleRunOptions,
) => {
  if (runMode === TriggerType.Schedule && !options?.scheduleNodeId)
    return 'handleRun: schedule trigger run requires node id'
  if (runMode === TriggerType.Webhook && !options?.webhookNodeId)
    return 'handleRun: webhook trigger run requires node id'
  if (runMode === TriggerType.Plugin && !options?.pluginNodeId)
    return 'handleRun: plugin trigger run requires node id'
  if (runMode === TriggerType.All && (!options?.allNodeIds || options.allNodeIds.length === 0))
    return 'handleRun: all trigger run requires node ids'
  return ''
}
/**
 * Type guard: true for the four trigger modes that support listen-and-debug
 * runs (schedule, webhook, plugin, all).
 */
export const isDebuggableTriggerType = (
  runMode: HandleRunMode,
): runMode is DebuggableTriggerType => {
  const debuggableModes: HandleRunMode[] = [
    TriggerType.Schedule,
    TriggerType.Webhook,
    TriggerType.Plugin,
    TriggerType.All,
  ]
  return debuggableModes.includes(runMode)
}
/**
 * Resolve the node ids the debug listener should watch for the given mode.
 * Run-all uses the full list; single-node modes wrap their id in an array
 * (or yield [] when the id is absent).
 */
export const buildListeningTriggerNodeIds = (
  runMode: DebuggableTriggerType,
  options?: HandleRunOptions,
) => {
  switch (runMode) {
    case TriggerType.All:
      return options?.allNodeIds ?? []
    case TriggerType.Webhook:
      return options?.webhookNodeId ? [options.webhookNodeId] : []
    case TriggerType.Schedule:
      return options?.scheduleNodeId ? [options.scheduleNodeId] : []
    case TriggerType.Plugin:
      return options?.pluginNodeId ? [options.pluginNodeId] : []
    default:
      return []
  }
}
/**
 * Put the store into "running" state for a run of the given mode.
 * Debuggable trigger modes enter listening mode (with the watched node ids
 * and the variable inspect panel shown); other modes reset all listener
 * state. Both paths finish by setting a fresh Running workflow state.
 */
export const applyRunningStateForMode = (
  actions: ListeningStateActions,
  runMode: HandleRunMode,
  options?: HandleRunOptions,
) => {
  if (isDebuggableTriggerType(runMode)) {
    actions.setIsListening(true)
    actions.setShowVariableInspectPanel(true)
    actions.setListeningTriggerIsAll(runMode === TriggerType.All)
    actions.setListeningTriggerNodeIds(buildListeningTriggerNodeIds(runMode, options))
  }
  else {
    actions.setIsListening(false)
    actions.setListeningTriggerType(null)
    actions.setListeningTriggerNodeId(null)
    actions.setListeningTriggerNodeIds([])
    actions.setListeningTriggerIsAll(false)
  }
  actions.setWorkflowRunningData(createRunningWorkflowState())
}
/**
 * Reset every trigger-listening flag in the store to its idle value.
 */
export const clearListeningState = (actions: Pick<ListeningStateActions, 'setIsListening' | 'setListeningTriggerType' | 'setListeningTriggerNodeId' | 'setListeningTriggerNodeIds' | 'setListeningTriggerIsAll'>) => {
  const {
    setIsListening,
    setListeningTriggerType,
    setListeningTriggerNodeId,
    setListeningTriggerNodeIds,
    setListeningTriggerIsAll,
  } = actions
  setIsListening(false)
  setListeningTriggerType(null)
  setListeningTriggerNodeId(null)
  setListeningTriggerNodeIds([])
  setListeningTriggerIsAll(false)
}
/**
 * Transition the store into the manually-stopped state: install a Stopped
 * workflow payload, clear listening flags, and keep the variable inspect
 * panel visible so the user can review the partial run.
 */
export const applyStoppedState = (actions: Pick<ListeningStateActions, 'setWorkflowRunningData' | 'setIsListening' | 'setListeningTriggerType' | 'setListeningTriggerNodeId' | 'setShowVariableInspectPanel'>) => {
  const stoppedState = createStoppedWorkflowState()
  actions.setWorkflowRunningData(stoppedState)
  actions.setIsListening(false)
  actions.setListeningTriggerType(null)
  actions.setListeningTriggerNodeId(null)
  actions.setShowVariableInspectPanel(true)
}
/**
 * Remove every stashed trigger-debug AbortController from the target object
 * (typically window), leaving all other properties untouched.
 */
export const clearWindowDebugControllers = (controllerTarget: Record<string, unknown>) => {
  const debugControllerKeys = [
    '__webhookDebugAbortController',
    '__pluginDebugAbortController',
    '__scheduleDebugAbortController',
    '__allTriggersDebugAbortController',
  ]
  for (const key of debugControllerKeys)
    delete controllerTarget[key]
}
/**
 * Derive the text-to-speech endpoint from the resolved run params.
 * A web-app token selects the public endpoint; otherwise an app id selects
 * the console endpoint (installed-apps variant when browsing an installed
 * explore app). With neither, the URL stays empty.
 */
export const buildTTSConfig = (resolvedParams: TTSParamsLike, pathname: string) => {
  if (resolvedParams.token)
    return { ttsUrl: '/text-to-audio', ttsIsPublic: true }
  if (resolvedParams.appId) {
    const isInstalledExploreApp = pathname.search('explore/installed') > -1
    const ttsUrl = isInstalledExploreApp
      ? `/installed-apps/${resolvedParams.appId}/text-to-audio`
      : `/apps/${resolvedParams.appId}/text-to-audio`
    return { ttsUrl, ttsIsPublic: false }
  }
  return { ttsUrl: '', ttsIsPublic: false }
}
/**
 * Map a published workflow version's feature flags into the editor's
 * FeaturesData shape. The opening section is enabled when either an opening
 * statement or any suggested question exists.
 */
export const mapPublishedWorkflowFeatures = (publishedWorkflow: VersionHistory): FeaturesData => {
  const features = publishedWorkflow.features
  const openingEnabled = Boolean(features.opening_statement) || features.suggested_questions.length > 0
  return {
    opening: {
      enabled: openingEnabled,
      opening_statement: features.opening_statement,
      suggested_questions: features.suggested_questions,
    },
    suggested: features.suggested_questions_after_answer,
    text2speech: features.text_to_speech,
    speech2text: features.speech_to_text,
    citation: features.retriever_resource,
    moderation: features.sensitive_word_avoidance,
    file: features.file_upload,
  }
}
/**
 * Return copies of the published graph's nodes with selection cleared on
 * both the node and its data payload, so restoring a version never imports
 * a stale selection. Input nodes are not mutated.
 */
export const normalizePublishedWorkflowNodes = (publishedWorkflow: VersionHistory) => {
  return publishedWorkflow.graph.nodes.map((node) => {
    const deselectedData = { ...node.data, selected: false }
    return { ...node, selected: false, data: deselectedData }
  })
}
/**
 * Resolve after `delay` ms, or immediately when `signal` aborts first.
 * Never rejects; the abort listener is registered once and clears the timer.
 */
export const waitWithAbort = (signal: AbortSignal, delay: number) => {
  return new Promise<void>((resolve) => {
    const timer = window.setTimeout(resolve, delay)
    const onAbort = () => {
      clearTimeout(timer)
      resolve()
    }
    signal.addEventListener('abort', onAbort, { once: true })
  })
}
/**
 * Run a trigger debug session against `url`.
 *
 * POSTs `requestBody` and then, based on the response content type:
 * - JSON with `status: 'waiting'` → wait (`retry_in` ms, default 2000)
 *   and poll again recursively;
 * - any other JSON → treat as failure: toast the message and mark the
 *   workflow run as failed;
 * - non-JSON → assume an SSE stream and hand it to `handleStream` with
 *   the full set of callbacks from `baseSseOptions`.
 *
 * The created AbortController is published via `setAbortController` and
 * stored on `controllerTarget` (e.g. `window`) under the per-trigger key
 * from `controllerKeyMap`, so external code can cancel the session.
 * Abort is checked after every await; an aborted session exits silently.
 */
export const runTriggerDebug = async ({
  debugType,
  url,
  requestBody,
  baseSseOptions,
  controllerTarget,
  setAbortController,
  clearAbortController,
  clearListeningState,
  setWorkflowRunningData,
}: TriggerDebugRunnerOptions) => {
  const controller = new AbortController()
  setAbortController(controller)
  // Expose the controller on the shared target under the trigger-specific
  // key so other code paths can abort this exact debug session.
  const controllerKey = controllerKeyMap[debugType]
  controllerTarget[controllerKey] = controller
  const debugLabel = debugLabelMap[debugType]
  const poll = async (): Promise<void> => {
    try {
      const response = await post<Response>(url, {
        body: requestBody,
        signal: controller.signal,
      }, {
        needAllResponseContent: true,
      })
      if (controller.signal.aborted)
        return
      if (!response) {
        const message = `${debugLabel} debug request failed`
        toast.error(message)
        clearAbortController()
        return
      }
      const contentType = response.headers.get('content-type') || ''
      if (contentType.includes(ContentType.json)) {
        // JSON responses are control messages (waiting / error), never the
        // event stream itself.
        let data: Record<string, unknown> | null = null
        try {
          data = await response.json() as Record<string, unknown>
        }
        catch (jsonError) {
          console.error(`handleRun: ${debugLabel.toLowerCase()} debug response parse error`, jsonError)
          toast.error(`${debugLabel} debug request failed`)
          clearAbortController()
          clearListeningState()
          return
        }
        if (controller.signal.aborted)
          return
        if (data?.status === 'waiting') {
          // Server asked us to retry; honour its retry_in hint (ms) with a
          // 2s fallback, and bail out if aborted during the wait.
          const delay = Number(data.retry_in) || 2000
          await waitWithAbort(controller.signal, delay)
          if (controller.signal.aborted)
            return
          await poll()
          return
        }
        // Any other JSON payload is treated as a terminal failure.
        const errorMessage = typeof data?.message === 'string' ? data.message : `${debugLabel} debug failed`
        toast.error(errorMessage)
        clearAbortController()
        setWorkflowRunningData(createFailedWorkflowState(errorMessage))
        clearListeningState()
        return
      }
      // Non-JSON content: the trigger fired and the run's SSE stream is
      // live; stop the "listening" UI state and forward every callback.
      clearListeningState()
      handleStream(
        response,
        baseSseOptions.onData ?? noop,
        baseSseOptions.onCompleted,
        baseSseOptions.onThought,
        baseSseOptions.onMessageEnd,
        baseSseOptions.onMessageReplace,
        baseSseOptions.onFile,
        baseSseOptions.onWorkflowStarted,
        baseSseOptions.onWorkflowFinished,
        baseSseOptions.onNodeStarted,
        baseSseOptions.onNodeFinished,
        baseSseOptions.onIterationStart,
        baseSseOptions.onIterationNext,
        baseSseOptions.onIterationFinish,
        baseSseOptions.onLoopStart,
        baseSseOptions.onLoopNext,
        baseSseOptions.onLoopFinish,
        baseSseOptions.onNodeRetry,
        baseSseOptions.onParallelBranchStarted,
        baseSseOptions.onParallelBranchFinished,
        baseSseOptions.onTextChunk,
        baseSseOptions.onTTSChunk,
        baseSseOptions.onTTSEnd,
        baseSseOptions.onTextReplace,
        baseSseOptions.onAgentLog,
        baseSseOptions.onHumanInputRequired,
        baseSseOptions.onHumanInputFormFilled,
        baseSseOptions.onHumanInputFormTimeout,
        baseSseOptions.onWorkflowPaused,
        baseSseOptions.onDataSourceNodeProcessing,
        baseSseOptions.onDataSourceNodeCompleted,
        baseSseOptions.onDataSourceNodeError,
      )
    }
    catch (error) {
      if (controller.signal.aborted)
        return
      // `post` appears to reject with the Response object on HTTP errors
      // (hence the instanceof check); surface the server-provided error
      // message when one is present. Non-Response errors only clear the
      // listening state.
      if (error instanceof Response) {
        const data = await error.clone().json() as Record<string, unknown>
        const errorMessage = typeof data?.error === 'string' ? data.error : ''
        toast.error(errorMessage)
        clearAbortController()
        setWorkflowRunningData(createFailedWorkflowState(errorMessage))
      }
      clearListeningState()
    }
  }
  await poll()
}

View File

@@ -1,4 +1,3 @@
import type { HandleRunOptions } from './use-workflow-run-utils'
import type AudioPlayer from '@/app/components/base/audio-btn/audio'
import type { Node } from '@/app/components/workflow/types'
import type { IOtherOptions } from '@/service/base'
@@ -15,38 +14,46 @@ import { useStore as useAppStore } from '@/app/components/app/store'
import { trackEvent } from '@/app/components/base/amplitude'
import { AudioPlayerManager } from '@/app/components/base/audio-btn/audio.player.manager'
import { useFeaturesStore } from '@/app/components/base/features/hooks'
import Toast from '@/app/components/base/toast'
import { TriggerType } from '@/app/components/workflow/header/test-run-menu'
import { useWorkflowUpdate } from '@/app/components/workflow/hooks/use-workflow-interactions'
import { useWorkflowRunEvent } from '@/app/components/workflow/hooks/use-workflow-run-event/use-workflow-run-event'
import { useWorkflowStore } from '@/app/components/workflow/store'
import { WorkflowRunningStatus } from '@/app/components/workflow/types'
import { usePathname } from '@/next/navigation'
import { ssePost } from '@/service/base'
import { handleStream, post, sseGet, ssePost } from '@/service/base'
import { ContentType } from '@/service/fetch'
import { useInvalidAllLastRun, useInvalidateWorkflowRunHistory } from '@/service/use-workflow'
import { stopWorkflowRun } from '@/service/workflow'
import { AppModeEnum } from '@/types/app'
import { useSetWorkflowVarsWithValue } from '../../workflow/hooks/use-fetch-workflow-inspect-vars'
import { useConfigsMap } from './use-configs-map'
import { useNodesSyncDraft } from './use-nodes-sync-draft'
import {
createBaseWorkflowRunCallbacks,
createFinalWorkflowRunCallbacks,
} from './use-workflow-run-callbacks'
import {
applyRunningStateForMode,
applyStoppedState,
buildRunHistoryUrl,
buildTTSConfig,
buildWorkflowRunRequestBody,
clearListeningState,
clearWindowDebugControllers,
isDebuggableTriggerType,
mapPublishedWorkflowFeatures,
normalizePublishedWorkflowNodes,
resolveWorkflowRunUrl,
runTriggerDebug,
validateWorkflowRunRequest,
} from './use-workflow-run-utils'
type HandleRunMode = TriggerType
type HandleRunOptions = {
mode?: HandleRunMode
scheduleNodeId?: string
webhookNodeId?: string
pluginNodeId?: string
allNodeIds?: string[]
}
type DebuggableTriggerType = Exclude<TriggerType, TriggerType.UserInput>
const controllerKeyMap: Record<DebuggableTriggerType, string> = {
[TriggerType.Webhook]: '__webhookDebugAbortController',
[TriggerType.Plugin]: '__pluginDebugAbortController',
[TriggerType.All]: '__allTriggersDebugAbortController',
[TriggerType.Schedule]: '__scheduleDebugAbortController',
}
const debugLabelMap: Record<DebuggableTriggerType, string> = {
[TriggerType.Webhook]: 'Webhook',
[TriggerType.Plugin]: 'Plugin',
[TriggerType.All]: 'All',
[TriggerType.Schedule]: 'Schedule',
}
export const useWorkflowRun = () => {
const store = useStoreApi()
@@ -145,7 +152,7 @@ export const useWorkflowRun = () => {
callback?: IOtherOptions,
options?: HandleRunOptions,
) => {
const runMode = options?.mode ?? TriggerType.UserInput
const runMode: HandleRunMode = options?.mode ?? TriggerType.UserInput
const resolvedParams = params ?? {}
const {
getNodes,
@@ -183,7 +190,9 @@ export const useWorkflowRun = () => {
} = callback || {}
workflowStore.setState({ historyWorkflowData: undefined })
const appDetail = useAppStore.getState().appDetail
const runHistoryUrl = buildRunHistoryUrl(appDetail)
const runHistoryUrl = appDetail?.mode === AppModeEnum.ADVANCED_CHAT
? `/apps/${appDetail.id}/advanced-chat/workflow-runs`
: `/apps/${appDetail?.id}/workflow-runs`
const workflowContainer = document.getElementById('workflow-container')
const {
@@ -193,15 +202,65 @@ export const useWorkflowRun = () => {
const isInWorkflowDebug = appDetail?.mode === AppModeEnum.WORKFLOW
const url = resolveWorkflowRunUrl(appDetail, runMode, isInWorkflowDebug)
const requestBody = buildWorkflowRunRequestBody(runMode, resolvedParams, options)
let url = ''
if (runMode === TriggerType.Plugin || runMode === TriggerType.Webhook || runMode === TriggerType.Schedule) {
if (!appDetail?.id) {
console.error('handleRun: missing app id for trigger plugin run')
return
}
url = `/apps/${appDetail.id}/workflows/draft/trigger/run`
}
else if (runMode === TriggerType.All) {
if (!appDetail?.id) {
console.error('handleRun: missing app id for trigger run all')
return
}
url = `/apps/${appDetail.id}/workflows/draft/trigger/run-all`
}
else if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
url = `/apps/${appDetail.id}/advanced-chat/workflows/draft/run`
}
else if (isInWorkflowDebug && appDetail?.id) {
url = `/apps/${appDetail.id}/workflows/draft/run`
}
let requestBody = {}
if (runMode === TriggerType.Schedule)
requestBody = { node_id: options?.scheduleNodeId }
else if (runMode === TriggerType.Webhook)
requestBody = { node_id: options?.webhookNodeId }
else if (runMode === TriggerType.Plugin)
requestBody = { node_id: options?.pluginNodeId }
else if (runMode === TriggerType.All)
requestBody = { node_ids: options?.allNodeIds }
else
requestBody = resolvedParams
if (!url)
return
const validationMessage = validateWorkflowRunRequest(runMode, options)
if (validationMessage) {
console.error(validationMessage)
if (runMode === TriggerType.Schedule && !options?.scheduleNodeId) {
console.error('handleRun: schedule trigger run requires node id')
return
}
if (runMode === TriggerType.Webhook && !options?.webhookNodeId) {
console.error('handleRun: webhook trigger run requires node id')
return
}
if (runMode === TriggerType.Plugin && !options?.pluginNodeId) {
console.error('handleRun: plugin trigger run requires node id')
return
}
if (runMode === TriggerType.All && !options?.allNodeIds && options?.allNodeIds?.length === 0) {
console.error('handleRun: all trigger run requires node ids')
return
}
@@ -218,17 +277,66 @@ export const useWorkflowRun = () => {
setListeningTriggerNodeId,
} = workflowStore.getState()
applyRunningStateForMode({
setWorkflowRunningData,
setIsListening,
setShowVariableInspectPanel,
setListeningTriggerType,
setListeningTriggerNodeIds,
setListeningTriggerIsAll,
setListeningTriggerNodeId,
}, runMode, options)
if (
runMode === TriggerType.Webhook
|| runMode === TriggerType.Plugin
|| runMode === TriggerType.All
|| runMode === TriggerType.Schedule
) {
setIsListening(true)
setShowVariableInspectPanel(true)
setListeningTriggerIsAll(runMode === TriggerType.All)
if (runMode === TriggerType.All)
setListeningTriggerNodeIds(options?.allNodeIds ?? [])
else if (runMode === TriggerType.Webhook && options?.webhookNodeId)
setListeningTriggerNodeIds([options.webhookNodeId])
else if (runMode === TriggerType.Schedule && options?.scheduleNodeId)
setListeningTriggerNodeIds([options.scheduleNodeId])
else if (runMode === TriggerType.Plugin && options?.pluginNodeId)
setListeningTriggerNodeIds([options.pluginNodeId])
else
setListeningTriggerNodeIds([])
setWorkflowRunningData({
result: {
status: WorkflowRunningStatus.Running,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
tracing: [],
resultText: '',
})
}
else {
setIsListening(false)
setListeningTriggerType(null)
setListeningTriggerNodeId(null)
setListeningTriggerNodeIds([])
setListeningTriggerIsAll(false)
setWorkflowRunningData({
result: {
status: WorkflowRunningStatus.Running,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
tracing: [],
resultText: '',
})
}
const { ttsUrl, ttsIsPublic } = buildTTSConfig(resolvedParams, pathname)
let ttsUrl = ''
let ttsIsPublic = false
if (resolvedParams.token) {
ttsUrl = '/text-to-audio'
ttsIsPublic = true
}
else if (resolvedParams.appId) {
if (pathname.search('explore/installed') > -1)
ttsUrl = `/installed-apps/${resolvedParams.appId}/text-to-audio`
else
ttsUrl = `/apps/${resolvedParams.appId}/text-to-audio`
}
// Lazy initialization: Only create AudioPlayer when TTS is actually needed
// This prevents opening audio channel unnecessarily
let player: AudioPlayer | null = null
@@ -241,121 +349,497 @@ export const useWorkflowRun = () => {
const clearAbortController = () => {
abortControllerRef.current = null
clearWindowDebugControllers(window as unknown as Record<string, unknown>)
delete (window as any).__webhookDebugAbortController
delete (window as any).__pluginDebugAbortController
delete (window as any).__scheduleDebugAbortController
delete (window as any).__allTriggersDebugAbortController
}
const clearListeningStateInStore = () => {
const clearListeningState = () => {
const state = workflowStore.getState()
clearListeningState({
setIsListening: state.setIsListening,
setListeningTriggerType: state.setListeningTriggerType,
setListeningTriggerNodeId: state.setListeningTriggerNodeId,
setListeningTriggerNodeIds: state.setListeningTriggerNodeIds,
setListeningTriggerIsAll: state.setListeningTriggerIsAll,
})
state.setIsListening(false)
state.setListeningTriggerType(null)
state.setListeningTriggerNodeId(null)
state.setListeningTriggerNodeIds([])
state.setListeningTriggerIsAll(false)
}
const workflowRunEventHandlers = {
handleWorkflowStarted,
handleWorkflowFinished,
handleWorkflowFailed,
handleWorkflowNodeStarted,
handleWorkflowNodeFinished,
handleWorkflowNodeHumanInputRequired,
handleWorkflowNodeHumanInputFormFilled,
handleWorkflowNodeHumanInputFormTimeout,
handleWorkflowNodeIterationStarted,
handleWorkflowNodeIterationNext,
handleWorkflowNodeIterationFinished,
handleWorkflowNodeLoopStarted,
handleWorkflowNodeLoopNext,
handleWorkflowNodeLoopFinished,
handleWorkflowNodeRetry,
handleWorkflowAgentLog,
handleWorkflowTextChunk,
handleWorkflowTextReplace,
handleWorkflowPaused,
}
const userCallbacks = {
onWorkflowStarted,
onWorkflowFinished,
onNodeStarted,
onNodeFinished,
onIterationStart,
onIterationNext,
onIterationFinish,
onLoopStart,
onLoopNext,
onLoopFinish,
onNodeRetry,
onAgentLog,
onError,
onWorkflowPaused,
onHumanInputRequired,
onHumanInputFormFilled,
onHumanInputFormTimeout,
onCompleted,
const wrappedOnError = (params: any) => {
clearAbortController()
handleWorkflowFailed()
invalidateRunHistory(runHistoryUrl)
clearListeningState()
if (onError)
onError(params)
trackEvent('workflow_run_failed', { workflow_id: flowId, reason: params.error, node_type: params.node_type })
}
const trackWorkflowRunFailed = (eventParams: unknown) => {
const payload = eventParams as { error?: string, node_type?: string }
trackEvent('workflow_run_failed', { workflow_id: flowId, reason: payload?.error, node_type: payload?.node_type })
const wrappedOnCompleted: IOtherOptions['onCompleted'] = async (hasError?: boolean, errorMessage?: string) => {
clearAbortController()
clearListeningState()
if (onCompleted)
onCompleted(hasError, errorMessage)
}
const baseSseOptions = createBaseWorkflowRunCallbacks({
clientWidth,
clientHeight,
runHistoryUrl,
isInWorkflowDebug,
fetchInspectVars,
invalidAllLastRun,
invalidateRunHistory,
clearAbortController,
clearListeningState: clearListeningStateInStore,
trackWorkflowRunFailed,
handlers: workflowRunEventHandlers,
callbacks: userCallbacks,
restCallback,
getOrCreatePlayer,
const baseSseOptions: IOtherOptions = {
...restCallback,
onWorkflowStarted: (params) => {
handleWorkflowStarted(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowStarted)
onWorkflowStarted(params)
},
onWorkflowFinished: (params) => {
clearListeningState()
handleWorkflowFinished(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowFinished)
onWorkflowFinished(params)
if (isInWorkflowDebug) {
fetchInspectVars({})
invalidAllLastRun()
}
},
onNodeStarted: (params) => {
handleWorkflowNodeStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onNodeStarted)
onNodeStarted(params)
},
onNodeFinished: (params) => {
handleWorkflowNodeFinished(params)
if (onNodeFinished)
onNodeFinished(params)
},
onIterationStart: (params) => {
handleWorkflowNodeIterationStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onIterationStart)
onIterationStart(params)
},
onIterationNext: (params) => {
handleWorkflowNodeIterationNext(params)
if (onIterationNext)
onIterationNext(params)
},
onIterationFinish: (params) => {
handleWorkflowNodeIterationFinished(params)
if (onIterationFinish)
onIterationFinish(params)
},
onLoopStart: (params) => {
handleWorkflowNodeLoopStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onLoopStart)
onLoopStart(params)
},
onLoopNext: (params) => {
handleWorkflowNodeLoopNext(params)
if (onLoopNext)
onLoopNext(params)
},
onLoopFinish: (params) => {
handleWorkflowNodeLoopFinished(params)
if (onLoopFinish)
onLoopFinish(params)
},
onNodeRetry: (params) => {
handleWorkflowNodeRetry(params)
if (onNodeRetry)
onNodeRetry(params)
},
onAgentLog: (params) => {
handleWorkflowAgentLog(params)
if (onAgentLog)
onAgentLog(params)
},
onTextChunk: (params) => {
handleWorkflowTextChunk(params)
},
onTextReplace: (params) => {
handleWorkflowTextReplace(params)
},
onTTSChunk: (messageId: string, audio: string) => {
if (!audio || audio === '')
return
const audioPlayer = getOrCreatePlayer()
if (audioPlayer) {
audioPlayer.playAudioWithAudio(audio, true)
AudioPlayerManager.getInstance().resetMsgId(messageId)
}
},
onTTSEnd: (messageId: string, audio: string) => {
const audioPlayer = getOrCreatePlayer()
if (audioPlayer)
audioPlayer.playAudioWithAudio(audio, false)
},
onWorkflowPaused: (params) => {
handleWorkflowPaused()
invalidateRunHistory(runHistoryUrl)
if (onWorkflowPaused)
onWorkflowPaused(params)
const url = `/workflow/${params.workflow_run_id}/events`
sseGet(
url,
{},
baseSseOptions,
)
},
onHumanInputRequired: (params) => {
handleWorkflowNodeHumanInputRequired(params)
if (onHumanInputRequired)
onHumanInputRequired(params)
},
onHumanInputFormFilled: (params) => {
handleWorkflowNodeHumanInputFormFilled(params)
if (onHumanInputFormFilled)
onHumanInputFormFilled(params)
},
onHumanInputFormTimeout: (params) => {
handleWorkflowNodeHumanInputFormTimeout(params)
if (onHumanInputFormTimeout)
onHumanInputFormTimeout(params)
},
onError: wrappedOnError,
onCompleted: wrappedOnCompleted,
}
const waitWithAbort = (signal: AbortSignal, delay: number) => new Promise<void>((resolve) => {
const timer = window.setTimeout(resolve, delay)
signal.addEventListener('abort', () => {
clearTimeout(timer)
resolve()
}, { once: true })
})
if (isDebuggableTriggerType(runMode)) {
await runTriggerDebug({
debugType: runMode,
url,
requestBody,
baseSseOptions,
controllerTarget: window as unknown as Record<string, unknown>,
setAbortController: (controller) => {
abortControllerRef.current = controller
},
clearAbortController,
clearListeningState: clearListeningStateInStore,
setWorkflowRunningData,
})
const runTriggerDebug = async (debugType: DebuggableTriggerType) => {
const controller = new AbortController()
abortControllerRef.current = controller
const controllerKey = controllerKeyMap[debugType]
; (window as any)[controllerKey] = controller
const debugLabel = debugLabelMap[debugType]
const poll = async (): Promise<void> => {
try {
const response = await post<Response>(url, {
body: requestBody,
signal: controller.signal,
}, {
needAllResponseContent: true,
})
if (controller.signal.aborted)
return
if (!response) {
const message = `${debugLabel} debug request failed`
Toast.notify({ type: 'error', message })
clearAbortController()
return
}
const contentType = response.headers.get('content-type') || ''
if (contentType.includes(ContentType.json)) {
let data: any = null
try {
data = await response.json()
}
catch (jsonError) {
console.error(`handleRun: ${debugLabel.toLowerCase()} debug response parse error`, jsonError)
Toast.notify({ type: 'error', message: `${debugLabel} debug request failed` })
clearAbortController()
clearListeningState()
return
}
if (controller.signal.aborted)
return
if (data?.status === 'waiting') {
const delay = Number(data.retry_in) || 2000
await waitWithAbort(controller.signal, delay)
if (controller.signal.aborted)
return
await poll()
return
}
const errorMessage = data?.message || `${debugLabel} debug failed`
Toast.notify({ type: 'error', message: errorMessage })
clearAbortController()
setWorkflowRunningData({
result: {
status: WorkflowRunningStatus.Failed,
error: errorMessage,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
tracing: [],
})
clearListeningState()
return
}
clearListeningState()
handleStream(
response,
baseSseOptions.onData ?? noop,
baseSseOptions.onCompleted,
baseSseOptions.onThought,
baseSseOptions.onMessageEnd,
baseSseOptions.onMessageReplace,
baseSseOptions.onFile,
baseSseOptions.onWorkflowStarted,
baseSseOptions.onWorkflowFinished,
baseSseOptions.onNodeStarted,
baseSseOptions.onNodeFinished,
baseSseOptions.onIterationStart,
baseSseOptions.onIterationNext,
baseSseOptions.onIterationFinish,
baseSseOptions.onLoopStart,
baseSseOptions.onLoopNext,
baseSseOptions.onLoopFinish,
baseSseOptions.onNodeRetry,
baseSseOptions.onParallelBranchStarted,
baseSseOptions.onParallelBranchFinished,
baseSseOptions.onTextChunk,
baseSseOptions.onTTSChunk,
baseSseOptions.onTTSEnd,
baseSseOptions.onTextReplace,
baseSseOptions.onAgentLog,
baseSseOptions.onHumanInputRequired,
baseSseOptions.onHumanInputFormFilled,
baseSseOptions.onHumanInputFormTimeout,
baseSseOptions.onWorkflowPaused,
baseSseOptions.onDataSourceNodeProcessing,
baseSseOptions.onDataSourceNodeCompleted,
baseSseOptions.onDataSourceNodeError,
)
}
catch (error) {
if (controller.signal.aborted)
return
if (error instanceof Response) {
const data = await error.clone().json() as Record<string, any>
const { error: respError } = data || {}
Toast.notify({ type: 'error', message: respError })
clearAbortController()
setWorkflowRunningData({
result: {
status: WorkflowRunningStatus.Failed,
error: respError,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
tracing: [],
})
}
clearListeningState()
}
}
await poll()
}
if (runMode === TriggerType.Schedule) {
await runTriggerDebug(TriggerType.Schedule)
return
}
const finalCallbacks = createFinalWorkflowRunCallbacks({
clientWidth,
clientHeight,
runHistoryUrl,
isInWorkflowDebug,
fetchInspectVars,
invalidAllLastRun,
invalidateRunHistory,
clearAbortController,
clearListeningState: clearListeningStateInStore,
trackWorkflowRunFailed,
handlers: workflowRunEventHandlers,
callbacks: userCallbacks,
restCallback,
baseSseOptions,
player,
setAbortController: (controller) => {
if (runMode === TriggerType.Webhook) {
await runTriggerDebug(TriggerType.Webhook)
return
}
if (runMode === TriggerType.Plugin) {
await runTriggerDebug(TriggerType.Plugin)
return
}
if (runMode === TriggerType.All) {
await runTriggerDebug(TriggerType.All)
return
}
const finalCallbacks: IOtherOptions = {
...baseSseOptions,
getAbortController: (controller: AbortController) => {
abortControllerRef.current = controller
},
})
onWorkflowFinished: (params) => {
handleWorkflowFinished(params)
invalidateRunHistory(runHistoryUrl)
if (onWorkflowFinished)
onWorkflowFinished(params)
if (isInWorkflowDebug) {
fetchInspectVars({})
invalidAllLastRun()
}
},
onError: (params) => {
handleWorkflowFailed()
invalidateRunHistory(runHistoryUrl)
if (onError)
onError(params)
},
onNodeStarted: (params) => {
handleWorkflowNodeStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onNodeStarted)
onNodeStarted(params)
},
onNodeFinished: (params) => {
handleWorkflowNodeFinished(params)
if (onNodeFinished)
onNodeFinished(params)
},
onIterationStart: (params) => {
handleWorkflowNodeIterationStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onIterationStart)
onIterationStart(params)
},
onIterationNext: (params) => {
handleWorkflowNodeIterationNext(params)
if (onIterationNext)
onIterationNext(params)
},
onIterationFinish: (params) => {
handleWorkflowNodeIterationFinished(params)
if (onIterationFinish)
onIterationFinish(params)
},
onLoopStart: (params) => {
handleWorkflowNodeLoopStarted(
params,
{
clientWidth,
clientHeight,
},
)
if (onLoopStart)
onLoopStart(params)
},
onLoopNext: (params) => {
handleWorkflowNodeLoopNext(params)
if (onLoopNext)
onLoopNext(params)
},
onLoopFinish: (params) => {
handleWorkflowNodeLoopFinished(params)
if (onLoopFinish)
onLoopFinish(params)
},
onNodeRetry: (params) => {
handleWorkflowNodeRetry(params)
if (onNodeRetry)
onNodeRetry(params)
},
onAgentLog: (params) => {
handleWorkflowAgentLog(params)
if (onAgentLog)
onAgentLog(params)
},
onTextChunk: (params) => {
handleWorkflowTextChunk(params)
},
onTextReplace: (params) => {
handleWorkflowTextReplace(params)
},
onTTSChunk: (messageId: string, audio: string) => {
if (!audio || audio === '')
return
player?.playAudioWithAudio(audio, true)
AudioPlayerManager.getInstance().resetMsgId(messageId)
},
onTTSEnd: (messageId: string, audio: string) => {
player?.playAudioWithAudio(audio, false)
},
onWorkflowPaused: (params) => {
handleWorkflowPaused()
invalidateRunHistory(runHistoryUrl)
if (onWorkflowPaused)
onWorkflowPaused(params)
const url = `/workflow/${params.workflow_run_id}/events`
sseGet(
url,
{},
finalCallbacks,
)
},
onHumanInputRequired: (params) => {
handleWorkflowNodeHumanInputRequired(params)
if (onHumanInputRequired)
onHumanInputRequired(params)
},
onHumanInputFormFilled: (params) => {
handleWorkflowNodeHumanInputFormFilled(params)
if (onHumanInputFormFilled)
onHumanInputFormFilled(params)
},
onHumanInputFormTimeout: (params) => {
handleWorkflowNodeHumanInputFormTimeout(params)
if (onHumanInputFormTimeout)
onHumanInputFormTimeout(params)
},
...restCallback,
}
ssePost(
url,
@@ -376,13 +860,20 @@ export const useWorkflowRun = () => {
setListeningTriggerNodeId,
} = workflowStore.getState()
applyStoppedState({
setWorkflowRunningData,
setIsListening,
setShowVariableInspectPanel,
setListeningTriggerType,
setListeningTriggerNodeId,
setWorkflowRunningData({
result: {
status: WorkflowRunningStatus.Stopped,
inputs_truncated: false,
process_data_truncated: false,
outputs_truncated: false,
},
tracing: [],
resultText: '',
})
setIsListening(false)
setListeningTriggerType(null)
setListeningTriggerNodeId(null)
setShowVariableInspectPanel(true)
}
if (taskId) {
@@ -418,7 +909,7 @@ export const useWorkflowRun = () => {
}, [workflowStore])
const handleRestoreFromPublishedWorkflow = useCallback((publishedWorkflow: VersionHistory) => {
const nodes = normalizePublishedWorkflowNodes(publishedWorkflow)
const nodes = publishedWorkflow.graph.nodes.map(node => ({ ...node, selected: false, data: { ...node.data, selected: false } }))
const edges = publishedWorkflow.graph.edges
const viewport = publishedWorkflow.graph.viewport!
handleUpdateWorkflowCanvas({
@@ -426,7 +917,21 @@ export const useWorkflowRun = () => {
edges,
viewport,
})
featuresStore?.setState({ features: mapPublishedWorkflowFeatures(publishedWorkflow) })
const mappedFeatures = {
opening: {
enabled: !!publishedWorkflow.features.opening_statement || !!publishedWorkflow.features.suggested_questions.length,
opening_statement: publishedWorkflow.features.opening_statement,
suggested_questions: publishedWorkflow.features.suggested_questions,
},
suggested: publishedWorkflow.features.suggested_questions_after_answer,
text2speech: publishedWorkflow.features.text_to_speech,
speech2text: publishedWorkflow.features.speech_to_text,
citation: publishedWorkflow.features.retriever_resource,
moderation: publishedWorkflow.features.sensitive_word_avoidance,
file: publishedWorkflow.features.file_upload,
}
featuresStore?.setState({ features: mappedFeatures })
workflowStore.getState().setEnvironmentVariables(publishedWorkflow.environment_variables || [])
}, [featuresStore, handleUpdateWorkflowCanvas, workflowStore])

View File

@@ -9,12 +9,16 @@ import {
import { useStore as useAppStore } from '@/app/components/app/store'
import { FeaturesProvider } from '@/app/components/base/features'
import Loading from '@/app/components/base/loading'
import { FILE_EXTS } from '@/app/components/base/prompt-editor/constants'
import WorkflowWithDefaultContext from '@/app/components/workflow'
import {
WorkflowContextProvider,
} from '@/app/components/workflow/context'
import { useWorkflowStore } from '@/app/components/workflow/store'
import { useTriggerStatusStore } from '@/app/components/workflow/store/trigger-status'
import {
SupportUploadFileTypes,
} from '@/app/components/workflow/types'
import {
initialEdges,
initialNodes,
@@ -31,11 +35,6 @@ import {
useWorkflowInit,
} from './hooks/use-workflow-init'
import { createWorkflowSlice } from './store/workflow/workflow-slice'
import {
buildInitialFeatures,
buildTriggerStatusMap,
coerceReplayUserInputs,
} from './utils'
const WorkflowAppWithAdditionalContext = () => {
const {
@@ -59,7 +58,13 @@ const WorkflowAppWithAdditionalContext = () => {
// Sync trigger statuses to store when data loads
useEffect(() => {
if (triggersResponse?.data) {
setTriggerStatuses(buildTriggerStatusMap(triggersResponse.data))
// Map API status to EntryNodeStatus: 'enabled' stays 'enabled', all others become 'disabled'
const statusMap = triggersResponse.data.reduce((acc, trigger) => {
acc[trigger.node_id] = trigger.status === 'enabled' ? 'enabled' : 'disabled'
return acc
}, {} as Record<string, 'enabled' | 'disabled'>)
setTriggerStatuses(statusMap)
}
}, [triggersResponse?.data, setTriggerStatuses])
@@ -103,21 +108,49 @@ const WorkflowAppWithAdditionalContext = () => {
fetchRunDetail(runUrl).then((res) => {
const { setInputs, setShowInputsPanel, setShowDebugAndPreviewPanel } = workflowStore.getState()
const rawInputs = res.inputs
let parsedInputs: unknown = rawInputs
let parsedInputs: Record<string, unknown> | null = null
if (typeof rawInputs === 'string') {
try {
parsedInputs = JSON.parse(rawInputs) as unknown
const maybeParsed = JSON.parse(rawInputs) as unknown
if (maybeParsed && typeof maybeParsed === 'object' && !Array.isArray(maybeParsed))
parsedInputs = maybeParsed as Record<string, unknown>
}
catch (error) {
console.error('Failed to parse workflow run inputs', error)
return
}
}
else if (rawInputs && typeof rawInputs === 'object' && !Array.isArray(rawInputs)) {
parsedInputs = rawInputs as Record<string, unknown>
}
const userInputs = coerceReplayUserInputs(parsedInputs)
if (!parsedInputs)
return
if (!userInputs || !Object.keys(userInputs).length)
const userInputs: Record<string, string | number | boolean> = {}
Object.entries(parsedInputs).forEach(([key, value]) => {
if (key.startsWith('sys.'))
return
if (value == null) {
userInputs[key] = ''
return
}
if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {
userInputs[key] = value
return
}
try {
userInputs[key] = JSON.stringify(value)
}
catch {
userInputs[key] = String(value)
}
})
if (!Object.keys(userInputs).length)
return
setInputs(userInputs)
@@ -134,7 +167,32 @@ const WorkflowAppWithAdditionalContext = () => {
)
}
const initialFeatures: FeaturesData = buildInitialFeatures(data.features, fileUploadConfigResponse)
const features = data.features || {}
const initialFeatures: FeaturesData = {
file: {
image: {
enabled: !!features.file_upload?.image?.enabled,
number_limits: features.file_upload?.image?.number_limits || 3,
transfer_methods: features.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'],
},
enabled: !!(features.file_upload?.enabled || features.file_upload?.image?.enabled),
allowed_file_types: features.file_upload?.allowed_file_types || [SupportUploadFileTypes.image],
allowed_file_extensions: features.file_upload?.allowed_file_extensions || FILE_EXTS[SupportUploadFileTypes.image].map(ext => `.${ext}`),
allowed_file_upload_methods: features.file_upload?.allowed_file_upload_methods || features.file_upload?.image?.transfer_methods || ['local_file', 'remote_url'],
number_limits: features.file_upload?.number_limits || features.file_upload?.image?.number_limits || 3,
fileUploadConfig: fileUploadConfigResponse,
},
opening: {
enabled: !!features.opening_statement,
opening_statement: features.opening_statement,
suggested_questions: features.suggested_questions,
},
suggested: features.suggested_questions_after_answer || { enabled: false },
speech2text: features.speech_to_text || { enabled: false },
text2speech: features.text_to_speech || { enabled: false },
citation: features.retriever_resource || { enabled: false },
moderation: features.sensitive_word_avoidance || { enabled: false },
}
return (
<WorkflowWithDefaultContext

View File

@@ -1,44 +0,0 @@
import { createStore } from 'zustand/vanilla'
import { createWorkflowSlice } from '../workflow-slice'
describe('createWorkflowSlice', () => {
  it('should initialize workflow slice state with expected defaults', () => {
    // A store built solely from the slice creator exposes the raw defaults.
    const state = createStore(createWorkflowSlice).getState()
    expect(state.appId).toBe('')
    expect(state.appName).toBe('')
    expect(state.notInitialWorkflow).toBe(false)
    expect(state.shouldAutoOpenStartNodeSelector).toBe(false)
    expect(state.nodesDefaultConfigs).toEqual({})
    expect(state.showOnboarding).toBe(false)
    expect(state.hasSelectedStartNode).toBe(false)
    expect(state.hasShownOnboarding).toBe(false)
  })

  it('should update every workflow slice field through its setters', () => {
    const store = createStore(createWorkflowSlice)
    // Plain fields are written directly; everything else goes through the
    // slice's setter actions (stable references, so one snapshot suffices).
    store.setState({
      appId: 'app-1',
      appName: 'Workflow App',
    })
    const actions = store.getState()
    actions.setNotInitialWorkflow(true)
    actions.setShouldAutoOpenStartNodeSelector(true)
    actions.setNodesDefaultConfigs({ start: { title: 'Start' } })
    actions.setShowOnboarding(true)
    actions.setHasSelectedStartNode(true)
    actions.setHasShownOnboarding(true)
    expect(store.getState()).toMatchObject({
      appId: 'app-1',
      appName: 'Workflow App',
      notInitialWorkflow: true,
      shouldAutoOpenStartNodeSelector: true,
      nodesDefaultConfigs: { start: { title: 'Start' } },
      showOnboarding: true,
      hasSelectedStartNode: true,
      hasShownOnboarding: true,
    })
  })
})

View File

@@ -1,107 +0,0 @@
import type { Features as FeaturesData } from '@/app/components/base/features/types'
import type { FileUploadConfigResponse } from '@/models/common'
import { FILE_EXTS } from '@/app/components/base/prompt-editor/constants'
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import { TransferMethod } from '@/types/app'
// Minimal structural view of a trigger-status record; only the two fields
// consumed by buildTriggerStatusMap are declared.
type TriggerStatusLike = {
  node_id: string
  // Raw status string; anything other than 'enabled' is treated as disabled.
  status: string
}
// Loosely-typed view of the `file_upload` feature payload. Every field is
// optional because older app configs may omit them; consumers fill defaults.
type FileUploadFeatureLike = {
  enabled?: boolean
  allowed_file_types?: SupportUploadFileTypes[]
  allowed_file_extensions?: string[]
  allowed_file_upload_methods?: TransferMethod[]
  number_limits?: number
  // Legacy image-only upload settings, used as fallbacks when the top-level
  // fields above are absent.
  image?: {
    enabled?: boolean
    number_limits?: number
    transfer_methods?: TransferMethod[]
  }
}
// Partial shape of a workflow's features payload as read by
// buildInitialFeatures; each feature toggle may be missing entirely.
type WorkflowFeaturesLike = {
  file_upload?: FileUploadFeatureLike
  opening_statement?: string
  suggested_questions?: string[]
  suggested_questions_after_answer?: { enabled?: boolean }
  speech_to_text?: { enabled?: boolean }
  text_to_speech?: { enabled?: boolean }
  retriever_resource?: { enabled?: boolean }
  sensitive_word_avoidance?: { enabled?: boolean }
}
// Build a node_id -> 'enabled' | 'disabled' lookup from raw trigger records.
// Any status other than the literal 'enabled' is normalized to 'disabled';
// duplicate node_ids keep the last record's status.
export const buildTriggerStatusMap = (triggers: TriggerStatusLike[]) => {
  const statusMap: Record<string, 'enabled' | 'disabled'> = {}
  for (const { node_id, status } of triggers)
    statusMap[node_id] = status === 'enabled' ? 'enabled' : 'disabled'
  return statusMap
}
/**
 * Normalize replayed run inputs into a flat map of primitive user inputs.
 *
 * - Returns null when `rawInputs` is not a plain object (arrays included),
 *   letting callers distinguish "nothing to replay" from an empty form.
 * - Drops system variables (keys prefixed with `sys.`).
 * - null/undefined values become '' so form fields render empty.
 * - Strings, numbers and booleans pass through untouched.
 * - Everything else is JSON-serialized; values JSON cannot represent
 *   (functions, symbols, circular structures, BigInt) fall back to String().
 */
export const coerceReplayUserInputs = (rawInputs: unknown): Record<string, string | number | boolean> | null => {
  if (!rawInputs || typeof rawInputs !== 'object' || Array.isArray(rawInputs))
    return null
  const userInputs: Record<string, string | number | boolean> = {}
  Object.entries(rawInputs as Record<string, unknown>).forEach(([key, value]) => {
    if (key.startsWith('sys.'))
      return
    if (value == null) {
      userInputs[key] = ''
      return
    }
    if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {
      userInputs[key] = value
      return
    }
    try {
      const serialized = JSON.stringify(value)
      // JSON.stringify returns undefined (not a string) for unserializable
      // top-level values such as functions and symbols; never store
      // undefined in a Record<string, string | number | boolean>.
      userInputs[key] = serialized === undefined ? String(value) : serialized
    }
    catch {
      // Circular structures and BigInt make JSON.stringify throw.
      userInputs[key] = String(value)
    }
  })
  return userInputs
}
// Assemble the initial FeaturesData for the workflow editor from a possibly
// partial features payload plus the server-provided file-upload config.
// `||` (not `??`) is used throughout so empty/zero values also fall back,
// and legacy `image` settings back-fill the modern top-level fields.
export const buildInitialFeatures = (
  featuresSource: WorkflowFeaturesLike | null | undefined,
  fileUploadConfigResponse: FileUploadConfigResponse | undefined,
): FeaturesData => {
  const features = featuresSource || {}
  const fileUpload = features.file_upload
  const imageUpload = fileUpload?.image

  // Legacy image-only upload block, preserved for older app configs.
  const imageConfig = {
    enabled: !!imageUpload?.enabled,
    number_limits: imageUpload?.number_limits || 3,
    transfer_methods: imageUpload?.transfer_methods || [TransferMethod.local_file, TransferMethod.remote_url],
  }

  const fileConfig = {
    image: imageConfig,
    // Either the modern flag or the legacy image flag turns uploads on.
    enabled: !!(fileUpload?.enabled || imageUpload?.enabled),
    allowed_file_types: fileUpload?.allowed_file_types || [SupportUploadFileTypes.image],
    allowed_file_extensions: fileUpload?.allowed_file_extensions || FILE_EXTS[SupportUploadFileTypes.image].map(ext => `.${ext}`),
    allowed_file_upload_methods: fileUpload?.allowed_file_upload_methods || imageUpload?.transfer_methods || [TransferMethod.local_file, TransferMethod.remote_url],
    number_limits: fileUpload?.number_limits || imageUpload?.number_limits || 3,
    fileUploadConfig: fileUploadConfigResponse,
  }

  const openingConfig = {
    enabled: !!features.opening_statement,
    opening_statement: features.opening_statement,
    suggested_questions: features.suggested_questions,
  }

  return {
    file: fileConfig,
    opening: openingConfig,
    suggested: features.suggested_questions_after_answer || { enabled: false },
    speech2text: features.speech_to_text || { enabled: false },
    text2speech: features.text_to_speech || { enabled: false },
    citation: features.retriever_resource || { enabled: false },
    moderation: features.sensitive_word_avoidance || { enabled: false },
  }
}

View File

@@ -2,9 +2,6 @@ import { renderHook } from '@testing-library/react'
import useNodeResizeObserver from '../use-node-resize-observer'
describe('useNodeResizeObserver', () => {
afterEach(() => {
vi.unstubAllGlobals()
})
it('should observe and disconnect when enabled with a mounted node ref', () => {
const observe = vi.fn()
const disconnect = vi.fn()

View File

@@ -57,16 +57,6 @@ describe('before-run-form helpers', () => {
values: createValues({ query: '' }),
})], [{}], t)).toContain('errorMsg.fieldRequired')
expect(getFormErrorMessage([createForm({
inputs: [createInput({ variable: 'file', label: 'File', type: InputVarType.singleFile, required: true })],
values: createValues({ file: [] }),
})], [{}], t)).toContain('errorMsg.fieldRequired')
expect(getFormErrorMessage([createForm({
inputs: [createInput({ variable: 'files', label: 'Files', type: InputVarType.multiFiles, required: true })],
values: createValues({ files: [] }),
})], [{}], t)).toContain('errorMsg.fieldRequired')
expect(getFormErrorMessage([createForm({
inputs: [createInput({ variable: 'file', label: 'File', type: InputVarType.singleFile })],
values: createValues({ file: { transferMethod: TransferMethod.local_file } }),

View File

@@ -56,16 +56,7 @@ export const getFormErrorMessage = (
const missingRequired = input.required
&& input.type !== InputVarType.checkbox
&& !(input.variable in existVarValuesInForm)
&& (
value === '' || value === undefined || value === null
|| (
(input.type === InputVarType.files
|| input.type === InputVarType.multiFiles
|| input.type === InputVarType.singleFile)
&& Array.isArray(value)
&& value.length === 0
)
)
&& (value === '' || value === undefined || value === null || (input.type === InputVarType.files && Array.isArray(value) && value.length === 0))
if (!errMsg && missingRequired) {
errMsg = t('errorMsg.fieldRequired', { ns: 'workflow', field: typeof input.label === 'object' ? input.label.variable : input.label })

View File

@@ -75,12 +75,16 @@ describe('workflow-panel helpers', () => {
})
describe('custom run form fallback', () => {
it('should return null for unsupported custom run form nodes', () => {
it('should return a fallback message for unsupported custom run form nodes', () => {
const form = getCustomRunForm({
...createCustomRunFormProps({ type: BlockEnum.Tool }),
})
expect(form).toBeNull()
expect(form).toMatchObject({
props: {
children: expect.arrayContaining(['Custom Run Form:', ' ', 'not found']),
},
})
})
})
})

View File

@@ -39,7 +39,14 @@ export const getCustomRunForm = (params: CustomRunFormProps): ReactNode => {
case BlockEnum.DataSource:
return <DataSourceBeforeRunForm {...params} />
default:
return null
return (
<div>
Custom Run Form:
{nodeType}
{' '}
not found
</div>
)
}
}

View File

@@ -1,4 +1,4 @@
import { act, fireEvent, render, screen, waitFor } from '@testing-library/react'
import { fireEvent, render, screen, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import { useState } from 'react'
import GenericTable from '../generic-table'
@@ -50,19 +50,8 @@ const advancedColumns = [
describe('GenericTable', () => {
beforeEach(() => {
vi.clearAllMocks()
vi.useRealTimers()
})
const selectOption = async (triggerName: string, optionName: string) => {
await act(async () => {
fireEvent.click(screen.getByRole('button', { name: triggerName }))
})
await act(async () => {
fireEvent.click(await screen.findByRole('option', { name: optionName }))
})
}
it('should render an empty editable row and append a configured row when typing into the virtual row', async () => {
const onChange = vi.fn()
@@ -154,11 +143,11 @@ describe('GenericTable', () => {
<ControlledTable />,
)
await selectOption('Choose method', 'POST')
await user.click(screen.getByRole('button', { name: 'Choose method' }))
await user.click(await screen.findByRole('option', { name: 'POST' }))
await waitFor(() => {
expect(onChange).toHaveBeenCalledWith([{ method: 'post', preview: '' }])
expect(screen.getByRole('button', { name: 'POST' })).toBeInTheDocument()
})
onChange.mockClear()

View File

@@ -90,22 +90,6 @@ describe('useVariableModalState', () => {
])
})
it('should keep valid object rows when switching to json mode from form mode', () => {
const { result } = renderHook(() => useVariableModalState(createOptions()))
act(() => {
result.current.handleTypeChange(ChatVarType.Object)
result.current.setObjectValue([
{ key: '', type: ChatVarType.String, value: undefined },
{ key: 'timeout', type: ChatVarType.Number, value: 30 },
])
result.current.handleEditorChange(true)
})
expect(result.current.editInJSON).toBe(true)
expect(result.current.value).toEqual({ timeout: 30 })
expect(result.current.editorContent).toBe(JSON.stringify({ timeout: 30 }))
})
it('should reset object form values when leaving empty json mode', () => {
const { result } = renderHook(() => useVariableModalState(createOptions({
chatVar: {
@@ -157,19 +141,6 @@ describe('useVariableModalState', () => {
expect(result.current.editorContent).toBe(JSON.stringify(['True', 'False']))
})
it('should preserve zero values when switching number arrays into json mode', () => {
const { result } = renderHook(() => useVariableModalState(createOptions()))
act(() => {
result.current.handleTypeChange(ChatVarType.ArrayNumber)
result.current.setValue([0, 2, undefined])
result.current.handleEditorChange(true)
})
expect(result.current.editInJSON).toBe(true)
expect(result.current.value).toEqual([0, 2])
expect(result.current.editorContent).toBe(JSON.stringify([0, 2]))
})
it('should notify and stop saving when object keys are invalid', () => {
const notify = vi.fn()
const onSave = vi.fn()
@@ -190,7 +161,7 @@ describe('useVariableModalState', () => {
result.current.handleSave()
})
expect(notify).toHaveBeenCalledWith({ type: 'error', message: 'chatVariable.modal.objectKeyRequired' })
expect(notify).toHaveBeenCalledWith({ type: 'error', message: 'object key can not be empty' })
expect(onSave).not.toHaveBeenCalled()
expect(onClose).not.toHaveBeenCalled()
})

View File

@@ -33,10 +33,6 @@ describe('variable-modal helpers', () => {
{ key: '', type: ChatVarType.Number, value: 1 },
])).toEqual({ apiKey: 'secret' })
expect(formatObjectValueFromList([
{ key: 'count', type: ChatVarType.Number, value: 0 },
{ key: 'label', type: ChatVarType.String, value: '' },
])).toEqual({ count: 0, label: null })
expect(formatChatVariableValue({
editInJSON: false,
objectValue: [{ key: 'enabled', type: ChatVarType.String, value: 'true' }],
@@ -58,13 +54,6 @@ describe('variable-modal helpers', () => {
value: ['a', '', 'b'],
})).toEqual(['a', 'b'])
expect(formatChatVariableValue({
editInJSON: false,
objectValue: [],
type: ChatVarType.ArrayNumber,
value: [0, 1, undefined, null, ''] as unknown as Array<number | undefined>,
})).toEqual([0, 1])
expect(formatChatVariableValue({
editInJSON: false,
objectValue: [],
@@ -105,10 +94,6 @@ describe('variable-modal helpers', () => {
type: ChatVarType.ArrayBoolean,
})).toEqual([true, false, true, false])
expect(() => parseEditorContent({
content: '{"enabled":true}',
type: ChatVarType.ArrayBoolean,
})).toThrow('JSON array')
expect(parseEditorContent({
content: '{"enabled":true}',
type: ChatVarType.Object,

View File

@@ -80,7 +80,7 @@ describe('variable-modal', () => {
await user.type(screen.getByPlaceholderText('workflow.chatVariable.modal.namePlaceholder'), 'existing_name')
await user.click(screen.getByText('common.operation.save'))
expect(mockToastError.mock.calls.at(-1)?.[0]).toBe('appDebug.varKeyError.keyAlreadyExists:{"key":"workflow.chatVariable.modal.name"}')
expect(mockToastError.mock.calls.at(-1)?.[0]).toBe('name is existed')
expect(onSave).not.toHaveBeenCalled()
})
@@ -100,10 +100,8 @@ describe('variable-modal', () => {
expect(screen.getByDisplayValue('secret')).toBeInTheDocument()
expect(screen.getByDisplayValue('30')).toBeInTheDocument()
const timeoutInput = screen.getByDisplayValue('30') as HTMLInputElement
await user.clear(screen.getByDisplayValue('secret'))
await user.clear(timeoutInput)
await user.type(timeoutInput, '5')
await user.type(screen.getByDisplayValue('30'), '5')
await user.click(screen.getByText('common.operation.save'))
expect(onSave).toHaveBeenCalledWith({
@@ -112,7 +110,7 @@ describe('variable-modal', () => {
value_type: ChatVarType.Object,
value: {
apiKey: null,
timeout: 5,
timeout: 305,
},
description: 'settings',
})
@@ -197,22 +195,4 @@ describe('variable-modal', () => {
description: '',
})
})
it('should keep the number input empty while editing after the user clears it', async () => {
const user = userEvent.setup()
renderVariableModal({
chatVar: {
id: 'var-4',
name: 'timeout',
description: '',
value_type: ChatVarType.Number,
value: 3,
},
})
const input = screen.getByDisplayValue('3') as HTMLInputElement
await user.clear(input)
expect(input.value).toBe('')
})
})

View File

@@ -108,7 +108,7 @@ export const useVariableModalState = ({
if (prev.type === ChatVarType.Object) {
if (nextEditInJSON) {
const nextValue = prev.objectValue.some(item => item.key) ? formatObjectValueFromList(prev.objectValue) : undefined
const nextValue = !prev.objectValue[0].key ? undefined : formatObjectValueFromList(prev.objectValue)
nextState.value = nextValue
nextState.editorContent = JSON.stringify(nextValue)
return nextState
@@ -133,11 +133,8 @@ export const useVariableModalState = ({
if (prev.type === ChatVarType.ArrayString || prev.type === ChatVarType.ArrayNumber) {
if (nextEditInJSON) {
const compactValues = Array.isArray(prev.value)
? prev.value.filter(item => item !== null && item !== undefined && item !== '')
: []
const nextValue = compactValues.length
? compactValues
const nextValue = (Array.isArray(prev.value) && prev.value.length && prev.value.filter(Boolean).length)
? prev.value.filter(Boolean)
: undefined
nextState.value = nextValue
if (!prev.editorContent)
@@ -184,15 +181,12 @@ export const useVariableModalState = ({
return
if (!chatVar && conversationVariables.some(item => item.name === state.name)) {
notify({
type: 'error',
message: t('varKeyError.keyAlreadyExists', { ns: 'appDebug', key: t('chatVariable.modal.name', { ns: 'workflow' }) }),
})
notify({ type: 'error', message: 'name is existed' })
return
}
if (state.type === ChatVarType.Object && state.objectValue.some(item => !item.key && item.value !== undefined && item.value !== '')) {
notify({ type: 'error', message: t('chatVariable.modal.objectKeyRequired', { ns: 'workflow' }) })
if (state.type === ChatVarType.Object && state.objectValue.some(item => !item.key && !!item.value)) {
notify({ type: 'error', message: 'object key can not be empty' })
return
}

View File

@@ -72,7 +72,7 @@ export const buildObjectValueItems = (chatVar?: ConversationVariable): ObjectVal
export const formatObjectValueFromList = (list: ObjectValueItem[]) => {
return list.reduce<Record<string, string | number | null>>((acc, curr) => {
if (curr.key)
acc[curr.key] = curr.value === '' || curr.value === undefined ? null : curr.value
acc[curr.key] = curr.value || null
return acc
}, {})
}
@@ -88,8 +88,6 @@ export const formatChatVariableValue = ({
type: ChatVarType
value: unknown
}) => {
const compactArrayValue = (items: unknown[]) =>
items.filter(item => item !== null && item !== undefined && item !== '')
switch (type) {
case ChatVarTypeEnum.String:
return value || ''
@@ -102,7 +100,7 @@ export const formatChatVariableValue = ({
case ChatVarTypeEnum.ArrayString:
case ChatVarTypeEnum.ArrayNumber:
case ChatVarTypeEnum.ArrayObject:
return Array.isArray(value) ? compactArrayValue(value) : []
return Array.isArray(value) ? value.filter(Boolean) : []
case ChatVarTypeEnum.ArrayBoolean:
return value || []
}
@@ -153,8 +151,6 @@ export const parseEditorContent = ({
if (type !== ChatVarTypeEnum.ArrayBoolean)
return parsed
if (!Array.isArray(parsed))
throw new TypeError('ArrayBoolean editor content must be a JSON array')
return parsed
.map((item: string | boolean) => {
if (item === 'True' || item === 'true' || item === true)

View File

@@ -138,10 +138,7 @@ export const ValueSection = ({
<Input
placeholder={t('chatVariable.modal.valuePlaceholder', { ns: 'workflow' }) || ''}
value={value as number | undefined}
onChange={(e) => {
const rawValue = e.target.value
onArrayChange([rawValue === '' ? undefined : Number(rawValue)])
}}
onChange={e => onArrayChange([Number(e.target.value)])}
type="number"
/>
)}

View File

@@ -6416,8 +6416,11 @@
}
},
"app/components/workflow-app/hooks/use-workflow-run.ts": {
"no-restricted-imports": {
"count": 1
},
"ts/no-explicit-any": {
"count": 5
"count": 13
}
},
"app/components/workflow-app/hooks/use-workflow-template.ts": {

View File

@@ -1,7 +1,7 @@
{
"name": "dify-web",
"type": "module",
"version": "1.13.3",
"version": "1.13.2",
"private": true,
"packageManager": "pnpm@10.32.1",
"imports": {