Compare commits

...

14 Commits

Author SHA1 Message Date
yyh
3b5f1d95b8 Merge branch 'main' into refactor/query-params-nuqs 2026-01-10 13:57:34 +08:00
wangxiaolei
0711dd4159 feat: enhance start node object value check (#30732)
Some checks failed
autofix.ci / autofix (push) Has been cancelled
Build and Push API & Web / build (api, DIFY_API_IMAGE_NAME, linux/amd64, build-api-amd64) (push) Has been cancelled
Build and Push API & Web / build (api, DIFY_API_IMAGE_NAME, linux/arm64, build-api-arm64) (push) Has been cancelled
Build and Push API & Web / build (web, DIFY_WEB_IMAGE_NAME, linux/amd64, build-web-amd64) (push) Has been cancelled
Build and Push API & Web / build (web, DIFY_WEB_IMAGE_NAME, linux/arm64, build-web-arm64) (push) Has been cancelled
Main CI Pipeline / Check Changed Files (push) Has been cancelled
Main CI Pipeline / Style Check (push) Has been cancelled
Build and Push API & Web / create-manifest (api, DIFY_API_IMAGE_NAME, merge-api-images) (push) Has been cancelled
Build and Push API & Web / create-manifest (web, DIFY_WEB_IMAGE_NAME, merge-web-images) (push) Has been cancelled
Main CI Pipeline / API Tests (push) Has been cancelled
Main CI Pipeline / Web Tests (push) Has been cancelled
Main CI Pipeline / VDB Tests (push) Has been cancelled
Main CI Pipeline / DB Migration Test (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
2026-01-09 16:13:17 +08:00
QuantumGhost
ae0a26f5b6 revert: "fix: fix assign value stand as default (#30651)" (#30717)
The original fix seems correct on its own. However, for chatflows with multiple answer nodes, the `message_replace` command only preserves the output of the last executed answer node.
2026-01-09 16:08:24 +08:00
yyh
146f4f70ca chore: remove redundant comments in log component
Remove comments that describe "what" rather than "why" to align with
CLAUDE.md guideline for self-documenting code.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-07 10:48:37 +08:00
yyh
dbc212cd51 Merge branch 'main' into refactor/query-params-nuqs 2026-01-07 10:34:27 +08:00
yyh
465135838e Merge branch 'main' into refactor/query-params-nuqs 2026-01-05 15:03:41 +08:00
yyh
453b9ae77b Update web/app/components/app/log/index.tsx 2025-12-31 13:27:20 +08:00
yyh
9ff4d2bbf3 Merge branch 'main' into refactor/query-params-nuqs 2025-12-31 13:25:58 +08:00
yyh
7f3437e577 fix: stabilize query param usage 2025-12-30 13:27:55 +08:00
yyh
fc196df814 test: add nuqs adapter to hook specs 2025-12-30 13:27:07 +08:00
yyh
5e7aa8dd03 test: cover query param state 2025-12-30 13:21:23 +08:00
yyh
c1a822b114 fix: stabilize document list query actions 2025-12-30 12:49:45 +08:00
yyh
20d10d42b9 fix: restore query param behavior 2025-12-30 12:41:30 +08:00
yyh
e97857ef7f refactor: migrate query params to nuqs 2025-12-30 12:36:51 +08:00
25 changed files with 579 additions and 642 deletions

View File

@@ -1,4 +1,3 @@
import json
from collections.abc import Sequence
from enum import StrEnum, auto
from typing import Any, Literal
@@ -121,7 +120,7 @@ class VariableEntity(BaseModel):
allowed_file_types: Sequence[FileType] | None = Field(default_factory=list)
allowed_file_extensions: Sequence[str] | None = Field(default_factory=list)
allowed_file_upload_methods: Sequence[FileTransferMethod] | None = Field(default_factory=list)
json_schema: str | None = Field(default=None)
json_schema: dict | None = Field(default=None)
@field_validator("description", mode="before")
@classmethod
@@ -135,17 +134,11 @@ class VariableEntity(BaseModel):
@field_validator("json_schema")
@classmethod
def validate_json_schema(cls, schema: str | None) -> str | None:
def validate_json_schema(cls, schema: dict | None) -> dict | None:
if schema is None:
return None
try:
json_schema = json.loads(schema)
except json.JSONDecodeError:
raise ValueError(f"invalid json_schema value {schema}")
try:
Draft7Validator.check_schema(json_schema)
Draft7Validator.check_schema(schema)
except SchemaError as e:
raise ValueError(f"Invalid JSON schema: {e.message}")
return schema

View File

@@ -26,7 +26,6 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager):
@classmethod
def get_app_config(cls, app_model: App, workflow: Workflow) -> AdvancedChatAppConfig:
features_dict = workflow.features_dict
app_mode = AppMode.value_of(app_model.mode)
app_config = AdvancedChatAppConfig(
tenant_id=app_model.tenant_id,

View File

@@ -358,25 +358,6 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
if node_finish_resp:
yield node_finish_resp
# For ANSWER nodes, check if we need to send a message_replace event
# Only send if the final output differs from the accumulated task_state.answer
# This happens when variables were updated by variable_assigner during workflow execution
if event.node_type == NodeType.ANSWER and event.outputs:
final_answer = event.outputs.get("answer")
if final_answer is not None and final_answer != self._task_state.answer:
logger.info(
"ANSWER node final output '%s' differs from accumulated answer '%s', sending message_replace event",
final_answer,
self._task_state.answer,
)
# Update the task state answer
self._task_state.answer = str(final_answer)
# Send message_replace event to update the UI
yield self._message_cycle_manager.message_replace_to_stream_response(
answer=str(final_answer),
reason="variable_update",
)
def _handle_node_failed_events(
self,
event: Union[QueueNodeFailedEvent, QueueNodeExceptionEvent],

View File

@@ -1,4 +1,3 @@
import json
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Union, final
@@ -76,12 +75,24 @@ class BaseAppGenerator:
user_inputs = {**user_inputs, **files_inputs, **file_list_inputs}
# Check if all files are converted to File
if any(filter(lambda v: isinstance(v, dict), user_inputs.values())):
raise ValueError("Invalid input type")
if any(
filter(lambda v: isinstance(v, dict), filter(lambda item: isinstance(item, list), user_inputs.values()))
):
raise ValueError("Invalid input type")
invalid_dict_keys = [
k
for k, v in user_inputs.items()
if isinstance(v, dict)
and entity_dictionary[k].type not in {VariableEntityType.FILE, VariableEntityType.JSON_OBJECT}
]
if invalid_dict_keys:
raise ValueError(f"Invalid input type for {invalid_dict_keys}")
invalid_list_dict_keys = [
k
for k, v in user_inputs.items()
if isinstance(v, list)
and any(isinstance(item, dict) for item in v)
and entity_dictionary[k].type != VariableEntityType.FILE_LIST
]
if invalid_list_dict_keys:
raise ValueError(f"Invalid input type for {invalid_list_dict_keys}")
return user_inputs
@@ -178,12 +189,8 @@ class BaseAppGenerator:
elif value == 0:
value = False
case VariableEntityType.JSON_OBJECT:
if not isinstance(value, str):
raise ValueError(f"{variable_entity.variable} in input form must be a string")
try:
json.loads(value)
except json.JSONDecodeError:
raise ValueError(f"{variable_entity.variable} in input form must be a valid JSON object")
if not isinstance(value, dict):
raise ValueError(f"{variable_entity.variable} in input form must be a dict")
case _:
raise AssertionError("this statement should be unreachable.")

View File

@@ -1,4 +1,3 @@
import json
from typing import Any
from jsonschema import Draft7Validator, ValidationError
@@ -43,25 +42,22 @@ class StartNode(Node[StartNodeData]):
if value is None and variable.required:
raise ValueError(f"{key} is required in input form")
# If no value provided, skip further processing for this key
if not value:
continue
if not isinstance(value, dict):
raise ValueError(f"JSON object for '{key}' must be an object")
# Overwrite with normalized dict to ensure downstream consistency
node_inputs[key] = value
# If schema exists, then validate against it
schema = variable.json_schema
if not schema:
continue
if not value:
continue
try:
json_schema = json.loads(schema)
except json.JSONDecodeError as e:
raise ValueError(f"{schema} must be a valid JSON object")
try:
json_value = json.loads(value)
except json.JSONDecodeError as e:
raise ValueError(f"{value} must be a valid JSON object")
try:
Draft7Validator(json_schema).validate(json_value)
Draft7Validator(schema).validate(value)
except ValidationError as e:
raise ValueError(f"JSON object for '{key}' does not match schema: {e.message}")
node_inputs[key] = json_value

View File

@@ -1,390 +0,0 @@
"""
Tests for AdvancedChatAppGenerateTaskPipeline._handle_node_succeeded_event method,
specifically testing the ANSWER node message_replace logic.
"""
from datetime import datetime
from types import SimpleNamespace
from unittest.mock import MagicMock, Mock, patch
import pytest
from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity
from core.app.entities.queue_entities import QueueNodeSucceededEvent
from core.workflow.enums import NodeType
from models import EndUser
from models.model import AppMode
class TestAnswerNodeMessageReplace:
"""Test cases for ANSWER node message_replace event logic."""
@pytest.fixture
def mock_application_generate_entity(self):
"""Create a mock application generate entity."""
entity = Mock(spec=AdvancedChatAppGenerateEntity)
entity.task_id = "test-task-id"
entity.app_id = "test-app-id"
entity.workflow_run_id = "test-workflow-run-id"
# minimal app_config used by pipeline internals
entity.app_config = SimpleNamespace(
tenant_id="test-tenant-id",
app_id="test-app-id",
app_mode=AppMode.ADVANCED_CHAT,
app_model_config_dict={},
additional_features=None,
sensitive_word_avoidance=None,
)
entity.query = "test query"
entity.files = []
entity.extras = {}
entity.trace_manager = None
entity.inputs = {}
entity.invoke_from = "debugger"
return entity
@pytest.fixture
def mock_workflow(self):
"""Create a mock workflow."""
workflow = Mock()
workflow.id = "test-workflow-id"
workflow.features_dict = {}
return workflow
@pytest.fixture
def mock_queue_manager(self):
"""Create a mock queue manager."""
manager = Mock()
manager.listen.return_value = []
manager.graph_runtime_state = None
return manager
@pytest.fixture
def mock_conversation(self):
"""Create a mock conversation."""
conversation = Mock()
conversation.id = "test-conversation-id"
conversation.mode = "advanced_chat"
return conversation
@pytest.fixture
def mock_message(self):
"""Create a mock message."""
message = Mock()
message.id = "test-message-id"
message.query = "test query"
message.created_at = Mock()
message.created_at.timestamp.return_value = 1234567890
return message
@pytest.fixture
def mock_user(self):
"""Create a mock end user."""
user = MagicMock(spec=EndUser)
user.id = "test-user-id"
user.session_id = "test-session-id"
return user
@pytest.fixture
def mock_draft_var_saver_factory(self):
"""Create a mock draft variable saver factory."""
return Mock()
@pytest.fixture
def pipeline(
self,
mock_application_generate_entity,
mock_workflow,
mock_queue_manager,
mock_conversation,
mock_message,
mock_user,
mock_draft_var_saver_factory,
):
"""Create an AdvancedChatAppGenerateTaskPipeline instance with mocked dependencies."""
from core.app.apps.advanced_chat.generate_task_pipeline import AdvancedChatAppGenerateTaskPipeline
with patch("core.app.apps.advanced_chat.generate_task_pipeline.db"):
pipeline = AdvancedChatAppGenerateTaskPipeline(
application_generate_entity=mock_application_generate_entity,
workflow=mock_workflow,
queue_manager=mock_queue_manager,
conversation=mock_conversation,
message=mock_message,
user=mock_user,
stream=True,
dialogue_count=1,
draft_var_saver_factory=mock_draft_var_saver_factory,
)
# Initialize workflow run id to avoid validation errors
pipeline._workflow_run_id = "test-workflow-run-id"
# Mock the message cycle manager methods we need to track
pipeline._message_cycle_manager.message_replace_to_stream_response = Mock()
return pipeline
def test_answer_node_with_different_output_sends_message_replace(self, pipeline, mock_application_generate_entity):
"""
Test that when an ANSWER node's final output differs from accumulated answer,
a message_replace event is sent.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "initial answer"
# Create ANSWER node succeeded event with different final output
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={"answer": "updated final answer"},
)
# Mock the workflow response converter to avoid extra processing
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
responses = list(pipeline._handle_node_succeeded_event(event))
# Assert
assert pipeline._task_state.answer == "updated final answer"
# Verify message_replace was called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_called_once_with(
answer="updated final answer", reason="variable_update"
)
def test_answer_node_with_same_output_does_not_send_message_replace(self, pipeline):
"""
Test that when an ANSWER node's final output is the same as accumulated answer,
no message_replace event is sent.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "same answer"
# Create ANSWER node succeeded event with same output
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={"answer": "same answer"},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "same answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_answer_node_with_none_output_does_not_send_message_replace(self, pipeline):
"""
Test that when an ANSWER node's output is None or missing 'answer' key,
no message_replace event is sent.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "existing answer"
# Create ANSWER node succeeded event with None output
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={"answer": None},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "existing answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_answer_node_with_empty_outputs_does_not_send_message_replace(self, pipeline):
"""
Test that when an ANSWER node has empty outputs dict,
no message_replace event is sent.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "existing answer"
# Create ANSWER node succeeded event with empty outputs
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "existing answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_answer_node_with_no_answer_key_in_outputs(self, pipeline):
"""
Test that when an ANSWER node's outputs don't contain 'answer' key,
no message_replace event is sent.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "existing answer"
# Create ANSWER node succeeded event without 'answer' key in outputs
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={"other_key": "some value"},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "existing answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_non_answer_node_does_not_send_message_replace(self, pipeline):
"""
Test that non-ANSWER nodes (e.g., LLM, END) don't trigger message_replace events.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "existing answer"
# Test with LLM node
llm_event = QueueNodeSucceededEvent(
node_execution_id="test-llm-execution-id",
node_id="test-llm-node",
node_type=NodeType.LLM,
start_at=datetime.now(),
outputs={"answer": "different answer"},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(llm_event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "existing answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_end_node_does_not_send_message_replace(self, pipeline):
"""
Test that END nodes don't trigger message_replace events even with 'answer' output.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "existing answer"
# Create END node succeeded event with answer output
event = QueueNodeSucceededEvent(
node_execution_id="test-end-execution-id",
node_id="test-end-node",
node_type=NodeType.END,
start_at=datetime.now(),
outputs={"answer": "different answer"},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should remain unchanged
assert pipeline._task_state.answer == "existing answer"
# Verify message_replace was NOT called
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_not_called()
def test_answer_node_with_numeric_output_converts_to_string(self, pipeline):
"""
Test that when an ANSWER node's final output is numeric,
it gets converted to string properly.
"""
# Arrange: Set initial accumulated answer
pipeline._task_state.answer = "text answer"
# Create ANSWER node succeeded event with numeric output
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={"answer": 12345},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: answer should be converted to string
assert pipeline._task_state.answer == "12345"
# Verify message_replace was called with string
pipeline._message_cycle_manager.message_replace_to_stream_response.assert_called_once_with(
answer="12345", reason="variable_update"
)
def test_answer_node_files_are_recorded(self, pipeline):
"""
Test that ANSWER nodes properly record files from outputs.
"""
# Arrange
pipeline._task_state.answer = "existing answer"
# Create ANSWER node succeeded event with files
event = QueueNodeSucceededEvent(
node_execution_id="test-node-execution-id",
node_id="test-answer-node",
node_type=NodeType.ANSWER,
start_at=datetime.now(),
outputs={
"answer": "same answer",
"files": [
{"type": "image", "transfer_method": "remote_url", "remote_url": "http://example.com/img.png"}
],
},
)
# Mock the workflow response converter
pipeline._workflow_response_converter.fetch_files_from_node_outputs = Mock(return_value=event.outputs["files"])
pipeline._workflow_response_converter.workflow_node_finish_to_stream_response = Mock(return_value=None)
pipeline._save_output_for_event = Mock()
# Act
list(pipeline._handle_node_succeeded_event(event))
# Assert: files should be recorded
assert len(pipeline._recorded_files) == 1
assert pipeline._recorded_files[0] == event.outputs["files"][0]

View File

@@ -58,6 +58,8 @@ def test_json_object_valid_schema():
}
)
schema = json.loads(schema)
variables = [
VariableEntity(
variable="profile",
@@ -68,7 +70,7 @@ def test_json_object_valid_schema():
)
]
user_inputs = {"profile": json.dumps({"age": 20, "name": "Tom"})}
user_inputs = {"profile": {"age": 20, "name": "Tom"}}
node = make_start_node(user_inputs, variables)
result = node._run()
@@ -87,6 +89,8 @@ def test_json_object_invalid_json_string():
"required": ["age", "name"],
}
)
schema = json.loads(schema)
variables = [
VariableEntity(
variable="profile",
@@ -97,12 +101,12 @@ def test_json_object_invalid_json_string():
)
]
# Missing closing brace makes this invalid JSON
# Providing a string instead of an object should raise a type error
user_inputs = {"profile": '{"age": 20, "name": "Tom"'}
node = make_start_node(user_inputs, variables)
with pytest.raises(ValueError, match='{"age": 20, "name": "Tom" must be a valid JSON object'):
with pytest.raises(ValueError, match="JSON object for 'profile' must be an object"):
node._run()
@@ -118,6 +122,8 @@ def test_json_object_does_not_match_schema():
}
)
schema = json.loads(schema)
variables = [
VariableEntity(
variable="profile",
@@ -129,7 +135,7 @@ def test_json_object_does_not_match_schema():
]
# age is a string, which violates the schema (expects number)
user_inputs = {"profile": json.dumps({"age": "twenty", "name": "Tom"})}
user_inputs = {"profile": {"age": "twenty", "name": "Tom"}}
node = make_start_node(user_inputs, variables)
@@ -149,6 +155,8 @@ def test_json_object_missing_required_schema_field():
}
)
schema = json.loads(schema)
variables = [
VariableEntity(
variable="profile",
@@ -160,7 +168,7 @@ def test_json_object_missing_required_schema_field():
]
# Missing required field "name"
user_inputs = {"profile": json.dumps({"age": 20})}
user_inputs = {"profile": {"age": 20}}
node = make_start_node(user_inputs, variables)

View File

@@ -83,7 +83,7 @@ const ConfigModal: FC<IConfigModalProps> = ({
if (!isJsonObject || !tempPayload.json_schema)
return ''
try {
return JSON.stringify(JSON.parse(tempPayload.json_schema), null, 2)
return tempPayload.json_schema
}
catch {
return ''

View File

@@ -0,0 +1,200 @@
import type { ReactNode } from 'react'
import type { ChatConversationGeneralDetail, ChatConversationsResponse } from '@/models/log'
import type { App, AppIconType } from '@/types/app'
import { render, screen } from '@testing-library/react'
import { NuqsTestingAdapter } from 'nuqs/adapters/testing'
import { APP_PAGE_LIMIT } from '@/config'
import { AppModeEnum } from '@/types/app'
import Logs from './index'
const mockUseChatConversations = vi.fn()
const mockUseCompletionConversations = vi.fn()
const mockUseAnnotationsCount = vi.fn()
const mockRouterPush = vi.fn()
const mockRouterReplace = vi.fn()
const mockAppStoreState = {
setShowPromptLogModal: vi.fn(),
setShowAgentLogModal: vi.fn(),
setShowMessageLogModal: vi.fn(),
}
vi.mock('next/navigation', () => ({
useRouter: () => ({
push: mockRouterPush,
replace: mockRouterReplace,
}),
usePathname: () => '/apps/app-123/logs',
useSearchParams: () => new URLSearchParams(),
}))
vi.mock('@/service/use-log', () => ({
useChatConversations: (args: unknown) => mockUseChatConversations(args),
useCompletionConversations: (args: unknown) => mockUseCompletionConversations(args),
useAnnotationsCount: () => mockUseAnnotationsCount(),
useChatConversationDetail: () => ({ data: undefined }),
useCompletionConversationDetail: () => ({ data: undefined }),
}))
vi.mock('@/service/log', () => ({
fetchChatMessages: vi.fn(),
updateLogMessageAnnotations: vi.fn(),
updateLogMessageFeedbacks: vi.fn(),
}))
vi.mock('@/context/app-context', () => ({
useAppContext: () => ({
userProfile: { timezone: 'UTC' },
}),
}))
vi.mock('@/app/components/app/store', () => ({
useStore: (selector: (state: typeof mockAppStoreState) => unknown) => selector(mockAppStoreState),
}))
const renderWithAdapter = (ui: ReactNode, searchParams = '') => {
return render(
<NuqsTestingAdapter searchParams={searchParams}>
{ui}
</NuqsTestingAdapter>,
)
}
const createMockApp = (overrides: Partial<App> = {}): App => ({
id: 'app-123',
name: 'Test App',
description: 'Test app description',
author_name: 'Test Author',
icon_type: 'emoji' as AppIconType,
icon: ':icon:',
icon_background: '#FFEAD5',
icon_url: null,
use_icon_as_answer_icon: false,
mode: AppModeEnum.CHAT,
enable_site: true,
enable_api: true,
api_rpm: 60,
api_rph: 3600,
is_demo: false,
model_config: {} as App['model_config'],
app_model_config: {} as App['app_model_config'],
created_at: Date.now(),
updated_at: Date.now(),
site: {
access_token: 'token',
app_base_url: 'https://example.com',
} as App['site'],
api_base_url: 'https://api.example.com',
tags: [],
access_mode: 'public_access' as App['access_mode'],
...overrides,
})
const createChatConversation = (overrides: Partial<ChatConversationGeneralDetail> = {}): ChatConversationGeneralDetail => ({
id: 'conversation-1',
status: 'normal',
from_source: 'api',
from_end_user_id: 'user-1',
from_end_user_session_id: 'session-1',
from_account_id: 'account-1',
read_at: new Date(),
created_at: 1700000000,
updated_at: 1700000001,
user_feedback_stats: { like: 0, dislike: 0 },
admin_feedback_stats: { like: 0, dislike: 0 },
model_config: {
provider: 'openai',
model_id: 'gpt-4',
configs: { prompt_template: '' },
},
summary: 'Conversation summary',
message_count: 1,
annotated: false,
...overrides,
})
const createChatConversationsResponse = (overrides: Partial<ChatConversationsResponse> = {}): ChatConversationsResponse => ({
data: [createChatConversation()],
has_more: false,
limit: APP_PAGE_LIMIT,
total: 1,
page: 1,
...overrides,
})
// Logs page: loading, empty, and data states.
describe('Logs', () => {
beforeEach(() => {
vi.clearAllMocks()
globalThis.innerWidth = 1024
mockUseAnnotationsCount.mockReturnValue({
data: { count: 0 },
isLoading: false,
})
mockUseChatConversations.mockReturnValue({
data: undefined,
refetch: vi.fn(),
})
mockUseCompletionConversations.mockReturnValue({
data: undefined,
refetch: vi.fn(),
})
})
// Loading behavior when no data yet.
describe('Rendering', () => {
it('should render loading state when conversations are undefined', () => {
// Arrange
const appDetail = createMockApp()
// Act
renderWithAdapter(<Logs appDetail={appDetail} />)
// Assert
expect(screen.getByRole('status')).toBeInTheDocument()
})
it('should render empty state when there are no conversations', () => {
// Arrange
mockUseChatConversations.mockReturnValue({
data: createChatConversationsResponse({ data: [], total: 0 }),
refetch: vi.fn(),
})
const appDetail = createMockApp()
// Act
renderWithAdapter(<Logs appDetail={appDetail} />)
// Assert
expect(screen.getByText('appLog.table.empty.element.title')).toBeInTheDocument()
expect(screen.queryByRole('status')).not.toBeInTheDocument()
})
})
// Data rendering behavior.
describe('Props', () => {
it('should render list with pagination when conversations exist', () => {
// Arrange
mockUseChatConversations.mockReturnValue({
data: createChatConversationsResponse({ total: APP_PAGE_LIMIT + 1 }),
refetch: vi.fn(),
})
const appDetail = createMockApp()
// Act
renderWithAdapter(<Logs appDetail={appDetail} />, '?page=0&limit=0')
// Assert
expect(screen.getByText('appLog.table.header.summary')).toBeInTheDocument()
expect(screen.getByText('25')).toBeInTheDocument()
const firstCallArgs = mockUseChatConversations.mock.calls[0]?.[0]
expect(firstCallArgs.params.page).toBe(1)
expect(firstCallArgs.params.limit).toBe(APP_PAGE_LIMIT)
})
})
})

View File

@@ -4,9 +4,13 @@ import type { App } from '@/types/app'
import { useDebounce } from 'ahooks'
import dayjs from 'dayjs'
import { omit } from 'es-toolkit/object'
import { usePathname, useRouter, useSearchParams } from 'next/navigation'
import {
parseAsInteger,
parseAsString,
useQueryStates,
} from 'nuqs'
import * as React from 'react'
import { useCallback, useEffect, useState } from 'react'
import { useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import Loading from '@/app/components/base/loading'
import Pagination from '@/app/components/base/pagination'
@@ -28,53 +32,38 @@ export type QueryParam = {
sort_by?: string
}
const defaultQueryParams: QueryParam = {
period: '2',
annotation_status: 'all',
sort_by: '-created_at',
}
const logsStateCache = new Map<string, {
queryParams: QueryParam
currPage: number
limit: number
}>()
const Logs: FC<ILogsProps> = ({ appDetail }) => {
const { t } = useTranslation()
const router = useRouter()
const pathname = usePathname()
const searchParams = useSearchParams()
const getPageFromParams = useCallback(() => {
const pageParam = Number.parseInt(searchParams.get('page') || '1', 10)
if (Number.isNaN(pageParam) || pageParam < 1)
return 0
return pageParam - 1
}, [searchParams])
const cachedState = logsStateCache.get(appDetail.id)
const [queryParams, setQueryParams] = useState<QueryParam>(cachedState?.queryParams ?? defaultQueryParams)
const [currPage, setCurrPage] = React.useState<number>(() => cachedState?.currPage ?? getPageFromParams())
const [limit, setLimit] = React.useState<number>(cachedState?.limit ?? APP_PAGE_LIMIT)
const [queryParams, setQueryParams] = useQueryStates(
{
page: parseAsInteger.withDefault(1),
limit: parseAsInteger.withDefault(APP_PAGE_LIMIT),
period: parseAsString.withDefault('2'),
annotation_status: parseAsString.withDefault('all'),
keyword: parseAsString,
sort_by: parseAsString.withDefault('-created_at'),
},
{
urlKeys: {
page: 'page',
limit: 'limit',
period: 'period',
annotation_status: 'annotation_status',
keyword: 'keyword',
sort_by: 'sort_by',
},
},
)
const debouncedQueryParams = useDebounce(queryParams, { wait: 500 })
const page = queryParams.page > 0 ? queryParams.page : 1
const limit = queryParams.limit > 0 ? queryParams.limit : APP_PAGE_LIMIT
useEffect(() => {
const pageFromParams = getPageFromParams()
setCurrPage(prev => (prev === pageFromParams ? prev : pageFromParams))
}, [getPageFromParams])
useEffect(() => {
logsStateCache.set(appDetail.id, {
queryParams,
currPage,
limit,
})
}, [appDetail.id, currPage, limit, queryParams])
// Get the app type first
const isChatMode = appDetail.mode !== AppModeEnum.COMPLETION
const query = {
page: currPage + 1,
page,
limit,
...((debouncedQueryParams.period !== '9')
? {
@@ -83,10 +72,10 @@ const Logs: FC<ILogsProps> = ({ appDetail }) => {
}
: {}),
...(isChatMode ? { sort_by: debouncedQueryParams.sort_by } : {}),
...omit(debouncedQueryParams, ['period']),
...omit(debouncedQueryParams, ['period', 'page', 'limit']),
keyword: debouncedQueryParams.keyword || undefined,
}
// When the details are obtained, proceed to the next request
const { data: chatConversations, refetch: mutateChatList } = useChatConversations({
appId: isChatMode ? appDetail.id : '',
params: query,
@@ -100,41 +89,38 @@ const Logs: FC<ILogsProps> = ({ appDetail }) => {
const total = isChatMode ? chatConversations?.total : completionConversations?.total
const handleQueryParamsChange = useCallback((next: QueryParam) => {
setCurrPage(0)
setQueryParams(next)
}, [])
setQueryParams({
...next,
page: 1, // Reset to page 1 on filter change
})
}, [setQueryParams])
const handlePageChange = useCallback((page: number) => {
setCurrPage(page)
const params = new URLSearchParams(searchParams.toString())
const nextPageValue = page + 1
if (nextPageValue === 1)
params.delete('page')
else
params.set('page', String(nextPageValue))
const queryString = params.toString()
router.replace(queryString ? `${pathname}?${queryString}` : pathname, { scroll: false })
}, [pathname, router, searchParams])
setQueryParams({ page: page + 1 })
}, [setQueryParams])
const handleLimitChange = useCallback((limit: number) => {
setQueryParams({ limit, page: 1 })
}, [setQueryParams])
return (
<div className="flex h-full grow flex-col">
<p className="system-sm-regular shrink-0 text-text-tertiary">{t('description', { ns: 'appLog' })}</p>
<div className="flex max-h-[calc(100%-16px)] flex-1 grow flex-col py-4">
<Filter isChatMode={isChatMode} appId={appDetail.id} queryParams={queryParams} setQueryParams={handleQueryParamsChange} />
<Filter isChatMode={isChatMode} appId={appDetail.id} queryParams={{ ...queryParams, keyword: queryParams.keyword ?? undefined }} setQueryParams={handleQueryParamsChange} />
{total === undefined
? <Loading type="app" />
: total > 0
? <List logs={isChatMode ? chatConversations : completionConversations} appDetail={appDetail} onRefresh={isChatMode ? mutateChatList : mutateCompletionList} />
: <EmptyElement appDetail={appDetail} />}
{/* Show Pagination only if the total is more than the limit */}
{(total && total > APP_PAGE_LIMIT)
? (
<Pagination
current={currPage}
current={page - 1}
onChange={handlePageChange}
total={total}
limit={limit}
onLimitChange={setLimit}
onLimitChange={handleLimitChange}
/>
)
: null}

View File

@@ -37,7 +37,7 @@ export const getProcessedInputs = (inputs: Record<string, any>, inputsForm: Inpu
return
}
if (!inputValue)
if (inputValue == null)
return
if (item.type === InputVarType.singleFile) {
@@ -52,6 +52,20 @@ export const getProcessedInputs = (inputs: Record<string, any>, inputsForm: Inpu
else
processedInputs[item.variable] = getProcessedFiles(inputValue)
}
else if (item.type === InputVarType.jsonObject) {
// Prefer sending an object if the user entered valid JSON; otherwise keep the raw string.
try {
const v = typeof inputValue === 'string' ? JSON.parse(inputValue) : inputValue
if (v && typeof v === 'object' && !Array.isArray(v))
processedInputs[item.variable] = v
else
processedInputs[item.variable] = inputValue
}
catch {
// keep original string; backend will parse/validate
processedInputs[item.variable] = inputValue
}
}
})
return processedInputs

View File

@@ -3,6 +3,7 @@ import type { ChatConfig } from '../types'
import type { AppConversationData, AppData, AppMeta, ConversationItem } from '@/models/share'
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import { act, renderHook, waitFor } from '@testing-library/react'
import { NuqsTestingAdapter } from 'nuqs/adapters/testing'
import { ToastProvider } from '@/app/components/base/toast'
import {
fetchChatList,
@@ -74,9 +75,11 @@ const createQueryClient = () => new QueryClient({
const createWrapper = (queryClient: QueryClient) => {
return ({ children }: { children: ReactNode }) => (
<QueryClientProvider client={queryClient}>
<ToastProvider>{children}</ToastProvider>
</QueryClientProvider>
<NuqsTestingAdapter>
<QueryClientProvider client={queryClient}>
<ToastProvider>{children}</ToastProvider>
</QueryClientProvider>
</NuqsTestingAdapter>
)
}

View File

@@ -11,6 +11,7 @@ import type {
import { useLocalStorageState } from 'ahooks'
import { noop } from 'es-toolkit/function'
import { produce } from 'immer'
import { parseAsString, useQueryState } from 'nuqs'
import {
useCallback,
useEffect,
@@ -82,12 +83,10 @@ export const useEmbeddedChatbot = () => {
setConversationId(embeddedConversationId || undefined)
}, [embeddedConversationId])
const [localeParam] = useQueryState('locale', parseAsString)
useEffect(() => {
const setLanguageFromParams = async () => {
// Check URL parameters for language override
const urlParams = new URLSearchParams(window.location.search)
const localeParam = urlParams.get('locale')
// Check for encoded system variables
const systemVariables = await getProcessedSystemVariablesFromUrlParams()
const localeFromSysVar = systemVariables.locale
@@ -107,7 +106,7 @@ export const useEmbeddedChatbot = () => {
}
setLanguageFromParams()
}, [appInfo])
}, [appInfo, localeParam])
const [conversationIdInfo, setConversationIdInfo] = useLocalStorageState<Record<string, Record<string, string>>>(CONVERSATION_ID_INFO, {
defaultValue: {},

View File

@@ -1,12 +1,9 @@
import type { ReactNode } from 'react'
import { act, renderHook } from '@testing-library/react'
import { NuqsTestingAdapter } from 'nuqs/adapters/testing'
import { PARTNER_STACK_CONFIG } from '@/config'
import usePSInfo from './use-ps-info'
let searchParamsValues: Record<string, string | null> = {}
const setSearchParams = (values: Record<string, string | null>) => {
searchParamsValues = values
}
type PartnerStackGlobal = typeof globalThis & {
__partnerStackCookieMocks?: {
get: ReturnType<typeof vi.fn>
@@ -48,11 +45,6 @@ vi.mock('js-cookie', () => {
remove,
}
})
vi.mock('next/navigation', () => ({
useSearchParams: () => ({
get: (key: string) => searchParamsValues[key] ?? null,
}),
}))
vi.mock('@/service/use-billing', () => {
const mutateAsync = vi.fn()
const globals = getPartnerStackGlobal()
@@ -64,6 +56,15 @@ vi.mock('@/service/use-billing', () => {
}
})
// Mount the usePSInfo hook beneath a nuqs testing adapter seeded from the
// supplied query string, so URL-derived state needs no real router.
const renderWithAdapter = (searchParams = '') => {
  const AdapterWrapper = ({ children }: { children: ReactNode }) => (
    <NuqsTestingAdapter searchParams={searchParams}>{children}</NuqsTestingAdapter>
  )
  return renderHook(() => usePSInfo(), { wrapper: AdapterWrapper })
}
describe('usePSInfo', () => {
const originalLocationDescriptor = Object.getOwnPropertyDescriptor(globalThis, 'location')
@@ -75,7 +76,6 @@ describe('usePSInfo', () => {
})
beforeEach(() => {
setSearchParams({})
const { get, set, remove } = ensureCookieMocks()
get.mockReset()
set.mockReset()
@@ -94,12 +94,7 @@ describe('usePSInfo', () => {
it('saves partner info when query params change', () => {
const { get, set } = ensureCookieMocks()
get.mockReturnValue(JSON.stringify({ partnerKey: 'old', clickId: 'old-click' }))
setSearchParams({
ps_partner_key: 'new-partner',
ps_xid: 'new-click',
})
const { result } = renderHook(() => usePSInfo())
const { result } = renderWithAdapter('?ps_partner_key=new-partner&ps_xid=new-click')
expect(result.current.psPartnerKey).toBe('new-partner')
expect(result.current.psClickId).toBe('new-click')
@@ -123,17 +118,13 @@ describe('usePSInfo', () => {
})
it('does not overwrite cookie when params do not change', () => {
setSearchParams({
ps_partner_key: 'existing',
ps_xid: 'existing-click',
})
const { get } = ensureCookieMocks()
get.mockReturnValue(JSON.stringify({
partnerKey: 'existing',
clickId: 'existing-click',
}))
const { result } = renderHook(() => usePSInfo())
const { result } = renderWithAdapter('?ps_partner_key=existing&ps_xid=existing-click')
act(() => {
result.current.saveOrUpdate()
@@ -144,12 +135,7 @@ describe('usePSInfo', () => {
})
it('binds partner info and clears cookie once', async () => {
setSearchParams({
ps_partner_key: 'bind-partner',
ps_xid: 'bind-click',
})
const { result } = renderHook(() => usePSInfo())
const { result } = renderWithAdapter('?ps_partner_key=bind-partner&ps_xid=bind-click')
const mutate = ensureMutateAsync()
const { remove } = ensureCookieMocks()
@@ -176,12 +162,7 @@ describe('usePSInfo', () => {
it('still removes cookie when bind fails with status 400', async () => {
const mutate = ensureMutateAsync()
mutate.mockRejectedValueOnce({ status: 400 })
setSearchParams({
ps_partner_key: 'bind-partner',
ps_xid: 'bind-click',
})
const { result } = renderHook(() => usePSInfo())
const { result } = renderWithAdapter('?ps_partner_key=bind-partner&ps_xid=bind-click')
await act(async () => {
await result.current.bind()

View File

@@ -1,12 +1,13 @@
import { useBoolean } from 'ahooks'
import Cookies from 'js-cookie'
import { useSearchParams } from 'next/navigation'
import { parseAsString, useQueryState } from 'nuqs'
import { useCallback } from 'react'
import { PARTNER_STACK_CONFIG } from '@/config'
import { useBindPartnerStackInfo } from '@/service/use-billing'
const usePSInfo = () => {
const searchParams = useSearchParams()
const [partnerKey] = useQueryState('ps_partner_key', parseAsString)
const [clickId] = useQueryState('ps_xid', parseAsString)
const psInfoInCookie = (() => {
try {
return JSON.parse(Cookies.get(PARTNER_STACK_CONFIG.cookieName) || '{}')
@@ -16,8 +17,8 @@ const usePSInfo = () => {
return {}
}
})()
const psPartnerKey = searchParams.get('ps_partner_key') || psInfoInCookie?.partnerKey
const psClickId = searchParams.get('ps_xid') || psInfoInCookie?.clickId
const psPartnerKey = partnerKey || psInfoInCookie?.partnerKey
const psClickId = clickId || psInfoInCookie?.clickId
const isPSChanged = psInfoInCookie?.partnerKey !== psPartnerKey || psInfoInCookie?.clickId !== psClickId
const [hasBind, {
setTrue: setBind,

View File

@@ -0,0 +1,133 @@
import type { ReactNode } from 'react'
import { act, renderHook, waitFor } from '@testing-library/react'
import { NuqsTestingAdapter } from 'nuqs/adapters/testing'
import useDocumentListQueryState from './use-document-list-query-state'
// Mount the document-list query hook beneath a nuqs testing adapter seeded
// from the supplied query string, so URL-derived state needs no real router.
const renderWithAdapter = (searchParams = '') => {
  const AdapterWrapper = ({ children }: { children: ReactNode }) => (
    <NuqsTestingAdapter searchParams={searchParams}>{children}</NuqsTestingAdapter>
  )
  return renderHook(() => useDocumentListQueryState(), { wrapper: AdapterWrapper })
}
// Document list query state: defaults, sanitization, and update actions.
// NOTE(review): these specs drive the hook through NuqsTestingAdapter, so no
// Next.js router is involved; URL state comes solely from the seeded string.
describe('useDocumentListQueryState', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  // Default query values.
  describe('Rendering', () => {
    it('should return default query values when URL params are missing', () => {
      // Arrange
      const { result } = renderWithAdapter()
      // Act
      const { query } = result.current
      // Assert
      expect(query).toEqual({
        page: 1,
        limit: 10,
        keyword: '',
        status: 'all',
        sort: '-created_at',
      })
    })
  })
  // URL sanitization behavior.
  describe('Edge Cases', () => {
    it('should sanitize invalid URL query values', () => {
      // Arrange: page=0 is out of range, limit=500 exceeds the cap, keyword is
      // whitespace-only, and status/sort are unknown values — all must reset.
      const { result } = renderWithAdapter('?page=0&limit=500&keyword=%20%20&status=invalid&sort=bad')
      // Act
      const { query } = result.current
      // Assert
      expect(query).toEqual({
        page: 1,
        limit: 10,
        keyword: '',
        status: 'all',
        sort: '-created_at',
      })
    })
  })
  // Query update actions.
  describe('User Interactions', () => {
    it('should normalize query updates', async () => {
      // Arrange
      const { result } = renderWithAdapter()
      // Act
      act(() => {
        result.current.updateQuery({
          page: 0,
          limit: 200,
          keyword: ' search ',
          status: 'invalid',
          sort: 'hit_count',
        })
      })
      // Assert
      await waitFor(() => {
        expect(result.current.query).toEqual({
          page: 1,
          limit: 10,
          // keyword keeps its surrounding whitespace: only blank values collapse.
          keyword: ' search ',
          status: 'all',
          sort: 'hit_count',
        })
      })
    })
    it('should reset query values to defaults', async () => {
      // Arrange
      const { result } = renderWithAdapter('?page=2&limit=25&keyword=hello&status=enabled&sort=hit_count')
      // Act
      act(() => {
        result.current.resetQuery()
      })
      // Assert
      await waitFor(() => {
        expect(result.current.query).toEqual({
          page: 1,
          limit: 10,
          keyword: '',
          status: 'all',
          sort: '-created_at',
        })
      })
    })
  })
  // Callback stability.
  describe('Performance', () => {
    it('should keep action callbacks stable across updates', async () => {
      // Arrange
      const { result } = renderWithAdapter()
      const initialUpdate = result.current.updateQuery
      const initialReset = result.current.resetQuery
      // Act
      act(() => {
        result.current.updateQuery({ page: 2 })
      })
      // Assert: the hook must return referentially-stable callbacks so
      // consumers can safely list them in dependency arrays.
      await waitFor(() => {
        expect(result.current.updateQuery).toBe(initialUpdate)
        expect(result.current.resetQuery).toBe(initialReset)
      })
    })
  })
})

View File

@@ -1,6 +1,5 @@
import type { ReadonlyURLSearchParams } from 'next/navigation'
import type { SortType } from '@/service/datasets'
import { usePathname, useRouter, useSearchParams } from 'next/navigation'
import { parseAsInteger, parseAsString, useQueryStates } from 'nuqs'
import { useCallback, useMemo } from 'react'
import { sanitizeStatusValue } from '../status-filter'
@@ -21,6 +20,14 @@ export type DocumentListQuery = {
sort: SortType
}
// Raw, pre-sanitization query values as read from the URL. Every field is
// optional, and string fields may be null when the parameter is absent;
// normalization converts this loose shape into a canonical DocumentListQuery.
type DocumentListQueryInput = {
  page?: number
  limit?: number
  keyword?: string | null
  status?: string | null
  sort?: string | null
}
const DEFAULT_QUERY: DocumentListQuery = {
page: 1,
limit: 10,
@@ -29,89 +36,60 @@ const DEFAULT_QUERY: DocumentListQuery = {
sort: '-created_at',
}
// Parse the query parameters from the URL search string.
function parseParams(params: ReadonlyURLSearchParams): DocumentListQuery {
const page = Number.parseInt(params.get('page') || '1', 10)
const limit = Number.parseInt(params.get('limit') || '10', 10)
const keyword = params.get('keyword') || ''
const status = sanitizeStatusValue(params.get('status'))
const sort = sanitizeSortValue(params.get('sort'))
const normalizeKeywordValue = (value?: string | null) => (value && value.trim() ? value : '')
// Collapse raw query input into a canonical DocumentListQuery: out-of-range
// numbers fall back to the defaults and string fields are run through their
// sanitizers before being returned.
// Fix: the rendered block carried stale duplicate properties in the returned
// object (`page`, `limit`, `keyword` each appeared twice — an old
// decode/fallback variant alongside the new shorthand), which is a duplicate
// object-key error; only the shorthand properties are kept.
const normalizeDocumentListQuery = (query: DocumentListQueryInput): DocumentListQuery => {
  // page must be strictly positive; anything else resets to the default.
  const page = (query.page && query.page > 0) ? query.page : DEFAULT_QUERY.page
  // limit is accepted only in (0, 100]; larger or non-positive values reset.
  const limit = (query.limit && query.limit > 0 && query.limit <= 100) ? query.limit : DEFAULT_QUERY.limit
  const keyword = normalizeKeywordValue(query.keyword ?? DEFAULT_QUERY.keyword)
  const status = sanitizeStatusValue(query.status ?? DEFAULT_QUERY.status)
  const sort = sanitizeSortValue(query.sort ?? DEFAULT_QUERY.sort)
  return {
    page,
    limit,
    keyword,
    status,
    sort,
  }
}
// Update the URL search string with the given query parameters.
// Only non-default values are written so URLs stay clean; default-valued keys
// are actively deleted to clear any stale entries already in `searchParams`.
// NOTE(review): mutates the passed URLSearchParams in place and returns nothing.
function updateSearchParams(query: DocumentListQuery, searchParams: URLSearchParams) {
  const { page, limit, keyword, status, sort } = query || {}
  // page and limit are emitted as a pair: if any pagination-affecting value is
  // non-default, both are written; otherwise both are removed.
  const hasNonDefaultParams = (page && page > 1) || (limit && limit !== 10) || (keyword && keyword.trim())
  if (hasNonDefaultParams) {
    searchParams.set('page', (page || 1).toString())
    searchParams.set('limit', (limit || 10).toString())
  }
  else {
    searchParams.delete('page')
    searchParams.delete('limit')
  }
  // Whitespace-only keywords count as empty and are dropped from the URL.
  if (keyword && keyword.trim())
    searchParams.set('keyword', encodeURIComponent(keyword))
  else
    searchParams.delete('keyword')
  // 'all' is the default status and is represented by the key's absence.
  const sanitizedStatus = sanitizeStatusValue(status)
  if (sanitizedStatus && sanitizedStatus !== 'all')
    searchParams.set('status', sanitizedStatus)
  else
    searchParams.delete('status')
  // '-created_at' is the default sort and is represented by the key's absence.
  const sanitizedSort = sanitizeSortValue(sort)
  if (sanitizedSort !== '-created_at')
    searchParams.set('sort', sanitizedSort)
  else
    searchParams.delete('sort')
}
function useDocumentListQueryState() {
const searchParams = useSearchParams()
const query = useMemo(() => parseParams(searchParams), [searchParams])
const [query, setQuery] = useQueryStates(
{
page: parseAsInteger.withDefault(DEFAULT_QUERY.page),
limit: parseAsInteger.withDefault(DEFAULT_QUERY.limit),
keyword: parseAsString.withDefault(DEFAULT_QUERY.keyword),
status: parseAsString.withDefault(DEFAULT_QUERY.status),
sort: parseAsString.withDefault(DEFAULT_QUERY.sort),
},
{
history: 'push',
urlKeys: {
page: 'page',
limit: 'limit',
keyword: 'keyword',
status: 'status',
sort: 'sort',
},
},
)
const router = useRouter()
const pathname = usePathname()
const finalQuery = useMemo(() => normalizeDocumentListQuery(query), [query])
// Helper function to update specific query parameters
const updateQuery = useCallback((updates: Partial<DocumentListQuery>) => {
const newQuery = { ...query, ...updates }
newQuery.status = sanitizeStatusValue(newQuery.status)
newQuery.sort = sanitizeSortValue(newQuery.sort)
const params = new URLSearchParams()
updateSearchParams(newQuery, params)
const search = params.toString()
const queryString = search ? `?${search}` : ''
router.push(`${pathname}${queryString}`, { scroll: false })
}, [query, router, pathname])
setQuery(prev => normalizeDocumentListQuery({ ...prev, ...updates }))
}, [setQuery])
// Helper function to reset query to defaults
const resetQuery = useCallback(() => {
const params = new URLSearchParams()
updateSearchParams(DEFAULT_QUERY, params)
const search = params.toString()
const queryString = search ? `?${search}` : ''
router.push(`${pathname}${queryString}`, { scroll: false })
}, [router, pathname])
setQuery(DEFAULT_QUERY)
}, [setQuery])
return useMemo(() => ({
query,
query: finalQuery,
updateQuery,
resetQuery,
}), [query, updateQuery, resetQuery])
}), [finalQuery, updateQuery, resetQuery])
}
export default useDocumentListQueryState

View File

@@ -195,7 +195,7 @@ const RunOnce: FC<IRunOnceProps> = ({
noWrapper
className="bg h-[80px] overflow-y-auto rounded-[10px] bg-components-input-bg-normal p-1"
placeholder={
<div className="whitespace-pre">{item.json_schema}</div>
<div className="whitespace-pre">{typeof item.json_schema === 'string' ? item.json_schema : JSON.stringify(item.json_schema || '', null, 2)}</div>
}
/>
)}

View File

@@ -48,6 +48,12 @@ const FormItem: FC<Props> = ({
const { t } = useTranslation()
const { type } = payload
const fileSettings = useHooksStore(s => s.configsMap?.fileSettings)
const jsonSchemaPlaceholder = React.useMemo(() => {
const schema = (payload as any)?.json_schema
if (!schema)
return ''
return typeof schema === 'string' ? schema : JSON.stringify(schema, null, 2)
}, [payload])
const handleArrayItemChange = useCallback((index: number) => {
return (newValue: any) => {
@@ -211,7 +217,7 @@ const FormItem: FC<Props> = ({
noWrapper
className="bg h-[80px] overflow-y-auto rounded-[10px] bg-components-input-bg-normal p-1"
placeholder={
<div className="whitespace-pre">{payload.json_schema}</div>
<div className="whitespace-pre">{jsonSchemaPlaceholder}</div>
}
/>
)}

View File

@@ -353,7 +353,7 @@ const formatItem = (
try {
if (type === VarType.object && v.json_schema) {
varRes.children = {
schema: JSON.parse(v.json_schema),
schema: typeof v.json_schema === 'string' ? JSON.parse(v.json_schema) : v.json_schema,
}
}
}

View File

@@ -223,7 +223,7 @@ export type InputVar = {
getVarValueFromDependent?: boolean
hide?: boolean
isFileItem?: boolean
json_schema?: string // for jsonObject type
json_schema?: string | Record<string, any> // for jsonObject type
} & Partial<UploadFileSetting>
export type ModelConfig = {

View File

@@ -1,13 +1,12 @@
'use client'
import { useSearchParams } from 'next/navigation'
import { parseAsString, useQueryState } from 'nuqs'
import { useEffect } from 'react'
import usePSInfo from '../components/billing/partner-stack/use-ps-info'
import NormalForm from './normal-form'
import OneMoreStep from './one-more-step'
const SignIn = () => {
const searchParams = useSearchParams()
const step = searchParams.get('step')
const [step] = useQueryState('step', parseAsString)
const { saveOrUpdate } = usePSInfo()
useEffect(() => {

View File

@@ -62,7 +62,7 @@ export type PromptVariable = {
icon?: string
icon_background?: string
hide?: boolean // used in frontend to hide variable
json_schema?: string
json_schema?: string | Record<string, any>
}
export type CompletionParams = {

View File

@@ -66,7 +66,30 @@ export const sanitizeWorkflowDraftPayload = (params: WorkflowDraftSyncParams): W
if (!graph?.nodes?.length)
return params
const sanitizedNodes = graph.nodes.map(node => sanitizeTriggerPluginNode(node as Node<TriggerPluginNodePayload>))
const sanitizedNodes = graph.nodes.map((node) => {
// First sanitize known node types (TriggerPlugin)
const n = sanitizeTriggerPluginNode(node as Node<TriggerPluginNodePayload>) as Node<any>
// Normalize Start node variable json_schema: ensure dict, not string
if ((n.data as any)?.type === BlockEnum.Start && Array.isArray((n.data as any).variables)) {
const next = { ...n, data: { ...n.data } }
next.data.variables = (n.data as any).variables.map((v: any) => {
if (v && v.type === 'json_object' && typeof v.json_schema === 'string') {
try {
const obj = JSON.parse(v.json_schema)
return { ...v, json_schema: obj }
}
catch {
return v
}
}
return v
})
return next
}
return n
})
return {
...params,
@@ -126,7 +149,25 @@ export const hydrateWorkflowDraftResponse = (draft: FetchWorkflowDraftResponse):
if (node.data)
removeTempProperties(node.data as Record<string, unknown>)
return hydrateTriggerPluginNode(node)
let n = hydrateTriggerPluginNode(node)
// Normalize Start node variable json_schema to object when loading
if ((n.data as any)?.type === BlockEnum.Start && Array.isArray((n.data as any).variables)) {
const next = { ...n, data: { ...n.data } } as Node<any>
next.data.variables = (n.data as any).variables.map((v: any) => {
if (v && v.type === 'json_object' && typeof v.json_schema === 'string') {
try {
const obj = JSON.parse(v.json_schema)
return { ...v, json_schema: obj }
}
catch {
return v
}
}
return v
})
n = next
}
return n
})
}

View File

@@ -9,6 +9,7 @@ import type {
} from '@/types/workflow'
import { get, post } from './base'
import { getFlowPrefix } from './utils'
import { sanitizeWorkflowDraftPayload } from './workflow-payload'
export const fetchWorkflowDraft = (url: string) => {
return get(url, {}, { silent: true }) as Promise<FetchWorkflowDraftResponse>
@@ -18,7 +19,8 @@ export const syncWorkflowDraft = ({ url, params }: {
url: string
params: Pick<FetchWorkflowDraftResponse, 'graph' | 'features' | 'environment_variables' | 'conversation_variables'>
}) => {
return post<CommonResponse & { updated_at: number, hash: string }>(url, { body: params }, { silent: true })
const sanitized = sanitizeWorkflowDraftPayload(params)
return post<CommonResponse & { updated_at: number, hash: string }>(url, { body: sanitized }, { silent: true })
}
export const fetchNodesDefaultConfigs = (url: string) => {