Compare commits

...

2 Commits

Author SHA1 Message Date
CodingOnStar
0c29b67e22 Merge remote-tracking branch 'origin/main' into refactor/configuration 2026-01-27 11:43:36 +08:00
CodingOnStar
c080c48aba refactor(debug): extract hooks and components, add comprehensive tests
Extract reusable hooks and components from debug/index.tsx:
- useInputValidation, useFormattingChangeConfirm, useModalWidth hooks
- useTextCompletion hook for text completion logic
- DebugHeader component for header UI
- TextCompletionResult component for completion display

Add comprehensive test coverage for debug-with-multiple-model:
- chat-item.spec.tsx (23 tests)
- debug-item.spec.tsx (25 tests)
- model-parameter-trigger.spec.tsx (14 tests)
- text-generation-item.spec.tsx (16 tests)
- index.spec.tsx expanded (84 tests)

Total: 183 tests passing with 95%+ coverage

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-27 11:42:09 +08:00
12 changed files with 4019 additions and 492 deletions

View File

@@ -0,0 +1,91 @@
'use client'
import type { FC } from 'react'
import type { ModelAndParameter } from './types'
import {
RiAddLine,
RiEqualizer2Line,
} from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import ActionButton, { ActionButtonState } from '@/app/components/base/action-button'
import Button from '@/app/components/base/button'
import { RefreshCcw01 } from '@/app/components/base/icons/src/vender/line/arrows'
import TooltipPlus from '@/app/components/base/tooltip'
import { AppModeEnum } from '@/types/app'
/** Props for the debug panel header bar. */
type DebugHeaderProps = {
  /** When true, interactive controls (clear conversation, expand toggle) are inert/hidden. */
  readonly?: boolean
  /** Current app mode; chat-only controls are hidden for COMPLETION apps. */
  mode: AppModeEnum
  /** Whether the multi-model comparison debug mode is active. */
  debugWithMultipleModel: boolean
  /** Models currently under comparison; drives the "(n/4)" counter and add-button disabling. */
  multipleModelConfigs: ModelAndParameter[]
  /** Number of user-input variables; the expand toggle only renders when > 0. */
  varListLength: number
  /** Whether the user-input panel is expanded (also shows the arrow notch). */
  expanded: boolean
  /** Called with the next expanded state when the toggle is clicked (suppressed when readonly). */
  onExpandedChange: (expanded: boolean) => void
  /** Clears the current debug conversation. */
  onClearConversation: () => void
  /** Adds another model to the comparison set. */
  onAddModel: () => void
}
/**
 * Header bar of the debug panel: title on the left; on the right an optional
 * "add model (n/4)" button (multi-model mode), a clear-conversation action and
 * the user-input expand toggle (both hidden for completion-mode apps).
 */
const DebugHeader: FC<DebugHeaderProps> = (props) => {
  const {
    readonly,
    mode,
    debugWithMultipleModel,
    multipleModelConfigs,
    varListLength,
    expanded,
    onExpandedChange,
    onClearConversation,
    onAddModel,
  } = props
  const { t } = useTranslation()

  const modelCount = multipleModelConfigs.length
  // Completion apps have no conversation, so refresh/expand controls are chat-only.
  const showChatControls = mode !== AppModeEnum.COMPLETION

  // Expansion toggling is a no-op while the panel is readonly.
  const toggleExpanded = () => {
    if (!readonly)
      onExpandedChange(!expanded)
  }

  return (
    <div className="flex items-center justify-between px-4 pb-2 pt-3">
      <div className="system-xl-semibold text-text-primary">{t('inputs.title', { ns: 'appDebug' })}</div>
      <div className="flex items-center">
        {debugWithMultipleModel && (
          <>
            <Button
              variant="ghost-accent"
              onClick={onAddModel}
              disabled={modelCount >= 4}
            >
              <RiAddLine className="mr-1 h-3.5 w-3.5" />
              {t('modelProvider.addModel', { ns: 'common' })}
              (
              {modelCount}
              /4)
            </Button>
            <div className="mx-2 h-[14px] w-[1px] bg-divider-regular" />
          </>
        )}
        {showChatControls && (
          <>
            {!readonly && (
              <TooltipPlus popupContent={t('operation.refresh', { ns: 'common' })}>
                <ActionButton onClick={onClearConversation}>
                  <RefreshCcw01 className="h-4 w-4" />
                </ActionButton>
              </TooltipPlus>
            )}
            {varListLength > 0 && (
              <div className="relative ml-1 mr-2">
                <TooltipPlus popupContent={t('panel.userInputField', { ns: 'workflow' })}>
                  <ActionButton
                    state={expanded ? ActionButtonState.Active : undefined}
                    onClick={toggleExpanded}
                  >
                    <RiEqualizer2Line className="h-4 w-4" />
                  </ActionButton>
                </TooltipPlus>
                {expanded && (
                  <div className="absolute bottom-[-14px] right-[5px] z-10 h-3 w-3 rotate-45 border-l-[0.5px] border-t-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg" />
                )}
              </div>
            )}
          </>
        )}
      </div>
    </div>
  )
}
export default DebugHeader

View File

@@ -0,0 +1,737 @@
import type { ModelAndParameter } from '../types'
import type { ChatConfig } from '@/app/components/base/chat/types'
import { render, screen, waitFor } from '@testing-library/react'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { DEFAULT_AGENT_SETTING, DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import { ModelModeType } from '@/types/app'
import { APP_CHAT_WITH_MULTIPLE_MODEL, APP_CHAT_WITH_MULTIPLE_MODEL_RESTART } from '../types'
import ChatItem from './chat-item'
// One vi.fn per mocked hook so individual tests can swap return values with
// mockReturnValue without re-declaring the module mock. NOTE: vi.mock factories
// are hoisted above imports, so they must only reference these via closures.
const mockUseAppContext = vi.fn()
const mockUseDebugConfigurationContext = vi.fn()
const mockUseProviderContext = vi.fn()
const mockUseFeatures = vi.fn()
const mockUseConfigFromDebugContext = vi.fn()
const mockUseFormattingChangedSubscription = vi.fn()
const mockUseChat = vi.fn()
const mockUseEventEmitterContextContext = vi.fn()
vi.mock('@/context/app-context', () => ({
  useAppContext: () => mockUseAppContext(),
}))
vi.mock('@/context/debug-configuration', () => ({
  useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
}))
vi.mock('@/context/provider-context', () => ({
  useProviderContext: () => mockUseProviderContext(),
}))
vi.mock('@/app/components/base/features/hooks', () => ({
  // Forward the selector so tests can run it against a fake features state.
  useFeatures: (selector: (state: unknown) => unknown) => mockUseFeatures(selector),
}))
vi.mock('../hooks', () => ({
  useConfigFromDebugContext: () => mockUseConfigFromDebugContext(),
  useFormattingChangedSubscription: (chatList: unknown) => mockUseFormattingChangedSubscription(chatList),
}))
vi.mock('@/app/components/base/chat/chat/hooks', () => ({
  useChat: (...args: unknown[]) => mockUseChat(...args),
}))
vi.mock('@/context/event-emitter', () => ({
  useEventEmitterContextContext: () => mockUseEventEmitterContextContext(),
}))
// Service-layer spies: assert on forwarded (appId, id, abortController) args.
const mockStopChatMessageResponding = vi.fn()
const mockFetchConversationMessages = vi.fn()
const mockFetchSuggestedQuestions = vi.fn()
vi.mock('@/service/debug', () => ({
  fetchConversationMessages: (...args: unknown[]) => mockFetchConversationMessages(...args),
  fetchSuggestedQuestions: (...args: unknown[]) => mockFetchSuggestedQuestions(...args),
  stopChatMessageResponding: (...args: unknown[]) => mockStopChatMessageResponding(...args),
}))
vi.mock('@/utils', () => ({
  // Simplified matcher: a tool "belongs" to a collection when the ids are equal.
  canFindTool: (collectionId: string, providerId: string) => collectionId === providerId,
}))
vi.mock('@/app/components/base/chat/utils', () => ({
  getLastAnswer: (chatList: { id: string }[]) => chatList.length > 0 ? chatList[chatList.length - 1] : null,
}))
// The Chat stub records the props ChatItem passes down; tests assert on them.
let capturedChatProps: Record<string, unknown> | null = null
vi.mock('@/app/components/base/chat/chat', () => ({
  default: (props: Record<string, unknown>) => {
    capturedChatProps = props
    return <div data-testid="chat-component">Chat</div>
  },
}))
vi.mock('@/app/components/base/avatar', () => ({
  default: ({ name }: { name: string }) => <div data-testid="avatar">{name}</div>,
}))
// Monotonic counter so each fixture gets a unique, predictable id (reset per test).
let modelIdCounter = 0
/**
 * Builds a ModelAndParameter fixture (ids: model-1, model-2, ...).
 * Pass `overrides` to pin specific fields for a test.
 */
const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => {
  modelIdCounter += 1
  return {
    id: `model-${modelIdCounter}`,
    model: 'gpt-3.5-turbo',
    provider: 'openai',
    parameters: { temperature: 0.7 },
    ...overrides,
  }
}
// Baseline modelConfig fixture mirroring the debug-configuration context shape.
// Tests spread this and override individual fields (e.g. agentConfig), so the
// literal layout is intentional; values come from production defaults where
// the real context would supply them.
const createDefaultModelConfig = () => ({
  provider: 'openai',
  model_id: 'gpt-4',
  mode: ModelModeType.chat,
  configs: {
    prompt_template: 'Hello {{name}}',
    // Includes one 'api' variable: inputsForm tests expect it to be filtered out.
    prompt_variables: [
      { key: 'name', name: 'Name', type: 'string' as const },
      { key: 'api-var', name: 'API Var', type: 'api' as const },
    ],
  },
  chat_prompt_config: DEFAULT_CHAT_PROMPT_CONFIG,
  completion_prompt_config: DEFAULT_COMPLETION_PROMPT_CONFIG,
  opening_statement: '',
  more_like_this: null,
  suggested_questions: [],
  suggested_questions_after_answer: null,
  speech_to_text: null,
  text_to_speech: null,
  file_upload: null,
  retriever_resource: null,
  sensitive_word_avoidance: null,
  annotation_reply: null,
  external_data_tools: [],
  dataSets: [],
  agentConfig: DEFAULT_AGENT_SETTING,
  system_parameters: {
    audio_file_size_limit: 0,
    file_size_limit: 0,
    image_file_size_limit: 0,
    video_file_size_limit: 0,
    workflow_file_upload_limit: 0,
  },
})
/**
 * Feature-flag state as served by the (mocked) features store. Returns a fresh
 * object per call — tests mutate individual entries (e.g. `features.opening`).
 */
const createDefaultFeatures = () => {
  const off = { enabled: false }
  return {
    moreLikeThis: { ...off },
    opening: { enabled: true, opening_statement: 'Hello', suggested_questions: ['Q1'] },
    moderation: { ...off },
    speech2text: { enabled: true },
    text2speech: { ...off },
    file: { enabled: true, image: { enabled: true } },
    suggested: { enabled: true },
    citation: { ...off },
    annotationReply: { ...off },
  }
}
/**
 * Groups flat model descriptors by provider into the shape exposed by
 * useProviderContext().textGenerationModelList.
 * Defaults: features -> [], mode -> 'chat'. Provider order follows first
 * appearance in the input (Map preserves insertion order).
 */
const createTextGenerationModelList = (models: Array<{
  provider: string
  model: string
  features?: string[]
  mode?: string
}> = []) => {
  type ModelEntry = { model: string, features: string[], model_properties: { mode: string } }
  const grouped = models.reduce((acc, { provider, model, features, mode }) => {
    const bucket = acc.get(provider) ?? []
    bucket.push({
      model,
      features: features ?? [],
      model_properties: { mode: mode ?? 'chat' },
    })
    acc.set(provider, bucket)
    return acc
  }, new Map<string, ModelEntry[]>())
  return [...grouped.entries()].map(([provider, providerModels]) => ({
    provider,
    models: providerModels,
  }))
}
// Unit tests for <ChatItem/>. Every collaborator (contexts, useChat, the Chat
// component itself) is mocked above, so these assertions exercise the props and
// callbacks ChatItem wires together rather than real chat behaviour.
describe('ChatItem', () => {
  // Captured from the mocked event emitter so tests can fire
  // APP_CHAT_WITH_MULTIPLE_MODEL(_RESTART) events by hand.
  let subscriptionCallback: ((v: { type: string, payload?: { message: string, files?: unknown[] } }) => void) | null = null
  beforeEach(() => {
    vi.clearAllMocks()
    modelIdCounter = 0
    capturedChatProps = null
    subscriptionCallback = null
    mockUseAppContext.mockReturnValue({
      userProfile: { avatar_url: 'avatar.png', name: 'Test User' },
    })
    mockUseDebugConfigurationContext.mockReturnValue({
      modelConfig: createDefaultModelConfig(),
      appId: 'test-app-id',
      inputs: { name: 'World' },
      collectionList: [],
    })
    // gpt-3.5-turbo supports vision, gpt-4 does not — doSend tests rely on this.
    mockUseProviderContext.mockReturnValue({
      textGenerationModelList: createTextGenerationModelList([
        { provider: 'openai', model: 'gpt-3.5-turbo', features: [ModelFeatureEnum.vision], mode: 'chat' },
        { provider: 'openai', model: 'gpt-4', features: [], mode: 'chat' },
      ]),
    })
    const features = createDefaultFeatures()
    mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
    mockUseConfigFromDebugContext.mockReturnValue({
      baseConfig: true,
    })
    mockUseChat.mockReturnValue({
      chatList: [{ id: 'msg-1', content: 'Hello' }],
      isResponding: false,
      handleSend: vi.fn(),
      suggestedQuestions: [],
      handleRestart: vi.fn(),
    })
    mockUseEventEmitterContextContext.mockReturnValue({
      eventEmitter: {
        // eslint-disable-next-line react/no-unnecessary-use-prefix -- mocking real API
        useSubscription: (callback: (v: { type: string, payload?: { message: string, files?: unknown[] } }) => void) => {
          subscriptionCallback = callback
        },
      },
    })
  })
  describe('rendering', () => {
    it('should render Chat component when chatList is not empty', () => {
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(screen.getByTestId('chat-component')).toBeInTheDocument()
    })
    it('should return null when chatList is empty', () => {
      mockUseChat.mockReturnValue({
        chatList: [],
        isResponding: false,
        handleSend: vi.fn(),
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter()
      const { container } = render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(container.firstChild).toBeNull()
    })
    it('should pass correct props to Chat component', () => {
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(capturedChatProps!.noChatInput).toBe(true)
      expect(capturedChatProps!.noStopResponding).toBe(true)
      expect(capturedChatProps!.showPromptLog).toBe(true)
      expect(capturedChatProps!.hideLogModal).toBe(true)
      expect(capturedChatProps!.noSpacing).toBe(true)
      expect(capturedChatProps!.chatContainerClassName).toBe('p-4')
      expect(capturedChatProps!.chatFooterClassName).toBe('p-4 pb-0')
    })
  })
  // How ChatItem merges the debug-context config template with feature flags.
  describe('config building', () => {
    it('should merge configTemplate with features', () => {
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const config = capturedChatProps!.config as ChatConfig & { baseConfig?: boolean }
      expect(config.baseConfig).toBe(true)
      expect(config.more_like_this).toEqual({ enabled: false })
      expect(config.opening_statement).toBe('Hello')
      expect(config.suggested_questions).toEqual(['Q1'])
      expect(config.speech_to_text).toEqual({ enabled: true })
      expect(config.file_upload).toEqual({ enabled: true, image: { enabled: true } })
    })
    it('should use empty opening_statement when opening is disabled', () => {
      const features = createDefaultFeatures()
      features.opening = { enabled: false, opening_statement: 'Hello', suggested_questions: ['Q1'] }
      mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const config = capturedChatProps!.config as ChatConfig
      expect(config.opening_statement).toBe('')
      expect(config.suggested_questions).toEqual([])
    })
    it('should use empty string fallback when opening_statement is undefined', () => {
      const features = createDefaultFeatures()
      // eslint-disable-next-line ts/no-explicit-any -- Testing edge case with undefined
      features.opening = { enabled: true, opening_statement: undefined as any, suggested_questions: ['Q1'] }
      mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const config = capturedChatProps!.config as ChatConfig
      expect(config.opening_statement).toBe('')
    })
    it('should use empty array fallback when suggested_questions is undefined', () => {
      const features = createDefaultFeatures()
      // eslint-disable-next-line ts/no-explicit-any -- Testing edge case with undefined
      features.opening = { enabled: true, opening_statement: 'Hello', suggested_questions: undefined as any }
      mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const config = capturedChatProps!.config as ChatConfig
      expect(config.suggested_questions).toEqual([])
    })
    it('should handle undefined opening feature', () => {
      const features = createDefaultFeatures()
      // eslint-disable-next-line ts/no-explicit-any -- Testing edge case with undefined
      features.opening = undefined as any
      mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const config = capturedChatProps!.config as ChatConfig
      expect(config.opening_statement).toBe('')
      expect(config.suggested_questions).toEqual([])
    })
  })
  describe('inputsForm transformation', () => {
    it('should filter out api type variables and map to InputForm', () => {
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      // The useChat is called with inputsForm
      const useChatCall = mockUseChat.mock.calls[0]
      const inputsForm = useChatCall[1].inputsForm
      expect(inputsForm).toHaveLength(1)
      expect(inputsForm[0]).toEqual(expect.objectContaining({
        key: 'name',
        label: 'Name',
        variable: 'name',
      }))
    })
  })
  describe('event subscription', () => {
    it('should handle APP_CHAT_WITH_MULTIPLE_MODEL event', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      // Trigger the event
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test message', files: [{ id: 'file-1' }] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
      })
    })
    it('should handle APP_CHAT_WITH_MULTIPLE_MODEL_RESTART event', async () => {
      const handleRestart = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend: vi.fn(),
        suggestedQuestions: [],
        handleRestart,
      })
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      // Trigger the event
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL_RESTART,
      })
      await waitFor(() => {
        expect(handleRestart).toHaveBeenCalled()
      })
    })
  })
  // doSend: the payload ChatItem builds when a multi-model send event arrives.
  describe('doSend', () => {
    it('should find current provider and model from textGenerationModelList', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-3.5-turbo' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: [] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalledWith(
          'apps/test-app-id/chat-messages',
          expect.objectContaining({
            query: 'test',
            inputs: { name: 'World' },
            model_config: expect.objectContaining({
              model: expect.objectContaining({
                provider: 'openai',
                name: 'gpt-3.5-turbo',
                mode: 'chat',
              }),
            }),
          }),
          expect.any(Object),
        )
      })
    })
    it('should include files when file upload is enabled and vision is supported', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      // gpt-3.5-turbo has vision feature
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-3.5-turbo' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const files = [{ id: 'file-1', name: 'image.png' }]
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalledWith(
          expect.any(String),
          expect.objectContaining({
            files,
          }),
          expect.any(Object),
        )
      })
    })
    it('should not include files when vision is not supported', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      // gpt-4 does not have vision feature
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-4' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      const files = [{ id: 'file-1', name: 'image.png' }]
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files },
      })
      await waitFor(() => {
        const callArgs = handleSend.mock.calls[0][1]
        expect(callArgs.files).toBeUndefined()
      })
    })
    it('should handle provider not found in textGenerationModelList', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      // Use a provider that doesn't exist in the list
      const modelAndParameter = createModelAndParameter({ provider: 'unknown-provider', model: 'unknown-model' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: [{ id: 'file-1' }] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
        const callArgs = handleSend.mock.calls[0][1]
        // Files should not be included when provider/model not found (no vision support)
        expect(callArgs.files).toBeUndefined()
      })
    })
    it('should handle model with no features array', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      // Model list where model has no features property
      mockUseProviderContext.mockReturnValue({
        textGenerationModelList: [
          {
            provider: 'custom',
            models: [{ model: 'custom-model', model_properties: { mode: 'chat' } }],
          },
        ],
      })
      const modelAndParameter = createModelAndParameter({ provider: 'custom', model: 'custom-model' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: [{ id: 'file-1' }] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
        const callArgs = handleSend.mock.calls[0][1]
        // Files should not be included when features is undefined
        expect(callArgs.files).toBeUndefined()
      })
    })
    it('should handle undefined files parameter', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-3.5-turbo' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: undefined },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
        const callArgs = handleSend.mock.calls[0][1]
        expect(callArgs.files).toBeUndefined()
      })
    })
  })
  // Tool icons are resolved by matching agent tools against the collection list
  // via the mocked canFindTool (id equality).
  describe('tool icons building', () => {
    it('should build tool icons from agent config', () => {
      mockUseDebugConfigurationContext.mockReturnValue({
        modelConfig: {
          ...createDefaultModelConfig(),
          agentConfig: {
            tools: [
              { tool_name: 'search', provider_id: 'provider-1' },
              { tool_name: 'calculator', provider_id: 'provider-2' },
            ],
          },
        },
        appId: 'test-app-id',
        inputs: {},
        collectionList: [
          { id: 'provider-1', icon: 'search-icon' },
          { id: 'provider-2', icon: 'calc-icon' },
        ],
      })
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(capturedChatProps!.allToolIcons).toEqual({
        search: 'search-icon',
        calculator: 'calc-icon',
      })
    })
    it('should handle missing tools gracefully', () => {
      mockUseDebugConfigurationContext.mockReturnValue({
        modelConfig: {
          ...createDefaultModelConfig(),
          agentConfig: {
            tools: undefined,
          },
        },
        appId: 'test-app-id',
        inputs: {},
        collectionList: [],
      })
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(capturedChatProps!.allToolIcons).toEqual({})
    })
  })
  describe('useFormattingChangedSubscription', () => {
    it('should call useFormattingChangedSubscription with chatList', () => {
      const chatList = [{ id: 'msg-1' }, { id: 'msg-2' }]
      mockUseChat.mockReturnValue({
        chatList,
        isResponding: false,
        handleSend: vi.fn(),
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      expect(mockUseFormattingChangedSubscription).toHaveBeenCalledWith(chatList)
    })
  })
  describe('useChat callbacks', () => {
    it('should pass stopChatMessageResponding callback to useChat', () => {
      const modelAndParameter = createModelAndParameter()
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      // Get the stopResponding callback passed to useChat (4th argument)
      const useChatCall = mockUseChat.mock.calls[0]
      const stopRespondingCallback = useChatCall[3]
      // Invoke it with a taskId
      stopRespondingCallback('test-task-id')
      expect(mockStopChatMessageResponding).toHaveBeenCalledWith('test-app-id', 'test-task-id')
    })
    it('should pass onGetConversationMessages callback to handleSend', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-3.5-turbo' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: [] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
      })
      // Get the callbacks object (3rd argument to handleSend)
      const callbacks = handleSend.mock.calls[0][2]
      // Invoke onGetConversationMessages
      const mockGetAbortController = vi.fn()
      callbacks.onGetConversationMessages('conv-123', mockGetAbortController)
      expect(mockFetchConversationMessages).toHaveBeenCalledWith('test-app-id', 'conv-123', mockGetAbortController)
    })
    it('should pass onGetSuggestedQuestions callback to handleSend', async () => {
      const handleSend = vi.fn()
      mockUseChat.mockReturnValue({
        chatList: [{ id: 'msg-1' }],
        isResponding: false,
        handleSend,
        suggestedQuestions: [],
        handleRestart: vi.fn(),
      })
      const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-3.5-turbo' })
      render(<ChatItem modelAndParameter={modelAndParameter} />)
      subscriptionCallback?.({
        type: APP_CHAT_WITH_MULTIPLE_MODEL,
        payload: { message: 'test', files: [] },
      })
      await waitFor(() => {
        expect(handleSend).toHaveBeenCalled()
      })
      // Get the callbacks object (3rd argument to handleSend)
      const callbacks = handleSend.mock.calls[0][2]
      // Invoke onGetSuggestedQuestions
      const mockGetAbortController = vi.fn()
      callbacks.onGetSuggestedQuestions('response-item-123', mockGetAbortController)
      expect(mockFetchSuggestedQuestions).toHaveBeenCalledWith('test-app-id', 'response-item-123', mockGetAbortController)
    })
  })
})

View File

@@ -0,0 +1,599 @@
import type { ModelAndParameter } from '../types'
import { fireEvent, render, screen } from '@testing-library/react'
import { ModelStatusEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { AppModeEnum } from '@/types/app'
import DebugItem from './debug-item'
// One vi.fn per mocked hook; vi.mock factories are hoisted, so they reference
// these only through closures. Tests swap return values via mockReturnValue.
const mockUseTranslation = vi.fn()
const mockUseDebugConfigurationContext = vi.fn()
const mockUseDebugWithMultipleModelContext = vi.fn()
const mockUseProviderContext = vi.fn()
vi.mock('react-i18next', () => ({
  useTranslation: () => mockUseTranslation(),
}))
vi.mock('@/context/debug-configuration', () => ({
  useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
}))
vi.mock('./context', () => ({
  useDebugWithMultipleModelContext: () => mockUseDebugWithMultipleModelContext(),
}))
vi.mock('@/context/provider-context', () => ({
  useProviderContext: () => mockUseProviderContext(),
}))
// Child components are stubbed to simple divs carrying the model id, so tests
// can assert which child DebugItem chose to render without real chat logic.
vi.mock('./chat-item', () => ({
  default: ({ modelAndParameter }: { modelAndParameter: ModelAndParameter }) => (
    <div data-testid="chat-item" data-model-id={modelAndParameter.id}>ChatItem</div>
  ),
}))
vi.mock('./text-generation-item', () => ({
  default: ({ modelAndParameter }: { modelAndParameter: ModelAndParameter }) => (
    <div data-testid="text-generation-item" data-model-id={modelAndParameter.id}>TextGenerationItem</div>
  ),
}))
vi.mock('./model-parameter-trigger', () => ({
  default: ({ modelAndParameter }: { modelAndParameter: ModelAndParameter }) => (
    <div data-testid="model-parameter-trigger" data-model-id={modelAndParameter.id}>ModelParameterTrigger</div>
  ),
}))
type DropdownItem = { value: string, text: string }
type DropdownProps = {
  items?: DropdownItem[]
  secondItems?: DropdownItem[]
  onSelect: (item: DropdownItem) => void
}
// The Dropdown stub captures its props and renders every item/secondItem as a
// clickable button (data-testid keyed by value) so tests can invoke onSelect.
let capturedDropdownProps: DropdownProps | null = null
vi.mock('@/app/components/base/dropdown', () => ({
  default: (props: DropdownProps) => {
    capturedDropdownProps = props
    return (
      <div data-testid="dropdown">
        <button
          type="button"
          data-testid="dropdown-trigger"
          onClick={() => {
            // Mock dropdown menu showing items
          }}
        >
          Dropdown
        </button>
        {props.items?.map((item: DropdownItem) => (
          <button
            key={item.value}
            type="button"
            data-testid={`dropdown-item-${item.value}`}
            onClick={() => props.onSelect(item)}
          >
            {item.text}
          </button>
        ))}
        {props.secondItems?.map((item: DropdownItem) => (
          <button
            key={item.value}
            type="button"
            data-testid={`dropdown-second-item-${item.value}`}
            onClick={() => props.onSelect(item)}
          >
            {item.text}
          </button>
        ))}
      </div>
    )
  },
}))
// Monotonic counter so each fixture gets a unique, predictable id (reset per test).
let modelIdCounter = 0
/**
 * Builds a ModelAndParameter fixture (ids: model-1, model-2, ...).
 * Pass `overrides` to pin specific fields for a test.
 */
const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => {
  modelIdCounter += 1
  return {
    id: `model-${modelIdCounter}`,
    model: 'gpt-3.5-turbo',
    provider: 'openai',
    parameters: {},
    ...overrides,
  }
}
/**
 * Groups flat model descriptors by provider into the shape exposed by
 * useProviderContext().textGenerationModelList.
 * Defaults: status -> active, mode -> 'chat', features -> [].
 * Provider order follows first appearance (Map preserves insertion order).
 */
const createTextGenerationModelList = (models: Array<{ provider: string, model: string, status?: ModelStatusEnum }> = []) => {
  type ModelEntry = { model: string, status: ModelStatusEnum, model_properties: { mode: string }, features: string[] }
  const grouped = models.reduce((acc, { provider, model, status }) => {
    const bucket = acc.get(provider) ?? []
    bucket.push({
      model,
      status: status ?? ModelStatusEnum.active,
      model_properties: { mode: 'chat' },
      features: [],
    })
    acc.set(provider, bucket)
    return acc
  }, new Map<string, ModelEntry[]>())
  return [...grouped.entries()].map(([provider, providerModels]) => ({
    provider,
    models: providerModels,
  }))
}
describe('DebugItem', () => {
beforeEach(() => {
vi.clearAllMocks()
modelIdCounter = 0
capturedDropdownProps = null
mockUseTranslation.mockReturnValue({
t: (key: string) => key,
})
mockUseDebugConfigurationContext.mockReturnValue({
mode: AppModeEnum.CHAT,
})
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
mockUseProviderContext.mockReturnValue({
textGenerationModelList: [],
})
})
describe('rendering', () => {
it('should render with index number', () => {
const modelAndParameter = createModelAndParameter({ id: 'model-a' })
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [modelAndParameter],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
render(<DebugItem modelAndParameter={modelAndParameter} />)
expect(screen.getByText('#1')).toBeInTheDocument()
})
it('should render correct index for second model', () => {
const model1 = createModelAndParameter({ id: 'model-a' })
const model2 = createModelAndParameter({ id: 'model-b' })
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [model1, model2],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
render(<DebugItem modelAndParameter={model2} />)
expect(screen.getByText('#2')).toBeInTheDocument()
})
it('should render ModelParameterTrigger', () => {
const modelAndParameter = createModelAndParameter()
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [modelAndParameter],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
render(<DebugItem modelAndParameter={modelAndParameter} />)
expect(screen.getByTestId('model-parameter-trigger')).toBeInTheDocument()
})
it('should render Dropdown', () => {
const modelAndParameter = createModelAndParameter()
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [modelAndParameter],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
render(<DebugItem modelAndParameter={modelAndParameter} />)
expect(screen.getByTestId('dropdown')).toBeInTheDocument()
})
it('should apply custom className', () => {
const modelAndParameter = createModelAndParameter()
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [modelAndParameter],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
const { container } = render(<DebugItem modelAndParameter={modelAndParameter} className="custom-class" />)
expect(container.firstChild).toHaveClass('custom-class')
})
it('should apply custom style', () => {
const modelAndParameter = createModelAndParameter()
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [modelAndParameter],
onMultipleModelConfigsChange: vi.fn(),
onDebugWithMultipleModelChange: vi.fn(),
})
const { container } = render(<DebugItem modelAndParameter={modelAndParameter} style={{ width: '300px' }} />)
expect(container.firstChild).toHaveStyle({ width: '300px' })
})
})
describe('ChatItem rendering', () => {
  // Wires all three context mocks for a single-model scenario and mounts
  // DebugItem: `configured` is the entry under test, `available` is the one
  // provider/model exposed by the provider context.
  const mountChatScenario = (options: {
    mode: AppModeEnum
    configured: { provider: string, model: string }
    available: { provider: string, model: string, status: ModelStatusEnum }
  }) => {
    const modelAndParameter = createModelAndParameter(options.configured)
    mockUseDebugConfigurationContext.mockReturnValue({
      mode: options.mode,
    })
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [modelAndParameter],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    mockUseProviderContext.mockReturnValue({
      textGenerationModelList: createTextGenerationModelList([options.available]),
    })
    render(<DebugItem modelAndParameter={modelAndParameter} />)
  }
  it('should render ChatItem in CHAT mode with active model', () => {
    mountChatScenario({
      mode: AppModeEnum.CHAT,
      configured: { provider: 'openai', model: 'gpt-4' },
      available: { provider: 'openai', model: 'gpt-4', status: ModelStatusEnum.active },
    })
    expect(screen.getByTestId('chat-item')).toBeInTheDocument()
  })
  it('should render ChatItem in AGENT_CHAT mode with active model', () => {
    mountChatScenario({
      mode: AppModeEnum.AGENT_CHAT,
      configured: { provider: 'openai', model: 'gpt-4' },
      available: { provider: 'openai', model: 'gpt-4', status: ModelStatusEnum.active },
    })
    expect(screen.getByTestId('chat-item')).toBeInTheDocument()
  })
  it('should not render ChatItem when model is not active', () => {
    mountChatScenario({
      mode: AppModeEnum.CHAT,
      configured: { provider: 'openai', model: 'gpt-4' },
      available: { provider: 'openai', model: 'gpt-4', status: ModelStatusEnum.disabled },
    })
    expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
  })
  it('should not render ChatItem when provider not found', () => {
    // The configured provider does not exist in the provider list at all.
    mountChatScenario({
      mode: AppModeEnum.CHAT,
      configured: { provider: 'unknown', model: 'model' },
      available: { provider: 'openai', model: 'gpt-4', status: ModelStatusEnum.active },
    })
    expect(screen.queryByTestId('chat-item')).not.toBeInTheDocument()
  })
})
describe('TextGenerationItem rendering', () => {
  // Same scenario wiring as the ChatItem suite: set app mode, configure the
  // entry under test, expose exactly one provider/model, then mount.
  const mountCompletionScenario = (options: {
    mode: AppModeEnum
    status: ModelStatusEnum
  }) => {
    const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-4' })
    mockUseDebugConfigurationContext.mockReturnValue({
      mode: options.mode,
    })
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [modelAndParameter],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    mockUseProviderContext.mockReturnValue({
      textGenerationModelList: createTextGenerationModelList([
        { provider: 'openai', model: 'gpt-4', status: options.status },
      ]),
    })
    render(<DebugItem modelAndParameter={modelAndParameter} />)
  }
  it('should render TextGenerationItem in COMPLETION mode with active model', () => {
    mountCompletionScenario({ mode: AppModeEnum.COMPLETION, status: ModelStatusEnum.active })
    expect(screen.getByTestId('text-generation-item')).toBeInTheDocument()
  })
  it('should not render TextGenerationItem when model is not active', () => {
    mountCompletionScenario({ mode: AppModeEnum.COMPLETION, status: ModelStatusEnum.disabled })
    expect(screen.queryByTestId('text-generation-item')).not.toBeInTheDocument()
  })
  it('should not render TextGenerationItem in CHAT mode', () => {
    mountCompletionScenario({ mode: AppModeEnum.CHAT, status: ModelStatusEnum.active })
    expect(screen.queryByTestId('text-generation-item')).not.toBeInTheDocument()
  })
})
describe('dropdown menu items', () => {
  // Installs the multiple-model context with `target` plus any siblings and
  // mounts DebugItem; assertions read the props captured by the Dropdown mock.
  const mountWithSiblings = (target: ModelAndParameter, siblings: ModelAndParameter[] = []) => {
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [target, ...siblings],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<DebugItem modelAndParameter={target} />)
  }
  it('should show duplicate option when less than 4 models', () => {
    mountWithSiblings(
      createModelAndParameter({ provider: 'openai', model: 'gpt-4' }),
      [createModelAndParameter()],
    )
    expect(capturedDropdownProps!.items).toContainEqual(
      expect.objectContaining({ value: 'duplicate' }),
    )
  })
  it('should hide duplicate option when 4 or more models', () => {
    mountWithSiblings(
      createModelAndParameter({ provider: 'openai', model: 'gpt-4' }),
      [createModelAndParameter(), createModelAndParameter(), createModelAndParameter()],
    )
    expect(capturedDropdownProps!.items).not.toContainEqual(
      expect.objectContaining({ value: 'duplicate' }),
    )
  })
  it('should show debug-as-single-model when provider and model are set', () => {
    mountWithSiblings(createModelAndParameter({ provider: 'openai', model: 'gpt-4' }))
    expect(capturedDropdownProps!.items).toContainEqual(
      expect.objectContaining({ value: 'debug-as-single-model' }),
    )
  })
  it('should hide debug-as-single-model when provider is missing', () => {
    mountWithSiblings(createModelAndParameter({ provider: '', model: 'gpt-4' }))
    expect(capturedDropdownProps!.items).not.toContainEqual(
      expect.objectContaining({ value: 'debug-as-single-model' }),
    )
  })
  it('should hide debug-as-single-model when model is missing', () => {
    mountWithSiblings(createModelAndParameter({ provider: 'openai', model: '' }))
    expect(capturedDropdownProps!.items).not.toContainEqual(
      expect.objectContaining({ value: 'debug-as-single-model' }),
    )
  })
  it('should show remove option when more than 2 models', () => {
    mountWithSiblings(
      createModelAndParameter({ provider: 'openai', model: 'gpt-4' }),
      [createModelAndParameter(), createModelAndParameter()],
    )
    expect(capturedDropdownProps!.secondItems).toContainEqual(
      expect.objectContaining({ value: 'remove' }),
    )
  })
  it('should hide remove option when 2 or fewer models', () => {
    mountWithSiblings(
      createModelAndParameter({ provider: 'openai', model: 'gpt-4' }),
      [createModelAndParameter()],
    )
    expect(capturedDropdownProps!.secondItems).toBeUndefined()
  })
})
describe('dropdown actions', () => {
  // Installs the context with `target` plus `siblings`, mounts DebugItem for
  // `target`, and returns fresh spies so each test can assert on callbacks.
  const mountForActions = (target: ModelAndParameter, siblings: ModelAndParameter[] = []) => {
    const onMultipleModelConfigsChange = vi.fn()
    const onDebugWithMultipleModelChange = vi.fn()
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [target, ...siblings],
      onMultipleModelConfigsChange,
      onDebugWithMultipleModelChange,
    })
    render(<DebugItem modelAndParameter={target} />)
    return { onMultipleModelConfigsChange, onDebugWithMultipleModelChange }
  }
  it('should duplicate model when clicking duplicate', () => {
    const target = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-4' })
    const { onMultipleModelConfigsChange } = mountForActions(target, [
      createModelAndParameter({ id: 'model-b' }),
    ])
    fireEvent.click(screen.getByTestId('dropdown-item-duplicate'))
    // The copy keeps provider/model but gets a fresh id, inserted after target.
    expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
      true,
      expect.arrayContaining([
        expect.objectContaining({ id: 'model-a' }),
        expect.objectContaining({ provider: 'openai', model: 'gpt-4' }),
        expect.objectContaining({ id: 'model-b' }),
      ]),
    )
    expect(onMultipleModelConfigsChange.mock.calls[0][1]).toHaveLength(3)
  })
  it('should not duplicate when already at 4 models', () => {
    const target = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-4' })
    mountForActions(target, [
      createModelAndParameter(),
      createModelAndParameter(),
      createModelAndParameter(),
    ])
    // Duplicate option should not be rendered when at 4 models
    expect(screen.queryByTestId('dropdown-item-duplicate')).not.toBeInTheDocument()
  })
  it('should early return when trying to duplicate with 4 models via handleSelect', () => {
    const target = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-4' })
    const { onMultipleModelConfigsChange } = mountForActions(target, [
      createModelAndParameter(),
      createModelAndParameter(),
      createModelAndParameter(),
    ])
    // Directly call handleSelect with duplicate action to cover line 42
    capturedDropdownProps!.onSelect({ value: 'duplicate', text: 'Duplicate' })
    // Should not call onMultipleModelConfigsChange due to early return
    expect(onMultipleModelConfigsChange).not.toHaveBeenCalled()
  })
  it('should call onDebugWithMultipleModelChange when clicking debug-as-single-model', () => {
    const target = createModelAndParameter({ provider: 'openai', model: 'gpt-4' })
    const { onDebugWithMultipleModelChange } = mountForActions(target)
    fireEvent.click(screen.getByTestId('dropdown-item-debug-as-single-model'))
    expect(onDebugWithMultipleModelChange).toHaveBeenCalledWith(target)
  })
  it('should remove model when clicking remove', () => {
    const target = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-4' })
    const { onMultipleModelConfigsChange } = mountForActions(target, [
      createModelAndParameter({ id: 'model-b' }),
      createModelAndParameter({ id: 'model-c' }),
    ])
    fireEvent.click(screen.getByTestId('dropdown-second-item-remove'))
    expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
      true,
      [
        expect.objectContaining({ id: 'model-b' }),
        expect.objectContaining({ id: 'model-c' }),
      ],
    )
  })
})
})

View File

@@ -1,6 +1,7 @@
import type { FC } from 'react'
import type { DebugWithMultipleModelContextType } from './context'
import type { InputForm } from '@/app/components/base/chat/chat/type'
import type { EnableType } from '@/app/components/base/chat/types'
import type { FileEntity } from '@/app/components/base/file-uploader/types'
import {
memo,
@@ -40,13 +41,7 @@ const DebugWithMultipleModel = () => {
if (checkCanSend && !checkCanSend())
return
eventEmitter?.emit({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: {
message,
files,
},
} as any)
eventEmitter?.emit({ type: APP_CHAT_WITH_MULTIPLE_MODEL, payload: { message, files } } as any) // eslint-disable-line ts/no-explicit-any
}, [eventEmitter, checkCanSend])
const twoLine = multipleModelConfigs.length === 2
@@ -147,7 +142,7 @@ const DebugWithMultipleModel = () => {
showFileUpload={false}
onFeatureBarClick={setShowAppConfigureFeaturesModal}
onSend={handleSend}
speechToTextConfig={speech2text as any}
speechToTextConfig={speech2text as EnableType}
visionConfig={file}
inputs={inputs}
inputsForm={inputsForm}

View File

@@ -0,0 +1,436 @@
import type * as React from 'react'
import type { ModelAndParameter } from '../types'
import { fireEvent, render, screen } from '@testing-library/react'
import { ModelStatusEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import ModelParameterTrigger from './model-parameter-trigger'
// Mock MODEL_STATUS_TEXT that is imported in the component
vi.mock('@/app/components/header/account-setting/model-provider-page/declarations', async (importOriginal) => {
  const original = await importOriginal() as object
  return {
    ...original,
    // Minimal status→label table; keys mirror the inactive ModelStatusEnum values.
    MODEL_STATUS_TEXT: {
      'disabled': { en_US: 'Disabled', zh_Hans: '已禁用' },
      'quota-exceeded': { en_US: 'Quota Exceeded', zh_Hans: '配额已用完' },
      'no-configure': { en_US: 'No Configure', zh_Hans: '未配置凭据' },
    },
  }
})
// Hoisted spies backing the hook mocks below; tests reprogram them per case.
const mockUseTranslation = vi.fn()
const mockUseDebugConfigurationContext = vi.fn()
const mockUseDebugWithMultipleModelContext = vi.fn()
const mockUseLanguage = vi.fn()
vi.mock('react-i18next', () => ({
  useTranslation: () => mockUseTranslation(),
}))
vi.mock('@/context/debug-configuration', () => ({
  useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
}))
vi.mock('./context', () => ({
  useDebugWithMultipleModelContext: () => mockUseDebugWithMultipleModelContext(),
}))
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
  useLanguage: () => mockUseLanguage(),
}))
// Shape of the argument ModelParameterModal passes to its renderTrigger prop.
type RenderTriggerParams = {
  open: boolean
  currentProvider: { provider: string, icon: string } | null
  currentModel: { model: string, status: ModelStatusEnum } | null
}
// Subset of the ModelParameterModal props these tests inspect or invoke.
type ModalProps = {
  provider: string
  modelId: string
  isAdvancedMode: boolean
  completionParams: Record<string, unknown>
  debugWithMultipleModel: boolean
  setModel: (model: { modelId: string, provider: string }) => void
  onCompletionParamsChange: (params: Record<string, unknown>) => void
  onDebugWithMultipleModelChange: () => void
  renderTrigger: (params: RenderTriggerParams) => React.ReactElement
}
// Captured by the modal stub on every render; reset in beforeEach.
let capturedModalProps: ModalProps | null = null
let mockRenderTriggerFn: ((params: RenderTriggerParams) => React.ReactElement) | null = null
// Stub ModelParameterModal: captures the props it receives, renders the real
// renderTrigger once with an "active" default state, and exposes buttons that
// forward to the modal callbacks so tests can fire them via the DOM.
vi.mock('@/app/components/header/account-setting/model-provider-page/model-parameter-modal', () => ({
  default: (props: ModalProps) => {
    capturedModalProps = props
    mockRenderTriggerFn = props.renderTrigger
    // Render the trigger with some mock data
    const triggerElement = props.renderTrigger({
      open: false,
      currentProvider: props.provider
        ? { provider: props.provider, icon: 'provider-icon' }
        : null,
      currentModel: props.modelId
        ? { model: props.modelId, status: ModelStatusEnum.active }
        : null,
    })
    return (
      <div data-testid="model-parameter-modal">
        {triggerElement}
        <button
          type="button"
          data-testid="select-model-btn"
          onClick={() => props.setModel({ modelId: 'new-model', provider: 'new-provider' })}
        >
          Select Model
        </button>
        <button
          type="button"
          data-testid="change-params-btn"
          onClick={() => props.onCompletionParamsChange({ temperature: 0.9 })}
        >
          Change Params
        </button>
        <button
          type="button"
          data-testid="debug-single-btn"
          onClick={() => props.onDebugWithMultipleModelChange()}
        >
          Debug Single
        </button>
      </div>
    )
  },
}))
// Lightweight stubs for presentational children; they expose their inputs as
// data attributes so assertions can check what the trigger passed down.
vi.mock('@/app/components/header/account-setting/model-provider-page/model-icon', () => ({
  default: ({ provider, modelName }: { provider: { provider: string } | null, modelName?: string }) => (
    <div data-testid="model-icon" data-provider={provider?.provider} data-model={modelName}>
      ModelIcon
    </div>
  ),
}))
vi.mock('@/app/components/header/account-setting/model-provider-page/model-name', () => ({
  default: ({ modelItem }: { modelItem: { model: string } | null }) => (
    <div data-testid="model-name" data-model={modelItem?.model}>
      {modelItem?.model}
    </div>
  ),
}))
vi.mock('@/app/components/base/icons/src/vender/line/shapes', () => ({
  CubeOutline: () => <div data-testid="cube-icon">CubeOutline</div>,
}))
vi.mock('@/app/components/base/icons/src/vender/line/alertsAndFeedback', () => ({
  AlertTriangle: () => <div data-testid="alert-icon">AlertTriangle</div>,
}))
vi.mock('@/app/components/base/tooltip', () => ({
  default: ({ children }: { children: React.ReactNode }) => <div data-testid="tooltip">{children}</div>,
}))
// Monotonic counter giving each fixture a unique, deterministic id; the suite
// resets it in beforeEach so ids are stable within a test.
let modelIdCounter = 0
// Builds a baseline ModelAndParameter fixture; `overrides` win over defaults.
const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => {
  modelIdCounter += 1
  return {
    id: `model-${modelIdCounter}`,
    model: 'gpt-3.5-turbo',
    provider: 'openai',
    parameters: { temperature: 0.7 },
    ...overrides,
  }
}
describe('ModelParameterTrigger', () => {
beforeEach(() => {
  // Reset spies plus the module-level capture state shared across tests.
  vi.clearAllMocks()
  modelIdCounter = 0
  capturedModalProps = null
  mockRenderTriggerFn = null
  // Identity translator: assertions can match on raw i18n keys.
  mockUseTranslation.mockReturnValue({
    t: (key: string) => key,
  })
  mockUseDebugConfigurationContext.mockReturnValue({
    isAdvancedMode: false,
  })
  // Empty default context; individual tests override with their own configs.
  mockUseDebugWithMultipleModelContext.mockReturnValue({
    multipleModelConfigs: [],
    onMultipleModelConfigsChange: vi.fn(),
    onDebugWithMultipleModelChange: vi.fn(),
  })
  mockUseLanguage.mockReturnValue('en_US')
})
describe('rendering', () => {
  // Installs a single-entry context for `config` and mounts the trigger.
  const mountTrigger = (config: ModelAndParameter) => {
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [config],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<ModelParameterTrigger modelAndParameter={config} />)
  }
  it('should render ModelParameterModal with correct props', () => {
    mountTrigger(createModelAndParameter({ provider: 'openai', model: 'gpt-4' }))
    expect(screen.getByTestId('model-parameter-modal')).toBeInTheDocument()
    expect(capturedModalProps!.isAdvancedMode).toBe(false)
    expect(capturedModalProps!.provider).toBe('openai')
    expect(capturedModalProps!.modelId).toBe('gpt-4')
    expect(capturedModalProps!.completionParams).toEqual({ temperature: 0.7 })
    expect(capturedModalProps!.debugWithMultipleModel).toBe(true)
  })
  it('should pass isAdvancedMode from context', () => {
    mockUseDebugConfigurationContext.mockReturnValue({
      isAdvancedMode: true,
    })
    mountTrigger(createModelAndParameter())
    expect(capturedModalProps!.isAdvancedMode).toBe(true)
  })
})
describe('trigger rendering', () => {
  // Single-entry context + mount; tests only vary the configured provider/model.
  const mountTrigger = (config: ModelAndParameter) => {
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [config],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<ModelParameterTrigger modelAndParameter={config} />)
  }
  it('should render model icon when provider exists', () => {
    mountTrigger(createModelAndParameter({ provider: 'openai', model: 'gpt-4' }))
    expect(screen.getByTestId('model-icon')).toBeInTheDocument()
  })
  it('should render cube icon when no provider', () => {
    mountTrigger(createModelAndParameter({ provider: '', model: '' }))
    expect(screen.getByTestId('cube-icon')).toBeInTheDocument()
  })
  it('should render model name when model exists', () => {
    mountTrigger(createModelAndParameter({ provider: 'openai', model: 'gpt-4' }))
    expect(screen.getByTestId('model-name')).toBeInTheDocument()
  })
  it('should render select model text when no model', () => {
    mountTrigger(createModelAndParameter({ provider: '', model: '' }))
    expect(screen.getByText('modelProvider.selectModel')).toBeInTheDocument()
  })
})
describe('handleSelectModel', () => {
  // Mounts the trigger for `target` within `configs`, clicks the stubbed
  // select-model button, and returns the change spy for assertions.
  const mountAndSelect = (configs: ModelAndParameter[], target: ModelAndParameter) => {
    const onMultipleModelConfigsChange = vi.fn()
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: configs,
      onMultipleModelConfigsChange,
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<ModelParameterTrigger modelAndParameter={target} />)
    fireEvent.click(screen.getByTestId('select-model-btn'))
    return onMultipleModelConfigsChange
  }
  it('should update model and provider in configs', () => {
    const first = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-3.5' })
    const second = createModelAndParameter({ id: 'model-b' })
    const spy = mountAndSelect([first, second], first)
    expect(spy).toHaveBeenCalledWith(
      true,
      [
        expect.objectContaining({ id: 'model-a', model: 'new-model', provider: 'new-provider' }),
        expect.objectContaining({ id: 'model-b' }),
      ],
    )
  })
  it('should update correct model when multiple configs exist', () => {
    const first = createModelAndParameter({ id: 'model-a' })
    const second = createModelAndParameter({ id: 'model-b' })
    const third = createModelAndParameter({ id: 'model-c' })
    const spy = mountAndSelect([first, second, third], second)
    expect(spy).toHaveBeenCalledWith(
      true,
      [
        expect.objectContaining({ id: 'model-a' }),
        expect.objectContaining({ id: 'model-b', model: 'new-model', provider: 'new-provider' }),
        expect.objectContaining({ id: 'model-c' }),
      ],
    )
  })
})
describe('handleParamsChange', () => {
  it('should update parameters in configs', () => {
    const target = createModelAndParameter({ id: 'model-a', parameters: { temperature: 0.5 } })
    const sibling = createModelAndParameter({ id: 'model-b' })
    const onMultipleModelConfigsChange = vi.fn()
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [target, sibling],
      onMultipleModelConfigsChange,
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<ModelParameterTrigger modelAndParameter={target} />)
    fireEvent.click(screen.getByTestId('change-params-btn'))
    // Only the clicked entry picks up the new params; the sibling is untouched.
    expect(onMultipleModelConfigsChange).toHaveBeenCalledWith(
      true,
      [
        expect.objectContaining({ id: 'model-a', parameters: { temperature: 0.9 } }),
        expect.objectContaining({ id: 'model-b' }),
      ],
    )
  })
})
describe('onDebugWithMultipleModelChange', () => {
  it('should call onDebugWithMultipleModelChange with current modelAndParameter', () => {
    const current = createModelAndParameter({ id: 'model-a', provider: 'openai', model: 'gpt-4' })
    const onDebugWithMultipleModelChange = vi.fn()
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [current],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange,
    })
    render(<ModelParameterTrigger modelAndParameter={current} />)
    fireEvent.click(screen.getByTestId('debug-single-btn'))
    // The callback must receive the exact config object the trigger renders.
    expect(onDebugWithMultipleModelChange).toHaveBeenCalledWith(current)
  })
})
describe('index finding', () => {
it('should find correct index for model in middle of array', () => {
const model1 = createModelAndParameter({ id: 'model-a' })
const model2 = createModelAndParameter({ id: 'model-b' })
const model3 = createModelAndParameter({ id: 'model-c' })
const onMultipleModelConfigsChange = vi.fn()
mockUseDebugWithMultipleModelContext.mockReturnValue({
multipleModelConfigs: [model1, model2, model3],
onMultipleModelConfigsChange,
onDebugWithMultipleModelChange: vi.fn(),
})
render(<ModelParameterTrigger modelAndParameter={model2} />)
// Verify that the correct index is used by checking the result of handleSelectModel
fireEvent.click(screen.getByTestId('select-model-btn'))
// The second model (index 1) should be updated
const updatedConfigs = onMultipleModelConfigsChange.mock.calls[0][1]
expect(updatedConfigs[0].id).toBe('model-a')
expect(updatedConfigs[1].model).toBe('new-model') // This one should be updated
expect(updatedConfigs[2].id).toBe('model-c')
})
})
describe('renderTrigger styling and states', () => {
  // Mounts the trigger once, then re-invokes the renderTrigger captured by the
  // modal stub with the given params to exercise styling branches the default
  // render does not hit (open state, inactive model statuses).
  const invokeRenderTrigger = (params: RenderTriggerParams) => {
    const modelAndParameter = createModelAndParameter({ provider: 'openai', model: 'gpt-4' })
    mockUseDebugWithMultipleModelContext.mockReturnValue({
      multipleModelConfigs: [modelAndParameter],
      onMultipleModelConfigsChange: vi.fn(),
      onDebugWithMultipleModelChange: vi.fn(),
    })
    render(<ModelParameterTrigger modelAndParameter={modelAndParameter} />)
    return mockRenderTriggerFn!(params)
  }
  it('should render trigger with open state styling', () => {
    const element = invokeRenderTrigger({
      open: true,
      currentProvider: { provider: 'openai', icon: 'provider-icon' },
      currentModel: { model: 'gpt-4', status: ModelStatusEnum.active },
    })
    expect(element).toBeDefined()
  })
  it('should render warning tooltip when model status is not active', () => {
    const element = invokeRenderTrigger({
      open: false,
      currentProvider: { provider: 'openai', icon: 'provider-icon' },
      currentModel: { model: 'gpt-4', status: ModelStatusEnum.disabled },
    })
    expect(element).toBeDefined()
  })
  it('should render warning background and tooltip for inactive model', () => {
    // quota-exceeded is another inactive status that takes the warning branch.
    const element = invokeRenderTrigger({
      open: false,
      currentProvider: { provider: 'openai', icon: 'provider-icon' },
      currentModel: { model: 'gpt-4', status: ModelStatusEnum.quotaExceeded },
    })
    expect(element).toBeDefined()
  })
})
})

View File

@@ -0,0 +1,621 @@
import type { ModelAndParameter } from '../types'
import { render, screen, waitFor } from '@testing-library/react'
import { TransferMethod } from '@/app/components/base/chat/types'
import { DEFAULT_AGENT_SETTING, DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import { ModelModeType } from '@/types/app'
import { APP_CHAT_WITH_MULTIPLE_MODEL } from '../types'
import TextGenerationItem from './text-generation-item'
// Hoisted spies backing the module mocks below; reprogrammed per test.
const mockUseDebugConfigurationContext = vi.fn()
const mockUseProviderContext = vi.fn()
const mockUseFeatures = vi.fn()
const mockUseTextGeneration = vi.fn()
const mockUseEventEmitterContextContext = vi.fn()
const mockPromptVariablesToUserInputsForm = vi.fn()
vi.mock('@/context/debug-configuration', () => ({
  useDebugConfigurationContext: () => mockUseDebugConfigurationContext(),
}))
vi.mock('@/context/provider-context', () => ({
  useProviderContext: () => mockUseProviderContext(),
}))
// Forwards the selector so tests can drive the features-store state themselves.
vi.mock('@/app/components/base/features/hooks', () => ({
  useFeatures: (selector: (state: unknown) => unknown) => mockUseFeatures(selector),
}))
vi.mock('@/app/components/base/text-generation/hooks', () => ({
  useTextGeneration: () => mockUseTextGeneration(),
}))
vi.mock('@/context/event-emitter', () => ({
  useEventEmitterContextContext: () => mockUseEventEmitterContextContext(),
}))
vi.mock('@/utils/model-config', () => ({
  promptVariablesToUserInputsForm: (vars: unknown) => mockPromptVariablesToUserInputsForm(vars),
}))
// Captured by the TextGeneration stub on each render; reset in beforeEach.
let capturedTextGenerationProps: Record<string, unknown> | null = null
vi.mock('@/app/components/app/text-generate/item', () => ({
  default: (props: Record<string, unknown>) => {
    capturedTextGenerationProps = props
    return <div data-testid="text-generation-component">TextGeneration</div>
  },
}))
// Monotonic counter giving each fixture a unique, deterministic id; the suite
// resets it in beforeEach so ids are stable within a test.
let modelIdCounter = 0
// Builds a baseline ModelAndParameter fixture; `overrides` win over defaults.
const createModelAndParameter = (overrides: Partial<ModelAndParameter> = {}): ModelAndParameter => {
  modelIdCounter += 1
  return {
    id: `model-${modelIdCounter}`,
    model: 'gpt-3.5-turbo',
    provider: 'openai',
    parameters: { temperature: 0.7 },
    ...overrides,
  }
}
// Baseline modelConfig fixture mirroring what the debug-configuration context
// supplies: a simple-mode completion prompt with one plain variable and one
// context variable, and all optional feature configs zeroed/nulled out.
const createDefaultModelConfig = () => ({
  provider: 'openai',
  model_id: 'gpt-4',
  mode: ModelModeType.completion,
  configs: {
    prompt_template: 'Hello {{name}}',
    // `context` carries is_context_var so dataset-context code paths are exercised.
    prompt_variables: [
      { key: 'name', name: 'Name', type: 'string' as const, is_context_var: false },
      { key: 'context', name: 'Context', type: 'string' as const, is_context_var: true },
    ],
  },
  chat_prompt_config: DEFAULT_CHAT_PROMPT_CONFIG,
  completion_prompt_config: DEFAULT_COMPLETION_PROMPT_CONFIG,
  opening_statement: '',
  more_like_this: null,
  suggested_questions: [],
  suggested_questions_after_answer: null,
  speech_to_text: null,
  text_to_speech: null,
  file_upload: null,
  retriever_resource: null,
  sensitive_word_avoidance: null,
  annotation_reply: null,
  external_data_tools: [],
  dataSets: [],
  agentConfig: DEFAULT_AGENT_SETTING,
  // All upload limits zero: these tests never exercise file-size validation.
  system_parameters: {
    audio_file_size_limit: 0,
    file_size_limit: 0,
    image_file_size_limit: 0,
    video_file_size_limit: 0,
    workflow_file_upload_limit: 0,
  },
})
// Default features-store snapshot used by the useFeatures mock: more-like-this,
// text-to-speech and file upload on; moderation off.
const createDefaultFeatures = () => {
  const flag = (enabled: boolean) => ({ enabled })
  return {
    moreLikeThis: flag(true),
    moderation: flag(false),
    text2speech: flag(true),
    file: flag(true),
  }
}
// Groups flat { provider, model, mode? } entries into the provider-list shape
// the provider context exposes; insertion order of providers is preserved and
// `mode` defaults to 'completion'.
const createTextGenerationModelList = (models: Array<{
  provider: string
  model: string
  mode?: string
}> = []) => {
  const grouped: Record<string, { model: string, model_properties: { mode: string } }[]> = {}
  for (const { provider, model, mode } of models) {
    grouped[provider] ??= []
    grouped[provider].push({
      model,
      model_properties: { mode: mode ?? 'completion' },
    })
  }
  return Object.entries(grouped).map(([provider, providerModels]) => ({
    provider,
    models: providerModels,
  }))
}
describe('TextGenerationItem', () => {
// Holds the callback TextGenerationItem registers via useSubscription so tests
// can emit APP_CHAT_WITH_MULTIPLE_MODEL events directly.
let subscriptionCallback: ((v: { type: string, payload?: { message: string, files?: unknown[] } }) => void) | null = null
beforeEach(() => {
  // Reset spies plus the module-level capture state shared across tests.
  vi.clearAllMocks()
  modelIdCounter = 0
  capturedTextGenerationProps = null
  subscriptionCallback = null
  // Baseline debug-configuration context; individual tests override fields.
  mockUseDebugConfigurationContext.mockReturnValue({
    isAdvancedMode: false,
    modelConfig: createDefaultModelConfig(),
    appId: 'test-app-id',
    inputs: { name: 'World' },
    promptMode: 'simple',
    speechToTextConfig: { enabled: true },
    introduction: 'Welcome',
    suggestedQuestionsAfterAnswerConfig: { enabled: false },
    citationConfig: { enabled: false },
    externalDataToolsConfig: [],
    chatPromptConfig: DEFAULT_CHAT_PROMPT_CONFIG,
    completionPromptConfig: DEFAULT_COMPLETION_PROMPT_CONFIG,
    dataSets: [{ id: 'ds-1', name: 'Dataset 1' }],
    datasetConfigs: { retrieval_model: 'single' },
  })
  mockUseProviderContext.mockReturnValue({
    textGenerationModelList: createTextGenerationModelList([
      { provider: 'openai', model: 'gpt-3.5-turbo', mode: 'completion' },
      { provider: 'openai', model: 'gpt-4', mode: 'completion' },
    ]),
  })
  const features = createDefaultFeatures()
  mockUseFeatures.mockImplementation((selector: (state: { features: ReturnType<typeof createDefaultFeatures> }) => unknown) => selector({ features }))
  // Default: a finished, non-responding generation with content available.
  mockUseTextGeneration.mockReturnValue({
    completion: 'Generated text',
    handleSend: vi.fn(),
    isResponding: false,
    messageId: 'msg-1',
  })
  // Capture the subscription callback so tests can fire events manually.
  mockUseEventEmitterContextContext.mockReturnValue({
    eventEmitter: {
      // eslint-disable-next-line react/no-unnecessary-use-prefix -- mocking real API
      useSubscription: (callback: (v: { type: string, payload?: { message: string, files?: unknown[] } }) => void) => {
        subscriptionCallback = callback
      },
    },
  })
  mockPromptVariablesToUserInputsForm.mockReturnValue([
    { key: 'name', label: 'Name', variable: 'name' },
  ])
})
describe('rendering', () => {
it('should render TextGeneration component', () => {
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
expect(screen.getByTestId('text-generation-component')).toBeInTheDocument()
})
it('should pass correct props to TextGeneration component', () => {
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
expect(capturedTextGenerationProps!.content).toBe('Generated text')
expect(capturedTextGenerationProps!.isLoading).toBe(false)
expect(capturedTextGenerationProps!.isResponding).toBe(false)
expect(capturedTextGenerationProps!.messageId).toBe('msg-1')
expect(capturedTextGenerationProps!.isError).toBe(false)
expect(capturedTextGenerationProps!.inSidePanel).toBe(true)
expect(capturedTextGenerationProps!.siteInfo).toBeNull()
})
it('should show loading state when no completion and is responding', () => {
mockUseTextGeneration.mockReturnValue({
completion: '',
handleSend: vi.fn(),
isResponding: true,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
expect(capturedTextGenerationProps!.isLoading).toBe(true)
})
it('should not show loading state when has completion', () => {
mockUseTextGeneration.mockReturnValue({
completion: 'Some text',
handleSend: vi.fn(),
isResponding: true,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
expect(capturedTextGenerationProps!.isLoading).toBe(false)
})
})
describe('config building', () => {
it('should build config with correct pre_prompt in simple mode', () => {
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
// The config is built internally, we verify via the handleSend call
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
const handleSend = mockUseTextGeneration().handleSend
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
pre_prompt: 'Hello {{name}}',
}),
}),
)
})
it('should use empty pre_prompt in advanced mode', () => {
mockUseDebugConfigurationContext.mockReturnValue({
...mockUseDebugConfigurationContext(),
isAdvancedMode: true,
modelConfig: createDefaultModelConfig(),
appId: 'test-app-id',
inputs: {},
promptMode: 'advanced',
speechToTextConfig: { enabled: true },
introduction: '',
suggestedQuestionsAfterAnswerConfig: { enabled: false },
citationConfig: { enabled: false },
externalDataToolsConfig: [],
chatPromptConfig: DEFAULT_CHAT_PROMPT_CONFIG,
completionPromptConfig: DEFAULT_COMPLETION_PROMPT_CONFIG,
dataSets: [],
datasetConfigs: { retrieval_model: 'single' },
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
const handleSend = mockUseTextGeneration().handleSend
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
pre_prompt: '',
}),
}),
)
})
it('should find context variable from prompt_variables', () => {
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
const handleSend = mockUseTextGeneration().handleSend
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
dataset_query_variable: 'context',
}),
}),
)
})
it('should use empty string for dataset_query_variable when no context var exists', () => {
const modelConfigWithoutContextVar = {
...createDefaultModelConfig(),
configs: {
prompt_template: 'Hello {{name}}',
prompt_variables: [
{ key: 'name', name: 'Name', type: 'string' as const, is_context_var: false },
],
},
}
mockUseDebugConfigurationContext.mockReturnValue({
isAdvancedMode: false,
modelConfig: modelConfigWithoutContextVar,
appId: 'test-app-id',
inputs: { name: 'World' },
promptMode: 'simple',
speechToTextConfig: { enabled: true },
introduction: 'Welcome',
suggestedQuestionsAfterAnswerConfig: { enabled: false },
citationConfig: { enabled: false },
externalDataToolsConfig: [],
chatPromptConfig: DEFAULT_CHAT_PROMPT_CONFIG,
completionPromptConfig: DEFAULT_COMPLETION_PROMPT_CONFIG,
dataSets: [],
datasetConfigs: { retrieval_model: 'single' },
})
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
dataset_query_variable: '',
}),
}),
)
})
})
describe('datasets transformation', () => {
it('should transform dataSets to postDatasets format', () => {
mockUseDebugConfigurationContext.mockReturnValue({
...mockUseDebugConfigurationContext(),
isAdvancedMode: false,
modelConfig: createDefaultModelConfig(),
appId: 'test-app-id',
inputs: {},
promptMode: 'simple',
speechToTextConfig: { enabled: true },
introduction: '',
suggestedQuestionsAfterAnswerConfig: { enabled: false },
citationConfig: { enabled: false },
externalDataToolsConfig: [],
chatPromptConfig: DEFAULT_CHAT_PROMPT_CONFIG,
completionPromptConfig: DEFAULT_COMPLETION_PROMPT_CONFIG,
dataSets: [
{ id: 'ds-1', name: 'Dataset 1' },
{ id: 'ds-2', name: 'Dataset 2' },
],
datasetConfigs: { retrieval_model: 'single' },
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
const handleSend = mockUseTextGeneration().handleSend
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
dataset_configs: expect.objectContaining({
datasets: {
datasets: [
{ dataset: { enabled: true, id: 'ds-1' } },
{ dataset: { enabled: true, id: 'ds-2' } },
],
},
}),
}),
}),
)
})
})
describe('event subscription', () => {
it('should handle APP_CHAT_WITH_MULTIPLE_MODEL event', async () => {
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test message', files: [] },
})
await waitFor(() => {
expect(handleSend).toHaveBeenCalledWith(
'apps/test-app-id/completion-messages',
expect.any(Object),
)
})
})
it('should ignore non-matching events', async () => {
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: 'SOME_OTHER_EVENT',
payload: { message: 'test' },
})
expect(handleSend).not.toHaveBeenCalled()
})
})
describe('doSend', () => {
it('should build config data with model info', async () => {
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter({
provider: 'openai',
model: 'gpt-3.5-turbo',
parameters: { temperature: 0.8 },
})
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
await waitFor(() => {
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
model: {
provider: 'openai',
name: 'gpt-3.5-turbo',
mode: 'completion',
completion_params: { temperature: 0.8 },
},
}),
}),
)
})
})
it('should process local files by clearing url', async () => {
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
const files = [
{ id: 'file-1', transfer_method: TransferMethod.local_file, url: 'http://example.com/file1' },
{ id: 'file-2', transfer_method: TransferMethod.remote_url, url: 'http://example.com/file2' },
]
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files },
})
await waitFor(() => {
const callArgs = handleSend.mock.calls[0][1]
expect(callArgs.files[0].url).toBe('')
expect(callArgs.files[1].url).toBe('http://example.com/file2')
})
})
it('should not include files when file upload is disabled', async () => {
const features = { ...createDefaultFeatures(), file: { enabled: false } }
mockUseFeatures.mockImplementation((selector: (state: { features: typeof features }) => unknown) => selector({ features }))
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
const files = [{ id: 'file-1', transfer_method: TransferMethod.remote_url }]
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files },
})
await waitFor(() => {
const callArgs = handleSend.mock.calls[0][1]
expect(callArgs.files).toBeUndefined()
})
})
it('should not include files when no files provided', async () => {
const handleSend = vi.fn()
mockUseTextGeneration.mockReturnValue({
completion: 'text',
handleSend,
isResponding: false,
messageId: 'msg-1',
})
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
await waitFor(() => {
const callArgs = handleSend.mock.calls[0][1]
expect(callArgs.files).toBeUndefined()
})
})
})
describe('features integration', () => {
it('should include features in config', () => {
const modelAndParameter = createModelAndParameter()
render(<TextGenerationItem modelAndParameter={modelAndParameter} />)
subscriptionCallback?.({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: { message: 'test', files: [] },
})
const handleSend = mockUseTextGeneration().handleSend
expect(handleSend).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
model_config: expect.objectContaining({
more_like_this: { enabled: true },
sensitive_word_avoidance: { enabled: false },
text_to_speech: { enabled: true },
file_upload: { enabled: true },
}),
}),
)
})
})
})

View File

@@ -6,18 +6,26 @@ import type {
ChatConfig,
ChatItem,
} from '@/app/components/base/chat/types'
import type { VisionFile } from '@/types/app'
import { cloneDeep } from 'es-toolkit/object'
import {
useCallback,
useEffect,
useRef,
useState,
} from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import { ToastContext } from '@/app/components/base/toast'
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import { useDebugConfigurationContext } from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import {
AgentStrategy,
AppModeEnum,
ModelModeType,
TransferMethod,
} from '@/types/app'
import { promptVariablesToUserInputsForm } from '@/utils/model-config'
import { ORCHESTRATE_CHANGED } from './types'
@@ -162,3 +170,111 @@ export const useFormattingChangedSubscription = (chatList: ChatItem[]) => {
}
})
}
/**
 * Pre-send validation for the debug panel.
 *
 * Returns:
 * - `checkCanSend(inputs, completionFiles)`: true when the current prompt
 *   configuration and user inputs allow a request; otherwise shows a toast
 *   describing the first problem found and returns false.
 * - `logError(message)`: shows an error toast.
 */
export const useInputValidation = () => {
  const { t } = useTranslation()
  const { notify } = useContext(ToastContext)
  const {
    isAdvancedMode,
    mode,
    modelModeType,
    hasSetBlockStatus,
    modelConfig,
  } = useDebugConfigurationContext()
  const logError = useCallback((message: string) => {
    notify({ type: 'error', message })
  }, [notify])
  const checkCanSend = useCallback((inputs: Record<string, unknown>, completionFiles: VisionFile[]) => {
    // Advanced (non-completion-app) prompts driven by a completion-mode model
    // must reference both the history and query blocks before sending.
    if (isAdvancedMode && mode !== AppModeEnum.COMPLETION) {
      if (modelModeType === ModelModeType.completion) {
        if (!hasSetBlockStatus.history) {
          notify({ type: 'error', message: t('otherError.historyNoBeEmpty', { ns: 'appDebug' }) })
          return false
        }
        if (!hasSetBlockStatus.query) {
          notify({ type: 'error', message: t('otherError.queryNoBeEmpty', { ns: 'appDebug' }) })
          return false
        }
      }
    }
    let hasEmptyInput = ''
    // Legacy-compat "required" filter: only form-like types are checked, and
    // a variable counts as required unless `required` is explicitly falsy —
    // missing key/name (old data) also forces the check.
    const requiredVars = modelConfig.configs.prompt_variables.filter(({ key, name, required, type }) => {
      if (type !== 'string' && type !== 'paragraph' && type !== 'select' && type !== 'number')
        return false
      const res = (!key || !key.trim()) || (!name || !name.trim()) || (required || required === undefined || required === null)
      return res
    })
    // Remember the display name of the first required variable left empty.
    requiredVars.forEach(({ key, name }) => {
      if (hasEmptyInput)
        return
      if (!inputs[key])
        hasEmptyInput = name
    })
    if (hasEmptyInput) {
      logError(t('errorMessage.valueOfVarRequired', { ns: 'appDebug', key: hasEmptyInput }))
      return false
    }
    // Locally-selected files must finish uploading (gain an upload_file_id)
    // before a send is allowed.
    if (completionFiles.find(item => item.transfer_method === TransferMethod.local_file && !item.upload_file_id)) {
      notify({ type: 'info', message: t('errorMessage.waitForFileUpload', { ns: 'appDebug' }) })
      return false
    }
    return !hasEmptyInput // always true here; kept for parity with the original inline logic
  }, [
    hasSetBlockStatus.history,
    hasSetBlockStatus.query,
    isAdvancedMode,
    mode,
    modelConfig.configs.prompt_variables,
    t,
    logError,
    notify,
    modelModeType,
  ])
  return { checkCanSend, logError }
}
/**
 * Drives the "formatting changed" confirmation dialog: opens it whenever the
 * shared formattingChanged flag flips to true, and exposes confirm/cancel
 * handlers that close the dialog and reset the shared flag. Confirm also
 * invokes the caller-supplied onClear callback first.
 */
export const useFormattingChangeConfirm = () => {
  const [showConfirm, setShowConfirm] = useState(false)
  const { formattingChanged, setFormattingChanged } = useDebugConfigurationContext()
  // Open the dialog when the shared flag is raised elsewhere.
  useEffect(() => {
    if (formattingChanged)
      setShowConfirm(true) // eslint-disable-line react-hooks-extra/no-direct-set-state-in-use-effect
  }, [formattingChanged])
  // Shared teardown: hide the dialog and clear the shared flag.
  const dismiss = useCallback(() => {
    setShowConfirm(false)
    setFormattingChanged(false)
  }, [setFormattingChanged])
  const handleConfirm = useCallback((onClear: () => void) => {
    onClear()
    dismiss()
  }, [dismiss])
  return {
    isShowFormattingChangeConfirm: showConfirm,
    handleConfirm,
    handleCancel: dismiss,
  }
}
/**
 * Computes the horizontal space remaining beside the container element:
 * body width minus the container width plus fixed 16px + 8px margins.
 * Stays 0 until the ref is populated; recomputed only when the ref object
 * identity changes.
 */
export const useModalWidth = (containerRef: React.RefObject<HTMLDivElement | null>) => {
  const [modalWidth, setModalWidth] = useState(0)
  useEffect(() => {
    const container = containerRef.current
    if (!container)
      return
    // eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect -- one-shot measurement after mount
    setModalWidth(document.body.clientWidth - (container.clientWidth + 16) - 8)
  }, [containerRef])
  return modalWidth
}

View File

@@ -3,54 +3,39 @@ import type { FC } from 'react'
import type { DebugWithSingleModelRefType } from './debug-with-single-model'
import type { ModelAndParameter } from './types'
import type { ModelParameterModalProps } from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal'
import type { Inputs } from '@/models/debug'
import type { ModelConfig as BackendModelConfig, VisionFile, VisionSettings } from '@/types/app'
import {
RiAddLine,
RiEqualizer2Line,
RiSparklingFill,
} from '@remixicon/react'
import { useBoolean } from 'ahooks'
import { noop } from 'es-toolkit/function'
import { cloneDeep } from 'es-toolkit/object'
import type { Inputs, PromptVariable } from '@/models/debug'
import type { VisionFile, VisionSettings } from '@/types/app'
import { produce, setAutoFreeze } from 'immer'
import * as React from 'react'
import { useCallback, useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import { useShallow } from 'zustand/react/shallow'
import ChatUserInput from '@/app/components/app/configuration/debug/chat-user-input'
import PromptValuePanel from '@/app/components/app/configuration/prompt-value-panel'
import { useStore as useAppStore } from '@/app/components/app/store'
import TextGeneration from '@/app/components/app/text-generate/item'
import ActionButton, { ActionButtonState } from '@/app/components/base/action-button'
import AgentLogModal from '@/app/components/base/agent-log-modal'
import Button from '@/app/components/base/button'
import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks'
import { RefreshCcw01 } from '@/app/components/base/icons/src/vender/line/arrows'
import PromptLogModal from '@/app/components/base/prompt-log-modal'
import { ToastContext } from '@/app/components/base/toast'
import TooltipPlus from '@/app/components/base/tooltip'
import { ModelFeatureEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useDefaultModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG, IS_CE_EDITION } from '@/config'
import { IS_CE_EDITION } from '@/config'
import ConfigContext from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useProviderContext } from '@/context/provider-context'
import { sendCompletionMessage } from '@/service/debug'
import { AppSourceType } from '@/service/share'
import { AppModeEnum, ModelModeType, TransferMethod } from '@/types/app'
import { formatBooleanInputs, promptVariablesToUserInputsForm } from '@/utils/model-config'
import GroupName from '../base/group-name'
import { AppModeEnum } from '@/types/app'
import CannotQueryDataset from '../base/warning-mask/cannot-query-dataset'
import FormattingChanged from '../base/warning-mask/formatting-changed'
import HasNotSetAPIKEY from '../base/warning-mask/has-not-set-api'
import DebugHeader from './debug-header'
import DebugWithMultipleModel from './debug-with-multiple-model'
import DebugWithSingleModel from './debug-with-single-model'
import { useFormattingChangeConfirm, useInputValidation, useModalWidth } from './hooks'
import TextCompletionResult from './text-completion-result'
import {
APP_CHAT_WITH_MULTIPLE_MODEL,
APP_CHAT_WITH_MULTIPLE_MODEL_RESTART,
} from './types'
import { useTextCompletion } from './use-text-completion'
type IDebug = {
isAPIKeySet: boolean
@@ -71,33 +56,17 @@ const Debug: FC<IDebug> = ({
multipleModelConfigs,
onMultipleModelConfigsChange,
}) => {
const { t } = useTranslation()
const {
readonly,
appId,
mode,
modelModeType,
hasSetBlockStatus,
isAdvancedMode,
promptMode,
chatPromptConfig,
completionPromptConfig,
introduction,
suggestedQuestionsAfterAnswerConfig,
speechToTextConfig,
textToSpeechConfig,
citationConfig,
formattingChanged,
setFormattingChanged,
dataSets,
modelConfig,
completionParams,
hasSetContextVar,
datasetConfigs,
externalDataToolsConfig,
} = useContext(ConfigContext)
const { eventEmitter } = useEventEmitterContextContext()
const { data: text2speechDefaultModel } = useDefaultModel(ModelTypeEnum.textEmbedding)
const features = useFeatures(s => s.features)
const featuresStore = useFeaturesStore()
// Disable immer auto-freeze for this component
useEffect(() => {
setAutoFreeze(false)
return () => {
@@ -105,226 +74,77 @@ const Debug: FC<IDebug> = ({
}
}, [])
const [isResponding, { setTrue: setRespondingTrue, setFalse: setRespondingFalse }] = useBoolean(false)
const [isShowFormattingChangeConfirm, setIsShowFormattingChangeConfirm] = useState(false)
// UI state
const [expanded, setExpanded] = useState(true)
const [isShowCannotQueryDataset, setShowCannotQueryDataset] = useState(false)
useEffect(() => {
if (formattingChanged)
setIsShowFormattingChangeConfirm(true)
}, [formattingChanged])
const containerRef = useRef<HTMLDivElement>(null)
const debugWithSingleModelRef = React.useRef<DebugWithSingleModelRefType>(null!)
const handleClearConversation = () => {
debugWithSingleModelRef.current?.handleRestart()
}
const clearConversation = async () => {
if (debugWithMultipleModel) {
eventEmitter?.emit({
type: APP_CHAT_WITH_MULTIPLE_MODEL_RESTART,
} as any)
return
}
handleClearConversation()
}
// Hooks
const { checkCanSend } = useInputValidation()
const { isShowFormattingChangeConfirm, handleConfirm, handleCancel } = useFormattingChangeConfirm()
const modalWidth = useModalWidth(containerRef)
const handleConfirm = () => {
clearConversation()
setIsShowFormattingChangeConfirm(false)
setFormattingChanged(false)
}
// Wrapper for checkCanSend that uses current completionFiles
const [completionFilesForValidation, setCompletionFilesForValidation] = useState<VisionFile[]>([])
const checkCanSendWithFiles = useCallback(() => {
return checkCanSend(inputs, completionFilesForValidation)
}, [checkCanSend, inputs, completionFilesForValidation])
const handleCancel = () => {
setIsShowFormattingChangeConfirm(false)
setFormattingChanged(false)
}
const { notify } = useContext(ToastContext)
const logError = useCallback((message: string) => {
notify({ type: 'error', message })
}, [notify])
const [completionFiles, setCompletionFiles] = useState<VisionFile[]>([])
const checkCanSend = useCallback(() => {
if (isAdvancedMode && mode !== AppModeEnum.COMPLETION) {
if (modelModeType === ModelModeType.completion) {
if (!hasSetBlockStatus.history) {
notify({ type: 'error', message: t('otherError.historyNoBeEmpty', { ns: 'appDebug' }) })
return false
}
if (!hasSetBlockStatus.query) {
notify({ type: 'error', message: t('otherError.queryNoBeEmpty', { ns: 'appDebug' }) })
return false
}
}
}
let hasEmptyInput = ''
const requiredVars = modelConfig.configs.prompt_variables.filter(({ key, name, required, type }) => {
if (type !== 'string' && type !== 'paragraph' && type !== 'select' && type !== 'number')
return false
const res = (!key || !key.trim()) || (!name || !name.trim()) || (required || required === undefined || required === null)
return res
}) // compatible with old version
requiredVars.forEach(({ key, name }) => {
if (hasEmptyInput)
return
if (!inputs[key])
hasEmptyInput = name
})
if (hasEmptyInput) {
logError(t('errorMessage.valueOfVarRequired', { ns: 'appDebug', key: hasEmptyInput }))
return false
}
if (completionFiles.find(item => item.transfer_method === TransferMethod.local_file && !item.upload_file_id)) {
notify({ type: 'info', message: t('errorMessage.waitForFileUpload', { ns: 'appDebug' }) })
return false
}
return !hasEmptyInput
}, [
const {
isResponding,
completionRes,
messageId,
completionFiles,
hasSetBlockStatus.history,
hasSetBlockStatus.query,
inputs,
isAdvancedMode,
mode,
modelConfig.configs.prompt_variables,
t,
logError,
notify,
modelModeType,
])
const [completionRes, setCompletionRes] = useState('')
const [messageId, setMessageId] = useState<string | null>(null)
const features = useFeatures(s => s.features)
const featuresStore = useFeaturesStore()
const sendTextCompletion = async () => {
if (isResponding) {
notify({ type: 'info', message: t('errorMessage.waitForResponse', { ns: 'appDebug' }) })
return false
}
if (dataSets.length > 0 && !hasSetContextVar) {
setShowCannotQueryDataset(true)
return true
}
if (!checkCanSend())
return
const postDatasets = dataSets.map(({ id }) => ({
dataset: {
enabled: true,
id,
},
}))
const contextVar = modelConfig.configs.prompt_variables.find(item => item.is_context_var)?.key
const postModelConfig: BackendModelConfig = {
pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '',
prompt_type: promptMode,
chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG),
completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG),
user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
dataset_query_variable: contextVar || '',
dataset_configs: {
...datasetConfigs,
datasets: {
datasets: [...postDatasets],
} as any,
},
agent_mode: {
enabled: false,
tools: [],
},
model: {
provider: modelConfig.provider,
name: modelConfig.model_id,
mode: modelConfig.mode,
completion_params: completionParams as any,
},
more_like_this: features.moreLikeThis as any,
sensitive_word_avoidance: features.moderation as any,
text_to_speech: features.text2speech as any,
file_upload: features.file as any,
opening_statement: introduction,
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
speech_to_text: speechToTextConfig,
retriever_resource: citationConfig,
system_parameters: modelConfig.system_parameters,
external_data_tools: externalDataToolsConfig,
}
const data: Record<string, any> = {
inputs: formatBooleanInputs(modelConfig.configs.prompt_variables, inputs),
model_config: postModelConfig,
}
if ((features.file as any).enabled && completionFiles && completionFiles?.length > 0) {
data.files = completionFiles.map((item) => {
if (item.transfer_method === TransferMethod.local_file) {
return {
...item,
url: '',
}
}
return item
})
}
setCompletionRes('')
setMessageId('')
let res: string[] = []
setRespondingTrue()
sendCompletionMessage(appId, data, {
onData: (data: string, _isFirstMessage: boolean, { messageId }) => {
res.push(data)
setCompletionRes(res.join(''))
setMessageId(messageId)
},
onMessageReplace: (messageReplace) => {
res = [messageReplace.answer]
setCompletionRes(res.join(''))
},
onCompleted() {
setRespondingFalse()
},
onError() {
setRespondingFalse()
},
})
}
const handleSendTextCompletion = () => {
if (debugWithMultipleModel) {
eventEmitter?.emit({
type: APP_CHAT_WITH_MULTIPLE_MODEL,
payload: {
message: '',
files: completionFiles,
},
} as any)
return
}
sendTextCompletion()
}
const varList = modelConfig.configs.prompt_variables.map((item: any) => {
return {
label: item.key,
value: inputs[item.key],
}
setCompletionFiles,
sendTextCompletion,
} = useTextCompletion({
checkCanSend: checkCanSendWithFiles,
onShowCannotQueryDataset: () => setShowCannotQueryDataset(true),
})
// Sync completionFiles for validation
useEffect(() => {
setCompletionFilesForValidation(completionFiles as VisionFile[]) // eslint-disable-line react-hooks-extra/no-direct-set-state-in-use-effect
}, [completionFiles])
// App store for modals
const { currentLogItem, setCurrentLogItem, showPromptLogModal, setShowPromptLogModal, showAgentLogModal, setShowAgentLogModal } = useAppStore(useShallow(state => ({
currentLogItem: state.currentLogItem,
setCurrentLogItem: state.setCurrentLogItem,
showPromptLogModal: state.showPromptLogModal,
setShowPromptLogModal: state.setShowPromptLogModal,
showAgentLogModal: state.showAgentLogModal,
setShowAgentLogModal: state.setShowAgentLogModal,
})))
// Provider context for model list
const { textGenerationModelList } = useProviderContext()
const handleChangeToSingleModel = (item: ModelAndParameter) => {
// Computed values
const varList = modelConfig.configs.prompt_variables.map((item: PromptVariable) => ({
label: item.key,
value: inputs[item.key],
}))
// Handlers
const handleClearConversation = useCallback(() => {
debugWithSingleModelRef.current?.handleRestart()
}, [])
const clearConversation = useCallback(async () => {
if (debugWithMultipleModel) {
eventEmitter?.emit({ type: APP_CHAT_WITH_MULTIPLE_MODEL_RESTART } as any) // eslint-disable-line ts/no-explicit-any
return
}
handleClearConversation()
}, [debugWithMultipleModel, eventEmitter, handleClearConversation])
const handleFormattingConfirm = useCallback(() => {
handleConfirm(clearConversation)
}, [handleConfirm, clearConversation])
const handleChangeToSingleModel = useCallback((item: ModelAndParameter) => {
const currentProvider = textGenerationModelList.find(modelItem => modelItem.provider === item.provider)
const currentModel = currentProvider?.models.find(model => model.model === item.model)
@@ -335,26 +155,18 @@ const Debug: FC<IDebug> = ({
features: currentModel?.features,
})
modelParameterParams.onCompletionParamsChange(item.parameters)
onMultipleModelConfigsChange(
false,
[],
)
}
onMultipleModelConfigsChange(false, [])
}, [modelParameterParams, onMultipleModelConfigsChange, textGenerationModelList])
const handleVisionConfigInMultipleModel = useCallback(() => {
if (debugWithMultipleModel && mode) {
const supportedVision = multipleModelConfigs.some((modelConfig) => {
const currentProvider = textGenerationModelList.find(modelItem => modelItem.provider === modelConfig.provider)
const currentModel = currentProvider?.models.find(model => model.model === modelConfig.model)
const supportedVision = multipleModelConfigs.some((config) => {
const currentProvider = textGenerationModelList.find(modelItem => modelItem.provider === config.provider)
const currentModel = currentProvider?.models.find(model => model.model === config.model)
return currentModel?.features?.includes(ModelFeatureEnum.vision)
})
const {
features,
setFeatures,
} = featuresStore!.getState()
const newFeatures = produce(features, (draft) => {
const { features: storeFeatures, setFeatures } = featuresStore!.getState()
const newFeatures = produce(storeFeatures, (draft) => {
draft.file = {
...draft.file,
enabled: supportedVision,
@@ -368,210 +180,131 @@ const Debug: FC<IDebug> = ({
handleVisionConfigInMultipleModel()
}, [multipleModelConfigs, mode, handleVisionConfigInMultipleModel])
const { currentLogItem, setCurrentLogItem, showPromptLogModal, setShowPromptLogModal, showAgentLogModal, setShowAgentLogModal } = useAppStore(useShallow(state => ({
currentLogItem: state.currentLogItem,
setCurrentLogItem: state.setCurrentLogItem,
showPromptLogModal: state.showPromptLogModal,
setShowPromptLogModal: state.setShowPromptLogModal,
showAgentLogModal: state.showAgentLogModal,
setShowAgentLogModal: state.setShowAgentLogModal,
})))
const [width, setWidth] = useState(0)
const ref = useRef<HTMLDivElement>(null)
const handleSendTextCompletion = useCallback(() => {
if (debugWithMultipleModel) {
eventEmitter?.emit({ type: APP_CHAT_WITH_MULTIPLE_MODEL, payload: { message: '', files: completionFiles } } as any) // eslint-disable-line ts/no-explicit-any
return
}
sendTextCompletion()
}, [completionFiles, debugWithMultipleModel, eventEmitter, sendTextCompletion])
const adjustModalWidth = () => {
if (ref.current)
setWidth(document.body.clientWidth - (ref.current?.clientWidth + 16) - 8)
}
const handleAddModel = useCallback(() => {
onMultipleModelConfigsChange(true, [...multipleModelConfigs, { id: `${Date.now()}`, model: '', provider: '', parameters: {} }])
}, [multipleModelConfigs, onMultipleModelConfigsChange])
useEffect(() => {
adjustModalWidth()
}, [])
const handleClosePromptLogModal = useCallback(() => {
setCurrentLogItem()
setShowPromptLogModal(false)
}, [setCurrentLogItem, setShowPromptLogModal])
const [expanded, setExpanded] = useState(true)
// Clear the selected log entry, then hide the agent log modal.
const handleCloseAgentLogModal = useCallback(() => {
  setCurrentLogItem()
  setShowAgentLogModal(false)
}, [setCurrentLogItem, setShowAgentLogModal])
const isShowTextToSpeech = features.text2speech?.enabled && !!text2speechDefaultModel
return (
<>
<div className="shrink-0">
<div className="flex items-center justify-between px-4 pb-2 pt-3">
<div className="system-xl-semibold text-text-primary">{t('inputs.title', { ns: 'appDebug' })}</div>
<div className="flex items-center">
{
debugWithMultipleModel
? (
<>
<Button
variant="ghost-accent"
onClick={() => onMultipleModelConfigsChange(true, [...multipleModelConfigs, { id: `${Date.now()}`, model: '', provider: '', parameters: {} }])}
disabled={multipleModelConfigs.length >= 4}
>
<RiAddLine className="mr-1 h-3.5 w-3.5" />
{t('modelProvider.addModel', { ns: 'common' })}
(
{multipleModelConfigs.length}
/4)
</Button>
<div className="mx-2 h-[14px] w-[1px] bg-divider-regular" />
</>
)
: null
}
{mode !== AppModeEnum.COMPLETION && (
<>
{
!readonly && (
<TooltipPlus
popupContent={t('operation.refresh', { ns: 'common' })}
>
<ActionButton onClick={clearConversation}>
<RefreshCcw01 className="h-4 w-4" />
</ActionButton>
</TooltipPlus>
)
}
{
varList.length > 0 && (
<div className="relative ml-1 mr-2">
<TooltipPlus
popupContent={t('panel.userInputField', { ns: 'workflow' })}
>
<ActionButton state={expanded ? ActionButtonState.Active : undefined} onClick={() => !readonly && setExpanded(!expanded)}>
<RiEqualizer2Line className="h-4 w-4" />
</ActionButton>
</TooltipPlus>
{expanded && <div className="absolute bottom-[-14px] right-[5px] z-10 h-3 w-3 rotate-45 border-l-[0.5px] border-t-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg" />}
</div>
)
}
</>
)}
</div>
</div>
<DebugHeader
readonly={readonly}
mode={mode}
debugWithMultipleModel={debugWithMultipleModel}
multipleModelConfigs={multipleModelConfigs}
varListLength={varList.length}
expanded={expanded}
onExpandedChange={setExpanded}
onClearConversation={clearConversation}
onAddModel={handleAddModel}
/>
{mode !== AppModeEnum.COMPLETION && expanded && (
<div className="mx-3">
<ChatUserInput inputs={inputs} />
</div>
)}
{
mode === AppModeEnum.COMPLETION && (
<PromptValuePanel
appType={mode as AppModeEnum}
onSend={handleSendTextCompletion}
inputs={inputs}
visionConfig={{
...features.file! as VisionSettings,
transfer_methods: features.file!.allowed_file_upload_methods || [],
image_file_size_limit: features.file?.fileUploadConfig?.image_file_size_limit,
}}
onVisionFilesChange={setCompletionFiles}
/>
)
}
</div>
{
debugWithMultipleModel && (
<div className="mt-3 grow overflow-hidden" ref={ref}>
<DebugWithMultipleModel
multipleModelConfigs={multipleModelConfigs}
onMultipleModelConfigsChange={onMultipleModelConfigsChange}
onDebugWithMultipleModelChange={handleChangeToSingleModel}
checkCanSend={checkCanSend}
/>
{showPromptLogModal && (
<PromptLogModal
width={width}
currentLogItem={currentLogItem}
onCancel={() => {
setCurrentLogItem()
setShowPromptLogModal(false)
}}
/>
)}
{showAgentLogModal && (
<AgentLogModal
width={width}
currentLogItem={currentLogItem}
onCancel={() => {
setCurrentLogItem()
setShowAgentLogModal(false)
}}
/>
)}
</div>
)
}
{
!debugWithMultipleModel && (
<div className="flex grow flex-col" ref={ref}>
{/* Chat */}
{mode !== AppModeEnum.COMPLETION && (
<div className="h-0 grow overflow-hidden">
<DebugWithSingleModel
ref={debugWithSingleModelRef}
checkCanSend={checkCanSend}
/>
</div>
)}
{/* Text Generation */}
{mode === AppModeEnum.COMPLETION && (
<>
{(completionRes || isResponding) && (
<>
<div className="mx-4 mt-3"><GroupName name={t('result', { ns: 'appDebug' })} /></div>
<div className="mx-3 mb-8">
<TextGeneration
appSourceType={AppSourceType.webApp}
className="mt-2"
content={completionRes}
isLoading={!completionRes && isResponding}
isShowTextToSpeech={textToSpeechConfig.enabled && !!text2speechDefaultModel}
isResponding={isResponding}
messageId={messageId}
isError={false}
onRetry={noop}
siteInfo={null}
/>
</div>
</>
)}
{!completionRes && !isResponding && (
<div className="flex grow flex-col items-center justify-center gap-2">
<RiSparklingFill className="h-12 w-12 text-text-empty-state-icon" />
<div className="system-sm-regular text-text-quaternary">{t('noResult', { ns: 'appDebug' })}</div>
</div>
)}
</>
)}
{mode === AppModeEnum.COMPLETION && showPromptLogModal && (
<PromptLogModal
width={width}
currentLogItem={currentLogItem}
onCancel={() => {
setCurrentLogItem()
setShowPromptLogModal(false)
}}
/>
)}
{isShowCannotQueryDataset && (
<CannotQueryDataset
onConfirm={() => setShowCannotQueryDataset(false)}
/>
)}
</div>
)
}
{
isShowFormattingChangeConfirm && (
<FormattingChanged
onConfirm={handleConfirm}
onCancel={handleCancel}
{mode === AppModeEnum.COMPLETION && (
<PromptValuePanel
appType={mode as AppModeEnum}
onSend={handleSendTextCompletion}
inputs={inputs}
visionConfig={{
...features.file! as VisionSettings,
transfer_methods: features.file!.allowed_file_upload_methods || [],
image_file_size_limit: features.file?.fileUploadConfig?.image_file_size_limit,
}}
onVisionFilesChange={setCompletionFiles}
/>
)
}
{!isAPIKeySet && !readonly && (<HasNotSetAPIKEY isTrailFinished={!IS_CE_EDITION} onSetting={onSetting} />)}
)}
</div>
{debugWithMultipleModel && (
<div className="mt-3 grow overflow-hidden" ref={containerRef}>
<DebugWithMultipleModel
multipleModelConfigs={multipleModelConfigs}
onMultipleModelConfigsChange={onMultipleModelConfigsChange}
onDebugWithMultipleModelChange={handleChangeToSingleModel}
checkCanSend={checkCanSendWithFiles}
/>
{showPromptLogModal && (
<PromptLogModal
width={modalWidth}
currentLogItem={currentLogItem}
onCancel={handleClosePromptLogModal}
/>
)}
{showAgentLogModal && (
<AgentLogModal
width={modalWidth}
currentLogItem={currentLogItem}
onCancel={handleCloseAgentLogModal}
/>
)}
</div>
)}
{!debugWithMultipleModel && (
<div className="flex grow flex-col" ref={containerRef}>
{mode !== AppModeEnum.COMPLETION && (
<div className="h-0 grow overflow-hidden">
<DebugWithSingleModel
ref={debugWithSingleModelRef}
checkCanSend={checkCanSendWithFiles}
/>
</div>
)}
{mode === AppModeEnum.COMPLETION && (
<TextCompletionResult
completionRes={completionRes}
isResponding={isResponding}
messageId={messageId}
isShowTextToSpeech={isShowTextToSpeech}
/>
)}
{mode === AppModeEnum.COMPLETION && showPromptLogModal && (
<PromptLogModal
width={modalWidth}
currentLogItem={currentLogItem}
onCancel={handleClosePromptLogModal}
/>
)}
{isShowCannotQueryDataset && (
<CannotQueryDataset onConfirm={() => setShowCannotQueryDataset(false)} />
)}
</div>
)}
{isShowFormattingChangeConfirm && (
<FormattingChanged
onConfirm={handleFormattingConfirm}
onCancel={handleCancel}
/>
)}
{!isAPIKeySet && !readonly && (
<HasNotSetAPIKEY isTrailFinished={!IS_CE_EDITION} onSetting={onSetting} />
)}
</>
)
}
export default React.memo(Debug)

View File

@@ -0,0 +1,57 @@
'use client'
import type { FC } from 'react'
import { RiSparklingFill } from '@remixicon/react'
import { noop } from 'es-toolkit/function'
import { useTranslation } from 'react-i18next'
import TextGeneration from '@/app/components/app/text-generate/item'
import { AppSourceType } from '@/service/share'
import GroupName from '../base/group-name'
type TextCompletionResultProps = {
  completionRes: string
  isResponding: boolean
  messageId: string | null
  isShowTextToSpeech?: boolean
}

/**
 * Debug-panel result area for completion-mode apps.
 * Shows an empty-state placeholder until there is generated content or a
 * request in flight; otherwise renders the generated text block.
 */
const TextCompletionResult: FC<TextCompletionResultProps> = (props) => {
  const { completionRes, isResponding, messageId, isShowTextToSpeech } = props
  const { t } = useTranslation()

  // Nothing generated yet and no request running: show the empty state.
  const showEmptyState = !completionRes && !isResponding
  if (showEmptyState) {
    return (
      <div className="flex grow flex-col items-center justify-center gap-2">
        <RiSparklingFill className="h-12 w-12 text-text-empty-state-icon" />
        <div className="system-sm-regular text-text-quaternary">{t('noResult', { ns: 'appDebug' })}</div>
      </div>
    )
  }

  return (
    <>
      <div className="mx-4 mt-3">
        <GroupName name={t('result', { ns: 'appDebug' })} />
      </div>
      <div className="mx-3 mb-8">
        <TextGeneration
          appSourceType={AppSourceType.webApp}
          className="mt-2"
          content={completionRes}
          isLoading={!completionRes && isResponding}
          isShowTextToSpeech={isShowTextToSpeech}
          isResponding={isResponding}
          messageId={messageId}
          isError={false}
          onRetry={noop}
          siteInfo={null}
        />
      </div>
    </>
  )
}

export default TextCompletionResult

View File

@@ -0,0 +1,187 @@
import type { ModelConfig as BackendModelConfig, VisionFile } from '@/types/app'
import { useBoolean } from 'ahooks'
import { cloneDeep } from 'es-toolkit/object'
import { useCallback, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import { useFeatures } from '@/app/components/base/features/hooks'
import { ToastContext } from '@/app/components/base/toast'
import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import { useDebugConfigurationContext } from '@/context/debug-configuration'
import { sendCompletionMessage } from '@/service/debug'
import { TransferMethod } from '@/types/app'
import { formatBooleanInputs, promptVariablesToUserInputsForm } from '@/utils/model-config'
type UseTextCompletionOptions = {
  // Pre-flight check run before sending; sending is aborted when it returns false.
  checkCanSend: () => boolean
  // Invoked instead of sending when datasets are attached but no prompt
  // variable is marked as the dataset context variable.
  onShowCannotQueryDataset: () => void
}

/**
 * Encapsulates the text-completion debug flow: snapshots the current
 * debug-configuration context into a backend model config, streams the
 * completion via `sendCompletionMessage`, and tracks the response text,
 * message id, responding flag, and attached vision files.
 */
export const useTextCompletion = ({
  checkCanSend,
  onShowCannotQueryDataset,
}: UseTextCompletionOptions) => {
  const { t } = useTranslation()
  const { notify } = useContext(ToastContext)
  const {
    appId,
    isAdvancedMode,
    promptMode,
    chatPromptConfig,
    completionPromptConfig,
    introduction,
    suggestedQuestionsAfterAnswerConfig,
    speechToTextConfig,
    citationConfig,
    dataSets,
    modelConfig,
    completionParams,
    hasSetContextVar,
    datasetConfigs,
    externalDataToolsConfig,
    inputs,
  } = useDebugConfigurationContext()
  const features = useFeatures(s => s.features)

  // Streaming state: responding flag, accumulated answer text, its message id,
  // and the vision files attached to the next request.
  const [isResponding, { setTrue: setRespondingTrue, setFalse: setRespondingFalse }] = useBoolean(false)
  const [completionRes, setCompletionRes] = useState('')
  const [messageId, setMessageId] = useState<string | null>(null)
  const [completionFiles, setCompletionFiles] = useState<VisionFile[]>([])

  // NOTE(review): the return value is inconsistent (false when busy, true on
  // the dataset warning, undefined otherwise); callers appear to ignore it —
  // confirm before relying on the result.
  const sendTextCompletion = useCallback(async () => {
    // Reject overlapping sends while a response is still streaming.
    if (isResponding) {
      notify({ type: 'info', message: t('errorMessage.waitForResponse', { ns: 'appDebug' }) })
      return false
    }

    // Datasets are attached but no context variable is set: warn instead of sending.
    if (dataSets.length > 0 && !hasSetContextVar) {
      onShowCannotQueryDataset()
      return true
    }

    if (!checkCanSend())
      return

    const postDatasets = dataSets.map(({ id }) => ({
      dataset: {
        enabled: true,
        id,
      },
    }))
    // Key of the prompt variable flagged as the dataset context variable, if any.
    const contextVar = modelConfig.configs.prompt_variables.find(item => item.is_context_var)?.key

    // Full backend model config snapshot for this debug run. In advanced mode
    // the prompt lives in chat/completion prompt configs, so pre_prompt is empty.
    const postModelConfig: BackendModelConfig = {
      pre_prompt: !isAdvancedMode ? modelConfig.configs.prompt_template : '',
      prompt_type: promptMode,
      chat_prompt_config: isAdvancedMode ? chatPromptConfig : cloneDeep(DEFAULT_CHAT_PROMPT_CONFIG),
      completion_prompt_config: isAdvancedMode ? completionPromptConfig : cloneDeep(DEFAULT_COMPLETION_PROMPT_CONFIG),
      user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
      dataset_query_variable: contextVar || '',
      /* eslint-disable ts/no-explicit-any */
      dataset_configs: {
        ...datasetConfigs,
        datasets: {
          datasets: [...postDatasets],
        } as any,
      },
      agent_mode: {
        enabled: false,
        tools: [],
      },
      model: {
        provider: modelConfig.provider,
        name: modelConfig.model_id,
        mode: modelConfig.mode,
        completion_params: completionParams as any,
      },
      more_like_this: features.moreLikeThis as any,
      sensitive_word_avoidance: features.moderation as any,
      text_to_speech: features.text2speech as any,
      file_upload: features.file as any,
      /* eslint-enable ts/no-explicit-any */
      opening_statement: introduction,
      suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
      speech_to_text: speechToTextConfig,
      retriever_resource: citationConfig,
      system_parameters: modelConfig.system_parameters,
      external_data_tools: externalDataToolsConfig,
    }

    // Request payload: normalized inputs plus the model config snapshot.
    // eslint-disable-next-line ts/no-explicit-any
    const data: Record<string, any> = {
      inputs: formatBooleanInputs(modelConfig.configs.prompt_variables, inputs),
      model_config: postModelConfig,
    }

    // Attach vision files when file upload is enabled; local-file uploads are
    // sent with the url cleared.
    // eslint-disable-next-line ts/no-explicit-any
    if ((features.file as any).enabled && completionFiles && completionFiles?.length > 0) {
      data.files = completionFiles.map((item) => {
        if (item.transfer_method === TransferMethod.local_file) {
          return {
            ...item,
            url: '',
          }
        }
        return item
      })
    }

    // Reset the previous answer, then stream the new one chunk by chunk.
    setCompletionRes('')
    setMessageId('')
    let res: string[] = []

    setRespondingTrue()
    sendCompletionMessage(appId, data, {
      onData: (data: string, _isFirstMessage: boolean, { messageId }) => {
        res.push(data)
        setCompletionRes(res.join(''))
        setMessageId(messageId)
      },
      // A replace event discards everything streamed so far and restarts the
      // accumulator from the replacement answer.
      onMessageReplace: (messageReplace) => {
        res = [messageReplace.answer]
        setCompletionRes(res.join(''))
      },
      onCompleted() {
        setRespondingFalse()
      },
      onError() {
        setRespondingFalse()
      },
    })
  }, [
    appId,
    checkCanSend,
    chatPromptConfig,
    citationConfig,
    completionFiles,
    completionParams,
    completionPromptConfig,
    datasetConfigs,
    dataSets,
    externalDataToolsConfig,
    features,
    hasSetContextVar,
    inputs,
    introduction,
    isAdvancedMode,
    isResponding,
    modelConfig,
    notify,
    onShowCannotQueryDataset,
    promptMode,
    setRespondingFalse,
    setRespondingTrue,
    speechToTextConfig,
    suggestedQuestionsAfterAnswerConfig,
    t,
  ])

  return {
    isResponding,
    completionRes,
    messageId,
    completionFiles,
    setCompletionFiles,
    sendTextCompletion,
  }
}

View File

@@ -422,16 +422,6 @@
"count": 6
}
},
"app/components/app/configuration/debug/debug-with-multiple-model/index.spec.tsx": {
"ts/no-explicit-any": {
"count": 5
}
},
"app/components/app/configuration/debug/debug-with-multiple-model/index.tsx": {
"ts/no-explicit-any": {
"count": 2
}
},
"app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx": {
"ts/no-explicit-any": {
"count": 8
@@ -455,14 +445,6 @@
"count": 3
}
},
"app/components/app/configuration/debug/index.tsx": {
"react-hooks-extra/no-direct-set-state-in-use-effect": {
"count": 2
},
"ts/no-explicit-any": {
"count": 11
}
},
"app/components/app/configuration/debug/types.ts": {
"ts/no-explicit-any": {
"count": 1