feat: support detecting when to show vision config

This commit is contained in:
Joel
2024-03-01 16:43:56 +08:00
parent c3f99779f2
commit 604930db64
2 changed files with 29 additions and 13 deletions

View File

@@ -20,6 +20,7 @@ const Panel: FC = () => {
const {
inputs,
isShowVisionConfig,
handleModelChanged,
handleCompletionParamsChange,
handleVarListChange,
@@ -88,8 +89,8 @@ const Panel: FC = () => {
Prompt
</Field>
{/* */}
{isChatApp && isChatApp && (
{/* Memory examples */}
{isChatApp && isChatModel && (
<div className='text-xs text-gray-300'>Memory examples(Designing)</div>
)}
{/* Memory */}
@@ -106,18 +107,19 @@ const Panel: FC = () => {
)}
{/* Vision: GPT4-vision and so on */}
<Field
title={t(`${i18nPrefix}.vision`)}
tooltip={t('appDebug.vision.description')!}
operations={
<ResolutionPicker
value={inputs.vision.configs.detail}
onChange={handleVisionResolutionChange}
/>
}
/>
{isShowVisionConfig && (
<Field
title={t(`${i18nPrefix}.vision`)}
tooltip={t('appDebug.vision.description')!}
operations={
<ResolutionPicker
value={inputs.vision.configs.detail}
onChange={handleVisionResolutionChange}
/>
}
/>
)}
</div>
<Split />
<div className='px-4 pt-4 pb-2'>
<OutputVars>
<>

View File

@@ -4,11 +4,14 @@ import useVarList from '../_base/hooks/use-var-list'
import type { Memory, ValueSelector } from '../../types'
import type { LLMNodeType } from './types'
import type { Resolution } from '@/types/app'
import { useTextGenerationCurrentProviderAndModelAndModelList } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
const useConfig = (initInputs: LLMNodeType) => {
const [inputs, setInputs] = useState<LLMNodeType>(initInputs)
// model
const model = inputs.model
const handleModelChanged = useCallback((model: { provider: string; modelId: string; mode?: string }) => {
const newInputs = produce(inputs, (draft) => {
draft.model.provider = model.provider
@@ -25,6 +28,16 @@ const useConfig = (initInputs: LLMNodeType) => {
setInputs(newInputs)
}, [inputs, setInputs])
const {
currentModel: currModel,
} = useTextGenerationCurrentProviderAndModelAndModelList(
{
provider: model.provider,
model: model.name,
},
)
const isShowVisionConfig = !!currModel?.features?.includes(ModelFeatureEnum.vision)
// variables
const { handleVarListChange, handleAddVariable } = useVarList<LLMNodeType>({
inputs,
@@ -55,6 +68,7 @@ const useConfig = (initInputs: LLMNodeType) => {
return {
inputs,
isShowVisionConfig,
handleModelChanged,
handleCompletionParamsChange,
handleVarListChange,