Mirror of https://github.com/langgenius/dify.git, synced 2026-01-14 18:59:49 +00:00
Compare commits
7 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 80a322aaa2 | |
| | 82f7875a52 | |
| | 4637ddaa7f | |
| | 8d2269f762 | |
| | 5f03e66489 | |
| | a9c1f1a041 | |
| | 49cee773c5 | |
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.8.1",
+        default="0.8.2",
     )

     COMMIT_SHA: str = Field(
@@ -1,6 +1,7 @@
 import logging

 from flask_restful import Resource, fields, marshal_with, reqparse
+from flask_restful.inputs import int_range
 from werkzeug.exceptions import InternalServerError

 from controllers.service_api import api
@@ -22,10 +23,12 @@ from core.errors.error import (
 )
 from core.model_runtime.errors.invoke import InvokeError
 from extensions.ext_database import db
+from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
 from libs import helper
 from models.model import App, AppMode, EndUser
 from models.workflow import WorkflowRun
 from services.app_generate_service import AppGenerateService
+from services.workflow_app_service import WorkflowAppService

 logger = logging.getLogger(__name__)
@@ -113,6 +116,30 @@ class WorkflowTaskStopApi(Resource):
         return {"result": "success"}


+class WorkflowAppLogApi(Resource):
+    @validate_app_token
+    @marshal_with(workflow_app_log_pagination_fields)
+    def get(self, app_model: App):
+        """
+        Get workflow app logs
+        """
+        parser = reqparse.RequestParser()
+        parser.add_argument("keyword", type=str, location="args")
+        parser.add_argument("status", type=str, choices=["succeeded", "failed", "stopped"], location="args")
+        parser.add_argument("page", type=int_range(1, 99999), default=1, location="args")
+        parser.add_argument("limit", type=int_range(1, 100), default=20, location="args")
+        args = parser.parse_args()
+
+        # get paginate workflow app logs
+        workflow_app_service = WorkflowAppService()
+        workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs(
+            app_model=app_model, args=args
+        )
+
+        return workflow_app_log_pagination
+
+
 api.add_resource(WorkflowRunApi, "/workflows/run")
 api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_id>")
 api.add_resource(WorkflowTaskStopApi, "/workflows/tasks/<string:task_id>/stop")
+api.add_resource(WorkflowAppLogApi, "/workflows/logs")
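The new `/workflows/logs` endpoint above validates its query string with flask_restful's `RequestParser` and `int_range`. A standalone, illustrative sketch of that validation behaviour (not Dify code; the sample request path and values are hypothetical):

```python
from flask import Flask
from flask_restful import reqparse
from flask_restful.inputs import int_range

app = Flask(__name__)

# Same argument declarations as the new WorkflowAppLogApi.get() handler.
parser = reqparse.RequestParser()
parser.add_argument("keyword", type=str, location="args")
parser.add_argument("status", type=str, choices=["succeeded", "failed", "stopped"], location="args")
parser.add_argument("page", type=int_range(1, 99999), default=1, location="args")
parser.add_argument("limit", type=int_range(1, 100), default=20, location="args")

# Parse a fake incoming request: missing args fall back to their defaults,
# out-of-range page/limit values or an unknown status would raise a 400.
with app.test_request_context("/workflows/logs?status=failed&limit=5"):
    args = parser.parse_args()
    print(args["status"], args["page"], args["limit"])  # failed 1 5
```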
@@ -620,6 +620,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
             if "stream_options" in extra_model_kwargs:
                 del extra_model_kwargs["stream_options"]

+            if "stop" in extra_model_kwargs:
+                del extra_model_kwargs["stop"]
+
         # chat model
         response = client.chat.completions.create(
             messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
@@ -635,7 +638,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
         block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)

         if block_as_stream:
-            return self._handle_chat_block_as_stream_response(block_result, prompt_messages)
+            return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)

         return block_result
@@ -643,6 +646,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
         self,
         block_result: LLMResult,
         prompt_messages: list[PromptMessage],
+        stop: Optional[list[str]] = None,
     ) -> Generator[LLMResultChunk, None, None]:
         """
         Handle llm chat response
@@ -652,15 +656,22 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
         :param response: response
         :param prompt_messages: prompt messages
         :param tools: tools for tool calling
+        :param stop: stop words
         :return: llm response chunk generator
         """
         text = block_result.message.content
+        text = cast(str, text)
+
+        if stop:
+            text = self.enforce_stop_tokens(text, stop)
+
         yield LLMResultChunk(
             model=block_result.model,
             prompt_messages=prompt_messages,
             system_fingerprint=block_result.system_fingerprint,
             delta=LLMResultChunkDelta(
                 index=0,
-                message=block_result.message,
+                message=AssistantPromptMessage(content=text),
                 finish_reason="stop",
                 usage=block_result.usage,
             ),
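With the change above, the `stop` list is honoured even when a blocking result is replayed as a stream, via `self.enforce_stop_tokens(text, stop)`. A rough standalone sketch of what stop-word enforcement typically does, assuming a simple cut-at-first-occurrence rule (illustrative only, not necessarily Dify's exact implementation):

```python
import re

def enforce_stop_tokens(text: str, stop: list[str]) -> str:
    """Truncate `text` at the first occurrence of any stop word."""
    pattern = "|".join(re.escape(s) for s in stop)
    return re.split(pattern, text, maxsplit=1)[0]

print(enforce_stop_tokens("42\nObservation: tool output...", ["Observation:"]))
# -> "42\n"
```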
@@ -912,6 +923,20 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
                 ]
             )

+        if model.startswith("o1"):
+            system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
+            if system_message_count > 0:
+                new_prompt_messages = []
+                for prompt_message in prompt_messages:
+                    if isinstance(prompt_message, SystemPromptMessage):
+                        prompt_message = UserPromptMessage(
+                            content=prompt_message.content,
+                            name=prompt_message.name,
+                        )
+
+                    new_prompt_messages.append(prompt_message)
+                prompt_messages = new_prompt_messages
+
         return prompt_messages

     def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
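The hunk above rewrites system prompts as user messages for models whose name starts with `o1`, which at the time did not accept the system role. A standalone illustration of the effect on a plain, dict-based message list (the real code operates on Dify's `SystemPromptMessage` / `UserPromptMessage` classes):

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi"},
]

# Re-tag any system message as a user message, keeping order and content.
converted = [{**m, "role": "user"} if m["role"] == "system" else m for m in messages]

print(converted)
# [{'role': 'user', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Hi'}]
```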
@@ -429,7 +429,7 @@ class DatasetRetrieval:
                     top_k=top_k,
                     score_threshold=retrieval_model.get("score_threshold", 0.0)
                     if retrieval_model["score_threshold_enabled"]
-                    else None,
+                    else 0.0,
                     reranking_model=retrieval_model.get("reranking_model", None)
                     if retrieval_model["reranking_enable"]
                     else None,
@@ -179,7 +179,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
                 top_k=self.top_k,
                 score_threshold=retrieval_model.get("score_threshold", 0.0)
                 if retrieval_model["score_threshold_enabled"]
-                else None,
+                else 0.0,
                 reranking_model=retrieval_model.get("reranking_model", None)
                 if retrieval_model["reranking_enable"]
                 else None,
@@ -72,7 +72,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
                 top_k=self.top_k,
                 score_threshold=retrieval_model.get("score_threshold", 0.0)
                 if retrieval_model["score_threshold_enabled"]
-                else None,
+                else 0.0,
                 reranking_model=retrieval_model.get("reranking_model", None)
                 if retrieval_model["reranking_enable"]
                 else None,
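The three hunks above (and the HitTestingService hunk further down) make the same one-line change: when score-threshold filtering is disabled, `0.0` is passed instead of `None`, so the retrieval call always receives a float. A tiny standalone illustration of the conditional:

```python
retrieval_model = {"score_threshold_enabled": False, "score_threshold": 0.75}

score_threshold = (
    retrieval_model.get("score_threshold", 0.0)
    if retrieval_model["score_threshold_enabled"]
    else 0.0  # previously None
)

print(score_threshold)  # 0.0 — disabled filtering now yields a float, not None
```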
@@ -1,5 +1,6 @@
 import openai
 import sentry_sdk
+from langfuse import parse_error
 from sentry_sdk.integrations.celery import CeleryIntegration
 from sentry_sdk.integrations.flask import FlaskIntegration
 from werkzeug.exceptions import HTTPException
@@ -10,7 +11,7 @@ def init_app(app):
         sentry_sdk.init(
             dsn=app.config.get("SENTRY_DSN"),
             integrations=[FlaskIntegration(), CeleryIntegration()],
-            ignore_errors=[HTTPException, ValueError, openai.APIStatusError],
+            ignore_errors=[HTTPException, ValueError, openai.APIStatusError, parse_error.defaultErrorResponse],
             traces_sample_rate=app.config.get("SENTRY_TRACES_SAMPLE_RATE", 1.0),
             profiles_sample_rate=app.config.get("SENTRY_PROFILES_SAMPLE_RATE", 1.0),
             environment=app.config.get("DEPLOY_ENV"),
@@ -61,7 +61,6 @@ ignore = [
     "SIM117", # multiple-with-statements
     "SIM210", # if-expr-with-true-false
     "SIM300", # yoda-conditions,
-    "PT004", # pytest-no-assert
 ]

 [tool.ruff.lint.per-file-ignores]
@@ -42,7 +42,7 @@ class HitTestingService:
             top_k=retrieval_model.get("top_k", 2),
             score_threshold=retrieval_model.get("score_threshold", 0.0)
             if retrieval_model["score_threshold_enabled"]
-            else None,
+            else 0.0,
             reranking_model=retrieval_model.get("reranking_model", None)
             if retrieval_model["reranking_enable"]
             else None,
@@ -2,7 +2,7 @@ version: '3'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.8.1
+    image: langgenius/dify-api:0.8.2
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -227,7 +227,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.8.1
+    image: langgenius/dify-api:0.8.2
     restart: always
     environment:
       CONSOLE_WEB_URL: ''
@@ -396,7 +396,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.8.1
+    image: langgenius/dify-web:0.8.2
     restart: always
     environment:
       # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
@@ -208,7 +208,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.8.1
+    image: langgenius/dify-api:0.8.2
     restart: always
     environment:
       # Use the shared environment variables.
@@ -228,7 +228,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.8.1
+    image: langgenius/dify-api:0.8.2
     restart: always
     environment:
       # Use the shared environment variables.
@@ -247,7 +247,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.8.1
+    image: langgenius/dify-web:0.8.2
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -413,3 +413,109 @@ Workflow applications offers non-session support and is ideal for translation, a
  </CodeGroup>
  </Col>
</Row>

---

<Heading
  url='/workflows/logs'
  method='GET'
  title='Get workflow logs'
  name='#Get-Workflow-Logs'
/>
<Row>
  <Col>
    Returns workflow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.

    ### Query

    <Properties>
      <Property name='keyword' type='string' key='keyword'>
        Keyword to search
      </Property>
      <Property name='status' type='string' key='status'>
        succeeded/failed/stopped
      </Property>
      <Property name='page' type='int' key='page'>
        Current page, default is 1.
      </Property>
      <Property name='limit' type='int' key='limit'>
        How many records to return in one request, default is 20.
      </Property>
    </Properties>

    ### Response
    - `page` (int) Current page
    - `limit` (int) Number of returned items; if the input exceeds the system limit, the system limit is used
    - `total` (int) Total number of items
    - `has_more` (bool) Whether there is a next page
    - `data` (array[object]) Log list
      - `id` (string) ID
      - `workflow_run` (object) Workflow run
        - `id` (string) ID
        - `version` (string) Version
        - `status` (string) Status of execution, `running` / `succeeded` / `failed` / `stopped`
        - `error` (string) Optional error reason
        - `elapsed_time` (float) Total seconds elapsed
        - `total_tokens` (int) Total tokens used
        - `total_steps` (int) Default 0
        - `created_at` (timestamp) Start time
        - `finished_at` (timestamp) End time
      - `created_from` (string) Created from
      - `created_by_role` (string) Created by role
      - `created_by_account` (string) Optional, created by account
      - `created_by_end_user` (object) Created by end user
        - `id` (string) ID
        - `type` (string) Type
        - `is_anonymous` (bool) Is anonymous
        - `session_id` (string) Session ID
      - `created_at` (timestamp) Create time
  </Col>
  <Col sticky>

    <CodeGroup title="Request" tag="GET" label="/workflows/logs" targetCode={`curl -X GET '${props.appDetail.api_base_url}/workflows/logs'\\\n --header 'Authorization: Bearer {api_key}'`}>

    ```bash {{ title: 'cURL' }}
    curl -X GET '${props.appDetail.api_base_url}/workflows/logs?limit=1' \
     --header 'Authorization: Bearer {api_key}'
    ```

    </CodeGroup>
    ### Response Example
    <CodeGroup title="Response">
    ```json {{ title: 'Response' }}
    {
        "page": 1,
        "limit": 1,
        "total": 7,
        "has_more": true,
        "data": [
            {
                "id": "e41b93f1-7ca2-40fd-b3a8-999aeb499cc0",
                "workflow_run": {
                    "id": "c0640fc8-03ef-4481-a96c-8a13b732a36e",
                    "version": "2024-08-01 12:17:09.771832",
                    "status": "succeeded",
                    "error": null,
                    "elapsed_time": 1.3588523610014818,
                    "total_tokens": 0,
                    "total_steps": 3,
                    "created_at": 1726139643,
                    "finished_at": 1726139644
                },
                "created_from": "service-api",
                "created_by_role": "end_user",
                "created_by_account": null,
                "created_by_end_user": {
                    "id": "7f7d9117-dd9d-441d-8970-87e5e7e687a3",
                    "type": "service_api",
                    "is_anonymous": false,
                    "session_id": "abc-123"
                },
                "created_at": 1726139644
            }
        ]
    }
    ```
    </CodeGroup>
  </Col>
</Row>
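A minimal Python client sketch for the endpoint documented above, assuming the `requests` package; the base URL and API key below are placeholders to replace with your own values:

```python
import requests

API_BASE = "https://api.dify.ai/v1"  # placeholder: use your api_base_url
API_KEY = "app-xxxxxxxxxxxx"         # placeholder: your app's API key

# Fetch the most recent successful workflow log entry.
resp = requests.get(
    f"{API_BASE}/workflows/logs",
    headers={"Authorization": f"Bearer {API_KEY}"},
    params={"status": "succeeded", "page": 1, "limit": 1},
    timeout=10,
)
resp.raise_for_status()

payload = resp.json()
print(payload["total"], payload["has_more"])
for log in payload["data"]:
    run = log["workflow_run"]
    print(log["id"], run["status"], run["elapsed_time"])
```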
@@ -409,3 +409,109 @@ Workflow applications have no session support and are suited to translation, article writing, summarization AI, etc.
  </CodeGroup>
  </Col>
</Row>

---

<Heading
  url='/workflows/logs'
  method='GET'
  title='Get workflow logs'
  name='#Get-Workflow-Logs'
/>
<Row>
  <Col>
    Returns workflow logs in reverse chronological order.

    ### Query

    <Properties>
      <Property name='keyword' type='string' key='keyword'>
        Keyword to search
      </Property>
      <Property name='status' type='string' key='status'>
        Execution status: succeeded/failed/stopped
      </Property>
      <Property name='page' type='int' key='page'>
        Current page, default is 1.
      </Property>
      <Property name='limit' type='int' key='limit'>
        Number of items per page, default is 20.
      </Property>
    </Properties>

    ### Response
    - `page` (int) Current page
    - `limit` (int) Number of items per page
    - `total` (int) Total number of items
    - `has_more` (bool) Whether there is more data
    - `data` (array[object]) Data for the current page
      - `id` (string) ID
      - `workflow_run` (object) Workflow run log
        - `id` (string) ID
        - `version` (string) Version
        - `status` (string) Execution status, `running` / `succeeded` / `failed` / `stopped`
        - `error` (string) (optional) Error
        - `elapsed_time` (float) Elapsed time in seconds
        - `total_tokens` (int) Number of tokens consumed
        - `total_steps` (int) Number of execution steps
        - `created_at` (timestamp) Start time
        - `finished_at` (timestamp) End time
      - `created_from` (string) Source
      - `created_by_role` (string) Creator role
      - `created_by_account` (string) (optional) Creator account
      - `created_by_end_user` (object) End user who created it
        - `id` (string) ID
        - `type` (string) Type
        - `is_anonymous` (bool) Whether anonymous
        - `session_id` (string) Session ID
      - `created_at` (timestamp) Creation time
  </Col>
  <Col sticky>

    <CodeGroup title="Request" tag="GET" label="/workflows/logs" targetCode={`curl -X GET '${props.appDetail.api_base_url}/workflows/logs'\\\n --header 'Authorization: Bearer {api_key}'`}>

    ```bash {{ title: 'cURL' }}
    curl -X GET '${props.appDetail.api_base_url}/workflows/logs?limit=1' \
     --header 'Authorization: Bearer {api_key}'
    ```

    </CodeGroup>
    ### Response Example
    <CodeGroup title="Response">
    ```json {{ title: 'Response' }}
    {
        "page": 1,
        "limit": 1,
        "total": 7,
        "has_more": true,
        "data": [
            {
                "id": "e41b93f1-7ca2-40fd-b3a8-999aeb499cc0",
                "workflow_run": {
                    "id": "c0640fc8-03ef-4481-a96c-8a13b732a36e",
                    "version": "2024-08-01 12:17:09.771832",
                    "status": "succeeded",
                    "error": null,
                    "elapsed_time": 1.3588523610014818,
                    "total_tokens": 0,
                    "total_steps": 3,
                    "created_at": 1726139643,
                    "finished_at": 1726139644
                },
                "created_from": "service-api",
                "created_by_role": "end_user",
                "created_by_account": null,
                "created_by_end_user": {
                    "id": "7f7d9117-dd9d-441d-8970-87e5e7e687a3",
                    "type": "service_api",
                    "is_anonymous": false,
                    "session_id": "abc-123"
                },
                "created_at": 1726139644
            }
        ]
    }
    ```
    </CodeGroup>
  </Col>
</Row>
@@ -70,15 +70,16 @@ export const useShortcuts = (): void => {
   })

   useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.c`, (e) => {
-    const { showDebugAndPreviewPanel, showInputsPanel } = workflowStore.getState()
-    if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel && !showInputsPanel) {
+    const { showDebugAndPreviewPanel } = workflowStore.getState()
+    if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) {
       e.preventDefault()
       handleNodesCopy()
     }
   }, { exactMatch: true, useCapture: true })

   useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.v`, (e) => {
-    if (shouldHandleShortcut(e)) {
+    const { showDebugAndPreviewPanel } = workflowStore.getState()
+    if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) {
       e.preventDefault()
       handleNodesPaste()
     }
@@ -99,7 +100,8 @@ export const useShortcuts = (): void => {
   }, { exactMatch: true, useCapture: true })

   useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.z`, (e) => {
-    if (shouldHandleShortcut(e)) {
+    const { showDebugAndPreviewPanel } = workflowStore.getState()
+    if (shouldHandleShortcut(e) && !showDebugAndPreviewPanel) {
       e.preventDefault()
       workflowHistoryShortcutsEnabled && handleHistoryBack()
     }
@@ -8,6 +8,7 @@ import {
 } from '@remixicon/react'
 import produce from 'immer'
 import { useStoreApi } from 'reactflow'
+import useAvailableVarList from '../../hooks/use-available-var-list'
 import VarReferencePopup from './var-reference-popup'
 import { getNodeInfoById, isConversationVar, isENV, isSystemVar } from './utils'
 import ConstantField from './constant-field'
@@ -26,7 +27,6 @@ import {
 } from '@/app/components/base/portal-to-follow-elem'
 import {
   useIsChatMode,
-  useWorkflow,
   useWorkflowVariables,
 } from '@/app/components/workflow/hooks'
 import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types'
@@ -67,7 +67,7 @@ const VarReferencePicker: FC<Props> = ({
   onlyLeafNodeVar,
   filterVar = () => true,
   availableNodes: passedInAvailableNodes,
-  availableVars,
+  availableVars: passedInAvailableVars,
   isAddBtnTrigger,
   schema,
   valueTypePlaceHolder,
@@ -79,11 +79,12 @@ const VarReferencePicker: FC<Props> = ({
   } = store.getState()
   const isChatMode = useIsChatMode()

-  const { getTreeLeafNodes, getBeforeNodesInSameBranch } = useWorkflow()
-  const { getCurrentVariableType, getNodeAvailableVars } = useWorkflowVariables()
-  const availableNodes = useMemo(() => {
-    return passedInAvailableNodes || (onlyLeafNodeVar ? getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId))
-  }, [getBeforeNodesInSameBranch, getTreeLeafNodes, nodeId, onlyLeafNodeVar, passedInAvailableNodes])
+  const { getCurrentVariableType } = useWorkflowVariables()
+  const { availableNodes, availableVars } = useAvailableVarList(nodeId, {
+    onlyLeafNodeVar,
+    passedInAvailableNodes,
+    filterVar,
+  })
   const startNode = availableNodes.find((node: any) => {
     return node.data.type === BlockEnum.Start
   })
@@ -102,19 +103,8 @@ const VarReferencePicker: FC<Props> = ({

   const [varKindType, setVarKindType] = useState<VarKindType>(defaultVarKindType)
   const isConstant = isSupportConstantValue && varKindType === VarKindType.constant
-  const outputVars = useMemo(() => {
-    if (availableVars)
-      return availableVars
-
-    const vars = getNodeAvailableVars({
-      parentNode: iterationNode,
-      beforeNodes: availableNodes,
-      isChatMode,
-      filterVar,
-    })
-
-    return vars
-  }, [iterationNode, availableNodes, isChatMode, filterVar, availableVars, getNodeAvailableVars])
+  const outputVars = useMemo(() => (passedInAvailableVars || availableVars), [passedInAvailableVars, availableVars])

   const [open, setOpen] = useState(false)
   useEffect(() => {
@@ -4,12 +4,13 @@ import {
   useWorkflow,
   useWorkflowVariables,
 } from '@/app/components/workflow/hooks'
-import type { ValueSelector, Var } from '@/app/components/workflow/types'
+import type { Node, ValueSelector, Var } from '@/app/components/workflow/types'

 type Params = {
   onlyLeafNodeVar?: boolean
   hideEnv?: boolean
   hideChatVar?: boolean
   filterVar: (payload: Var, selector: ValueSelector) => boolean
+  passedInAvailableNodes?: Node[]
 }
@@ -17,6 +18,7 @@ const useAvailableVarList = (nodeId: string, {
   filterVar,
   hideEnv,
   hideChatVar,
+  passedInAvailableNodes,
 }: Params = {
   onlyLeafNodeVar: false,
   filterVar: () => true,
@@ -25,7 +27,7 @@ const useAvailableVarList = (nodeId: string, {
   const { getNodeAvailableVars } = useWorkflowVariables()
   const isChatMode = useIsChatMode()

-  const availableNodes = onlyLeafNodeVar ? getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId)
+  const availableNodes = passedInAvailableNodes || (onlyLeafNodeVar ? getTreeLeafNodes(nodeId) : getBeforeNodesInSameBranch(nodeId))

   const {
     parentNode: iterationNode,
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.8.1",
+  "version": "0.8.2",
   "private": true,
   "engines": {
     "node": ">=18.17.0"