Compare commits


25 Commits

Author SHA1 Message Date
-LAN-
0ff8bd2aa9 chore: bump version to 0.13.2 (#11489)
Signed-off-by: -LAN- <laipz8200@outlook.com>
2024-12-09 17:57:23 +08:00
Hash Brown
2866383228 fix: cannot close notification manually (#11490) 2024-12-09 17:55:06 +08:00
Jyong
00ac7edeb3 improve message clean logic (#11487) 2024-12-09 16:12:30 +08:00
-LAN-
537068cfde refactor(iteration_node): use Sequence and Mapping in parameters (#11483)
Signed-off-by: -LAN- <laipz8200@outlook.com>
2024-12-09 15:41:20 +08:00
suzuki.sh
c3c6a48059 Fix the token count at the iteration node (#11235)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-12-09 15:02:04 +08:00
zhaobingshuang
5c166b3f40 fix: tags could not be saved when the Workflow Tool was created (#11481)
Co-authored-by: zhaobs <zhaobs@cailian.net>
2024-12-09 14:38:02 +08:00
kurokobo
230fa3286b feat: add 'Open in Explore' link for each apps on studio (#11402) 2024-12-09 12:04:03 +08:00
Muneyuki Noguchi
061c0b10fd Fix the Japanese translation for 'Detail' (#11476) 2024-12-09 11:18:28 +08:00
Yi Xiao
32f8a98cf8 feat: ifelse condition variable editable after selection (#11431) 2024-12-09 11:06:47 +08:00
Charlie.Wei
6c60ecb237 Refactor: Remove redundant style and simplify Mermaid component (#11472) 2024-12-09 09:47:58 +08:00
xiandan-erizo
c3fae5e801 Update ext_redis.py (#11214) 2024-12-09 09:35:52 +08:00
VoidIsVoid
a594e256ae remove mermail render cache (#11470)
Co-authored-by: Gimling <huangjl@ruyi.ai>
2024-12-09 09:33:18 +08:00
Trey Dong
41d90c2408 fix(api): throw error when notion block can not find (#11433) 2024-12-09 09:10:59 +08:00
yihong
7ff42b1b7a fix: unit tests env will need clear too (#11445)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2024-12-09 09:04:11 +08:00
Kazuki Takamatsu
4d7cfd0de5 Fix model provider of vertex ai (#11437) 2024-12-08 08:44:49 +08:00
Hash Brown
266d32bd77 fix: cannot upload animated webp image as app icon (#11453) 2024-12-08 08:37:21 +08:00
非法操作
7e1184c071 feat: support json_schema for ollama models (#11449) 2024-12-08 08:36:12 +08:00
非法操作
1ce51e57ab feat: add gemini exp 1206 (#11444) 2024-12-07 22:28:10 +08:00
非法操作
142b4fd699 feat: add zhipu glm_4v_flash (#11440) 2024-12-07 22:27:57 +08:00
Hash Brown
cc8feaa483 style: EmojiPicker component top padding (#11452) 2024-12-07 22:26:28 +08:00
yihong
d9d5d35a77 fix: issue #10596 by making the iteration node outputs right (#11394)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
Signed-off-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-12-07 16:28:15 +08:00
Huỳnh Gia Bôi
9277156b6c fix(document_extractor): pptx file type and missing metadata_filename UnstructuredIO (#11364)
Co-authored-by: Julian Huynh <julian.huynh@immersio.io>
2024-12-06 18:55:59 +08:00
KVOJJJin
1490a19fa1 Fix: compatible with outputs data structure (#11432) 2024-12-06 17:35:35 +08:00
Jyong
9b7adcd4d9 update tidb batch get endpoint to basic mode (#11426) 2024-12-06 17:06:46 +08:00
Jyong
a8d32f9964 fix external retrieval without segment id (#11423) 2024-12-06 14:45:15 +08:00
43 changed files with 439 additions and 179 deletions

View File

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.13.1",
default="0.13.2",
)
COMMIT_SHA: str = Field(

View File

@@ -1,5 +1,6 @@
from datetime import UTC, datetime
from flask import request
from flask_login import current_user
from flask_restful import Resource, inputs, marshal_with, reqparse
from sqlalchemy import and_
@@ -20,8 +21,17 @@ class InstalledAppsListApi(Resource):
@account_initialization_required
@marshal_with(installed_app_list_fields)
def get(self):
app_id = request.args.get("app_id", default=None, type=str)
current_tenant_id = current_user.current_tenant_id
installed_apps = db.session.query(InstalledApp).filter(InstalledApp.tenant_id == current_tenant_id).all()
if app_id:
installed_apps = (
db.session.query(InstalledApp)
.filter(and_(InstalledApp.tenant_id == current_tenant_id, InstalledApp.app_id == app_id))
.all()
)
else:
installed_apps = db.session.query(InstalledApp).filter(InstalledApp.tenant_id == current_tenant_id).all()
current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
installed_apps = [
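Context for the hunk above: `app_id` becomes an optional query-string filter on the installed-apps listing, which is what the new "Open in Explore" link (#11402) uses to look up a single app. A minimal sketch of the same optional-filter pattern, assuming a SQLAlchemy session and the `InstalledApp` model from this file; chaining `.filter()` twice is equivalent to the `and_()` form in the diff:

def list_installed_apps(session, tenant_id: str, app_id: str | None = None):
    # Base query: everything installed for the tenant.
    query = session.query(InstalledApp).filter(InstalledApp.tenant_id == tenant_id)
    if app_id:
        # Narrow to a single app when ?app_id=... is supplied.
        query = query.filter(InstalledApp.app_id == app_id)
    return query.all()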

View File

@@ -368,6 +368,7 @@ class ToolWorkflowProviderCreateApi(Resource):
description=args["description"],
parameters=args["parameters"],
privacy_policy=args["privacy_policy"],
labels=args["labels"],
)

View File

@@ -82,7 +82,7 @@ class AppGenerateResponseConverter(ABC):
for resource in metadata["retriever_resources"]:
updated_resources.append(
{
"segment_id": resource["segment_id"],
"segment_id": resource.get("segment_id", ""),
"position": resource["position"],
"document_name": resource["document_name"],
"score": resource["score"],

View File

@@ -2,7 +2,7 @@ from datetime import datetime
from enum import Enum, StrEnum
from typing import Any, Optional
from pydantic import BaseModel, field_validator
from pydantic import BaseModel
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.workflow.entities.node_entities import NodeRunMetadataKey
@@ -113,18 +113,6 @@ class QueueIterationNextEvent(AppQueueEvent):
output: Optional[Any] = None # output for the current iteration
duration: Optional[float] = None
@field_validator("output", mode="before")
@classmethod
def set_output(cls, v):
"""
Set output
"""
if v is None:
return None
if isinstance(v, int | float | str | bool | dict | list):
return v
raise ValueError("output must be a valid type")
class QueueIterationCompletedEvent(AppQueueEvent):
"""

View File

@@ -0,0 +1,38 @@
model: gemini-exp-1206
label:
en_US: Gemini exp 1206
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 2097152
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@@ -181,9 +181,11 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
# prepare the payload for a simple ping to the model
data = {"model": model, "stream": stream}
if "format" in model_parameters:
data["format"] = model_parameters["format"]
del model_parameters["format"]
if format_schema := model_parameters.pop("format", None):
try:
data["format"] = format_schema if format_schema == "json" else json.loads(format_schema)
except json.JSONDecodeError as e:
raise InvokeBadRequestError(f"Invalid format schema: {str(e)}")
if "keep_alive" in model_parameters:
data["keep_alive"] = model_parameters["keep_alive"]
@@ -733,12 +735,12 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
ParameterRule(
name="format",
label=I18nObject(en_US="Format", zh_Hans="返回格式"),
type=ParameterType.STRING,
type=ParameterType.TEXT,
default="json",
help=I18nObject(
en_US="the format to return a response in. Currently the only accepted value is json.",
zh_Hans="返回响应的格式。目前唯一接受的值是json。",
en_US="the format to return a response in. Format can be `json` or a JSON schema.",
zh_Hans="返回响应的格式。目前接受的值是字符串`json`或JSON schema.",
),
options=["json"],
),
],
pricing=PriceConfig(
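The new `format` handling pops the parameter and accepts either the literal string "json" or a JSON-schema string, which it parses for Ollama's structured-output mode. A standalone sketch of that branch, with `ValueError` standing in for `InvokeBadRequestError`:

import json

def build_format_field(model_parameters: dict) -> dict:
    data = {}
    if format_schema := model_parameters.pop("format", None):
        try:
            # "json" passes through verbatim; anything else must parse as a JSON schema.
            data["format"] = format_schema if format_schema == "json" else json.loads(format_schema)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid format schema: {e}")
    return data

print(build_format_field({"format": "json"}))                # {'format': 'json'}
print(build_format_field({"format": '{"type": "object"}'}))  # {'format': {'type': 'object'}}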

View File

@@ -104,13 +104,14 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
"""
# use Anthropic official SDK references
# - https://github.com/anthropics/anthropic-sdk-python
service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]
SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
token = ""
# get access token from service account credential
if service_account_info:
if service_account_key:
service_account_info = json.loads(base64.b64decode(service_account_key))
credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
request = google.auth.transport.requests.Request()
credentials.refresh(request)
@@ -478,10 +479,11 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
if stop:
config_kwargs["stop_sequences"] = stop
service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]
location = credentials["vertex_location"]
if service_account_info:
if service_account_key:
service_account_info = json.loads(base64.b64decode(service_account_key))
service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
else:
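This hunk, and the embedding-model hunks in the next file, repeat one pattern: `vertex_service_account_key` becomes optional, and base64 decoding only happens when a key is actually present; otherwise the SDK falls back to the environment's default credentials. The old `credentials["vertex_service_account_key"]` raised `KeyError` whenever the field was absent, which is what #11437 fixes. A condensed sketch:

import base64
import json

def load_service_account_info(credentials: dict) -> dict | None:
    service_account_key = credentials.get("vertex_service_account_key", "")
    if not service_account_key:
        return None  # caller falls back to application-default credentials
    return json.loads(base64.b64decode(service_account_key))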

View File

@@ -48,10 +48,11 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
:param input_type: input type
:return: embeddings result
"""
service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]
location = credentials["vertex_location"]
if service_account_info:
if service_account_key:
service_account_info = json.loads(base64.b64decode(service_account_key))
service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
else:
@@ -100,10 +101,11 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
:return:
"""
try:
service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
service_account_key = credentials.get("vertex_service_account_key", "")
project_id = credentials["vertex_project_id"]
location = credentials["vertex_location"]
if service_account_info:
if service_account_key:
service_account_info = json.loads(base64.b64decode(service_account_key))
service_accountSA = service_account.Credentials.from_service_account_info(service_account_info)
aiplatform.init(credentials=service_accountSA, project=project_id, location=location)
else:

View File

@@ -0,0 +1,52 @@
model: glm-4v-flash
label:
en_US: glm-4v-flash
model_type: llm
model_properties:
mode: chat
context_size: 2048
features:
- vision
parameter_rules:
- name: temperature
use_template: temperature
default: 0.95
min: 0.0
max: 1.0
help:
zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: top_p
use_template: top_p
default: 0.6
help:
zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
- name: do_sample
label:
zh_Hans: 采样策略
en_US: Sampling strategy
type: boolean
help:
zh_Hans: do_sample 为 true 时启用采样策略do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。
en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
default: true
- name: max_tokens
use_template: max_tokens
default: 1024
min: 1
max: 1024
- name: web_search
type: boolean
label:
zh_Hans: 联网搜索
en_US: Web Search
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: RMB

View File

@@ -144,7 +144,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}:
if isinstance(copy_prompt_message.content, list):
# check if model is 'glm-4v'
if model not in {"glm-4v", "glm-4v-plus"}:
if not model.startswith("glm-4v"):
# not support list message
continue
# get image and
@@ -188,7 +188,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
else:
model_parameters["tools"] = [web_search_params]
if model in {"glm-4v", "glm-4v-plus"}:
if model.startswith("glm-4v"):
params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
else:
params = {"model": model, "messages": [], **model_parameters}
@@ -412,6 +412,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
human_prompt = "\n\nHuman:"
ai_prompt = "\n\nAssistant:"
content = message.content
if isinstance(content, list):
content = "".join(c.data for c in content if c.type == PromptMessageContentType.TEXT)
if isinstance(message, UserPromptMessage):
message_text = f"{human_prompt} {content}"

View File

@@ -162,7 +162,7 @@ class TidbService:
clusters = []
tidb_serverless_list_map = {item.cluster_id: item for item in tidb_serverless_list}
cluster_ids = [item.cluster_id for item in tidb_serverless_list]
params = {"clusterIds": cluster_ids, "view": "FULL"}
params = {"clusterIds": cluster_ids, "view": "BASIC"}
response = requests.get(
f"{api_url}/clusters:batchGet", params=params, auth=HTTPDigestAuth(public_key, private_key)
)
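Only the `view` parameter changes here, but for reference, a self-contained sketch of the batch call as the diff makes it (TiDB Cloud's `clusters:batchGet` with digest auth; per the commit title, "BASIC" requests the basic cluster representation instead of "FULL"):

import requests
from requests.auth import HTTPDigestAuth

def batch_get_clusters(api_url: str, cluster_ids: list[str], public_key: str, private_key: str) -> dict:
    params = {"clusterIds": cluster_ids, "view": "BASIC"}
    response = requests.get(
        f"{api_url}/clusters:batchGet", params=params, auth=HTTPDigestAuth(public_key, private_key)
    )
    response.raise_for_status()  # hedged addition: the diff itself does not check the status
    return response.json()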

View File

@@ -1,3 +1,4 @@
from collections.abc import Mapping
from datetime import datetime
from typing import Any, Optional
@@ -140,8 +141,8 @@ class BaseIterationEvent(GraphEngineEvent):
class IterationRunStartedEvent(BaseIterationEvent):
start_at: datetime = Field(..., description="start at")
inputs: Optional[dict[str, Any]] = None
metadata: Optional[dict[str, Any]] = None
inputs: Optional[Mapping[str, Any]] = None
metadata: Optional[Mapping[str, Any]] = None
predecessor_node_id: Optional[str] = None
@@ -153,18 +154,18 @@ class IterationRunNextEvent(BaseIterationEvent):
class IterationRunSucceededEvent(BaseIterationEvent):
start_at: datetime = Field(..., description="start at")
inputs: Optional[dict[str, Any]] = None
outputs: Optional[dict[str, Any]] = None
metadata: Optional[dict[str, Any]] = None
inputs: Optional[Mapping[str, Any]] = None
outputs: Optional[Mapping[str, Any]] = None
metadata: Optional[Mapping[str, Any]] = None
steps: int = 0
iteration_duration_map: Optional[dict[str, float]] = None
class IterationRunFailedEvent(BaseIterationEvent):
start_at: datetime = Field(..., description="start at")
inputs: Optional[dict[str, Any]] = None
outputs: Optional[dict[str, Any]] = None
metadata: Optional[dict[str, Any]] = None
inputs: Optional[Mapping[str, Any]] = None
outputs: Optional[Mapping[str, Any]] = None
metadata: Optional[Mapping[str, Any]] = None
steps: int = 0
error: str = Field(..., description="failed reason")
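Typing `inputs`/`outputs`/`metadata` as `Mapping` rather than `dict` lets callers pass any mapping and documents that the events treat them as read-only; pydantic v2 validates `Mapping[str, Any]` fields much as it does `dict`. A minimal sketch, with `Event` standing in for the iteration event classes:

from collections.abc import Mapping
from types import MappingProxyType
from typing import Any, Optional
from pydantic import BaseModel

class Event(BaseModel):
    inputs: Optional[Mapping[str, Any]] = None

Event(inputs={"x": 1})
Event(inputs=MappingProxyType({"x": 1}))  # any Mapping works; the annotation signals read-only intent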

View File

@@ -1,6 +1,8 @@
import csv
import io
import json
import os
import tempfile
import docx
import pandas as pd
@@ -264,14 +266,20 @@ def _extract_text_from_ppt(file_content: bytes) -> str:
def _extract_text_from_pptx(file_content: bytes) -> str:
try:
with io.BytesIO(file_content) as file:
if dify_config.UNSTRUCTURED_API_URL and dify_config.UNSTRUCTURED_API_KEY:
elements = partition_via_api(
file=file,
api_url=dify_config.UNSTRUCTURED_API_URL,
api_key=dify_config.UNSTRUCTURED_API_KEY,
)
else:
if dify_config.UNSTRUCTURED_API_URL and dify_config.UNSTRUCTURED_API_KEY:
with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as temp_file:
temp_file.write(file_content)
temp_file.flush()
with open(temp_file.name, "rb") as file:
elements = partition_via_api(
file=file,
metadata_filename=temp_file.name,
api_url=dify_config.UNSTRUCTURED_API_URL,
api_key=dify_config.UNSTRUCTURED_API_KEY,
)
os.unlink(temp_file.name)
else:
with io.BytesIO(file_content) as file:
elements = partition_pptx(file=file)
return "\n".join([getattr(element, "text", "") for element in elements])
except Exception as e:
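Per the commit title (#11364), the rewrite addresses the pptx file type and the missing `metadata_filename` when going through Unstructured's hosted API: an in-memory `BytesIO` has no name, so the API gets no filename or `.pptx` suffix to work from, while a named temp file supplies both. A sketch of the temp-file dance, with the API call left as a comment:

import os
import tempfile

def write_named_pptx(file_content: bytes) -> str:
    with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as temp_file:
        temp_file.write(file_content)
        temp_file.flush()
    return temp_file.name  # file persists because delete=False

path = write_named_pptx(b"PK\x03\x04...")  # illustrative, truncated pptx bytes
try:
    with open(path, "rb") as f:
        pass  # elements = partition_via_api(file=f, metadata_filename=path, ...)
finally:
    os.unlink(path)  # a try/finally is slightly safer than the diff's unconditional unlink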

View File

@@ -9,7 +9,7 @@ from typing import TYPE_CHECKING, Any, Optional, cast
from flask import Flask, current_app
from configs import dify_config
from core.model_runtime.utils.encoders import jsonable_encoder
from core.variables import IntegerVariable
from core.workflow.entities.node_entities import (
NodeRunMetadataKey,
NodeRunResult,
@@ -155,32 +155,34 @@ class IterationNode(BaseNode[IterationNodeData]):
iteration_node_data=self.node_data,
index=0,
pre_iteration_output=None,
duration=None,
)
iter_run_map: dict[str, float] = {}
outputs: list[Any] = [None] * len(iterator_list_value)
try:
if self.node_data.is_parallel:
futures: list[Future] = []
q = Queue()
q: Queue = Queue()
thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
for index, item in enumerate(iterator_list_value):
future: Future = thread_pool.submit(
self._run_single_iter_parallel,
current_app._get_current_object(),
q,
iterator_list_value,
inputs,
outputs,
start_at,
graph_engine,
iteration_graph,
index,
item,
iter_run_map,
flask_app=current_app._get_current_object(), # type: ignore
q=q,
iterator_list_value=iterator_list_value,
inputs=inputs,
outputs=outputs,
start_at=start_at,
graph_engine=graph_engine,
iteration_graph=iteration_graph,
index=index,
item=item,
iter_run_map=iter_run_map,
)
future.add_done_callback(thread_pool.task_done_callback)
futures.append(future)
succeeded_count = 0
empty_count = 0
while True:
try:
event = q.get(timeout=1)
@@ -208,17 +210,22 @@ class IterationNode(BaseNode[IterationNodeData]):
else:
for _ in range(len(iterator_list_value)):
yield from self._run_single_iter(
iterator_list_value,
variable_pool,
inputs,
outputs,
start_at,
graph_engine,
iteration_graph,
iter_run_map,
iterator_list_value=iterator_list_value,
variable_pool=variable_pool,
inputs=inputs,
outputs=outputs,
start_at=start_at,
graph_engine=graph_engine,
iteration_graph=iteration_graph,
iter_run_map=iter_run_map,
)
if self.node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
outputs = [output for output in outputs if output is not None]
# Flatten the list of lists
if isinstance(outputs, list) and all(isinstance(output, list) for output in outputs):
outputs = [item for sublist in outputs for item in sublist]
yield IterationRunSucceededEvent(
iteration_id=self.id,
iteration_node_id=self.node_id,
@@ -226,7 +233,7 @@ class IterationNode(BaseNode[IterationNodeData]):
iteration_node_data=self.node_data,
start_at=start_at,
inputs=inputs,
outputs={"output": jsonable_encoder(outputs)},
outputs={"output": outputs},
steps=len(iterator_list_value),
metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
)
@@ -234,8 +241,11 @@ class IterationNode(BaseNode[IterationNodeData]):
yield RunCompletedEvent(
run_result=NodeRunResult(
status=WorkflowNodeExecutionStatus.SUCCEEDED,
outputs={"output": jsonable_encoder(outputs)},
metadata={NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map},
outputs={"output": outputs},
metadata={
NodeRunMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
NodeRunMetadataKey.TOTAL_TOKENS: graph_engine.graph_runtime_state.total_tokens,
},
)
)
except IterationNodeError as e:
@@ -248,7 +258,7 @@ class IterationNode(BaseNode[IterationNodeData]):
iteration_node_data=self.node_data,
start_at=start_at,
inputs=inputs,
outputs={"output": jsonable_encoder(outputs)},
outputs={"output": outputs},
steps=len(iterator_list_value),
metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
error=str(e),
@@ -280,7 +290,7 @@ class IterationNode(BaseNode[IterationNodeData]):
:param node_data: node data
:return:
"""
variable_mapping = {
variable_mapping: dict[str, Sequence[str]] = {
f"{node_id}.input_selector": node_data.iterator_selector,
}
@@ -308,7 +318,7 @@ class IterationNode(BaseNode[IterationNodeData]):
sub_node_variable_mapping = node_cls.extract_variable_selector_to_variable_mapping(
graph_config=graph_config, config=sub_node_config
)
sub_node_variable_mapping = cast(dict[str, list[str]], sub_node_variable_mapping)
sub_node_variable_mapping = cast(dict[str, Sequence[str]], sub_node_variable_mapping)
except NotImplementedError:
sub_node_variable_mapping = {}
@@ -329,8 +339,12 @@ class IterationNode(BaseNode[IterationNodeData]):
return variable_mapping
def _handle_event_metadata(
self, event: BaseNodeEvent, iter_run_index: str, parallel_mode_run_id: str
) -> NodeRunStartedEvent | BaseNodeEvent:
self,
*,
event: BaseNodeEvent | InNodeEvent,
iter_run_index: int,
parallel_mode_run_id: str | None,
) -> NodeRunStartedEvent | BaseNodeEvent | InNodeEvent:
"""
add iteration metadata to event.
"""
@@ -355,9 +369,10 @@ class IterationNode(BaseNode[IterationNodeData]):
def _run_single_iter(
self,
iterator_list_value: list[str],
*,
iterator_list_value: Sequence[str],
variable_pool: VariablePool,
inputs: dict[str, list],
inputs: Mapping[str, list],
outputs: list,
start_at: datetime,
graph_engine: "GraphEngine",
@@ -373,12 +388,12 @@ class IterationNode(BaseNode[IterationNodeData]):
try:
rst = graph_engine.run()
# get current iteration index
current_index = variable_pool.get([self.node_id, "index"]).value
index_variable = variable_pool.get([self.node_id, "index"])
if not isinstance(index_variable, IntegerVariable):
raise IterationIndexNotFoundError(f"iteration {self.node_id} current index not found")
current_index = index_variable.value
iteration_run_id = parallel_mode_run_id if parallel_mode_run_id is not None else f"{current_index}"
next_index = int(current_index) + 1
if current_index is None:
raise IterationIndexNotFoundError(f"iteration {self.node_id} current index not found")
for event in rst:
if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id:
event.in_iteration_id = self.node_id
@@ -391,7 +406,9 @@ class IterationNode(BaseNode[IterationNodeData]):
continue
if isinstance(event, NodeRunSucceededEvent):
yield self._handle_event_metadata(event, current_index, parallel_mode_run_id)
yield self._handle_event_metadata(
event=event, iter_run_index=current_index, parallel_mode_run_id=parallel_mode_run_id
)
elif isinstance(event, BaseGraphEvent):
if isinstance(event, GraphRunFailedEvent):
# iteration run failed
@@ -404,7 +421,7 @@ class IterationNode(BaseNode[IterationNodeData]):
parallel_mode_run_id=parallel_mode_run_id,
start_at=start_at,
inputs=inputs,
outputs={"output": jsonable_encoder(outputs)},
outputs={"output": outputs},
steps=len(iterator_list_value),
metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
error=event.error,
@@ -417,7 +434,7 @@ class IterationNode(BaseNode[IterationNodeData]):
iteration_node_data=self.node_data,
start_at=start_at,
inputs=inputs,
outputs={"output": jsonable_encoder(outputs)},
outputs={"output": outputs},
steps=len(iterator_list_value),
metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
error=event.error,
@@ -429,9 +446,11 @@ class IterationNode(BaseNode[IterationNodeData]):
)
)
return
else:
event = cast(InNodeEvent, event)
metadata_event = self._handle_event_metadata(event, current_index, parallel_mode_run_id)
elif isinstance(event, InNodeEvent):
# event = cast(InNodeEvent, event)
metadata_event = self._handle_event_metadata(
event=event, iter_run_index=current_index, parallel_mode_run_id=parallel_mode_run_id
)
if isinstance(event, NodeRunFailedEvent):
if self.node_data.error_handle_mode == ErrorHandleMode.CONTINUE_ON_ERROR:
yield NodeInIterationFailedEvent(
@@ -513,7 +532,7 @@ class IterationNode(BaseNode[IterationNodeData]):
iteration_node_data=self.node_data,
index=next_index,
parallel_mode_run_id=parallel_mode_run_id,
pre_iteration_output=jsonable_encoder(current_iteration_output) if current_iteration_output else None,
pre_iteration_output=current_iteration_output or None,
duration=duration,
)
@@ -540,10 +559,11 @@ class IterationNode(BaseNode[IterationNodeData]):
def _run_single_iter_parallel(
self,
*,
flask_app: Flask,
q: Queue,
iterator_list_value: list[str],
inputs: dict[str, list],
iterator_list_value: Sequence[str],
inputs: Mapping[str, list],
outputs: list,
start_at: datetime,
graph_engine: "GraphEngine",
@@ -551,7 +571,7 @@ class IterationNode(BaseNode[IterationNodeData]):
index: int,
item: Any,
iter_run_map: dict[str, float],
) -> Generator[NodeEvent | InNodeEvent, None, None]:
):
"""
run single iteration in parallel mode
"""

View File

@@ -253,6 +253,8 @@ class NotionOAuth(OAuthDataSource):
}
response = requests.get(url=f"{self._NOTION_BLOCK_SEARCH}/{block_id}", headers=headers)
response_json = response.json()
if response.status_code != 200:
raise ValueError(f"Error fetching block parent page ID: {response_json.message}")
parent = response_json["parent"]
parent_type = parent["type"]
if parent_type == "block_id":

View File

@@ -36,14 +36,16 @@ def clean_messages():
db.session.query(Message)
.filter(Message.created_at < plan_sandbox_clean_message_day)
.order_by(Message.created_at.desc())
.paginate(page=page, per_page=100)
.limit(100)
.all()
)
except NotFound:
break
if messages.items is None or len(messages.items) == 0:
if not messages:
break
for message in messages.items:
for message in messages:
plan_sandbox_clean_message_day = message.created_at
app = App.query.filter_by(id=message.app_id).first()
features_cache_key = f"features:{app.tenant_id}"
plan_cache = redis_client.get(features_cache_key)
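Swapping `paginate(...)` for `limit(100)` turns the cleanup into cursor-style batching: each pass fetches at most 100 messages older than a moving `created_at` cutoff, and the cutoff advances inside the loop. The shape of that loop, assuming the `Message` model from the diff, with per-message cleanup elided:

def clean_messages_in_batches(session, cutoff):
    while True:
        messages = (
            session.query(Message)
            .filter(Message.created_at < cutoff)
            .order_by(Message.created_at.desc())
            .limit(100)
            .all()
        )
        if not messages:
            break
        for message in messages:
            cutoff = message.created_at  # advance the cursor past this batch
            # ... per-message cleanup goes here ...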

View File

@@ -81,6 +81,10 @@ class WorkflowToolManageService:
db.session.add(workflow_tool_provider)
db.session.commit()
if labels is not None:
ToolLabelManager.update_tool_labels(
ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels
)
return {"result": "success"}
@classmethod

View File

@@ -37,7 +37,11 @@ def test_dify_config_undefined_entry(example_env_file):
assert config["LOG_LEVEL"] == "INFO"
# NOTE: If there is a `.env` file in your Workspace, this test might not succeed as expected.
# This is due to `pymilvus` loading all the variables from the `.env` file into `os.environ`.
def test_dify_config(example_env_file):
# clear system environment variables
os.environ.clear()
# load dotenv file with pydantic-settings
config = DifyConfig(_env_file=example_env_file)

View File

@@ -2,7 +2,7 @@ version: '3'
services:
# API service
api:
image: langgenius/dify-api:0.13.1
image: langgenius/dify-api:0.13.2
restart: always
environment:
# Startup mode, 'api' starts the API server.
@@ -227,7 +227,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.13.1
image: langgenius/dify-api:0.13.2
restart: always
environment:
CONSOLE_WEB_URL: ''
@@ -397,7 +397,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.13.1
image: langgenius/dify-web:0.13.2
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is

View File

@@ -292,7 +292,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.13.1
image: langgenius/dify-api:0.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -312,7 +312,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.13.1
image: langgenius/dify-api:0.13.2
restart: always
environment:
# Use the shared environment variables.
@@ -331,7 +331,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.13.1
image: langgenius/dify-web:0.13.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@@ -9,7 +9,7 @@ import s from './style.module.css'
import cn from '@/utils/classnames'
import type { App } from '@/types/app'
import Confirm from '@/app/components/base/confirm'
import { ToastContext } from '@/app/components/base/toast'
import Toast, { ToastContext } from '@/app/components/base/toast'
import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/apps'
import DuplicateAppModal from '@/app/components/app/duplicate-modal'
import type { DuplicateAppModalProps } from '@/app/components/app/duplicate-modal'
@@ -31,6 +31,7 @@ import TagSelector from '@/app/components/base/tag-management/selector'
import type { EnvironmentVariable } from '@/app/components/workflow/types'
import DSLExportConfirmModal from '@/app/components/workflow/dsl-export-confirm-modal'
import { fetchWorkflowDraft } from '@/service/workflow'
import { fetchInstalledAppList } from '@/service/explore'
export type AppCardProps = {
app: App
@@ -209,6 +210,21 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
e.preventDefault()
setShowConfirmDelete(true)
}
const onClickInstalledApp = async (e: React.MouseEvent<HTMLButtonElement>) => {
e.stopPropagation()
props.onClick?.()
e.preventDefault()
try {
const { installed_apps }: any = await fetchInstalledAppList(app.id) || {}
if (installed_apps?.length > 0)
window.open(`/explore/installed/${installed_apps[0].id}`, '_blank')
else
throw new Error('No app found in Explore')
}
catch (e: any) {
Toast.notify({ type: 'error', message: `${e.message || e}` })
}
}
return (
<div className="relative w-full py-1" onMouseLeave={onMouseLeave}>
<button className={s.actionItem} onClick={onClickSettings}>
@@ -233,6 +249,10 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
</>
)}
<Divider className="!my-1" />
<button className={s.actionItem} onClick={onClickInstalledApp}>
<span className={s.actionName}>{t('app.openInExplore')}</span>
</button>
<Divider className="!my-1" />
<div
className={cn(s.actionItem, s.deleteActionItem, 'group')}
onClick={onClickDelete}
@@ -353,10 +373,10 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
}
popupClassName={
(app.mode === 'completion' || app.mode === 'chat')
? '!w-[238px] translate-x-[-110px]'
: ''
? '!w-[256px] translate-x-[-224px]'
: '!w-[160px] translate-x-[-128px]'
}
className={'!w-[128px] h-fit !z-20'}
className={'h-fit !z-20'}
/>
</div>
</>

View File

@@ -5,7 +5,8 @@ import {
} from 'react'
import { useTranslation } from 'react-i18next'
import dayjs from 'dayjs'
import { RiArrowDownSLine } from '@remixicon/react'
import { RiArrowDownSLine, RiPlanetLine } from '@remixicon/react'
import Toast from '../../base/toast'
import type { ModelAndParameter } from '../configuration/debug/types'
import SuggestedAction from './suggested-action'
import PublishWithMultipleModel from './publish-with-multiple-model'
@@ -15,6 +16,7 @@ import {
PortalToFollowElemContent,
PortalToFollowElemTrigger,
} from '@/app/components/base/portal-to-follow-elem'
import { fetchInstalledAppList } from '@/service/explore'
import EmbeddedModal from '@/app/components/app/overview/embedded'
import { useStore as useAppStore } from '@/app/components/app/store'
import { useGetLanguage } from '@/context/i18n'
@@ -105,6 +107,19 @@ const AppPublisher = ({
setPublished(false)
}, [disabled, onToggle, open])
const handleOpenInExplore = useCallback(async () => {
try {
const { installed_apps }: any = await fetchInstalledAppList(appDetail?.id) || {}
if (installed_apps?.length > 0)
window.open(`/explore/installed/${installed_apps[0].id}`, '_blank')
else
throw new Error('No app found in Explore')
}
catch (e: any) {
Toast.notify({ type: 'error', message: `${e.message || e}` })
}
}, [appDetail?.id])
const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false)
return (
@@ -205,6 +220,15 @@ const AppPublisher = ({
{t('workflow.common.embedIntoSite')}
</SuggestedAction>
)}
<SuggestedAction
onClick={() => {
handleOpenInExplore()
}}
disabled={!publishedAt}
icon={<RiPlanetLine className='w-4 h-4' />}
>
{t('workflow.common.openInExplore')}
</SuggestedAction>
<SuggestedAction disabled={!publishedAt} link='./develop' icon={<FileText className='w-4 h-4' />}>{t('workflow.common.accessAPIReference')}</SuggestedAction>
{appDetail?.mode === 'workflow' && (
<WorkflowToolConfigureButton

View File

@@ -11,16 +11,19 @@ import { useDraggableUploader } from './hooks'
import { checkIsAnimatedImage } from './utils'
import { ALLOW_FILE_EXTENSIONS } from '@/types/app'
type UploaderProps = {
className?: string
onImageCropped?: (tempUrl: string, croppedAreaPixels: Area, fileName: string) => void
onUpload?: (file?: File) => void
export type OnImageInput = {
(isCropped: true, tempUrl: string, croppedAreaPixels: Area, fileName: string): void
(isCropped: false, file: File): void
}
const Uploader: FC<UploaderProps> = ({
type UploaderProps = {
className?: string
onImageInput?: OnImageInput
}
const ImageInput: FC<UploaderProps> = ({
className,
onImageCropped,
onUpload,
onImageInput,
}) => {
const [inputImage, setInputImage] = useState<{ file: File; url: string }>()
const [isAnimatedImage, setIsAnimatedImage] = useState<boolean>(false)
@@ -37,8 +40,7 @@ const Uploader: FC<UploaderProps> = ({
const onCropComplete = async (_: Area, croppedAreaPixels: Area) => {
if (!inputImage)
return
onImageCropped?.(inputImage.url, croppedAreaPixels, inputImage.file.name)
onUpload?.(undefined)
onImageInput?.(true, inputImage.url, croppedAreaPixels, inputImage.file.name)
}
const handleLocalFileInput = (e: ChangeEvent<HTMLInputElement>) => {
@@ -48,7 +50,7 @@ const Uploader: FC<UploaderProps> = ({
checkIsAnimatedImage(file).then((isAnimatedImage) => {
setIsAnimatedImage(!!isAnimatedImage)
if (isAnimatedImage)
onUpload?.(file)
onImageInput?.(false, file)
})
}
}
@@ -117,4 +119,4 @@ const Uploader: FC<UploaderProps> = ({
)
}
export default Uploader
export default ImageInput

View File

@@ -8,12 +8,14 @@ import Button from '../button'
import { ImagePlus } from '../icons/src/vender/line/images'
import { useLocalFileUploader } from '../image-uploader/hooks'
import EmojiPickerInner from '../emoji-picker/Inner'
import Uploader from './Uploader'
import type { OnImageInput } from './ImageInput'
import ImageInput from './ImageInput'
import s from './style.module.css'
import getCroppedImg from './utils'
import type { AppIconType, ImageFile } from '@/types/app'
import cn from '@/utils/classnames'
import { DISABLE_UPLOAD_IMAGE_AS_ICON } from '@/config'
export type AppIconEmojiSelection = {
type: 'emoji'
icon: string
@@ -69,14 +71,15 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
},
})
const [imageCropInfo, setImageCropInfo] = useState<{ tempUrl: string; croppedAreaPixels: Area; fileName: string }>()
const handleImageCropped = async (tempUrl: string, croppedAreaPixels: Area, fileName: string) => {
setImageCropInfo({ tempUrl, croppedAreaPixels, fileName })
}
type InputImageInfo = { file: File } | { tempUrl: string; croppedAreaPixels: Area; fileName: string }
const [inputImageInfo, setInputImageInfo] = useState<InputImageInfo>()
const [uploadImageInfo, setUploadImageInfo] = useState<{ file?: File }>()
const handleUpload = async (file?: File) => {
setUploadImageInfo({ file })
const handleImageInput: OnImageInput = async (isCropped: boolean, fileOrTempUrl: string | File, croppedAreaPixels?: Area, fileName?: string) => {
setInputImageInfo(
isCropped
? { tempUrl: fileOrTempUrl as string, croppedAreaPixels: croppedAreaPixels!, fileName: fileName! }
: { file: fileOrTempUrl as File },
)
}
const handleSelect = async () => {
@@ -90,15 +93,15 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
}
}
else {
if (!imageCropInfo && !uploadImageInfo)
if (!inputImageInfo)
return
setUploading(true)
if (imageCropInfo.file) {
handleLocalFileUpload(imageCropInfo.file)
if ('file' in inputImageInfo) {
handleLocalFileUpload(inputImageInfo.file)
return
}
const blob = await getCroppedImg(imageCropInfo.tempUrl, imageCropInfo.croppedAreaPixels, imageCropInfo.fileName)
const file = new File([blob], imageCropInfo.fileName, { type: blob.type })
const blob = await getCroppedImg(inputImageInfo.tempUrl, inputImageInfo.croppedAreaPixels, inputImageInfo.fileName)
const file = new File([blob], inputImageInfo.fileName, { type: blob.type })
handleLocalFileUpload(file)
}
}
@@ -127,10 +130,8 @@ const AppIconPicker: FC<AppIconPickerProps> = ({
</div>
</div>}
<Divider className='m-0' />
<EmojiPickerInner className={activeTab === 'emoji' ? 'block' : 'hidden'} onSelect={handleSelectEmoji} />
<Uploader className={activeTab === 'image' ? 'block' : 'hidden'} onImageCropped={handleImageCropped} onUpload={handleUpload}/>
<EmojiPickerInner className={cn(activeTab === 'emoji' ? 'block' : 'hidden', 'pt-2')} onSelect={handleSelectEmoji} />
<ImageInput className={activeTab === 'image' ? 'block' : 'hidden'} onImageInput={handleImageInput} />
<Divider className='m-0' />
<div className='w-full flex items-center justify-center p-3 gap-2'>

View File

@@ -116,12 +116,12 @@ export default async function getCroppedImg(
})
}
export function checkIsAnimatedImage(file) {
export function checkIsAnimatedImage(file: File): Promise<boolean> {
return new Promise((resolve, reject) => {
const fileReader = new FileReader()
fileReader.onload = function (e) {
const arr = new Uint8Array(e.target.result)
const arr = new Uint8Array(e.target?.result as ArrayBuffer)
// Check file extension
const fileName = file.name.toLowerCase()
@@ -148,7 +148,7 @@ export function checkIsAnimatedImage(file) {
}
// Function to check for WebP signature
function isWebP(arr) {
function isWebP(arr: Uint8Array) {
return (
arr[0] === 0x52 && arr[1] === 0x49 && arr[2] === 0x46 && arr[3] === 0x46
&& arr[8] === 0x57 && arr[9] === 0x45 && arr[10] === 0x42 && arr[11] === 0x50
@@ -156,7 +156,7 @@ function isWebP(arr) {
}
// Function to check if the WebP is animated (contains ANIM chunk)
function checkWebPAnimation(arr) {
function checkWebPAnimation(arr: Uint8Array) {
// Search for the ANIM chunk in WebP to determine if it's animated
for (let i = 12; i < arr.length - 4; i++) {
if (arr[i] === 0x41 && arr[i + 1] === 0x4E && arr[i + 2] === 0x49 && arr[i + 3] === 0x4D)

View File

@@ -68,7 +68,7 @@ const EmojiPickerInner: FC<IEmojiPickerInnerProps> = ({
}, [onSelect, selectedEmoji, selectedBackground])
return <div className={cn(className)}>
<div className='flex flex-col items-center w-full px-3'>
<div className='flex flex-col items-center w-full px-3 pb-2'>
<div className="relative w-full">
<div className="absolute inset-y-0 left-0 flex items-center pl-3 pointer-events-none">
<MagnifyingGlassIcon className="w-5 h-5 text-gray-400" aria-hidden="true" />

View File

@@ -158,13 +158,13 @@ export const isAllowedFileExtension = (fileName: string, fileMimetype: string, a
export const getFilesInLogs = (rawData: any) => {
const result = Object.keys(rawData || {}).map((key) => {
if (typeof rawData[key] === 'object' && rawData[key].dify_model_identity === '__dify__file__') {
if (typeof rawData[key] === 'object' && rawData[key]?.dify_model_identity === '__dify__file__') {
return {
varName: key,
list: getProcessedFilesFromResponse([rawData[key]]),
}
}
if (Array.isArray(rawData[key]) && rawData[key].some(item => item.dify_model_identity === '__dify__file__')) {
if (Array.isArray(rawData[key]) && rawData[key].some(item => item?.dify_model_identity === '__dify__file__')) {
return {
varName: key,
list: getProcessedFilesFromResponse(rawData[key]),

View File

@@ -1,7 +1,6 @@
import React, { useCallback, useEffect, useRef, useState } from 'react'
import mermaid from 'mermaid'
import { usePrevious } from 'ahooks'
import CryptoJS from 'crypto-js'
import { useTranslation } from 'react-i18next'
import { ExclamationTriangleIcon } from '@heroicons/react/24/outline'
import LoadingAnim from '@/app/components/base/chat/chat/loading-anim'
@@ -14,12 +13,6 @@ mermaidAPI = null
if (typeof window !== 'undefined')
mermaidAPI = mermaid.mermaidAPI
const style = {
minWidth: '480px',
height: 'auto',
overflow: 'auto',
}
const svgToBase64 = (svgGraph: string) => {
const svgBytes = new TextEncoder().encode(svgGraph)
const blob = new Blob([svgBytes], { type: 'image/svg+xml;charset=utf-8' })
@@ -38,7 +31,6 @@ const Flowchart = React.forwardRef((props: {
const [svgCode, setSvgCode] = useState(null)
const [look, setLook] = useState<'classic' | 'handDrawn'>('classic')
const chartId = useRef(`flowchart_${CryptoJS.MD5(props.PrimitiveCode).toString()}`)
const prevPrimitiveCode = usePrevious(props.PrimitiveCode)
const [isLoading, setIsLoading] = useState(true)
const timeRef = useRef<NodeJS.Timeout>()
@@ -51,12 +43,10 @@ const Flowchart = React.forwardRef((props: {
try {
if (typeof window !== 'undefined' && mermaidAPI) {
const svgGraph = await mermaidAPI.render(chartId.current, PrimitiveCode)
const svgGraph = await mermaidAPI.render('flowchart', PrimitiveCode)
const base64Svg: any = await svgToBase64(svgGraph.svg)
setSvgCode(base64Svg)
setIsLoading(false)
if (chartId.current && base64Svg)
localStorage.setItem(chartId.current, base64Svg)
}
}
catch (error) {
@@ -79,19 +69,11 @@ const Flowchart = React.forwardRef((props: {
},
})
localStorage.removeItem(chartId.current)
renderFlowchart(props.PrimitiveCode)
}
}, [look])
useEffect(() => {
const cachedSvg: any = localStorage.getItem(chartId.current)
if (cachedSvg) {
setSvgCode(cachedSvg)
setIsLoading(false)
return
}
if (timeRef.current)
clearTimeout(timeRef.current)
@@ -130,8 +112,8 @@ const Flowchart = React.forwardRef((props: {
</div>
{
svgCode
&& <div className="mermaid cursor-pointer" style={style} onClick={() => setImagePreviewUrl(svgCode)}>
{svgCode && <img src={svgCode} style={{ width: '100%', height: 'auto' }} alt="mermaid_chart" />}
&& <div className="mermaid cursor-pointer h-auto w-full object-fit: cover" onClick={() => setImagePreviewUrl(svgCode)}>
{svgCode && <img src={svgCode} alt="mermaid_chart" />}
</div>
}
{isLoading

View File

@@ -123,11 +123,25 @@ Toast.notify = ({
const holder = document.createElement('div')
const root = createRoot(holder)
root.render(<Toast type={type} size={size} message={message} duration={duration} className={className} />)
root.render(
<ToastContext.Provider value={{
notify: () => {},
close: () => {
if (holder) {
root.unmount()
holder.remove()
}
},
}}>
<Toast type={type} size={size} message={message} duration={duration} className={className} />
</ToastContext.Provider>,
)
document.body.appendChild(holder)
setTimeout(() => {
if (holder)
if (holder) {
root.unmount()
holder.remove()
}
}, duration || defaultDuring)
}
}

View File

@@ -72,7 +72,7 @@ const VariableTag = ({
{isEnv && <Env className='shrink-0 mr-0.5 w-3.5 h-3.5 text-util-colors-violet-violet-600' />}
{isChatVar && <BubbleX className='w-3.5 h-3.5 text-util-colors-teal-teal-700' />}
<div
className={cn('truncate text-text-accent font-medium', (isEnv || isChatVar) && 'text-text-secondary')}
className={cn('truncate ml-0.5 text-text-accent font-medium', (isEnv || isChatVar) && 'text-text-secondary')}
title={variableName}
>
{variableName}

View File

@@ -274,7 +274,7 @@ const VarReferenceVars: FC<Props> = ({
{
!hideSearch && (
<>
<div className={cn('mb-2 mx-1', searchBoxClassName)} onClick={e => e.stopPropagation()}>
<div className={cn('mb-1 mx-2 mt-2', searchBoxClassName)} onClick={e => e.stopPropagation()}>
<Input
showLeftIcon
showClearIcon

View File

@@ -25,10 +25,12 @@ import { FILE_TYPE_OPTIONS, SUB_VARIABLES, TRANSFER_METHOD } from '../../default
import ConditionWrap from '../condition-wrap'
import ConditionOperator from './condition-operator'
import ConditionInput from './condition-input'
import VariableTag from '@/app/components/workflow/nodes/_base/components/variable-tag'
import ConditionVarSelector from './condition-var-selector'
import type {
Node,
NodeOutPutVar,
ValueSelector,
Var,
} from '@/app/components/workflow/types'
import { VarType } from '@/app/components/workflow/types'
@@ -82,6 +84,7 @@ const ConditionItem = ({
const { t } = useTranslation()
const [isHovered, setIsHovered] = useState(false)
const [open, setOpen] = useState(false)
const doUpdateCondition = useCallback((newCondition: Condition) => {
if (isSubVariableKey)
@@ -190,6 +193,17 @@ const ConditionItem = ({
onRemoveCondition?.(caseId, condition.id)
}, [caseId, condition, conditionId, isSubVariableKey, onRemoveCondition, onRemoveSubVariableCondition])
const handleVarChange = useCallback((valueSelector: ValueSelector, varItem: Var) => {
const newCondition = produce(condition, (draft) => {
draft.variable_selector = valueSelector
draft.varType = varItem.type
draft.value = ''
draft.comparison_operator = getOperators(varItem.type)[0]
})
doUpdateCondition(newCondition)
setOpen(false)
}, [condition, doUpdateCondition])
return (
<div className={cn('flex mb-1 last-of-type:mb-0', className)}>
<div className={cn(
@@ -221,11 +235,14 @@ const ConditionItem = ({
/>
)
: (
<VariableTag
<ConditionVarSelector
open={open}
onOpenChange={setOpen}
valueSelector={condition.variable_selector || []}
varType={condition.varType}
availableNodes={availableNodes}
isShort
nodesOutputVars={nodesOutputVars}
onChange={handleVarChange}
/>
)}

View File

@@ -0,0 +1,58 @@
import { PortalToFollowElem, PortalToFollowElemContent, PortalToFollowElemTrigger } from '@/app/components/base/portal-to-follow-elem'
import VariableTag from '@/app/components/workflow/nodes/_base/components/variable-tag'
import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars'
import type { Node, NodeOutPutVar, ValueSelector, Var, VarType } from '@/app/components/workflow/types'
type ConditionVarSelectorProps = {
open: boolean
onOpenChange: (open: boolean) => void
valueSelector: ValueSelector
varType: VarType
availableNodes: Node[]
nodesOutputVars: NodeOutPutVar[]
onChange: (valueSelector: ValueSelector, varItem: Var) => void
}
const ConditionVarSelector = ({
open,
onOpenChange,
valueSelector,
varType,
availableNodes,
nodesOutputVars,
onChange,
}: ConditionVarSelectorProps) => {
return (
<PortalToFollowElem
open={open}
onOpenChange={onOpenChange}
placement='bottom-start'
offset={{
mainAxis: 4,
crossAxis: 0,
}}
>
<PortalToFollowElemTrigger onClick={() => onOpenChange(!open)}>
<div className="cursor-pointer">
<VariableTag
valueSelector={valueSelector}
varType={varType}
availableNodes={availableNodes}
isShort
/>
</div>
</PortalToFollowElemTrigger>
<PortalToFollowElemContent className='z-[1000]'>
<div className='w-[296px] bg-components-panel-bg-blur rounded-lg border-[0.5px] border-components-panel-border shadow-lg'>
<VarReferenceVars
vars={nodesOutputVars}
isSupportFileVar
onChange={onChange}
/>
</div>
</PortalToFollowElemContent>
</PortalToFollowElem>
)
}
export default ConditionVarSelector

View File

@@ -73,7 +73,7 @@ const ConditionValue = ({
<div
className={cn(
'shrink-0 truncate text-xs font-medium text-text-accent',
'shrink-0 ml-0.5 truncate text-xs font-medium text-text-accent',
!notHasValue && 'max-w-[70px]',
)}
title={variableName}

View File

@@ -35,12 +35,12 @@ const OutputPanel: FC<OutputPanelProps> = ({
for (const key in outputs) {
if (Array.isArray(outputs[key])) {
outputs[key].map((output: any) => {
if (output.dify_model_identity === '__dify__file__')
if (output?.dify_model_identity === '__dify__file__')
fileList.push(output)
return null
})
}
else if (outputs[key].dify_model_identity === '__dify__file__') {
else if (outputs[key]?.dify_model_identity === '__dify__file__') {
fileList.push(outputs[key])
}
}

View File

@@ -101,6 +101,7 @@ const translation = {
switchLabel: 'The app copy to be created',
removeOriginal: 'Delete the original app',
switchStart: 'Start switch',
openInExplore: 'Open in Explore',
typeSelector: {
all: 'ALL Types',
chatbot: 'Chatbot',

View File

@@ -32,6 +32,7 @@ const translation = {
restore: 'Restore',
runApp: 'Run App',
batchRunApp: 'Batch Run App',
openInExplore: 'Open in Explore',
accessAPIReference: 'Access API Reference',
embedIntoSite: 'Embed Into Site',
addTitle: 'Add title...',

View File

@@ -80,7 +80,7 @@ const translation = {
title: '会話ログ',
workflowTitle: 'ログの詳細',
fileListLabel: 'ファイルの詳細',
fileListDetail: 'ディテール',
fileListDetail: '詳細',
},
promptLog: 'プロンプトログ',
agentLog: 'エージェントログ',

View File

@@ -93,6 +93,7 @@ const translation = {
switchLabel: '作成されるアプリのコピー',
removeOriginal: '元のアプリを削除する',
switchStart: '切り替えを開始する',
openInExplore: '"探索" で開く',
typeSelector: {
all: 'すべてのタイプ',
chatbot: 'チャットボット',

View File

@@ -32,6 +32,7 @@ const translation = {
restore: '復元',
runApp: 'アプリを実行',
batchRunApp: 'バッチでアプリを実行',
openInExplore: '"探索" で開く',
accessAPIReference: 'APIリファレンスにアクセス',
embedIntoSite: 'サイトに埋め込む',
addTitle: 'タイトルを追加...',

View File

@@ -1,6 +1,6 @@
{
"name": "dify-web",
"version": "0.13.1",
"version": "0.13.2",
"private": true,
"engines": {
"node": ">=18.17.0"

View File

@@ -12,8 +12,8 @@ export const fetchAppDetail = (id: string): Promise<any> => {
return get(`/explore/apps/${id}`)
}
export const fetchInstalledAppList = () => {
return get('/installed-apps')
export const fetchInstalledAppList = (app_id?: string | null) => {
return get(`/installed-apps${app_id ? `?app_id=${app_id}` : ''}`)
}
export const installApp = (id: string) => {