Compare commits

..

20 Commits

b31ee1f6f7 (twwu, 2025-02-08 14:08:53 +08:00): feat: update styling and improve accessibility in retrieval modal; add translation for retrieval method
bb45f646dc (twwu, 2025-02-08 11:52:57 +08:00): feat: enhance styling and add history icon to dataset components
26bd253c2d (twwu, 2025-02-08 10:53:46 +08:00): Merge branch 'main' into feat/knowledge-dark-mode
0756b49a7c (twwu, 2025-02-08 10:39:28 +08:00): feat: improve styling and accessibility of dataset components
982bca5d40 (Xin Zhang, 2025-02-08 10:28:31 +08:00): fix: add rate limiting to prevent brute force on password reset (#13292)
c8dcde6cd0 (Kalo Chin, 2025-02-08 09:12:42 +08:00): fix: Gemini 2.0 Flash 001 model yaml file naming (#13372)
8f9db61688 (Riddhimaan-Senapati, 2025-02-08 09:12:22 +08:00): feat: added new silicon flow models (#13369)
ebdbaf34e6 (github-actions[bot], 2025-02-07 22:41:25 +08:00): chore: translate i18n files (#13349) [Co-authored-by: JzoNgKVO <27049666+JzoNgKVO@users.noreply.github.com>]
a081b1e79e (zhu-an, 2025-02-07 22:35:24 +08:00): fix: add compatibility config for third-party S3-compatible providers (#13354) [Co-authored-by: zhaoqingyu.1075 <zhaoqingyu.1075@bytedance.com>]
38c31e64db (Steven sun, 2025-02-07 22:16:26 +08:00): add enable_search parameter to qwen_max, plus, turbo (#13335) [Co-authored-by: steven <sunzwj@digitalchina.com>]
ae6f67420c (Yi Xiao, 2025-02-07 18:56:43 +08:00): Chore: update app detail panel (#13337)
25711ffae2 (twwu, 2025-02-07 16:49:10 +08:00): feat: enhance UI components with improved styling and icon updates
f127e10e0c (twwu, 2025-02-07 14:30:14 +08:00): feat: enhance document picker styles for dark mode... Merge branch 'main' into feat/knowledge-dark-mode
7616ef8c22 (twwu, 2025-01-24 10:06:47 +08:00): feat: enhance document picker styles for dark mode
6c69baf025 (twwu, 2025-01-23 16:47:26 +08:00): feat: update icons and styles in dataset components for improved UI consistency
08bd96f170 (twwu, 2025-01-23 15:46:12 +08:00): feat: update styling for dataset creation components and replace error message background
684f7188f4 (twwu, 2025-01-23 15:10:46 +08:00): Merge branch 'main' into feat/knowledge-dark-mode
ebad19c9f7 (twwu, 2025-01-23 15:09:35 +08:00): feat: update error message styles and add background gradients for dataset crawler
49674507c6 (twwu, 2025-01-22 14:31:44 +08:00): Merge branch 'main' into feat/knowledge-dark-mode
80ad81471b (twwu, 2025-01-22 14:30:14 +08:00): refactor: remove unused CSS files and update translations for Firecrawl and Jina Reader
214 changed files with 2075 additions and 1919 deletions

View File

@@ -5,8 +5,8 @@ on:
branches:
- "main"
- "deploy/dev"
tags:
- "*"
release:
types: [published]
concurrency:
group: build-push-${{ github.head_ref || github.run_id }}

View File

@@ -1,3 +0,0 @@
{
"MD024": false
}

View File

@@ -1,32 +0,0 @@
# Changelog
All notable changes to Dify will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.15.7] - 2025-04-27
### Added
- Added support for GPT-4.1 in model providers (#18912)
- Added support for Amazon Bedrock DeepSeek-R1 model (#18908)
- Added support for Amazon Bedrock Claude Sonnet 3.7 model (#18788)
- Refined version compatibility logic in app DSL service
### Fixed
- Fixed issue with creating apps from template categories (#18807, #18868)
- Fixed DSL version check when creating apps from explore templates (#18872, #18878)
## [0.15.6] - 2025-04-22
### Security
- Fixed clickjacking vulnerability (#18552)
- Fixed reset password security issue (#18366)
- Updated reset password token when email code verification succeeds (#18362)
### Fixed
- Fixed Vertex AI Gemini 2.0 Flash 001 schema (#18405)

View File

@@ -430,7 +430,4 @@ CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# Lockout duration in seconds
LOGIN_LOCKOUT_DURATION=86400
# Prevent Clickjacking
ALLOW_EMBED=false
LOGIN_LOCKOUT_DURATION=86400

View File

@@ -498,6 +498,11 @@ class AuthConfig(BaseSettings):
default=86400,
)
FORGOT_PASSWORD_LOCKOUT_DURATION: PositiveInt = Field(
description="Time (in seconds) a user must wait before retrying password reset after exceeding the rate limit.",
default=86400,
)
class ModerationConfig(BaseSettings):
"""

View File

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.15.7",
default="0.15.3",
)
COMMIT_SHA: str = Field(

View File

@@ -59,3 +59,9 @@ class EmailCodeAccountDeletionRateLimitExceededError(BaseHTTPException):
error_code = "email_code_account_deletion_rate_limit_exceeded"
description = "Too many account deletion emails have been sent. Please try again in 5 minutes."
code = 429
class EmailPasswordResetLimitError(BaseHTTPException):
error_code = "email_password_reset_limit"
description = "Too many failed password reset attempts. Please try again in 24 hours."
code = 429

View File

@@ -6,9 +6,15 @@ from flask_restful import Resource, reqparse # type: ignore
from constants.languages import languages
from controllers.console import api
from controllers.console.auth.error import EmailCodeError, InvalidEmailError, InvalidTokenError, PasswordMismatchError
from controllers.console.auth.error import (
EmailCodeError,
EmailPasswordResetLimitError,
InvalidEmailError,
InvalidTokenError,
PasswordMismatchError,
)
from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError
from controllers.console.wraps import email_password_login_enabled, setup_required
from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created
from extensions.ext_database import db
from libs.helper import email, extract_remote_ip
@@ -22,7 +28,6 @@ from services.feature_service import FeatureService
class ForgotPasswordSendEmailApi(Resource):
@setup_required
@email_password_login_enabled
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("email", type=email, required=True, location="json")
@@ -54,7 +59,6 @@ class ForgotPasswordSendEmailApi(Resource):
class ForgotPasswordCheckApi(Resource):
@setup_required
@email_password_login_enabled
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("email", type=str, required=True, location="json")
@@ -64,6 +68,10 @@ class ForgotPasswordCheckApi(Resource):
user_email = args["email"]
is_forgot_password_error_rate_limit = AccountService.is_forgot_password_error_rate_limit(args["email"])
if is_forgot_password_error_rate_limit:
raise EmailPasswordResetLimitError()
token_data = AccountService.get_reset_password_data(args["token"])
if token_data is None:
raise InvalidTokenError()
@@ -72,22 +80,15 @@ class ForgotPasswordCheckApi(Resource):
raise InvalidEmailError()
if args["code"] != token_data.get("code"):
AccountService.add_forgot_password_error_rate_limit(args["email"])
raise EmailCodeError()
# Verified, revoke the first token
AccountService.revoke_reset_password_token(args["token"])
# Refresh token data by generating a new token
_, new_token = AccountService.generate_reset_password_token(
user_email, code=args["code"], additional_data={"phase": "reset"}
)
return {"is_valid": True, "email": token_data.get("email"), "token": new_token}
AccountService.reset_forgot_password_error_rate_limit(args["email"])
return {"is_valid": True, "email": token_data.get("email")}
class ForgotPasswordResetApi(Resource):
@setup_required
@email_password_login_enabled
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("token", type=str, required=True, nullable=False, location="json")
@@ -106,9 +107,6 @@ class ForgotPasswordResetApi(Resource):
if reset_data is None:
raise InvalidTokenError()
# Must use token in reset phase
if reset_data.get("phase", "") != "reset":
raise InvalidTokenError()
AccountService.revoke_reset_password_token(token)
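
The AccountService helpers this controller now calls (is_forgot_password_error_rate_limit, add_forgot_password_error_rate_limit, reset_forgot_password_error_rate_limit) are not part of this diff. As a hedged sketch only, one plausible implementation is a per-email Redis counter that expires after the new FORGOT_PASSWORD_LOCKOUT_DURATION setting; the import paths, key format, and threshold below are assumptions, not Dify's actual code.

# Sketch of assumed rate-limit helpers; nothing here is taken from the diff itself.
from configs import dify_config                # assumed config accessor
from extensions.ext_redis import redis_client  # assumed Redis client module

FORGOT_PASSWORD_MAX_ERRORS = 5                 # assumed threshold

def _rate_limit_key(email: str) -> str:
    return f"forgot_password_error_rate_limit:{email}"

def add_forgot_password_error_rate_limit(email: str) -> None:
    # Count one failed verification code and (re)start the 24-hour lockout window.
    redis_client.incr(_rate_limit_key(email))
    redis_client.expire(_rate_limit_key(email), dify_config.FORGOT_PASSWORD_LOCKOUT_DURATION)

def is_forgot_password_error_rate_limit(email: str) -> bool:
    count = redis_client.get(_rate_limit_key(email))
    return count is not None and int(count) >= FORGOT_PASSWORD_MAX_ERRORS

def reset_forgot_password_error_rate_limit(email: str) -> None:
    redis_client.delete(_rate_limit_key(email))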

View File

@@ -22,7 +22,7 @@ from controllers.console.error import (
EmailSendIpLimitError,
NotAllowedCreateWorkspace,
)
from controllers.console.wraps import email_password_login_enabled, setup_required
from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created
from libs.helper import email, extract_remote_ip
from libs.password import valid_password
@@ -38,7 +38,6 @@ class LoginApi(Resource):
"""Resource for user login."""
@setup_required
@email_password_login_enabled
def post(self):
"""Authenticate user and login."""
parser = reqparse.RequestParser()
@@ -111,7 +110,6 @@ class LogoutApi(Resource):
class ResetPasswordSendEmailApi(Resource):
@setup_required
@email_password_login_enabled
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("email", type=email, required=True, location="json")

View File

@@ -154,16 +154,3 @@ def enterprise_license_required(view):
return view(*args, **kwargs)
return decorated
def email_password_login_enabled(view):
@wraps(view)
def decorated(*args, **kwargs):
features = FeatureService.get_system_features()
if features.enable_email_password_login:
return view(*args, **kwargs)
# otherwise, return 403
abort(403)
return decorated
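
For reference, the hunks above show how endpoints opted into this gate before its removal: the decorator sits on the Resource handler and aborts with 403 whenever enable_email_password_login is off. A minimal usage sketch; the resource name is illustrative, not from the diff:

from flask_restful import Resource
from controllers.console.wraps import email_password_login_enabled, setup_required

class ExampleEmailLoginApi(Resource):   # illustrative name
    @setup_required
    @email_password_login_enabled       # aborts with 403 unless the system feature flag is enabled
    def post(self):
        ...                             # normal email/password handling would go here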

View File

@@ -104,6 +104,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# recalc llm max tokens
prompt_messages = self._organize_prompt_messages()
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks = model_instance.invoke_llm(
prompt_messages=prompt_messages,

View File

@@ -84,6 +84,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
# recalc llm max tokens
prompt_messages = self._organize_prompt_messages()
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
prompt_messages=prompt_messages,

View File

@@ -55,6 +55,20 @@ class AgentChatAppRunner(AppRunner):
query = application_generate_entity.query
files = application_generate_entity.files
# Pre-calculate the number of tokens of the prompt messages,
# and return the rest number of tokens by model context token size limit and max token size limit.
# If the rest number of tokens is not enough, raise exception.
# Include: prompt template, inputs, query(optional), files(optional)
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
query=query,
)
memory = None
if application_generate_entity.conversation_id:
# get memory of conversation (read-only)

View File

@@ -15,8 +15,10 @@ from core.app.features.annotation_reply.annotation_reply import AnnotationReplyF
from core.app.features.hosting_moderation.hosting_moderation import HostingModerationFeature
from core.external_data_tool.external_data_fetch import ExternalDataFetch
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.moderation.input_moderation import InputModeration
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
@@ -29,6 +31,106 @@ if TYPE_CHECKING:
class AppRunner:
def get_pre_calculate_rest_tokens(
self,
app_record: App,
model_config: ModelConfigWithCredentialsEntity,
prompt_template_entity: PromptTemplateEntity,
inputs: Mapping[str, str],
files: Sequence["File"],
query: Optional[str] = None,
) -> int:
"""
Get pre calculate rest tokens
:param app_record: app record
:param model_config: model config entity
:param prompt_template_entity: prompt template entity
:param inputs: inputs
:param files: files
:param query: query
:return:
"""
# Invoke model
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
)
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
max_tokens = 0
for parameter_rule in model_config.model_schema.parameter_rules:
if parameter_rule.name == "max_tokens" or (
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
):
max_tokens = (
model_config.parameters.get(parameter_rule.name)
or model_config.parameters.get(parameter_rule.use_template or "")
) or 0
if model_context_tokens is None:
return -1
if max_tokens is None:
max_tokens = 0
# get prompt messages without memory and context
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
model_config=model_config,
prompt_template_entity=prompt_template_entity,
inputs=inputs,
files=files,
query=query,
)
prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
rest_tokens: int = model_context_tokens - max_tokens - prompt_tokens
if rest_tokens < 0:
raise InvokeBadRequestError(
"Query or prefix prompt is too long, you can reduce the prefix prompt, "
"or shrink the max token, or switch to a llm with a larger token limit size."
)
return rest_tokens
def recalc_llm_max_tokens(
self, model_config: ModelConfigWithCredentialsEntity, prompt_messages: list[PromptMessage]
):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
)
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
max_tokens = 0
for parameter_rule in model_config.model_schema.parameter_rules:
if parameter_rule.name == "max_tokens" or (
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
):
max_tokens = (
model_config.parameters.get(parameter_rule.name)
or model_config.parameters.get(parameter_rule.use_template or "")
) or 0
if model_context_tokens is None:
return -1
if max_tokens is None:
max_tokens = 0
prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
if prompt_tokens + max_tokens > model_context_tokens:
max_tokens = max(model_context_tokens - prompt_tokens, 16)
for parameter_rule in model_config.model_schema.parameter_rules:
if parameter_rule.name == "max_tokens" or (
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
):
model_config.parameters[parameter_rule.name] = max_tokens
def organize_prompt_messages(
self,
app_record: App,

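To make the arithmetic in get_pre_calculate_rest_tokens and recalc_llm_max_tokens concrete, here is a worked example with illustrative numbers (none of them come from the diff):

# Illustrative numbers only: an 8192-token context window, max_tokens of 1024,
# and a prompt that measures 7500 tokens.
model_context_tokens = 8192
max_tokens = 1024
prompt_tokens = 7500

# get_pre_calculate_rest_tokens: the remaining budget goes negative, so it raises
# InvokeBadRequestError before the model is ever invoked.
rest_tokens = model_context_tokens - max_tokens - prompt_tokens   # -332

# recalc_llm_max_tokens: instead of failing, shrink max_tokens so the request fits,
# never going below the floor of 16 tokens.
if prompt_tokens + max_tokens > model_context_tokens:
    max_tokens = max(model_context_tokens - prompt_tokens, 16)    # 692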
View File

@@ -50,6 +50,20 @@ class ChatAppRunner(AppRunner):
query = application_generate_entity.query
files = application_generate_entity.files
# Pre-calculate the number of tokens of the prompt messages,
# and return the rest number of tokens by model context token size limit and max token size limit.
# If the rest number of tokens is not enough, raise exception.
# Include: prompt template, inputs, query(optional), files(optional)
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
query=query,
)
memory = None
if application_generate_entity.conversation_id:
# get memory of conversation (read-only)
@@ -180,6 +194,9 @@ class ChatAppRunner(AppRunner):
if hosting_moderation_result:
return
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
# Invoke model
model_instance = ModelInstance(
provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,

View File

@@ -43,6 +43,20 @@ class CompletionAppRunner(AppRunner):
query = application_generate_entity.query
files = application_generate_entity.files
# Pre-calculate the number of tokens of the prompt messages,
# and return the rest number of tokens by model context token size limit and max token size limit.
# If the rest number of tokens is not enough, raise exception.
# Include: prompt template, inputs, query(optional), files(optional)
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
query=query,
)
# organize all inputs and template to prompt messages
# Include: prompt template, inputs, query(optional), files(optional)
prompt_messages, stop = self.organize_prompt_messages(
@@ -138,6 +152,9 @@ class CompletionAppRunner(AppRunner):
if hosting_moderation_result:
return
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
# Invoke model
model_instance = ModelInstance(
provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,

View File

@@ -26,7 +26,7 @@ class TokenBufferMemory:
self.model_instance = model_instance
def get_history_prompt_messages(
self, max_token_limit: int = 100000, message_limit: Optional[int] = None
self, max_token_limit: int = 2000, message_limit: Optional[int] = None
) -> Sequence[PromptMessage]:
"""
Get history prompt messages.

View File

@@ -1,115 +0,0 @@
model: us.anthropic.claude-3-7-sonnet-20250219-v1:0
label:
en_US: Claude 3.7 Sonnet(US.Cross Region Inference)
icon: icon_s_en.svg
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
- name: enable_cache
label:
zh_Hans: 启用提示缓存
en_US: Enable Prompt Cache
type: boolean
required: false
default: true
help:
zh_Hans: 启用提示缓存可以提高性能并降低成本。Claude 3.7 Sonnet支持在system、messages和tools字段中使用缓存检查点。
en_US: Enable prompt caching to improve performance and reduce costs. Claude 3.7 Sonnet supports cache checkpoints in system, messages, and tools fields.
- name: reasoning_type
label:
zh_Hans: 推理配置
en_US: Reasoning Type
type: boolean
required: false
default: false
placeholder:
zh_Hans: 设置推理配置
en_US: Set reasoning configuration
help:
zh_Hans: 控制模型的推理能力。启用时temperature将固定为1且top_p将被禁用。
en_US: Controls the model's reasoning capability. When enabled, temperature will be fixed to 1 and top_p will be disabled.
- name: reasoning_budget
show_on:
- variable: reasoning_type
value: true
label:
zh_Hans: 推理预算
en_US: Reasoning Budget
type: int
default: 1024
min: 0
max: 128000
help:
zh_Hans: 推理的预算限制最小1024必须小于max_tokens。仅在推理类型为enabled时可用。
en_US: Budget limit for reasoning (minimum 1024), must be less than max_tokens. Only available when reasoning type is enabled.
- name: max_tokens
use_template: max_tokens
required: true
label:
zh_Hans: 最大token数
en_US: Max Tokens
type: int
default: 8192
min: 1
max: 128000
help:
zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
- name: temperature
use_template: temperature
required: false
label:
zh_Hans: 模型温度
en_US: Model Temperature
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。当推理功能启用时该值将被固定为1。
en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
- name: top_p
show_on:
- variable: reasoning_type
value: disabled
use_template: top_p
label:
zh_Hans: Top P
en_US: Top P
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
required: false
type: int
default: 0
min: 0
# tip docs from aws has error, max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
- name: response_format
use_template: response_format
pricing:
input: '0.003'
output: '0.015'
unit: '0.001'
currency: USD

View File

@@ -58,7 +58,6 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
# TODO There is invoke issue: context limit on Cohere Model, will add them after fixed.
CONVERSE_API_ENABLED_MODEL_INFO = [
{"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False},
{"prefix": "us.deepseek", "support_system_prompts": True, "support_tool_use": False},
{"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False},
{"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
{"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},

View File

@@ -1,63 +0,0 @@
model: us.deepseek.r1-v1:0
label:
en_US: DeepSeek-R1(US.Cross Region Inference)
icon: icon_s_en.svg
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 32768
parameter_rules:
- name: max_tokens
use_template: max_tokens
required: true
label:
zh_Hans: 最大token数
en_US: Max Tokens
type: int
default: 8192
min: 1
max: 128000
help:
zh_Hans: 停止前生成的最大令牌数。
en_US: The maximum number of tokens to generate before stopping.
- name: temperature
use_template: temperature
required: false
label:
zh_Hans: 模型温度
en_US: Model Temperature
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。当推理功能启用时该值将被固定为1。
en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
- name: top_p
show_on:
- variable: reasoning_type
value: disabled
use_template: top_p
label:
zh_Hans: Top P
en_US: Top P
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
- name: response_format
use_template: response_format
pricing:
input: '0.001'
output: '0.005'
unit: '0.001'
currency: USD

View File

@@ -19,8 +19,8 @@ class GoogleProvider(ModelProvider):
try:
model_instance = self.get_model_instance(ModelType.LLM)
# Use `gemini-2.0-flash` model for validate,
model_instance.validate_credentials(model="gemini-2.0-flash", credentials=credentials)
# Use `gemini-pro` model for validate,
model_instance.validate_credentials(model="gemini-pro", credentials=credentials)
except CredentialsValidateFailedError as ex:
raise ex
except Exception as ex:

View File

@@ -19,3 +19,5 @@
- gemini-exp-1206
- gemini-exp-1121
- gemini-exp-1114
- gemini-pro
- gemini-pro-vision

View File

@@ -0,0 +1,35 @@
model: gemini-pro-vision
label:
en_US: Gemini Pro Vision
model_type: llm
features:
- vision
model_properties:
mode: chat
context_size: 12288
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens_to_sample
use_template: max_tokens
required: true
default: 4096
min: 1
max: 4096
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD
deprecated: true

View File

@@ -0,0 +1,39 @@
model: gemini-pro
label:
en_US: Gemini Pro
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 30720
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens_to_sample
use_template: max_tokens
required: true
default: 2048
min: 1
max: 2048
- name: response_format
use_template: response_format
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD
deprecated: true

View File

@@ -1,4 +1,3 @@
- gpt-4.1
- o1
- o1-2024-12-17
- o1-mini

View File

@@ -1,60 +0,0 @@
model: gpt-4.1
label:
zh_Hans: gpt-4.1
en_US: gpt-4.1
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
- vision
model_properties:
mode: chat
context_size: 1047576
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 32768
- name: reasoning_effort
label:
zh_Hans: 推理工作
en_US: Reasoning Effort
type: string
help:
zh_Hans: 限制推理模型的推理工作
en_US: Constrains effort on reasoning for reasoning models
required: false
options:
- low
- medium
- high
- name: response_format
label:
zh_Hans: 回复格式
en_US: Response Format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
- json_schema
- name: json_schema
use_template: json_schema
pricing:
input: '2.00'
output: '8.00'
unit: '0.000001'
currency: USD

View File

@@ -1057,7 +1057,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
model = "gpt-4o"
try:
encoding = tiktoken.get_encoding(model)
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
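
For context on the two calls swapped above: tiktoken.encoding_for_model takes a model name and raises KeyError for models it cannot map (which is exactly what the except clause handles), whereas tiktoken.get_encoding takes an encoding name such as cl100k_base. A minimal sketch:

import tiktoken

enc = tiktoken.encoding_for_model("gpt-4o")      # tokenizer registered for a model name
fallback = tiktoken.get_encoding("cl100k_base")  # tokenizer looked up by encoding name
print(len(enc.encode("hello world")))            # count tokens in a short string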

View File

@@ -17,6 +17,13 @@
- deepseek-ai/DeepSeek-V2.5
- deepseek-ai/DeepSeek-V3
- deepseek-ai/DeepSeek-Coder-V2-Instruct
- deepseek-ai/DeepSeek-R1-Distill-Llama-8B
- deepseek-ai/DeepSeek-R1-Distill-Llama-70B
- deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
- deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
- deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
- deepseek-ai/Janus-Pro-7B
- THUDM/glm-4-9b-chat
- 01-ai/Yi-1.5-34B-Chat-16K
- 01-ai/Yi-1.5-9B-Chat-16K

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
en_US: deepseek-ai/DeepSeek-R1-Distill-Llama-70B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "4.3"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
en_US: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "0.00"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "1.26"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "0.70"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "1.26"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,21 @@
model: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
label:
zh_Hans: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
en_US: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "0.00"
unit: "0.000001"
currency: RMB

View File

@@ -0,0 +1,22 @@
model: deepseek-ai/Janus-Pro-7B
label:
zh_Hans: deepseek-ai/Janus-Pro-7B
en_US: deepseek-ai/Janus-Pro-7B
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 32000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "0.00"
output: "0.00"
unit: "0.000001"
currency: RMB

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

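The same enable_search rule is appended to each of the qwen-max, qwen-plus, and qwen-turbo variants in the hunks that follow. As a rough illustration of where the flag ends up, DashScope's generation API accepts an enable_search switch; the exact call below is an assumption for illustration, not something taken from this diff:

from dashscope import Generation  # assumed SDK usage; only the parameter name mirrors the rule above

response = Generation.call(
    model="qwen-max",
    messages=[{"role": "user", "content": "Summarize today's AI news."}],
    result_format="message",
    enable_search=True,  # let the model consult web search results when it judges them useful
)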
View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -68,6 +68,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -68,6 +68,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -69,6 +69,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -67,6 +67,15 @@ parameter_rules:
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
label:
zh_Hans: 联网搜索
en_US: Web Search
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -5,6 +5,11 @@ model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 1048576
@@ -15,21 +20,20 @@ parameter_rules:
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_output_tokens
use_template: max_tokens
required: true
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'

View File

@@ -77,4 +77,5 @@
- onebot
- regex
- trello
- vanna
- fal

Binary file not shown (new image, 4.5 KiB).

View File

@@ -0,0 +1,134 @@
from typing import Any, Union
from vanna.remote import VannaDefault # type: ignore
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.tool.builtin_tool import BuiltinTool
class VannaTool(BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
invoke tools
"""
# Ensure runtime and credentials
if not self.runtime or not self.runtime.credentials:
raise ToolProviderCredentialValidationError("Tool runtime or credentials are missing")
api_key = self.runtime.credentials.get("api_key", None)
if not api_key:
raise ToolProviderCredentialValidationError("Please input api key")
model = tool_parameters.get("model", "")
if not model:
return self.create_text_message("Please input RAG model")
prompt = tool_parameters.get("prompt", "")
if not prompt:
return self.create_text_message("Please input prompt")
url = tool_parameters.get("url", "")
if not url:
return self.create_text_message("Please input URL/Host/DSN")
db_name = tool_parameters.get("db_name", "")
username = tool_parameters.get("username", "")
password = tool_parameters.get("password", "")
port = tool_parameters.get("port", 0)
base_url = self.runtime.credentials.get("base_url", None)
vn = VannaDefault(model=model, api_key=api_key, config={"endpoint": base_url})
db_type = tool_parameters.get("db_type", "")
if db_type in {"Postgres", "MySQL", "Hive", "ClickHouse"}:
if not db_name:
return self.create_text_message("Please input database name")
if not username:
return self.create_text_message("Please input username")
if port < 1:
return self.create_text_message("Please input port")
schema_sql = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS"
match db_type:
case "SQLite":
schema_sql = "SELECT type, sql FROM sqlite_master WHERE sql is not null"
vn.connect_to_sqlite(url)
case "Postgres":
vn.connect_to_postgres(host=url, dbname=db_name, user=username, password=password, port=port)
case "DuckDB":
vn.connect_to_duckdb(url=url)
case "SQLServer":
vn.connect_to_mssql(url)
case "MySQL":
vn.connect_to_mysql(host=url, dbname=db_name, user=username, password=password, port=port)
case "Oracle":
vn.connect_to_oracle(user=username, password=password, dsn=url)
case "Hive":
vn.connect_to_hive(host=url, dbname=db_name, user=username, password=password, port=port)
case "ClickHouse":
vn.connect_to_clickhouse(host=url, dbname=db_name, user=username, password=password, port=port)
enable_training = tool_parameters.get("enable_training", False)
reset_training_data = tool_parameters.get("reset_training_data", False)
if enable_training:
if reset_training_data:
existing_training_data = vn.get_training_data()
if len(existing_training_data) > 0:
for _, training_data in existing_training_data.iterrows():
vn.remove_training_data(training_data["id"])
ddl = tool_parameters.get("ddl", "")
question = tool_parameters.get("question", "")
sql = tool_parameters.get("sql", "")
memos = tool_parameters.get("memos", "")
training_metadata = tool_parameters.get("training_metadata", False)
if training_metadata:
if db_type == "SQLite":
df_ddl = vn.run_sql(schema_sql)
for ddl in df_ddl["sql"].to_list():
vn.train(ddl=ddl)
else:
df_information_schema = vn.run_sql(schema_sql)
plan = vn.get_training_plan_generic(df_information_schema)
vn.train(plan=plan)
if ddl:
vn.train(ddl=ddl)
if sql:
if question:
vn.train(question=question, sql=sql)
else:
vn.train(sql=sql)
if memos:
vn.train(documentation=memos)
#########################################################################################
# Due to CVE-2024-5565, we have to disable the chart generation feature
# The Vanna library uses a prompt function to present the user with visualized results,
# it is possible to alter the prompt using prompt injection and run arbitrary Python code
# instead of the intended visualization code.
# Specifically, allowing external input to the library's "ask" method
# with "visualize" set to True (default behavior) leads to remote code execution.
# Affected versions: <= 0.5.5
#########################################################################################
allow_llm_to_see_data = tool_parameters.get("allow_llm_to_see_data", False)
res = vn.ask(
prompt, print_results=False, auto_train=True, visualize=False, allow_llm_to_see_data=allow_llm_to_see_data
)
result = []
if res is not None:
result.append(self.create_text_message(res[0]))
if len(res) > 1 and res[1] is not None:
result.append(self.create_text_message(res[1].to_markdown()))
if len(res) > 2 and res[2] is not None:
result.append(
self.create_blob_message(blob=res[2].to_image(format="svg"), meta={"mime_type": "image/svg+xml"})
)
return result
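
A hedged sketch of driving this tool directly, reusing the public demo parameters that the provider's credential check (later in this diff) points at; the credential values and user id are placeholders:

# Hypothetical invocation; only the demo model and database come from this diff.
tool = VannaTool().fork_tool_runtime(
    runtime={"credentials": {"api_key": "vn-xxxx", "base_url": "https://ask.vanna.ai/rpc"}}
)
messages = tool.invoke(
    user_id="demo-user",
    tool_parameters={
        "model": "chinook",                            # public demo RAG model
        "db_type": "SQLite",
        "url": "https://vanna.ai/Chinook.sqlite",      # demo database used by the credential check
        "prompt": "What are the top 10 customers by sales?",
    },
)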

View File

@@ -0,0 +1,213 @@
identity:
name: vanna
author: QCTC
label:
en_US: Vanna.AI
zh_Hans: Vanna.AI
description:
human:
en_US: The fastest way to get actionable insights from your database just by asking questions.
zh_Hans: 一个基于大模型和RAG的Text2SQL工具。
llm: A tool for converting text to SQL.
parameters:
- name: prompt
type: string
required: true
label:
en_US: Prompt
zh_Hans: 提示词
pt_BR: Prompt
human_description:
en_US: used for generating SQL
zh_Hans: 用于生成SQL
llm_description: key words for generating SQL
form: llm
- name: model
type: string
required: true
label:
en_US: RAG Model
zh_Hans: RAG模型
human_description:
en_US: RAG Model for your database DDL
zh_Hans: 存储数据库训练数据的RAG模型
llm_description: RAG Model for generating SQL
form: llm
- name: db_type
type: select
required: true
options:
- value: SQLite
label:
en_US: SQLite
zh_Hans: SQLite
- value: Postgres
label:
en_US: Postgres
zh_Hans: Postgres
- value: DuckDB
label:
en_US: DuckDB
zh_Hans: DuckDB
- value: SQLServer
label:
en_US: Microsoft SQL Server
zh_Hans: 微软 SQL Server
- value: MySQL
label:
en_US: MySQL
zh_Hans: MySQL
- value: Oracle
label:
en_US: Oracle
zh_Hans: Oracle
- value: Hive
label:
en_US: Hive
zh_Hans: Hive
- value: ClickHouse
label:
en_US: ClickHouse
zh_Hans: ClickHouse
default: SQLite
label:
en_US: DB Type
zh_Hans: 数据库类型
human_description:
en_US: Database type.
zh_Hans: 选择要链接的数据库类型。
form: form
- name: url
type: string
required: true
label:
en_US: URL/Host/DSN
zh_Hans: URL/Host/DSN
human_description:
en_US: Please input depending on DB type, visit https://vanna.ai/docs/ for more specification
zh_Hans: 请根据数据库类型填入对应值详情参考https://vanna.ai/docs/
form: form
- name: db_name
type: string
required: false
label:
en_US: DB name
zh_Hans: 数据库名
human_description:
en_US: Database name
zh_Hans: 数据库名
form: form
- name: username
type: string
required: false
label:
en_US: Username
zh_Hans: 用户名
human_description:
en_US: Username
zh_Hans: 用户名
form: form
- name: password
type: secret-input
required: false
label:
en_US: Password
zh_Hans: 密码
human_description:
en_US: Password
zh_Hans: 密码
form: form
- name: port
type: number
required: false
label:
en_US: Port
zh_Hans: 端口
human_description:
en_US: Port
zh_Hans: 端口
form: form
- name: ddl
type: string
required: false
label:
en_US: Training DDL
zh_Hans: 训练DDL
human_description:
en_US: DDL statements for training data
zh_Hans: 用于训练RAG Model的建表语句
form: llm
- name: question
type: string
required: false
label:
en_US: Training Question
zh_Hans: 训练问题
human_description:
en_US: Question-SQL Pairs
zh_Hans: Question-SQL中的问题
form: llm
- name: sql
type: string
required: false
label:
en_US: Training SQL
zh_Hans: 训练SQL
human_description:
en_US: SQL queries to your training data
zh_Hans: 用于训练RAG Model的SQL语句
form: llm
- name: memos
type: string
required: false
label:
en_US: Training Memos
zh_Hans: 训练说明
human_description:
en_US: Sometimes you may want to add documentation about your business terminology or definitions
zh_Hans: 添加更多关于数据库的业务说明
form: llm
- name: enable_training
type: boolean
required: false
default: false
label:
en_US: Training Data
zh_Hans: 训练数据
human_description:
en_US: You only need to train once. Do not train again unless you want to add more training data
zh_Hans: 训练数据无更新时,训练一次即可
form: form
- name: reset_training_data
type: boolean
required: false
default: false
label:
en_US: Reset Training Data
zh_Hans: 重置训练数据
human_description:
en_US: Remove all training data in the current RAG Model
zh_Hans: 删除当前RAG Model中的所有训练数据
form: form
- name: training_metadata
type: boolean
required: false
default: false
label:
en_US: Training Metadata
zh_Hans: 训练元数据
human_description:
en_US: If enabled, it will attempt to train on the metadata of that database
zh_Hans: 是否自动从数据库获取元数据来训练
form: form
- name: allow_llm_to_see_data
type: boolean
required: false
default: false
label:
en_US: Whether to allow the LLM to see the data
zh_Hans: 是否允许LLM查看数据
human_description:
en_US: Whether to allow the LLM to see the data
zh_Hans: 是否允许LLM查看数据
form: form

View File

@@ -0,0 +1,46 @@
import re
from typing import Any
from urllib.parse import urlparse
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.vanna.tools.vanna import VannaTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class VannaProvider(BuiltinToolProviderController):
def _get_protocol_and_main_domain(self, url):
parsed_url = urlparse(url)
protocol = parsed_url.scheme
hostname = parsed_url.hostname
port = f":{parsed_url.port}" if parsed_url.port else ""
# Check if the hostname is an IP address
is_ip = re.match(r"^\d{1,3}(\.\d{1,3}){3}$", hostname) is not None
# Return the full hostname (with port if present) for IP addresses, otherwise return the main domain
main_domain = f"{hostname}{port}" if is_ip else ".".join(hostname.split(".")[-2:]) + port
return f"{protocol}://{main_domain}"
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
base_url = credentials.get("base_url")
if not base_url:
base_url = "https://ask.vanna.ai/rpc"
else:
base_url = base_url.removesuffix("/")
credentials["base_url"] = base_url
try:
VannaTool().fork_tool_runtime(
runtime={
"credentials": credentials,
}
).invoke(
user_id="",
tool_parameters={
"model": "chinook",
"db_type": "SQLite",
"url": f"{self._get_protocol_and_main_domain(credentials['base_url'])}/Chinook.sqlite",
"query": "What are the top 10 customers by sales?",
},
)
except Exception as e:
raise ToolProviderCredentialValidationError(str(e))
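The `_get_protocol_and_main_domain` helper above trims the configured endpoint down to its scheme plus registrable domain (keeping the port only for bare IP hosts), so the validation call can fetch the sample `Chinook.sqlite` from the same host. A standalone copy of that logic, for illustration only:

```python
# Standalone copy of the domain-trimming logic above, for illustration only.
import re
from urllib.parse import urlparse

def get_protocol_and_main_domain(url: str) -> str:
    parsed = urlparse(url)
    hostname = parsed.hostname or ""
    port = f":{parsed.port}" if parsed.port else ""
    is_ip = re.match(r"^\d{1,3}(\.\d{1,3}){3}$", hostname) is not None
    main_domain = f"{hostname}{port}" if is_ip else ".".join(hostname.split(".")[-2:]) + port
    return f"{parsed.scheme}://{main_domain}"

print(get_protocol_and_main_domain("https://ask.vanna.ai/rpc"))       # https://vanna.ai
print(get_protocol_and_main_domain("http://10.0.0.5:8000/rpc"))       # http://10.0.0.5:8000
print(get_protocol_and_main_domain("https://api.example.co.uk/rpc"))  # https://co.uk (naive for multi-part TLDs)
```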

View File

@@ -0,0 +1,35 @@
identity:
author: QCTC
name: vanna
label:
en_US: Vanna.AI
zh_Hans: Vanna.AI
description:
en_US: The fastest way to get actionable insights from your database just by asking questions.
zh_Hans: 一个基于大模型和RAG的Text2SQL工具。
icon: icon.png
tags:
- utilities
- productivity
credentials_for_provider:
api_key:
type: secret-input
required: true
label:
en_US: API key
zh_Hans: API key
placeholder:
en_US: Please input your API key
zh_Hans: 请输入你的 API key
pt_BR: Please input your API key
help:
en_US: Get your API key from Vanna.AI
zh_Hans: 从 Vanna.AI 获取你的 API key
url: https://vanna.ai/account/profile
base_url:
type: text-input
required: false
label:
en_US: Vanna.AI Endpoint Base URL
placeholder:
en_US: https://ask.vanna.ai/rpc

View File

@@ -195,7 +195,7 @@ class CodeNode(BaseNode[CodeNodeData]):
if output_config.type == "object":
# check if output is object
if not isinstance(result.get(output_name), dict):
if result.get(output_name) is None:
if isinstance(result.get(output_name), type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@@ -223,7 +223,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[number]":
# check if array of number available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@@ -244,7 +244,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[string]":
# check if array of string available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@@ -265,7 +265,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[object]":
# check if array of object available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
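All four hunks above swap between `result[...] is None` and `isinstance(result[...], type(None))`; the two spellings accept exactly the same value, so the behavior of the null-tolerant branch is unchanged either way:

```python
# Both checks used in the hunks above are equivalent for plain None values.
values = [None, 0, "", [], {"a": 1}]

for v in values:
    assert (v is None) == isinstance(v, type(None))

print("is None and isinstance(..., type(None)) agree for all samples")
```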

View File

@@ -968,12 +968,14 @@ def _handle_memory_chat_mode(
*,
memory: TokenBufferMemory | None,
memory_config: MemoryConfig | None,
model_config: ModelConfigWithCredentialsEntity, # TODO(-LAN-): Needs to remove
model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
memory_messages: Sequence[PromptMessage] = []
# Get messages from memory for chat model
if memory and memory_config:
rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
memory_messages = memory.get_history_prompt_messages(
max_token_limit=rest_tokens,
message_limit=memory_config.window.size if memory_config.window.enabled else None,
)
return memory_messages
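The hunk above keeps the windowed history retrieval: the token budget comes from `_calculate_rest_token`, and `message_limit` is only passed when the memory window is enabled. A small sketch with assumed dataclass shapes, purely to show how the window setting gates the limit:

```python
# Minimal sketch (assumed shapes) of how the window setting above gates the
# message limit passed to get_history_prompt_messages.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Window:
    enabled: bool
    size: Optional[int] = None

@dataclass
class MemoryConfig:
    window: Window

def history_kwargs(memory_config: MemoryConfig, rest_tokens: int) -> dict:
    return {
        "max_token_limit": rest_tokens,
        "message_limit": memory_config.window.size if memory_config.window.enabled else None,
    }

print(history_kwargs(MemoryConfig(Window(enabled=True, size=10)), rest_tokens=2048))
# {'max_token_limit': 2048, 'message_limit': 10}
print(history_kwargs(MemoryConfig(Window(enabled=False)), rest_tokens=2048))
# {'max_token_limit': 2048, 'message_limit': None}
```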

View File

@@ -32,7 +32,11 @@ class AwsS3Storage(BaseStorage):
aws_access_key_id=dify_config.S3_ACCESS_KEY,
endpoint_url=dify_config.S3_ENDPOINT,
region_name=dify_config.S3_REGION,
config=Config(s3={"addressing_style": dify_config.S3_ADDRESS_STYLE}),
config=Config(
s3={"addressing_style": dify_config.S3_ADDRESS_STYLE},
request_checksum_calculation="when_required",
response_checksum_validation="when_required",
),
)
# create bucket
try:
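The two added `Config` options relax the integrity-checksum defaults introduced in recent botocore releases, so third-party S3-compatible providers that do not understand the new checksum headers keep working. A hedged sketch of building a client the same way (placeholder credentials and endpoint; assumes a botocore release >= 1.36, where these options exist):

```python
# Sketch of an S3 client configured like the hunk above; bucket/credential
# values are placeholders.
import boto3
from botocore.client import Config

client = boto3.client(
    "s3",
    aws_access_key_id="YOUR_ACCESS_KEY",               # placeholder
    aws_secret_access_key="YOUR_SECRET_KEY",           # placeholder
    endpoint_url="https://s3.example-compatible.com",  # third-party S3-compatible endpoint (placeholder)
    region_name="us-east-1",
    config=Config(
        s3={"addressing_style": "path"},
        # Only send/verify the newer integrity checksums when the operation
        # actually requires them, which keeps older S3-compatible providers happy.
        request_checksum_calculation="when_required",
        response_checksum_validation="when_required",
    ),
)
```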

api/poetry.lock (generated, 66 lines changed)
View File

@@ -10473,44 +10473,44 @@ client = ["SQLAlchemy (>=1.4,<3)"]
[[package]]
name = "tiktoken"
version = "0.9.0"
version = "0.8.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
{file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd"},
{file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de"},
{file = "tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990"},
{file = "tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4"},
{file = "tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e"},
{file = "tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348"},
{file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33"},
{file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136"},
{file = "tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336"},
{file = "tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb"},
{file = "tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03"},
{file = "tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210"},
{file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794"},
{file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22"},
{file = "tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2"},
{file = "tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16"},
{file = "tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb"},
{file = "tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63"},
{file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01"},
{file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139"},
{file = "tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a"},
{file = "tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95"},
{file = "tiktoken-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c6386ca815e7d96ef5b4ac61e0048cd32ca5a92d5781255e13b31381d28667dc"},
{file = "tiktoken-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75f6d5db5bc2c6274b674ceab1615c1778e6416b14705827d19b40e6355f03e0"},
{file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e15b16f61e6f4625a57a36496d28dd182a8a60ec20a534c5343ba3cafa156ac7"},
{file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebcec91babf21297022882344c3f7d9eed855931466c3311b1ad6b64befb3df"},
{file = "tiktoken-0.9.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5fd49e7799579240f03913447c0cdfa1129625ebd5ac440787afc4345990427"},
{file = "tiktoken-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:26242ca9dc8b58e875ff4ca078b9a94d2f0813e6a535dcd2205df5d49d927cc7"},
{file = "tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d"},
{file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"},
{file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"},
{file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"},
{file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"},
{file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"},
{file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"},
{file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"},
{file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"},
{file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"},
{file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"},
{file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"},
{file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"},
{file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"},
{file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"},
{file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"},
{file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"},
{file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"},
{file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"},
{file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"},
{file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"},
{file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"},
{file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"},
{file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"},
{file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"},
{file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"},
{file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"},
{file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"},
{file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"},
{file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"},
{file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"},
{file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"},
]
[package.dependencies]
@@ -12389,4 +12389,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "0df8aef68385b6596306fd18af317a835023d648eb5028cd57ec463f176e4c0f"
content-hash = "d197cdff507a70323c1d6aca11609188f54970f67715af744fe6def15b7776fd"

View File

@@ -85,7 +85,7 @@ sentry-sdk = { version = "~1.44.1", extras = ["flask"] }
sqlalchemy = "~2.0.29"
starlette = "0.41.0"
tencentcloud-sdk-python-hunyuan = "~3.0.1294"
tiktoken = "^0.9.0"
tiktoken = "~0.8.0"
tokenizers = "~0.15.0"
transformers = "~4.35.0"
unstructured = { version = "~0.16.1", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] }
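For reference on the constraint change: in Poetry, `^0.9.0` on a 0.x release resolves to `>=0.9.0,<0.10.0`, while `~0.8.0` resolves to `>=0.8.0,<0.9.0`, which is why the lockfile hunk above pins tiktoken 0.8.x on one side and 0.9.x on the other. Roughly equivalent specifier sets, expressed with `packaging` purely for illustration (Poetry does its own resolution):

```python
# Rough equivalents of the Poetry constraints above, for illustration only.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

caret_0_9 = SpecifierSet(">=0.9.0,<0.10.0")  # Poetry "^0.9.0" on a 0.x release
tilde_0_8 = SpecifierSet(">=0.8.0,<0.9.0")   # Poetry "~0.8.0"

for candidate in ["0.8.0", "0.8.5", "0.9.0"]:
    v = Version(candidate)
    print(candidate, "caret:", v in caret_0_9, "tilde:", v in tilde_0_8)
# 0.8.0 caret: False tilde: True
# 0.8.5 caret: False tilde: True
# 0.9.0 caret: True  tilde: False
```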

View File

@@ -77,6 +77,7 @@ class AccountService:
prefix="email_code_account_deletion_rate_limit", max_attempts=1, time_window=60 * 1
)
LOGIN_MAX_ERROR_LIMITS = 5
FORGOT_PASSWORD_MAX_ERROR_LIMITS = 5
@staticmethod
def _get_refresh_token_key(refresh_token: str) -> str:
@@ -406,8 +407,10 @@ class AccountService:
raise PasswordResetRateLimitExceededError()
code, token = cls.generate_reset_password_token(account_email, account)
code = "".join([str(random.randint(0, 9)) for _ in range(6)])
token = TokenManager.generate_token(
account=account, email=email, token_type="reset_password", additional_data={"code": code}
)
send_reset_password_mail_task.delay(
language=language,
to=account_email,
@@ -416,22 +419,6 @@ class AccountService:
cls.reset_password_rate_limiter.increment_rate_limit(account_email)
return token
@classmethod
def generate_reset_password_token(
cls,
email: str,
account: Optional[Account] = None,
code: Optional[str] = None,
additional_data: dict[str, Any] = {},
):
if not code:
code = "".join([str(random.randint(0, 9)) for _ in range(6)])
additional_data["code"] = code
token = TokenManager.generate_token(
account=account, email=email, token_type="reset_password", additional_data=additional_data
)
return code, token
@classmethod
def revoke_reset_password_token(cls, token: str):
TokenManager.revoke_token(token, "reset_password")
@@ -517,6 +504,32 @@ class AccountService:
key = f"login_error_rate_limit:{email}"
redis_client.delete(key)
@staticmethod
def add_forgot_password_error_rate_limit(email: str) -> None:
key = f"forgot_password_error_rate_limit:{email}"
count = redis_client.get(key)
if count is None:
count = 0
count = int(count) + 1
redis_client.setex(key, dify_config.FORGOT_PASSWORD_LOCKOUT_DURATION, count)
@staticmethod
def is_forgot_password_error_rate_limit(email: str) -> bool:
key = f"forgot_password_error_rate_limit:{email}"
count = redis_client.get(key)
if count is None:
return False
count = int(count)
if count > AccountService.FORGOT_PASSWORD_MAX_ERROR_LIMITS:
return True
return False
@staticmethod
def reset_forgot_password_error_rate_limit(email: str):
key = f"forgot_password_error_rate_limit:{email}"
redis_client.delete(key)
@staticmethod
def is_email_send_ip_limit(ip_address: str):
minute_key = f"email_send_ip_limit_minute:{ip_address}"
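The added forgot-password counters follow the same Redis pattern as the login limiter: bump a per-email key on each failed attempt (with a lockout TTL), refuse once the count exceeds `FORGOT_PASSWORD_MAX_ERROR_LIMITS`, and delete the key on success. A simplified, self-contained sketch using an in-memory stand-in for `redis_client` (TTL handling omitted for brevity):

```python
# Simplified sketch of the counter pattern above; a dict stands in for Redis.
FORGOT_PASSWORD_MAX_ERROR_LIMITS = 5
_store: dict[str, int] = {}

def add_error(email: str) -> None:
    key = f"forgot_password_error_rate_limit:{email}"
    _store[key] = _store.get(key, 0) + 1  # real code also sets a lockout TTL

def is_rate_limited(email: str) -> bool:
    key = f"forgot_password_error_rate_limit:{email}"
    return _store.get(key, 0) > FORGOT_PASSWORD_MAX_ERROR_LIMITS

def reset(email: str) -> None:
    _store.pop(f"forgot_password_error_rate_limit:{email}", None)

for _ in range(6):
    add_error("user@example.com")
print(is_rate_limited("user@example.com"))  # True: 6 failed attempts > limit of 5
reset("user@example.com")
print(is_rate_limited("user@example.com"))  # False
```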

View File

@@ -55,19 +55,13 @@ def _check_version_compatibility(imported_version: str) -> ImportStatus:
except version.InvalidVersion:
return ImportStatus.FAILED
# If imported version is newer than current, always return PENDING
if imported_ver > current_ver:
# Compare major version and minor version
if current_ver.major != imported_ver.major or current_ver.minor != imported_ver.minor:
return ImportStatus.PENDING
# If imported version is older than current's major, return PENDING
if imported_ver.major < current_ver.major:
return ImportStatus.PENDING
# If imported version is older than current's minor, return COMPLETED_WITH_WARNINGS
if imported_ver.minor < current_ver.minor:
if current_ver.micro != imported_ver.micro:
return ImportStatus.COMPLETED_WITH_WARNINGS
# If imported version equals or is older than current's micro, return COMPLETED
return ImportStatus.COMPLETED
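One side of the hunk above reduces the DSL import check to a plain component comparison: any major or minor mismatch yields PENDING, a micro-only mismatch yields COMPLETED_WITH_WARNINGS, and an exact match yields COMPLETED. A minimal sketch of that variant (enum values reduced to strings for illustration):

```python
# Minimal sketch of the major/minor/micro comparison shown in the hunk above.
from packaging import version

def check_version_compatibility(imported: str, current: str) -> str:
    try:
        imported_ver = version.parse(imported)
        current_ver = version.parse(current)
    except version.InvalidVersion:
        return "FAILED"
    if current_ver.major != imported_ver.major or current_ver.minor != imported_ver.minor:
        return "PENDING"
    if current_ver.micro != imported_ver.micro:
        return "COMPLETED_WITH_WARNINGS"
    return "COMPLETED"

print(check_version_compatibility("0.15.3", "0.15.7"))  # COMPLETED_WITH_WARNINGS
print(check_version_compatibility("0.14.0", "0.15.7"))  # PENDING
print(check_version_compatibility("0.15.7", "0.15.7"))  # COMPLETED
```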

View File

@@ -932,6 +932,3 @@ MAX_SUBMIT_COUNT=100
# The maximum number of top-k value for RAG.
TOP_K_MAX_VALUE=10
# Prevent Clickjacking
ALLOW_EMBED=false
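`ALLOW_EMBED` controls whether the web app may be rendered inside an iframe; leaving it false is the clickjacking protection referenced in the comment above. As a purely hypothetical illustration (not Dify's actual middleware), such a flag is typically enforced by denying framing via response headers:

```python
# Hypothetical illustration (not Dify's actual code) of how an ALLOW_EMBED-style
# flag is commonly enforced: deny framing via X-Frame-Options / CSP when false.
import os
from flask import Flask

app = Flask(__name__)
ALLOW_EMBED = os.environ.get("ALLOW_EMBED", "false").lower() == "true"

@app.after_request
def set_frame_headers(response):
    if not ALLOW_EMBED:
        response.headers["X-Frame-Options"] = "DENY"
        response.headers["Content-Security-Policy"] = "frame-ancestors 'none'"
    return response
```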

View File

@@ -1,8 +1,8 @@
x-shared-env: &shared-api-worker-env
x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.15.7
image: langgenius/dify-api:0.15.3
restart: always
environment:
# Use the shared environment variables.
@@ -25,7 +25,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.15.7
image: langgenius/dify-api:0.15.3
restart: always
environment:
# Use the shared environment variables.
@@ -47,7 +47,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.15.7
image: langgenius/dify-web:0.15.3
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -56,7 +56,6 @@ services:
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
ALLOW_EMBED: ${ALLOW_EMBED:-false}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
@@ -99,7 +98,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.11
image: langgenius/dify-sandbox:0.2.10
restart: always
environment:
# The DifySandbox configurations

View File

@@ -43,7 +43,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.11
image: langgenius/dify-sandbox:0.2.10
restart: always
environment:
# The DifySandbox configurations

View File

@@ -389,12 +389,11 @@ x-shared-env: &shared-api-worker-env
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
ALLOW_EMBED: ${ALLOW_EMBED:-false}
services:
# API service
api:
image: langgenius/dify-api:0.15.7
image: langgenius/dify-api:0.15.3
restart: always
environment:
# Use the shared environment variables.
@@ -417,7 +416,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.15.7
image: langgenius/dify-api:0.15.3
restart: always
environment:
# Use the shared environment variables.
@@ -439,7 +438,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.15.7
image: langgenius/dify-web:0.15.3
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -448,7 +447,6 @@ services:
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
ALLOW_EMBED: ${ALLOW_EMBED:-false}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
@@ -491,7 +489,7 @@ services:
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.11
image: langgenius/dify-sandbox:0.2.10
restart: always
environment:
# The DifySandbox configurations

View File

@@ -31,6 +31,3 @@ NEXT_PUBLIC_TOP_K_MAX_VALUE=10
# The maximum number of tokens for segmentation
NEXT_PUBLIC_INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
# Default is not allow to embed into iframe to prevent Clickjacking: https://owasp.org/www-community/attacks/Clickjacking
NEXT_PUBLIC_ALLOW_EMBED=

View File

@@ -161,9 +161,9 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
}
return (
<div className={cn(s.app, 'flex', 'overflow-hidden')}>
<div className={cn(s.app, 'flex relative', 'overflow-hidden')}>
{appDetail && (
<AppSideBar title={appDetail.name} icon={appDetail.icon} icon_background={appDetail.icon_background} desc={appDetail.mode} navigation={navigation} />
<AppSideBar title={appDetail.name} icon={appDetail.icon} icon_background={appDetail.icon_background as string} desc={appDetail.mode} navigation={navigation} />
)}
<div className="bg-components-panel-bg grow overflow-hidden">
{children}

View File

@@ -24,9 +24,11 @@ import AppContext from '@/context/app-context'
export type ICardViewProps = {
appId: string
isInPanel?: boolean
className?: string
}
const CardView: FC<ICardViewProps> = ({ appId }) => {
const CardView: FC<ICardViewProps> = ({ appId, isInPanel, className }) => {
const { t } = useTranslation()
const { notify } = useContext(ToastContext)
const appDetail = useAppStore(state => state.appDetail)
@@ -120,10 +122,11 @@ const CardView: FC<ICardViewProps> = ({ appId }) => {
return <Loading />
return (
<div className="grid gap-6 grid-cols-1 xl:grid-cols-2 w-full mb-6">
<div className={className || 'grid gap-6 grid-cols-1 xl:grid-cols-2 w-full mb-6'}>
<AppCard
appInfo={appDetail}
cardType="webapp"
isInPanel={isInPanel}
onChangeStatus={onChangeSiteStatus}
onGenerateCode={onGenerateCode}
onSaveSiteConfig={onSaveSiteConfig}
@@ -131,6 +134,7 @@ const CardView: FC<ICardViewProps> = ({ appId }) => {
<AppCard
cardType="api"
appInfo={appDetail}
isInPanel={isInPanel}
onChangeStatus={onChangeApiStatus}
/>
</div>

View File

@@ -15,7 +15,7 @@ import {
// CommandLineIcon as CommandLineSolidIcon,
DocumentTextIcon as DocumentTextSolidIcon,
} from '@heroicons/react/24/solid'
import { RiApps2AddLine, RiInformation2Line } from '@remixicon/react'
import { RiApps2AddLine, RiBookOpenLine, RiInformation2Line } from '@remixicon/react'
import s from './style.module.css'
import classNames from '@/utils/classnames'
import { fetchDatasetDetail, fetchDatasetRelatedApps } from '@/service/datasets'
@@ -58,13 +58,6 @@ const TargetSolidIcon = ({ className }: SVGProps<SVGElement>) => {
</svg>
}
const BookOpenIcon = ({ className }: SVGProps<SVGElement>) => {
return <svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg" className={className ?? ''}>
<path opacity="0.12" d="M1 3.1C1 2.53995 1 2.25992 1.10899 2.04601C1.20487 1.85785 1.35785 1.70487 1.54601 1.60899C1.75992 1.5 2.03995 1.5 2.6 1.5H2.8C3.9201 1.5 4.48016 1.5 4.90798 1.71799C5.28431 1.90973 5.59027 2.21569 5.78201 2.59202C6 3.01984 6 3.5799 6 4.7V10.5L5.94997 10.425C5.60265 9.90398 5.42899 9.64349 5.19955 9.45491C4.99643 9.28796 4.76238 9.1627 4.5108 9.0863C4.22663 9 3.91355 9 3.28741 9H2.6C2.03995 9 1.75992 9 1.54601 8.89101C1.35785 8.79513 1.20487 8.64215 1.10899 8.45399C1 8.24008 1 7.96005 1 7.4V3.1Z" fill="#155EEF" />
<path d="M6 10.5L5.94997 10.425C5.60265 9.90398 5.42899 9.64349 5.19955 9.45491C4.99643 9.28796 4.76238 9.1627 4.5108 9.0863C4.22663 9 3.91355 9 3.28741 9H2.6C2.03995 9 1.75992 9 1.54601 8.89101C1.35785 8.79513 1.20487 8.64215 1.10899 8.45399C1 8.24008 1 7.96005 1 7.4V3.1C1 2.53995 1 2.25992 1.10899 2.04601C1.20487 1.85785 1.35785 1.70487 1.54601 1.60899C1.75992 1.5 2.03995 1.5 2.6 1.5H2.8C3.9201 1.5 4.48016 1.5 4.90798 1.71799C5.28431 1.90973 5.59027 2.21569 5.78201 2.59202C6 3.01984 6 3.5799 6 4.7M6 10.5V4.7M6 10.5L6.05003 10.425C6.39735 9.90398 6.57101 9.64349 6.80045 9.45491C7.00357 9.28796 7.23762 9.1627 7.4892 9.0863C7.77337 9 8.08645 9 8.71259 9H9.4C9.96005 9 10.2401 9 10.454 8.89101C10.6422 8.79513 10.7951 8.64215 10.891 8.45399C11 8.24008 11 7.96005 11 7.4V3.1C11 2.53995 11 2.25992 10.891 2.04601C10.7951 1.85785 10.6422 1.70487 10.454 1.60899C10.2401 1.5 9.96005 1.5 9.4 1.5H9.2C8.07989 1.5 7.51984 1.5 7.09202 1.71799C6.71569 1.90973 6.40973 2.21569 6.21799 2.59202C6 3.01984 6 3.5799 6 4.7" stroke="#155EEF" strokeLinecap="round" strokeLinejoin="round" />
</svg>
}
type IExtraInfoProps = {
isMobile: boolean
relatedApps?: RelatedAppResponse
@@ -131,7 +124,7 @@ const ExtraInfo = ({ isMobile, relatedApps, expand }: IExtraInfoProps) => {
}
target='_blank' rel='noopener noreferrer'
>
<BookOpenIcon className='mr-1' />
<RiBookOpenLine className='mr-1 text-text-accent' />
{t('common.datasetMenus.viewDoc')}
</a>
</div>

View File

@@ -31,8 +31,6 @@ const ApiServer: FC<ApiServerProps> = ({
</div>
<SecretKeyButton
className='flex-shrink-0 !h-8 bg-white'
textCls='!text-gray-700 font-medium'
iconCls='stroke-[1.2px]'
/>
</div>
)

View File

@@ -192,7 +192,7 @@ const DatasetCard = ({
/>
</div>
</div>
<div className='!hidden group-hover:!flex shrink-0 mx-1 w-[1px] h-[14px] bg-gray-200' />
<div className='!hidden group-hover:!flex shrink-0 mx-1 w-[1px] h-[14px] bg-divider-regular' />
<div className='!hidden group-hover:!flex shrink-0'>
<CustomPopover
htmlContent={<Operations showDelete={!isCurrentWorkspaceDatasetOperator} />}

View File

@@ -1,18 +1,18 @@
import { useTranslation } from 'react-i18next'
import { useRouter } from 'next/navigation'
import { useContext, useContextSelector } from 'use-context-selector'
import { RiArrowDownSLine } from '@remixicon/react'
import React, { useCallback, useState } from 'react'
import {
RiDeleteBinLine,
RiEditLine,
RiEqualizer2Line,
RiFileCopy2Line,
RiFileDownloadLine,
RiFileUploadLine,
} from '@remixicon/react'
import AppIcon from '../base/app-icon'
import SwitchAppModal from '../app/switch-app-modal'
import s from './style.module.css'
import cn from '@/utils/classnames'
import {
PortalToFollowElem,
PortalToFollowElemContent,
PortalToFollowElemTrigger,
} from '@/app/components/base/portal-to-follow-elem'
import Divider from '@/app/components/base/divider'
import Confirm from '@/app/components/base/confirm'
import { useStore as useAppStore } from '@/app/components/app/store'
import { ToastContext } from '@/app/components/base/toast'
@@ -22,8 +22,6 @@ import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/ap
import DuplicateAppModal from '@/app/components/app/duplicate-modal'
import type { DuplicateAppModalProps } from '@/app/components/app/duplicate-modal'
import CreateAppModal from '@/app/components/explore/create-app-modal'
import { AiText, ChatBot, CuteRobot } from '@/app/components/base/icons/src/vender/solid/communication'
import { Route } from '@/app/components/base/icons/src/vender/solid/mapsAndTravel'
import type { CreateAppModalProps } from '@/app/components/explore/create-app-modal'
import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
import { getRedirection } from '@/utils/app-redirection'
@@ -31,6 +29,9 @@ import UpdateDSLModal from '@/app/components/workflow/update-dsl-modal'
import type { EnvironmentVariable } from '@/app/components/workflow/types'
import DSLExportConfirmModal from '@/app/components/workflow/dsl-export-confirm-modal'
import { fetchWorkflowDraft } from '@/service/workflow'
import ContentDialog from '@/app/components/base/content-dialog'
import Button from '@/app/components/base/button'
import CardView from '@/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/cardView'
export type IAppInfoProps = {
expand: boolean
@@ -47,7 +48,6 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
const [showEditModal, setShowEditModal] = useState(false)
const [showDuplicateModal, setShowDuplicateModal] = useState(false)
const [showConfirmDelete, setShowConfirmDelete] = useState(false)
const [showSwitchTip, setShowSwitchTip] = useState<string>('')
const [showSwitchModal, setShowSwitchModal] = useState<boolean>(false)
const [showImportDSLModal, setShowImportDSLModal] = useState<boolean>(false)
const [secretEnvList, setSecretEnvList] = useState<EnvironmentVariable[]>([])
@@ -183,291 +183,199 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
return null
return (
<PortalToFollowElem
open={open}
onOpenChange={setOpen}
placement='bottom-start'
offset={4}
>
<div className='relative'>
<PortalToFollowElemTrigger
onClick={() => {
if (isCurrentWorkspaceEditor)
setOpen(v => !v)
}}
className='block'
>
<div className={cn('flex p-1 rounded-lg', open && 'bg-gray-100', isCurrentWorkspaceEditor && 'hover:bg-gray-100 cursor-pointer')}>
<div className='relative shrink-0 mr-2'>
<AppIcon
size={expand ? 'large' : 'small'}
iconType={appDetail.icon_type}
icon={appDetail.icon}
background={appDetail.icon_background}
imageUrl={appDetail.icon_url}
/>
<span className={cn(
'absolute bottom-[-3px] right-[-3px] w-4 h-4 p-0.5 bg-white rounded border-[0.5px] border-[rgba(0,0,0,0.02)] shadow-sm',
!expand && '!w-3.5 !h-3.5 !bottom-[-2px] !right-[-2px]',
)}>
{appDetail.mode === 'advanced-chat' && (
<ChatBot className={cn('w-3 h-3 text-[#1570EF]', !expand && '!w-2.5 !h-2.5')} />
)}
{appDetail.mode === 'agent-chat' && (
<CuteRobot className={cn('w-3 h-3 text-indigo-600', !expand && '!w-2.5 !h-2.5')} />
)}
{appDetail.mode === 'chat' && (
<ChatBot className={cn('w-3 h-3 text-[#1570EF]', !expand && '!w-2.5 !h-2.5')} />
)}
{appDetail.mode === 'completion' && (
<AiText className={cn('w-3 h-3 text-[#0E9384]', !expand && '!w-2.5 !h-2.5')} />
)}
{appDetail.mode === 'workflow' && (
<Route className={cn('w-3 h-3 text-[#f79009]', !expand && '!w-2.5 !h-2.5')} />
)}
</span>
</div>
{expand && (
<div className="grow w-0">
<div className='flex justify-between items-center text-sm leading-5 font-medium text-text-secondary'>
<div className='truncate' title={appDetail.name}>{appDetail.name}</div>
{isCurrentWorkspaceEditor && <RiArrowDownSLine className='shrink-0 ml-[2px] w-3 h-3 text-gray-500' />}
</div>
<div className='flex items-center text-[10px] leading-[18px] font-medium text-gray-500 gap-1'>
{appDetail.mode === 'advanced-chat' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.chatbot').toUpperCase()}</div>
<div title={t('app.types.advanced') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.advanced').toUpperCase()}</div>
</>
)}
{appDetail.mode === 'agent-chat' && (
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.agent').toUpperCase()}</div>
)}
{appDetail.mode === 'chat' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.chatbot').toUpperCase()}</div>
<div title={t('app.types.basic') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{(t('app.types.basic').toUpperCase())}</div>
</>
)}
{appDetail.mode === 'completion' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.completion').toUpperCase()}</div>
<div title={t('app.types.basic') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{(t('app.types.basic').toUpperCase())}</div>
</>
)}
{appDetail.mode === 'workflow' && (
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.workflow').toUpperCase()}</div>
)}
</div>
<div>
<button
onClick={() => {
if (isCurrentWorkspaceEditor)
setOpen(v => !v)
}}
className='block w-full'
>
<div className={cn('flex rounded-lg', expand ? 'p-2 pb-2.5 flex-col gap-2' : 'p-1 gap-1 justify-center items-start', open && 'bg-state-base-hover', isCurrentWorkspaceEditor && 'hover:bg-state-base-hover cursor-pointer')}>
<div className={`flex items-center self-stretch ${expand ? 'justify-between' : 'flex-col gap-1'}`}>
<AppIcon
size={expand ? 'large' : 'small'}
iconType={appDetail.icon_type}
icon={appDetail.icon}
background={appDetail.icon_background}
imageUrl={appDetail.icon_url}
/>
<div className='flex p-0.5 justify-center items-center rounded-md'>
<div className='flex w-5 h-5 justify-center items-center'>
<RiEqualizer2Line className='w-4 h-4 text-text-tertiary' />
</div>
)}
</div>
</div>
</PortalToFollowElemTrigger>
<PortalToFollowElemContent className='z-[1002]'>
<div className='relative w-[320px] bg-white rounded-2xl shadow-xl'>
{/* header */}
<div className={cn('flex pl-4 pt-3 pr-3', !appDetail.description && 'pb-2')}>
<div className='relative shrink-0 mr-2'>
<AppIcon
size="large"
iconType={appDetail.icon_type}
icon={appDetail.icon}
background={appDetail.icon_background}
imageUrl={appDetail.icon_url}
/>
<span className='absolute bottom-[-3px] right-[-3px] w-4 h-4 p-0.5 bg-white rounded border-[0.5px] border-[rgba(0,0,0,0.02)] shadow-sm'>
{appDetail.mode === 'advanced-chat' && (
<ChatBot className='w-3 h-3 text-[#1570EF]' />
)}
{appDetail.mode === 'agent-chat' && (
<CuteRobot className='w-3 h-3 text-indigo-600' />
)}
{appDetail.mode === 'chat' && (
<ChatBot className='w-3 h-3 text-[#1570EF]' />
)}
{appDetail.mode === 'completion' && (
<AiText className='w-3 h-3 text-[#0E9384]' />
)}
{appDetail.mode === 'workflow' && (
<Route className='w-3 h-3 text-[#f79009]' />
)}
</span>
</div>
<div className='grow w-0'>
<div title={appDetail.name} className='flex justify-between items-center text-sm leading-5 font-medium text-gray-900 truncate'>{appDetail.name}</div>
<div className='flex items-center text-[10px] leading-[18px] font-medium text-gray-500 gap-1'>
{appDetail.mode === 'advanced-chat' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.chatbot').toUpperCase()}</div>
<div title={t('app.types.advanced') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.advanced').toUpperCase()}</div>
</>
)}
{appDetail.mode === 'agent-chat' && (
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.agent').toUpperCase()}</div>
)}
{appDetail.mode === 'chat' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.chatbot').toUpperCase()}</div>
<div title={t('app.types.basic') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{(t('app.types.basic').toUpperCase())}</div>
</>
)}
{appDetail.mode === 'completion' && (
<>
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.completion').toUpperCase()}</div>
<div title={t('app.types.basic') || ''} className='px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{(t('app.types.basic').toUpperCase())}</div>
</>
)}
{appDetail.mode === 'workflow' && (
<div className='shrink-0 px-1 border bg-white border-[rgba(0,0,0,0.08)] rounded-[5px] truncate'>{t('app.types.workflow').toUpperCase()}</div>
)}
{
expand && (
<div className='flex flex-col items-start gap-1'>
<div className='flex w-full'>
<div className='text-text-secondary system-md-semibold truncate'>{appDetail.name}</div>
</div>
<div className='text-text-tertiary system-2xs-medium-uppercase'>{appDetail.mode === 'advanced-chat' ? t('app.types.chatbot') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}</div>
</div>
)
}
</div>
</button>
<ContentDialog
show={open}
onClose={() => setOpen(false)}
className='!p-0 flex flex-col absolute left-2 top-2 bottom-2 w-[420px] rounded-2xl'
>
<div className='flex p-4 flex-col justify-center items-start gap-3 self-stretch shrink-0'>
<div className='flex items-center gap-3 self-stretch'>
<AppIcon
size="large"
iconType={appDetail.icon_type}
icon={appDetail.icon}
background={appDetail.icon_background}
imageUrl={appDetail.icon_url}
/>
<div className='flex flex-col justify-center items-start grow w-full'>
<div className='text-text-secondary system-md-semibold truncate w-full'>{appDetail.name}</div>
<div className='text-text-tertiary system-2xs-medium-uppercase'>{appDetail.mode === 'advanced-chat' ? t('app.types.chatbot') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}</div>
</div>
{/* description */}
{appDetail.description && (
<div className='px-4 py-2 text-gray-500 text-xs leading-[18px]'>{appDetail.description}</div>
)}
{/* operations */}
<Divider className="!my-1" />
<div className="w-full py-1">
<div className='h-9 py-2 px-3 mx-1 flex items-center hover:bg-gray-50 rounded-lg cursor-pointer' onClick={() => {
</div>
{/* description */}
{appDetail.description && (
<div className='text-text-tertiary system-xs-regular'>{appDetail.description}</div>
)}
{/* operations */}
<div className='flex items-center gap-1 self-stretch'>
<Button
size={'small'}
variant={'secondary'}
className='gap-[1px]'
onClick={() => {
setOpen(false)
setShowEditModal(true)
}}>
<span className='text-gray-700 text-sm leading-5'>{t('app.editApp')}</span>
</div>
<div className='h-9 py-2 px-3 mx-1 flex items-center hover:bg-gray-50 rounded-lg cursor-pointer' onClick={() => {
}}
>
<RiEditLine className='w-3.5 h-3.5 text-components-button-secondary-text' />
<span className='text-components-button-secondary-text system-xs-medium'>{t('app.editApp')}</span>
</Button>
<Button
size={'small'}
variant={'secondary'}
className='gap-[1px]'
onClick={() => {
setOpen(false)
setShowDuplicateModal(true)
}}>
<span className='text-gray-700 text-sm leading-5'>{t('app.duplicate')}</span>
</div>
{(appDetail.mode === 'completion' || appDetail.mode === 'chat') && (
<>
<Divider className="!my-1" />
<div
className='h-9 py-2 px-3 mx-1 flex items-center hover:bg-gray-50 rounded-lg cursor-pointer'
onMouseEnter={() => setShowSwitchTip(appDetail.mode)}
onMouseLeave={() => setShowSwitchTip('')}
onClick={() => {
setOpen(false)
setShowSwitchModal(true)
}}
>
<span className='text-gray-700 text-sm leading-5'>{t('app.switch')}</span>
</div>
</>
)}
<Divider className="!my-1" />
<div className='h-9 py-2 px-3 mx-1 flex items-center hover:bg-gray-50 rounded-lg cursor-pointer' onClick={exportCheck}>
<span className='text-gray-700 text-sm leading-5'>{t('app.export')}</span>
</div>
{
(appDetail.mode === 'advanced-chat' || appDetail.mode === 'workflow') && (
<div
className='h-9 py-2 px-3 mx-1 flex items-center hover:bg-gray-50 rounded-lg cursor-pointer'
onClick={() => {
setOpen(false)
setShowImportDSLModal(true)
}}>
<span className='text-gray-700 text-sm leading-5'>{t('workflow.common.importDSL')}</span>
</div>
)
}
<Divider className="!my-1" />
<div className='group h-9 py-2 px-3 mx-1 flex items-center hover:bg-red-50 rounded-lg cursor-pointer' onClick={() => {
setOpen(false)
setShowConfirmDelete(true)
}}>
<span className='text-gray-700 text-sm leading-5 group-hover:text-red-500'>
{t('common.operation.delete')}
</span>
</div>
</div>
{/* switch tip */}
<div
className={cn(
'hidden absolute left-[324px] top-0 w-[376px] rounded-xl bg-white border-[0.5px] border-[rgba(0,0,0,0.05)] shadow-lg',
showSwitchTip && '!block',
)}
}}
>
<div className={cn(
'w-full h-[256px] bg-center bg-no-repeat bg-contain rounded-xl',
showSwitchTip === 'chat' && s.expertPic,
showSwitchTip === 'completion' && s.completionPic,
)} />
<div className='px-4 pb-2'>
<div className='flex items-center gap-1 text-gray-700 text-md leading-6 font-semibold'>
{showSwitchTip === 'chat' ? t('app.types.advanced') : t('app.types.workflow')}
<span className='px-1 rounded-[5px] bg-white border border-black/8 text-gray-500 text-[10px] leading-[18px] font-medium'>BETA</span>
</div>
<div className='text-orange-500 text-xs leading-[18px] font-medium'>{t('app.newApp.advancedFor').toLocaleUpperCase()}</div>
<div className='mt-1 text-gray-500 text-sm leading-5'>{t('app.newApp.advancedDescription')}</div>
</div>
</div>
<RiFileCopy2Line className='w-3.5 h-3.5 text-components-button-secondary-text' />
<span className='text-components-button-secondary-text system-xs-medium'>{t('app.duplicate')}</span>
</Button>
<Button
size={'small'}
variant={'secondary'}
className='gap-[1px]'
onClick={exportCheck}
>
<RiFileDownloadLine className='w-3.5 h-3.5 text-components-button-secondary-text' />
<span className='text-components-button-secondary-text system-xs-medium'>{t('app.export')}</span>
</Button>
{
(appDetail.mode === 'advanced-chat' || appDetail.mode === 'workflow') && (
<Button
size={'small'}
variant={'secondary'}
className='gap-[1px]'
onClick={() => {
setOpen(false)
setShowImportDSLModal(true)
}}
>
<RiFileUploadLine className='w-3.5 h-3.5 text-components-button-secondary-text' />
<span className='text-components-button-secondary-text system-xs-medium'>{t('workflow.common.importDSL')}</span>
</Button>
)
}
</div>
</PortalToFollowElemContent>
{showSwitchModal && (
<SwitchAppModal
inAppDetail
show={showSwitchModal}
appDetail={appDetail}
onClose={() => setShowSwitchModal(false)}
onSuccess={() => setShowSwitchModal(false)}
</div>
<div className='flex flex-1'>
<CardView
appId={appDetail.id}
isInPanel={true}
className='flex flex-col px-2 py-1 gap-2 grow overflow-auto'
/>
)}
{showEditModal && (
<CreateAppModal
isEditModal
appName={appDetail.name}
appIconType={appDetail.icon_type}
appIcon={appDetail.icon}
appIconBackground={appDetail.icon_background}
appIconUrl={appDetail.icon_url}
appDescription={appDetail.description}
appMode={appDetail.mode}
appUseIconAsAnswerIcon={appDetail.use_icon_as_answer_icon}
show={showEditModal}
onConfirm={onEdit}
onHide={() => setShowEditModal(false)}
/>
)}
{showDuplicateModal && (
<DuplicateAppModal
appName={appDetail.name}
icon_type={appDetail.icon_type}
icon={appDetail.icon}
icon_background={appDetail.icon_background}
icon_url={appDetail.icon_url}
show={showDuplicateModal}
onConfirm={onCopy}
onHide={() => setShowDuplicateModal(false)}
/>
)}
{showConfirmDelete && (
<Confirm
title={t('app.deleteAppConfirmTitle')}
content={t('app.deleteAppConfirmContent')}
isShow={showConfirmDelete}
onConfirm={onConfirmDelete}
onCancel={() => setShowConfirmDelete(false)}
/>
)}
{showImportDSLModal && (
<UpdateDSLModal
onCancel={() => setShowImportDSLModal(false)}
onBackup={exportCheck}
/>
)}
{secretEnvList.length > 0 && (
<DSLExportConfirmModal
envList={secretEnvList}
onConfirm={onExport}
onClose={() => setSecretEnvList([])}
/>
)}
</div>
</PortalToFollowElem>
</div>
<div className='flex p-2 flex-col justify-center items-start gap-3 self-stretch border-t-[0.5px] border-divider-subtle shrink-0 min-h-fit'>
<Button
size={'medium'}
variant={'ghost'}
className='gap-0.5'
onClick={() => {
setOpen(false)
setShowConfirmDelete(true)
}}
>
<RiDeleteBinLine className='w-4 h-4 text-text-tertiary' />
<span className='text-text-tertiary system-sm-medium'>{t('common.operation.deleteApp')}</span>
</Button>
</div>
</ContentDialog>
{showSwitchModal && (
<SwitchAppModal
inAppDetail
show={showSwitchModal}
appDetail={appDetail}
onClose={() => setShowSwitchModal(false)}
onSuccess={() => setShowSwitchModal(false)}
/>
)}
{showEditModal && (
<CreateAppModal
isEditModal
appName={appDetail.name}
appIconType={appDetail.icon_type}
appIcon={appDetail.icon}
appIconBackground={appDetail.icon_background}
appIconUrl={appDetail.icon_url}
appDescription={appDetail.description}
appMode={appDetail.mode}
appUseIconAsAnswerIcon={appDetail.use_icon_as_answer_icon}
show={showEditModal}
onConfirm={onEdit}
onHide={() => setShowEditModal(false)}
/>
)}
{showDuplicateModal && (
<DuplicateAppModal
appName={appDetail.name}
icon_type={appDetail.icon_type}
icon={appDetail.icon}
icon_background={appDetail.icon_background}
icon_url={appDetail.icon_url}
show={showDuplicateModal}
onConfirm={onCopy}
onHide={() => setShowDuplicateModal(false)}
/>
)}
{showConfirmDelete && (
<Confirm
title={t('app.deleteAppConfirmTitle')}
content={t('app.deleteAppConfirmContent')}
isShow={showConfirmDelete}
onConfirm={onConfirmDelete}
onCancel={() => setShowConfirmDelete(false)}
/>
)}
{showImportDSLModal && (
<UpdateDSLModal
onCancel={() => setShowImportDSLModal(false)}
onBackup={exportCheck}
/>
)}
{secretEnvList.length > 0 && (
<DSLExportConfirmModal
envList={secretEnvList}
onConfirm={onExport}
onClose={() => setSecretEnvList([])}
/>
)}
</div>
)
}

View File

@@ -58,7 +58,7 @@ export default function AppBasic({ icon, icon_background, name, isExternal, type
const { t } = useTranslation()
return (
<div className="flex items-start p-1">
<div className="flex items-center grow">
{icon && icon_background && iconType === 'app' && (
<div className='flex-shrink-0 mr-3'>
<AppIcon icon={icon} background={icon_background} />
@@ -71,8 +71,10 @@ export default function AppBasic({ icon, icon_background, name, isExternal, type
}
{mode === 'expand' && <div className="group">
<div className={`flex flex-row items-center text-sm font-semibold text-gray-700 group-hover:text-gray-900 break-all ${textStyle?.main ?? ''}`}>
{name}
<div className={`flex flex-row items-center system-md-semibold text-text-secondary group-hover:text-text-primary ${textStyle?.main ?? ''}`}>
<div className="max-w-[180px] truncate">
{name}
</div>
{hoverTip
&& <Tooltip
popupContent={
@@ -86,7 +88,6 @@ export default function AppBasic({ icon, icon_background, name, isExternal, type
/>
}
</div>
<div className={`text-xs font-normal text-gray-500 group-hover:text-gray-700 break-all ${textStyle?.extra ?? ''}`}>{type}</div>
<div className='text-text-tertiary system-2xs-medium-uppercase'>{isExternal ? t('dataset.externalTag') : ''}</div>
</div>}
</div>

View File

@@ -57,7 +57,7 @@ const AppDetailNav = ({ title, desc, isExternal, icon, icon_background, navigati
<div
className={`
shrink-0
${expand ? 'p-3' : 'p-2'}
${expand ? 'p-2' : 'p-1'}
`}
>
{iconType === 'app' && (

View File

@@ -44,7 +44,7 @@ export default function NavLink({
key={name}
href={href}
className={classNames(
isActive ? 'bg-state-accent-active text-text-accent font-semibold' : 'text-components-menu-item-text hover:bg-gray-100 hover:text-components-menu-item-text-hover',
isActive ? 'bg-state-accent-active text-text-accent font-semibold' : 'text-components-menu-item-text hover:bg-state-base-hover hover:text-components-menu-item-text-hover',
'group flex items-center h-9 rounded-md py-2 text-sm font-normal',
mode === 'expand' ? 'px-3' : 'px-2.5',
)}

View File

@@ -186,17 +186,15 @@ const Apps = ({
<div className='w-[180px] h-8'></div>
</div>
<div className='relative flex flex-1 overflow-y-auto'>
{!searchKeywords && <div className='h-full w-[200px] p-4'>
<Sidebar current={currCategory as AppCategories} categories={categories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
{!searchKeywords && <div className='w-[200px] h-full p-4'>
<Sidebar current={currCategory as AppCategories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
</div>}
<div className='flex-1 h-full overflow-auto shrink-0 grow p-6 pt-2 border-l border-divider-burn'>
{searchFilteredList && searchFilteredList.length > 0 && <>
<div className='pt-4 pb-1'>
{searchKeywords
? <p className='title-md-semi-bold text-text-tertiary'>{searchFilteredList.length > 1 ? t('app.newApp.foundResults', { count: searchFilteredList.length }) : t('app.newApp.foundResult', { count: searchFilteredList.length })}</p>
: <div className='flex h-[22px] items-center'>
<AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />
</div>}
: <AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />}
</div>
<div
className={cn(

View File

@@ -1,29 +1,39 @@
'use client'
import { RiStickyNoteAddLine, RiThumbUpLine } from '@remixicon/react'
import { RiAppsFill, RiChatSmileAiFill, RiExchange2Fill, RiPassPendingFill, RiQuillPenAiFill, RiSpeakAiFill, RiStickyNoteAddLine, RiTerminalBoxFill, RiThumbUpFill } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import classNames from '@/utils/classnames'
import Divider from '@/app/components/base/divider'
export enum AppCategories {
RECOMMENDED = 'Recommended',
ASSISTANT = 'Assistant',
AGENT = 'Agent',
HR = 'HR',
PROGRAMMING = 'Programming',
WORKFLOW = 'Workflow',
WRITING = 'Writing',
}
type SidebarProps = {
current: AppCategories | string
categories: string[]
onClick?: (category: AppCategories | string) => void
current: AppCategories
onClick?: (category: AppCategories) => void
onCreateFromBlank?: () => void
}
export default function Sidebar({ current, categories, onClick, onCreateFromBlank }: SidebarProps) {
export default function Sidebar({ current, onClick, onCreateFromBlank }: SidebarProps) {
const { t } = useTranslation()
return <div className="flex h-full w-full flex-col">
<ul className='pt-0.5'>
return <div className="w-full h-full flex flex-col">
<ul>
<CategoryItem category={AppCategories.RECOMMENDED} active={current === AppCategories.RECOMMENDED} onClick={onClick} />
</ul>
<div className='system-xs-medium-uppercase mb-0.5 mt-3 px-3 pb-1 pt-2 text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
<ul className='flex grow flex-col gap-0.5'>
{categories.map(category => (<CategoryItem key={category} category={category} active={current === category} onClick={onClick} />))}
<div className='px-3 pt-2 pb-1 system-xs-medium-uppercase text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
<ul className='flex-grow flex flex-col gap-0.5'>
<CategoryItem category={AppCategories.ASSISTANT} active={current === AppCategories.ASSISTANT} onClick={onClick} />
<CategoryItem category={AppCategories.AGENT} active={current === AppCategories.AGENT} onClick={onClick} />
<CategoryItem category={AppCategories.HR} active={current === AppCategories.HR} onClick={onClick} />
<CategoryItem category={AppCategories.PROGRAMMING} active={current === AppCategories.PROGRAMMING} onClick={onClick} />
<CategoryItem category={AppCategories.WORKFLOW} active={current === AppCategories.WORKFLOW} onClick={onClick} />
<CategoryItem category={AppCategories.WRITING} active={current === AppCategories.WRITING} onClick={onClick} />
</ul>
<Divider bgStyle='gradient' />
<div className='px-3 py-1 flex items-center gap-1 text-text-tertiary cursor-pointer' onClick={onCreateFromBlank}>
@@ -35,26 +45,47 @@ export default function Sidebar({ current, categories, onClick, onCreateFromBlan
type CategoryItemProps = {
active: boolean
category: AppCategories | string
onClick?: (category: AppCategories | string) => void
category: AppCategories
onClick?: (category: AppCategories) => void
}
function CategoryItem({ category, active, onClick }: CategoryItemProps) {
return <li
className={classNames('p-1 pl-3 h-8 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
className={classNames('p-1 pl-3 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
onClick={() => { onClick?.(category) }}>
{category === AppCategories.RECOMMENDED && <div className='inline-flex h-5 w-5 items-center justify-center rounded-md'>
<RiThumbUpLine className='h-4 w-4 text-components-menu-item-text group-[.active]:text-components-menu-item-text-active' />
</div>}
<div className='w-5 h-5 inline-flex items-center justify-center rounded-md border border-divider-regular bg-components-icon-bg-midnight-solid group-[.active]:bg-components-icon-bg-blue-solid'>
<AppCategoryIcon category={category} />
</div>
<AppCategoryLabel category={category}
className={classNames('system-sm-medium text-components-menu-item-text group-[.active]:text-components-menu-item-text-active group-hover:text-components-menu-item-text-hover', active && 'system-sm-semibold')} />
</li >
}
type AppCategoryLabelProps = {
category: AppCategories | string
category: AppCategories
className?: string
}
export function AppCategoryLabel({ category, className }: AppCategoryLabelProps) {
const { t } = useTranslation()
return <span className={className}>{category === AppCategories.RECOMMENDED ? t('app.newAppFromTemplate.sidebar.Recommended') : category}</span>
return <span className={className}>{t(`app.newAppFromTemplate.sidebar.${category}`)}</span>
}
type AppCategoryIconProps = {
category: AppCategories
}
function AppCategoryIcon({ category }: AppCategoryIconProps) {
if (category === AppCategories.AGENT)
return <RiSpeakAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.ASSISTANT)
return <RiChatSmileAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.HR)
return <RiPassPendingFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.PROGRAMMING)
return <RiTerminalBoxFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.RECOMMENDED)
return <RiThumbUpFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.WRITING)
return <RiQuillPenAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
if (category === AppCategories.WORKFLOW)
return <RiExchange2Fill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
return <RiAppsFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
}

View File

@@ -312,7 +312,7 @@ function AppPreview({ mode }: { mode: AppMode }) {
'chat': {
title: t('app.types.chatbot'),
description: t('app.newApp.chatbotUserDescription'),
link: 'https://docs.dify.ai/guides/application-orchestrate#application_type',
link: 'https://docs.dify.ai/guides/application-orchestrate/conversation-application?fallback=true',
},
'advanced-chat': {
title: t('app.types.advanced'),

View File

@@ -1,46 +0,0 @@
import { useTranslation } from 'react-i18next'
import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button'
type DSLConfirmModalProps = {
versions?: {
importedVersion: string
systemVersion: string
}
onCancel: () => void
onConfirm: () => void
confirmDisabled?: boolean
}
const DSLConfirmModal = ({
versions = { importedVersion: '', systemVersion: '' },
onCancel,
onConfirm,
confirmDisabled = false,
}: DSLConfirmModalProps) => {
const { t } = useTranslation()
return (
<Modal
isShow
onClose={() => onCancel()}
className='w-[480px]'
>
<div className='flex flex-col items-start gap-2 self-stretch pb-4'>
<div className='title-2xl-semi-bold text-text-primary'>{t('app.newApp.appCreateDSLErrorTitle')}</div>
<div className='system-md-regular flex grow flex-col text-text-secondary'>
<div>{t('app.newApp.appCreateDSLErrorPart1')}</div>
<div>{t('app.newApp.appCreateDSLErrorPart2')}</div>
<br />
<div>{t('app.newApp.appCreateDSLErrorPart3')}<span className='system-md-medium'>{versions.importedVersion}</span></div>
<div>{t('app.newApp.appCreateDSLErrorPart4')}<span className='system-md-medium'>{versions.systemVersion}</span></div>
</div>
</div>
<div className='flex items-start justify-end gap-2 self-stretch pt-6'>
<Button variant='secondary' onClick={() => onCancel()}>{t('app.newApp.Cancel')}</Button>
<Button variant='primary' destructive onClick={onConfirm} disabled={confirmDisabled}>{t('app.newApp.Confirm')}</Button>
</div>
</Modal>
)
}
export default DSLConfirmModal

View File

@@ -1,14 +1,14 @@
'use client'
import type { HTMLProps } from 'react'
import React, { useMemo, useState } from 'react'
import {
Cog8ToothIcon,
DocumentTextIcon,
PaintBrushIcon,
RocketLaunchIcon,
} from '@heroicons/react/24/outline'
import { usePathname, useRouter } from 'next/navigation'
import { useTranslation } from 'react-i18next'
import {
RiBookOpenLine,
RiEqualizer2Line,
RiExternalLinkLine,
RiPaintBrushLine,
RiWindowLine,
} from '@remixicon/react'
import SettingsModal from './settings'
import EmbeddedModal from './embedded'
import CustomizeModal from './customize'
@@ -18,7 +18,6 @@ import Tooltip from '@/app/components/base/tooltip'
import AppBasic from '@/app/components/app-sidebar/basic'
import { asyncRunSafe, randomString } from '@/utils'
import Button from '@/app/components/base/button'
import Tag from '@/app/components/base/tag'
import Switch from '@/app/components/base/switch'
import Divider from '@/app/components/base/divider'
import CopyFeedback from '@/app/components/base/copy-feedback'
@@ -28,10 +27,12 @@ import SecretKeyButton from '@/app/components/develop/secret-key/secret-key-butt
import type { AppDetailResponse } from '@/models/app'
import { useAppContext } from '@/context/app-context'
import type { AppSSO } from '@/types/app'
import Indicator from '@/app/components/header/indicator'
export type IAppCardProps = {
className?: string
appInfo: AppDetailResponse & Partial<AppSSO>
isInPanel?: boolean
cardType?: 'api' | 'webapp'
customBgColor?: string
onChangeStatus: (val: boolean) => Promise<void>
@@ -39,12 +40,9 @@ export type IAppCardProps = {
onGenerateCode?: () => Promise<void>
}
const EmbedIcon = ({ className = '' }: HTMLProps<HTMLDivElement>) => {
return <div className={`${style.codeBrowserIcon} ${className}`}></div>
}
function AppCard({
appInfo,
isInPanel,
cardType = 'webapp',
customBgColor,
onChangeStatus,
@@ -66,17 +64,18 @@ function AppCard({
const OPERATIONS_MAP = useMemo(() => {
const operationsMap = {
webapp: [
{ opName: t('appOverview.overview.appInfo.preview'), opIcon: RocketLaunchIcon },
{ opName: t('appOverview.overview.appInfo.customize.entry'), opIcon: PaintBrushIcon },
{ opName: t('appOverview.overview.appInfo.launch'), opIcon: RiExternalLinkLine },
] as { opName: string; opIcon: any }[],
api: [{ opName: t('appOverview.overview.apiInfo.doc'), opIcon: DocumentTextIcon }],
api: [{ opName: t('appOverview.overview.apiInfo.doc'), opIcon: RiBookOpenLine }],
app: [],
}
if (appInfo.mode !== 'completion' && appInfo.mode !== 'workflow')
operationsMap.webapp.push({ opName: t('appOverview.overview.appInfo.embedded.entry'), opIcon: EmbedIcon })
operationsMap.webapp.push({ opName: t('appOverview.overview.appInfo.embedded.entry'), opIcon: RiWindowLine })
operationsMap.webapp.push({ opName: t('appOverview.overview.appInfo.customize.entry'), opIcon: RiPaintBrushLine })
if (isCurrentWorkspaceEditor)
operationsMap.webapp.push({ opName: t('appOverview.overview.appInfo.settings.entry'), opIcon: Cog8ToothIcon })
operationsMap.webapp.push({ opName: t('appOverview.overview.appInfo.settings.entry'), opIcon: RiEqualizer2Line })
return operationsMap
}, [isCurrentWorkspaceEditor, appInfo, t])
@@ -92,13 +91,9 @@ function AppCard({
const appUrl = `${app_base_url}/${appMode}/${access_token}`
const apiUrl = appInfo?.api_base_url
let bgColor = 'bg-primary-50 bg-opacity-40'
if (cardType === 'api')
bgColor = 'bg-purple-50'
const genClickFuncByName = (opName: string) => {
switch (opName) {
case t('appOverview.overview.appInfo.preview'):
case t('appOverview.overview.appInfo.launch'):
return () => {
window.open(appUrl, '_blank')
}
@@ -135,49 +130,50 @@ function AppCard({
return (
<div
className={
`shadow-xs border-[0.5px] rounded-lg border-gray-200 ${className ?? ''}`}
`${isInPanel ? 'border-l-[0.5px] border-t' : 'shadow-xs border-[0.5px]'} rounded-xl border-effects-highlight w-full max-w-full ${className ?? ''}`}
>
<div className={`px-6 py-5 ${customBgColor ?? bgColor} rounded-lg`}>
<div className="mb-2.5 flex flex-row items-start justify-between">
<AppBasic
iconType={cardType}
icon={appInfo.icon}
icon_background={appInfo.icon_background}
name={basicName}
type={
isApp
? t('appOverview.overview.appInfo.explanation')
: t('appOverview.overview.apiInfo.explanation')
}
/>
<div className="flex flex-row items-center h-9">
<Tag className="mr-2" color={runningStatus ? 'green' : 'yellow'}>
{runningStatus
? t('appOverview.overview.status.running')
: t('appOverview.overview.status.disable')}
</Tag>
<div className={`${customBgColor ?? 'bg-background-default'} rounded-xl`}>
<div className='flex flex-col p-3 justify-center items-start gap-3 self-stretch border-b-[0.5px] border-divider-subtle w-full'>
<div className='flex items-center gap-3 self-stretch w-full'>
<AppBasic
iconType={cardType}
icon={appInfo.icon}
icon_background={appInfo.icon_background}
name={basicName}
type={
isApp
? t('appOverview.overview.appInfo.explanation')
: t('appOverview.overview.apiInfo.explanation')
}
/>
<div className='flex items-center gap-1'>
<Indicator color={runningStatus ? 'green' : 'yellow'} />
<div className={`${runningStatus ? 'text-text-success' : 'text-text-warning'} system-xs-semibold-uppercase`}>
{runningStatus
? t('appOverview.overview.status.running')
: t('appOverview.overview.status.disable')}
</div>
</div>
<Switch defaultValue={runningStatus} onChange={onChangeStatus} disabled={toggleDisabled} />
</div>
</div>
<div className="flex flex-col justify-center py-2">
<div className="py-1">
<div className="pb-1 text-xs text-gray-500">
<div className='flex flex-col justify-center items-start self-stretch'>
<div className="pb-1 system-xs-medium text-text-tertiary">
{isApp
? t('appOverview.overview.appInfo.accessibleAddress')
: t('appOverview.overview.apiInfo.accessibleAddress')}
</div>
<div className="w-full h-9 pl-2 pr-0.5 py-0.5 bg-black bg-opacity-2 rounded-lg border border-black border-opacity-5 justify-start items-center inline-flex">
<div className="h-4 px-2 justify-start items-start gap-2 flex flex-1 min-w-0">
<div className="text-gray-700 text-xs font-medium text-ellipsis overflow-hidden whitespace-nowrap">
<div className="w-full h-9 pl-2 p-1 bg-components-input-bg-normal rounded-lg items-center inline-flex gap-0.5">
<div className="h-4 px-1 justify-start items-start gap-2 flex flex-1 min-w-0">
<div className="text-text-secondary text-xs font-medium text-ellipsis overflow-hidden whitespace-nowrap">
{isApp ? appUrl : apiUrl}
</div>
</div>
<Divider type="vertical" className="!h-3.5 shrink-0 !mx-0.5" />
{isApp && <ShareQRCode content={isApp ? appUrl : apiUrl} selectorId={randomString(8)} className={'hover:bg-gray-200'} />}
<CopyFeedback
content={isApp ? appUrl : apiUrl}
className={'hover:bg-gray-200'}
className={'!size-6'}
/>
{isApp && <ShareQRCode content={isApp ? appUrl : apiUrl} className='z-50 !size-6 hover:bg-state-base-hover rounded-md' selectorId={randomString(8)} />}
{isApp && <Divider type="vertical" className="!h-3.5 shrink-0 !mx-0.5" />}
{/* button copy link/ button regenerate */}
{showConfirmDelete && (
<Confirm
@@ -197,7 +193,7 @@ function AppCard({
popupContent={t('appOverview.overview.appInfo.regenerate') || ''}
>
<div
className="w-8 h-8 ml-0.5 cursor-pointer hover:bg-gray-200 rounded-lg"
className="w-6 h-6 cursor-pointer hover:bg-state-base-hover rounded-md"
onClick={() => setShowConfirmDelete(true)}
>
<div
@@ -210,8 +206,8 @@ function AppCard({
</div>
</div>
</div>
<div className={'pt-2 flex flex-row items-center flex-wrap gap-y-2'}>
{!isApp && <SecretKeyButton className='flex-shrink-0 !h-8 bg-white mr-2' textCls='!text-gray-700 font-medium' iconCls='stroke-[1.2px]' appId={appInfo.id} />}
<div className={'flex p-3 items-center gap-1 self-stretch'}>
{!isApp && <SecretKeyButton appId={appInfo.id} />}
{OPERATIONS_MAP[cardType].map((op) => {
const disabled
= op.opName === t('appOverview.overview.appInfo.settings.entry')
@@ -219,7 +215,9 @@ function AppCard({
: !runningStatus
return (
<Button
className="mr-2"
className="mr-1 min-w-[88px]"
size="small"
variant={'ghost'}
key={op.opName}
onClick={genClickFuncByName(op.opName)}
disabled={disabled}
@@ -230,9 +228,9 @@ function AppCard({
}
popupClassName={disabled ? 'mt-[-8px]' : '!hidden'}
>
<div className="flex flex-row items-center">
<op.opIcon className="h-4 w-4 mr-1.5 stroke-[1.8px]" />
<span className="text-[13px]">{op.opName}</span>
<div className="flex items-center justify-center gap-[1px]">
<op.opIcon className="h-3.5 w-3.5" />
<div className={`${runningStatus ? 'text-text-tertiary' : 'text-components-button-ghost-text-disabled'} system-xs-medium px-[3px]`}>{op.opName}</div>
</div>
</Tooltip>
</Button>

View File

@@ -24,7 +24,7 @@ const OPTION_MAP = {
iframe: {
getContent: (url: string, token: string) =>
`<iframe
src="${url}/chat/${token}"
src="${url}/chatbot/${token}"
style="width: 100%; height: 100%; min-height: 700px"
frameborder="0"
allow="microphone">
@@ -35,12 +35,12 @@ const OPTION_MAP = {
`<script>
window.difyChatbotConfig = {
token: '${token}'${isTestEnv
? `,
? `,
isDev: true`
: ''}${IS_CE_EDITION
? `,
: ''}${IS_CE_EDITION
? `,
baseUrl: '${url}'`
: ''}
: ''}
}
</script>
<script

View File
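The hunk above switches the generated iframe embed from the `/chat/` route to `/chatbot/` and adjusts whitespace in the script variant. As a rough illustration, calling the updated iframe option with placeholder values (the URL and token below are made up) would produce:

// Hypothetical call; OPTION_MAP is the constant shown in the diff above.
const html = OPTION_MAP.iframe.getContent('https://udify.app', 'AbCdEf123')
// html now points the embed at the chatbot route, roughly:
// <iframe
//  src="https://udify.app/chatbot/AbCdEf123"
//  style="width: 100%; height: 100%; min-height: 700px"
//  frameborder="0"
//  allow="microphone">
// </iframe>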

@@ -34,10 +34,11 @@ export type ButtonProps = {
destructive?: boolean
loading?: boolean
styleCss?: CSSProperties
spinnerClassName?: string
} & React.ButtonHTMLAttributes<HTMLButtonElement> & VariantProps<typeof buttonVariants>
const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
({ className, variant, size, destructive, loading, styleCss, children, ...props }, ref) => {
({ className, variant, size, destructive, loading, styleCss, children, spinnerClassName, ...props }, ref) => {
return (
<button
type='button'
@@ -50,7 +51,7 @@ const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
{...props}
>
{children}
{loading && <Spinner loading={loading} className='!text-white !h-3 !w-3 !border-2 !ml-1' />}
{loading && <Spinner loading={loading} className={classNames('!text-white !h-3 !w-3 !border-2 !ml-1', spinnerClassName)} />}
</button>
)
},

View File
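The Button change adds an optional `spinnerClassName` prop so callers can restyle the loading spinner without overriding the default white 12px style globally. A hypothetical usage (the class name passed to the spinner is a placeholder):

// Hypothetical usage of the new spinnerClassName prop.
import Button from '@/app/components/base/button'

const SaveButton = ({ saving, onSave }: { saving: boolean; onSave: () => void }) => (
  <Button variant='primary' loading={saving} spinnerClassName='!text-text-primary-on-surface' onClick={onSave}>
    Save
  </Button>
)

export default SaveButton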

@@ -11,12 +11,10 @@ import { useLocalStorageState } from 'ahooks'
import produce from 'immer'
import type {
ChatConfig,
ChatItem,
Feedback,
} from '../types'
import { CONVERSATION_ID_INFO } from '../constants'
import { buildChatItemTree, getProcessedInputsFromUrlParams } from '../utils'
import { getProcessedFilesFromResponse } from '../../file-uploader/utils'
import { getPrevChatList, getProcessedInputsFromUrlParams } from '../utils'
import {
fetchAppInfo,
fetchAppMeta,
@@ -34,33 +32,6 @@ import { useToastContext } from '@/app/components/base/toast'
import { changeLanguage } from '@/i18n/i18next-config'
import { InputVarType } from '@/app/components/workflow/types'
import { TransferMethod } from '@/types/app'
import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils'
function getFormattedChatList(messages: any[]) {
const newChatList: ChatItem[] = []
messages.forEach((item) => {
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
newChatList.push({
id: `question-${item.id}`,
content: item.query,
isAnswer: false,
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
parentMessageId: item.parent_message_id || undefined,
})
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
newChatList.push({
id: item.id,
content: item.answer,
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
feedback: item.feedback,
isAnswer: true,
citation: item.retriever_resources,
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
parentMessageId: `question-${item.id}`,
})
})
return newChatList
}
export const useEmbeddedChatbot = () => {
const isInstalledApp = false
@@ -106,7 +77,7 @@ export const useEmbeddedChatbot = () => {
const appPrevChatList = useMemo(
() => (currentConversationId && appChatListData?.data.length)
? buildChatItemTree(getFormattedChatList(appChatListData.data))
? getPrevChatList(appChatListData.data)
: [],
[appChatListData, currentConversationId],
)

View File

@@ -1,8 +1,6 @@
import { UUID_NIL } from './constants'
import type { IChatItem } from './chat/type'
import type { ChatItem, ChatItemInTree } from './types'
import { addFileInfos, sortAgentSorts } from '../../tools/utils'
import { getProcessedFilesFromResponse } from '../file-uploader/utils'
async function decodeBase64AndDecompress(base64String: string) {
const binaryString = atob(base64String)
@@ -21,60 +19,6 @@ function getProcessedInputsFromUrlParams(): Record<string, any> {
return inputs
}
function appendQAToChatList(chatList: ChatItem[], item: any) {
// we append answer first and then question since will reverse the whole chatList later
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
chatList.push({
id: item.id,
content: item.answer,
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
feedback: item.feedback,
isAnswer: true,
citation: item.retriever_resources,
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
})
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
chatList.push({
id: `question-${item.id}`,
content: item.query,
isAnswer: false,
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
})
}
/**
* Computes the latest thread messages from all messages of the conversation.
 * Same logic as the backend implementation in `api/core/prompt/utils/extract_thread_messages.py`
*
* @param fetchedMessages - The history chat list data from the backend, sorted by created_at in descending order. This includes all flattened history messages of the conversation.
* @returns An array of ChatItems representing the latest thread.
*/
function getPrevChatList(fetchedMessages: any[]) {
const ret: ChatItem[] = []
let nextMessageId = null
for (const item of fetchedMessages) {
if (!item.parent_message_id) {
appendQAToChatList(ret, item)
break
}
if (!nextMessageId) {
appendQAToChatList(ret, item)
nextMessageId = item.parent_message_id
}
else {
if (item.id === nextMessageId || nextMessageId === UUID_NIL) {
appendQAToChatList(ret, item)
nextMessageId = item.parent_message_id
}
}
}
return ret.reverse()
}
function isValidGeneratedAnswer(item?: ChatItem | ChatItemInTree): boolean {
return !!item && item.isAnswer && !item.id.startsWith('answer-placeholder-') && !item.isOpeningStatement
}
@@ -220,7 +164,6 @@ function getThreadMessages(tree: ChatItemInTree[], targetMessageId?: string): Ch
export {
getProcessedInputsFromUrlParams,
isValidGeneratedAnswer,
getPrevChatList,
getLastAnswer,
buildChatItemTree,
getThreadMessages,

View File
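The removed `getPrevChatList` helper reconstructs the latest conversation thread from the flattened history by following `parent_message_id` links, starting from the newest message and stopping at the root, then reversing into chronological order. A minimal sketch of that traversal on simplified data (the `FlatMessage` shape and function name are illustrative; `UUID_NIL` is assumed to be the nil UUID):

// Simplified illustration of the latest-thread extraction; not the component code above.
type FlatMessage = { id: string; parent_message_id: string | null }

const UUID_NIL = '00000000-0000-0000-0000-000000000000' // assumed nil-UUID sentinel

// fetchedMessages is expected to be sorted by created_at in descending order.
function extractLatestThread(fetchedMessages: FlatMessage[]): FlatMessage[] {
  const thread: FlatMessage[] = []
  let nextMessageId: string | null = null
  for (const item of fetchedMessages) {
    if (!item.parent_message_id) {
      thread.push(item) // reached the thread root
      break
    }
    if (!nextMessageId || item.id === nextMessageId || nextMessageId === UUID_NIL) {
      thread.push(item)
      nextMessageId = item.parent_message_id
    }
  }
  return thread.reverse() // oldest first
}

Given a history where each message's parent_message_id points at the previous answer, only the chain reachable from the newest message survives; branches left behind by regenerated answers are skipped.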

@@ -0,0 +1,59 @@
import { Fragment, type ReactNode } from 'react'
import { Transition } from '@headlessui/react'
import classNames from '@/utils/classnames'
type ContentDialogProps = {
className?: string
show: boolean
onClose?: () => void
children: ReactNode
}
const ContentDialog = ({
className,
show,
onClose,
children,
}: ContentDialogProps) => {
return (
<Transition
show={show}
as="div"
className="absolute left-0 top-0 w-full h-full z-20 p-2 box-border"
>
<Transition.Child
as={Fragment}
enter="ease-out duration-300"
enterFrom="opacity-0"
enterTo="opacity-100"
leave="ease-in duration-200"
leaveFrom="opacity-100"
leaveTo="opacity-0"
>
<div
className="absolute left-0 inset-0 w-full bg-app-detail-overlay-bg"
onClick={onClose}
/>
</Transition.Child>
<Transition.Child
as={Fragment}
enter="transform transition ease-out duration-300"
enterFrom="-translate-x-full"
enterTo="translate-x-0"
leave="transform transition ease-in duration-200"
leaveFrom="translate-x-0"
leaveTo="-translate-x-full"
>
<div className={classNames(
'absolute left-0 w-full bg-app-detail-bg border-r border-divider-burn',
className,
)}>
{children}
</div>
</Transition.Child>
</Transition>
)
}
export default ContentDialog

View File
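The new `ContentDialog` renders an overlay plus a panel that slides in from the left; since it is positioned absolutely, it expects a relatively positioned parent. A hypothetical usage (state handling and class names are placeholders):

// Hypothetical usage of the ContentDialog component added above.
import { useState } from 'react'
import ContentDialog from './content-dialog'

const AppDetailPanel = () => {
  const [showSettings, setShowSettings] = useState(false)
  return (
    <div className='relative h-full'>
      <button type='button' onClick={() => setShowSettings(true)}>Open settings</button>
      <ContentDialog show={showSettings} onClose={() => setShowSettings(false)} className='p-4'>
        <div>Settings content goes here</div>
      </ContentDialog>
    </div>
  )
}

export default AppDetailPanel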

@@ -35,7 +35,7 @@ const CopyFeedback = ({ content, className }: Props) => {
}
>
<div
className={`w-8 h-8 cursor-pointer hover:bg-gray-100 rounded-lg ${
className={`w-8 h-8 cursor-pointer hover:bg-state-base-hover rounded-md ${
className ?? ''
}`}
>

View File

@@ -20,7 +20,7 @@
"width": "16",
"height": "16",
"rx": "5",
"fill": "#F2F4F7"
"fill": "currentColor"
},
"children": []
},
@@ -33,7 +33,7 @@
"width": "233",
"height": "10",
"rx": "3",
"fill": "#EAECF0"
"fill": "currentColor"
},
"children": []
},
@@ -46,7 +46,7 @@
"width": "345",
"height": "6",
"rx": "3",
"fill": "#F2F4F7"
"fill": "currentColor"
},
"children": []
}

View File

@@ -10,7 +10,6 @@ import SyntaxHighlighter from 'react-syntax-highlighter'
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
import { Component, memo, useMemo, useRef, useState } from 'react'
import type { CodeComponent } from 'react-markdown/lib/ast-to-react'
import SVGRenderer from './svg-gallery'
import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
import SVGBtn from '@/app/components/base/svg'
@@ -19,7 +18,7 @@ import ImageGallery from '@/app/components/base/image-gallery'
import { useChatContext } from '@/app/components/base/chat/chat/context'
import VideoGallery from '@/app/components/base/video-gallery'
import AudioGallery from '@/app/components/base/audio-gallery'
// import SVGRenderer from '@/app/components/base/svg-gallery'
import SVGRenderer from '@/app/components/base/svg-gallery'
import MarkdownButton from '@/app/components/base/markdown-blocks/button'
import MarkdownForm from '@/app/components/base/markdown-blocks/form'

View File

@@ -1,4 +0,0 @@
.setting-icon {
background: url(./assets/setting.svg) center center no-repeat;
background-size: 14px 14px;
}

View File

@@ -1,10 +1,9 @@
import { useCallback, useEffect, useMemo, useState } from 'react'
import useSWR from 'swr'
import s from './base.module.css'
import { RiEqualizer2Line } from '@remixicon/react'
import WorkspaceSelector from './workspace-selector'
import SearchInput from './search-input'
import PageSelector from './page-selector'
import cn from '@/utils/classnames'
import { preImportNotionPages } from '@/service/datasets'
import { NotionConnector } from '@/app/components/datasets/create/step-one'
import type { DataSourceNotionPageMap, DataSourceNotionWorkspace, NotionPage } from '@/models/common'
@@ -88,23 +87,24 @@ const NotionPageSelector = ({
}, [firstWorkspaceId])
return (
<div className='bg-gray-25 border border-gray-200 rounded-xl'>
<div className='bg-background-default-subtle border border-components-panel-border rounded-xl'>
{
data?.notion_info?.length
? (
<>
<div className='flex items-center pl-[10px] pr-2 h-11 bg-white border-b border-b-gray-200 rounded-t-xl'>
<WorkspaceSelector
value={currentWorkspaceId || firstWorkspaceId}
items={notionWorkspaces}
onSelect={handleSelectWorkspace}
/>
<div className='mx-1 w-[1px] h-3 bg-gray-200' />
<div
className={cn(s['setting-icon'], 'w-6 h-6 cursor-pointer')}
onClick={() => setShowAccountSettingModal({ payload: 'data-source', onCancelCallback: mutate })}
/>
<div className='grow' />
<div className='flex items-center gap-x-2 p-2 h-12 bg-components-panel-bg border-b border-b-divider-regular rounded-t-xl'>
<div className='grow flex items-center gap-x-1'>
<WorkspaceSelector
value={currentWorkspaceId || firstWorkspaceId}
items={notionWorkspaces}
onSelect={handleSelectWorkspace}
/>
<div className='mx-1 w-[1px] h-3 bg-divider-regular' />
<RiEqualizer2Line
className='w-4 h-4 cursor-pointer text-text-tertiary'
onClick={() => setShowAccountSettingModal({ payload: 'data-source', onCancelCallback: mutate })}
/>
</div>
<SearchInput
value={searchValue}
onChange={handleSearchValueChange}

View File

@@ -1,17 +0,0 @@
.arrow {
width: 20px;
height: 20px;
background: url(../assets/down-arrow.svg) center center no-repeat;
background-size: 16px 16px;
transform: rotate(-90deg);
}
.arrow-expand {
transform: rotate(0);
}
.preview-item {
background-color: #eff4ff;
border: 1px solid #D1E0FF;
box-shadow: 0px 1px 2px rgba(16, 24, 40, 0.05);
}

View File

@@ -2,9 +2,9 @@ import { memo, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { FixedSizeList as List, areEqual } from 'react-window'
import type { ListChildComponentProps } from 'react-window'
import { RiArrowDownSLine, RiArrowRightSLine } from '@remixicon/react'
import Checkbox from '../../checkbox'
import NotionIcon from '../../notion-icon'
import s from './index.module.css'
import cn from '@/utils/classnames'
import type { DataSourceNotionPage, DataSourceNotionPageMap } from '@/models/common'
@@ -94,10 +94,16 @@ const ItemComponent = ({ index, style, data }: ListChildComponentProps<{
if (hasChild) {
return (
<div
className={cn(s.arrow, current.expand && s['arrow-expand'], 'shrink-0 mr-1 w-5 h-5 hover:bg-gray-200 rounded-md')}
className='flex items-center justify-center shrink-0 mr-1 w-5 h-5 hover:bg-components-button-ghost-bg-hover rounded-md'
style={{ marginLeft: current.depth * 8 }}
onClick={() => handleToggle(index)}
/>
>
{
current.expand
? <RiArrowDownSLine className='w-4 h-4 text-text-tertiary' />
: <RiArrowRightSLine className='w-4 h-4 text-text-tertiary' />
}
</div>
)
}
if (current.parent_id === 'root' || !pagesMap[current.parent_id]) {
@@ -112,14 +118,12 @@ const ItemComponent = ({ index, style, data }: ListChildComponentProps<{
return (
<div
className={cn('group flex items-center pl-2 pr-[2px] rounded-md border border-transparent hover:bg-gray-100 cursor-pointer', previewPageId === current.page_id && s['preview-item'])}
className={cn('group flex items-center pl-2 pr-[2px] rounded-md hover:bg-state-base-hover cursor-pointer',
previewPageId === current.page_id && 'bg-state-base-hover')}
style={{ ...style, top: style.top as number + 8, left: 8, right: 8, width: 'calc(100% - 16px)' }}
>
<Checkbox
className={cn(
'shrink-0 mr-2 group-hover:border-primary-600 group-hover:border-[2px]',
disabled && 'group-hover:border-transparent',
)}
className='shrink-0 mr-2'
checked={checkedIds.has(current.page_id)}
disabled={disabled}
onCheck={() => {
@@ -135,7 +139,7 @@ const ItemComponent = ({ index, style, data }: ListChildComponentProps<{
src={current.page_icon}
/>
<div
className='grow text-sm font-medium text-gray-700 truncate'
className='grow text-[13px] leading-4 font-medium text-text-secondary truncate'
title={current.page_name}
>
{current.page_name}
@@ -143,7 +147,9 @@ const ItemComponent = ({ index, style, data }: ListChildComponentProps<{
{
canPreview && (
<div
className='shrink-0 hidden group-hover:flex items-center ml-1 px-2 h-6 rounded-md text-xs font-medium text-gray-500 cursor-pointer hover:bg-gray-50 hover:text-gray-700'
className='shrink-0 hidden group-hover:flex items-center ml-1 px-2 h-6 rounded-md text-xs leading-4 font-medium text-components-button-secondary-text
cursor-pointer bg-components-button-secondary-bg border-[0.5px] border-components-button-secondary-border shadow-xs shadow-shadow-shadow-3
backdrop-blur-[10px] hover:bg-components-button-secondary-bg-hover hover:border-components-button-secondary-border-hover'
onClick={() => handlePreview(index)}>
{t('common.dataSource.notion.selector.preview')}
</div>
@@ -152,7 +158,7 @@ const ItemComponent = ({ index, style, data }: ListChildComponentProps<{
{
searchValue && (
<div
className='shrink-0 ml-1 max-w-[120px] text-xs text-gray-400 truncate'
className='shrink-0 ml-1 max-w-[120px] text-xs text-text-quaternary truncate'
title={breadCrumbs.join(' / ')}
>
{breadCrumbs.join(' / ')}
@@ -278,7 +284,7 @@ const PageSelector = ({
if (!currentDataList.length) {
return (
<div className='flex items-center justify-center h-[296px] text-[13px] text-gray-500'>
<div className='flex items-center justify-center h-[296px] text-[13px] text-text-tertiary'>
{t('common.dataSource.notion.selector.noSearchResult')}
</div>
)

Some files were not shown because too many files have changed in this diff.