Mirror of https://github.com/langgenius/dify.git (synced 2026-01-03 13:07:19 +00:00)

Compare commits: refactor/w... → 0.15.7 (32 commits)
| SHA1 |
|---|
| 5f7f851b17 |
| 559ab46ee1 |
| df98223c8c |
| 144f9507f8 |
| 2e097a1ac0 |
| 9f7d8a981f |
| 40b31bafd5 |
| d38a2c95fb |
| 7d18e2a0ef |
| 024f242251 |
| bfdce78ca5 |
| 00c2258352 |
| a1b3d41712 |
| b26e20fe34 |
| 161ff432f1 |
| 99a9def623 |
| fe1846c437 |
| 8e75eb5c63 |
| 970508fcb6 |
| 9283a5414f |
| 2a2a0e9be9 |
| 061a765b7d |
| acd7fead87 |
| bbb080d5b2 |
| c01d8a70f3 |
| 1ca15989e0 |
| 8b5a3a9424 |
| 42ddcf1edd |
| 21561df10f |
| 0e33a3aa5f |
| d3895bcd6b |
| eeb390650b |
4 .github/workflows/build-push.yml (vendored)

@@ -5,8 +5,8 @@ on:
  branches:
    - "main"
    - "deploy/dev"
  release:
    types: [published]
  tags:
    - "*"

concurrency:
  group: build-push-${{ github.head_ref || github.run_id }}

3 .markdownlint.json (new file)

@@ -0,0 +1,3 @@
{
  "MD024": false
}
32 CHANGELOG.md (new file)

@@ -0,0 +1,32 @@
# Changelog

All notable changes to Dify will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.15.7] - 2025-04-27

### Added

- Added support for GPT-4.1 in model providers (#18912)
- Added support for Amazon Bedrock DeepSeek-R1 model (#18908)
- Added support for Amazon Bedrock Claude Sonnet 3.7 model (#18788)
- Refined version compatibility logic in app DSL service

### Fixed

- Fixed issue with creating apps from template categories (#18807, #18868)
- Fixed DSL version check when creating apps from explore templates (#18872, #18878)

## [0.15.6] - 2025-04-22

### Security

- Fixed clickjacking vulnerability (#18552)
- Fixed reset password security issue (#18366)
- Updated reset password token when email code verification succeeds (#18362)

### Fixed

- Fixed Vertex AI Gemini 2.0 Flash 001 schema (#18405)
@@ -430,4 +430,7 @@ CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# Lockout duration in seconds
LOGIN_LOCKOUT_DURATION=86400

# Prevent Clickjacking
ALLOW_EMBED=false

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

    CURRENT_VERSION: str = Field(
        description="Dify version",
        default="0.15.3",
        default="0.15.7",
    )

    COMMIT_SHA: str = Field(
@@ -8,7 +8,7 @@ from constants.languages import languages
from controllers.console import api
from controllers.console.auth.error import EmailCodeError, InvalidEmailError, InvalidTokenError, PasswordMismatchError
from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError
from controllers.console.wraps import setup_required
from controllers.console.wraps import email_password_login_enabled, setup_required
from events.tenant_event import tenant_was_created
from extensions.ext_database import db
from libs.helper import email, extract_remote_ip

@@ -22,6 +22,7 @@ from services.feature_service import FeatureService

class ForgotPasswordSendEmailApi(Resource):
    @setup_required
    @email_password_login_enabled
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("email", type=email, required=True, location="json")

@@ -53,6 +54,7 @@ class ForgotPasswordSendEmailApi(Resource):

class ForgotPasswordCheckApi(Resource):
    @setup_required
    @email_password_login_enabled
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("email", type=str, required=True, location="json")

@@ -72,11 +74,20 @@ class ForgotPasswordCheckApi(Resource):
        if args["code"] != token_data.get("code"):
            raise EmailCodeError()

        return {"is_valid": True, "email": token_data.get("email")}
        # Verified, revoke the first token
        AccountService.revoke_reset_password_token(args["token"])

        # Refresh token data by generating a new token
        _, new_token = AccountService.generate_reset_password_token(
            user_email, code=args["code"], additional_data={"phase": "reset"}
        )

        return {"is_valid": True, "email": token_data.get("email"), "token": new_token}


class ForgotPasswordResetApi(Resource):
    @setup_required
    @email_password_login_enabled
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("token", type=str, required=True, nullable=False, location="json")

@@ -95,6 +106,9 @@ class ForgotPasswordResetApi(Resource):

        if reset_data is None:
            raise InvalidTokenError()
        # Must use token in reset phase
        if reset_data.get("phase", "") != "reset":
            raise InvalidTokenError()

        AccountService.revoke_reset_password_token(token)
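The hunks above split password reset into two phases: once the emailed code is verified, the first token is revoked and a fresh token tagged with phase "reset" is issued, and the reset endpoint only accepts a token in that phase. The sketch below illustrates the same two-phase, single-use flow with a hypothetical in-memory store standing in for Dify's TokenManager; the names and the storage are assumptions, not the project's API.

```python
import secrets

# Hypothetical in-memory store standing in for Dify's TokenManager (normally Redis-backed).
_tokens: dict[str, dict] = {}


def issue_token(email: str, code: str, phase: str) -> str:
    token = secrets.token_urlsafe(16)
    _tokens[token] = {"email": email, "code": code, "phase": phase}
    return token


def check_code(token: str, code: str) -> str:
    data = _tokens.pop(token, None)  # revoke the emailed token: it is single use
    if data is None or data["code"] != code:
        raise ValueError("invalid token or code")
    # Issue a second token that is only valid for the actual password reset.
    return issue_token(data["email"], code, phase="reset")


def reset_password(token: str, new_password: str) -> None:
    data = _tokens.pop(token, None)  # also single use
    if data is None or data.get("phase") != "reset":
        raise ValueError("token is not in the reset phase")
    print(f"password for {data['email']} updated")


token_from_email = issue_token("user@example.com", code="123456", phase="email")
reset_token = check_code(token_from_email, "123456")
reset_password(reset_token, "new-secret")
```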
@@ -22,7 +22,7 @@ from controllers.console.error import (
    EmailSendIpLimitError,
    NotAllowedCreateWorkspace,
)
from controllers.console.wraps import setup_required
from controllers.console.wraps import email_password_login_enabled, setup_required
from events.tenant_event import tenant_was_created
from libs.helper import email, extract_remote_ip
from libs.password import valid_password

@@ -38,6 +38,7 @@ class LoginApi(Resource):
    """Resource for user login."""

    @setup_required
    @email_password_login_enabled
    def post(self):
        """Authenticate user and login."""
        parser = reqparse.RequestParser()

@@ -110,6 +111,7 @@ class LogoutApi(Resource):

class ResetPasswordSendEmailApi(Resource):
    @setup_required
    @email_password_login_enabled
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("email", type=email, required=True, location="json")
@@ -154,3 +154,16 @@ def enterprise_license_required(view):
        return view(*args, **kwargs)

    return decorated


def email_password_login_enabled(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        features = FeatureService.get_system_features()
        if features.enable_email_password_login:
            return view(*args, **kwargs)

        # otherwise, return 403
        abort(403)

    return decorated
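The new `email_password_login_enabled` decorator consults the system feature flags and rejects the request when email/password login is switched off. Below is a minimal, framework-free sketch of the same gating pattern; the `_Features` stand-in and the `PermissionError` are illustrative assumptions replacing `FeatureService.get_system_features()` and Flask's `abort(403)`.

```python
from functools import wraps


class _Features:
    # Stand-in for FeatureService.get_system_features(); flip to True to allow logins.
    enable_email_password_login = False


def email_password_login_enabled(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        if _Features.enable_email_password_login:
            return view(*args, **kwargs)
        # Mirrors abort(403): refuse the request when the feature is disabled.
        raise PermissionError("email/password login is disabled (HTTP 403)")

    return decorated


@email_password_login_enabled
def post_login(payload: dict) -> dict:
    return {"ok": True, "user": payload["email"]}


try:
    post_login({"email": "user@example.com"})
except PermissionError as exc:
    print(exc)
```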
@@ -104,7 +104,6 @@ class CotAgentRunner(BaseAgentRunner, ABC):

            # recalc llm max tokens
            prompt_messages = self._organize_prompt_messages()
            self.recalc_llm_max_tokens(self.model_config, prompt_messages)
            # invoke model
            chunks = model_instance.invoke_llm(
                prompt_messages=prompt_messages,

@@ -84,7 +84,6 @@ class FunctionCallAgentRunner(BaseAgentRunner):

        # recalc llm max tokens
        prompt_messages = self._organize_prompt_messages()
        self.recalc_llm_max_tokens(self.model_config, prompt_messages)
        # invoke model
        chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
@@ -55,20 +55,6 @@ class AgentChatAppRunner(AppRunner):
        query = application_generate_entity.query
        files = application_generate_entity.files

        # Pre-calculate the number of tokens of the prompt messages,
        # and return the rest number of tokens by model context token size limit and max token size limit.
        # If the rest number of tokens is not enough, raise exception.
        # Include: prompt template, inputs, query(optional), files(optional)
        # Not Include: memory, external data, dataset context
        self.get_pre_calculate_rest_tokens(
            app_record=app_record,
            model_config=application_generate_entity.model_conf,
            prompt_template_entity=app_config.prompt_template,
            inputs=inputs,
            files=files,
            query=query,
        )

        memory = None
        if application_generate_entity.conversation_id:
            # get memory of conversation (read-only)
@@ -15,10 +15,8 @@ from core.app.features.annotation_reply.annotation_reply import AnnotationReplyF
|
||||
from core.app.features.hosting_moderation.hosting_moderation import HostingModerationFeature
|
||||
from core.external_data_tool.external_data_fetch import ExternalDataFetch
|
||||
from core.memory.token_buffer_memory import TokenBufferMemory
|
||||
from core.model_manager import ModelInstance
|
||||
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
|
||||
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
|
||||
from core.model_runtime.entities.model_entities import ModelPropertyKey
|
||||
from core.model_runtime.errors.invoke import InvokeBadRequestError
|
||||
from core.moderation.input_moderation import InputModeration
|
||||
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
|
||||
@@ -31,106 +29,6 @@ if TYPE_CHECKING:
|
||||
|
||||
|
||||
class AppRunner:
|
||||
def get_pre_calculate_rest_tokens(
|
||||
self,
|
||||
app_record: App,
|
||||
model_config: ModelConfigWithCredentialsEntity,
|
||||
prompt_template_entity: PromptTemplateEntity,
|
||||
inputs: Mapping[str, str],
|
||||
files: Sequence["File"],
|
||||
query: Optional[str] = None,
|
||||
) -> int:
|
||||
"""
|
||||
Get pre calculate rest tokens
|
||||
:param app_record: app record
|
||||
:param model_config: model config entity
|
||||
:param prompt_template_entity: prompt template entity
|
||||
:param inputs: inputs
|
||||
:param files: files
|
||||
:param query: query
|
||||
:return:
|
||||
"""
|
||||
# Invoke model
|
||||
model_instance = ModelInstance(
|
||||
provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
|
||||
)
|
||||
|
||||
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
|
||||
|
||||
max_tokens = 0
|
||||
for parameter_rule in model_config.model_schema.parameter_rules:
|
||||
if parameter_rule.name == "max_tokens" or (
|
||||
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
|
||||
):
|
||||
max_tokens = (
|
||||
model_config.parameters.get(parameter_rule.name)
|
||||
or model_config.parameters.get(parameter_rule.use_template or "")
|
||||
) or 0
|
||||
|
||||
if model_context_tokens is None:
|
||||
return -1
|
||||
|
||||
if max_tokens is None:
|
||||
max_tokens = 0
|
||||
|
||||
# get prompt messages without memory and context
|
||||
prompt_messages, stop = self.organize_prompt_messages(
|
||||
app_record=app_record,
|
||||
model_config=model_config,
|
||||
prompt_template_entity=prompt_template_entity,
|
||||
inputs=inputs,
|
||||
files=files,
|
||||
query=query,
|
||||
)
|
||||
|
||||
prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
|
||||
|
||||
rest_tokens: int = model_context_tokens - max_tokens - prompt_tokens
|
||||
if rest_tokens < 0:
|
||||
raise InvokeBadRequestError(
|
||||
"Query or prefix prompt is too long, you can reduce the prefix prompt, "
|
||||
"or shrink the max token, or switch to a llm with a larger token limit size."
|
||||
)
|
||||
|
||||
return rest_tokens
|
||||
|
||||
def recalc_llm_max_tokens(
|
||||
self, model_config: ModelConfigWithCredentialsEntity, prompt_messages: list[PromptMessage]
|
||||
):
|
||||
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
|
||||
model_instance = ModelInstance(
|
||||
provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
|
||||
)
|
||||
|
||||
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
|
||||
|
||||
max_tokens = 0
|
||||
for parameter_rule in model_config.model_schema.parameter_rules:
|
||||
if parameter_rule.name == "max_tokens" or (
|
||||
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
|
||||
):
|
||||
max_tokens = (
|
||||
model_config.parameters.get(parameter_rule.name)
|
||||
or model_config.parameters.get(parameter_rule.use_template or "")
|
||||
) or 0
|
||||
|
||||
if model_context_tokens is None:
|
||||
return -1
|
||||
|
||||
if max_tokens is None:
|
||||
max_tokens = 0
|
||||
|
||||
prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
|
||||
|
||||
if prompt_tokens + max_tokens > model_context_tokens:
|
||||
max_tokens = max(model_context_tokens - prompt_tokens, 16)
|
||||
|
||||
for parameter_rule in model_config.model_schema.parameter_rules:
|
||||
if parameter_rule.name == "max_tokens" or (
|
||||
parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
|
||||
):
|
||||
model_config.parameters[parameter_rule.name] = max_tokens
|
||||
|
||||
def organize_prompt_messages(
|
||||
self,
|
||||
app_record: App,
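For context on the `recalc_llm_max_tokens` logic being removed above: it clamped the requested completion budget so that prompt tokens plus `max_tokens` never exceeded the model's context window, with a floor of 16 tokens. A minimal sketch of that arithmetic in isolation (the values are illustrative):

```python
def clamp_max_tokens(context_size: int, prompt_tokens: int, max_tokens: int) -> int:
    """Shrink max_tokens so prompt + completion fits the context window (floor of 16)."""
    if prompt_tokens + max_tokens > context_size:
        return max(context_size - prompt_tokens, 16)
    return max_tokens


# An 8192-token context with a 7000-token prompt leaves 1192 tokens to generate.
print(clamp_max_tokens(context_size=8192, prompt_tokens=7000, max_tokens=4096))  # 1192
```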
@@ -50,20 +50,6 @@ class ChatAppRunner(AppRunner):
|
||||
query = application_generate_entity.query
|
||||
files = application_generate_entity.files
|
||||
|
||||
# Pre-calculate the number of tokens of the prompt messages,
|
||||
# and return the rest number of tokens by model context token size limit and max token size limit.
|
||||
# If the rest number of tokens is not enough, raise exception.
|
||||
# Include: prompt template, inputs, query(optional), files(optional)
|
||||
# Not Include: memory, external data, dataset context
|
||||
self.get_pre_calculate_rest_tokens(
|
||||
app_record=app_record,
|
||||
model_config=application_generate_entity.model_conf,
|
||||
prompt_template_entity=app_config.prompt_template,
|
||||
inputs=inputs,
|
||||
files=files,
|
||||
query=query,
|
||||
)
|
||||
|
||||
memory = None
|
||||
if application_generate_entity.conversation_id:
|
||||
# get memory of conversation (read-only)
|
||||
@@ -194,9 +180,6 @@ class ChatAppRunner(AppRunner):
|
||||
if hosting_moderation_result:
|
||||
return
|
||||
|
||||
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
|
||||
self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
|
||||
|
||||
# Invoke model
|
||||
model_instance = ModelInstance(
|
||||
provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
|
||||
|
||||
@@ -43,20 +43,6 @@ class CompletionAppRunner(AppRunner):
|
||||
query = application_generate_entity.query
|
||||
files = application_generate_entity.files
|
||||
|
||||
# Pre-calculate the number of tokens of the prompt messages,
|
||||
# and return the rest number of tokens by model context token size limit and max token size limit.
|
||||
# If the rest number of tokens is not enough, raise exception.
|
||||
# Include: prompt template, inputs, query(optional), files(optional)
|
||||
# Not Include: memory, external data, dataset context
|
||||
self.get_pre_calculate_rest_tokens(
|
||||
app_record=app_record,
|
||||
model_config=application_generate_entity.model_conf,
|
||||
prompt_template_entity=app_config.prompt_template,
|
||||
inputs=inputs,
|
||||
files=files,
|
||||
query=query,
|
||||
)
|
||||
|
||||
# organize all inputs and template to prompt messages
|
||||
# Include: prompt template, inputs, query(optional), files(optional)
|
||||
prompt_messages, stop = self.organize_prompt_messages(
|
||||
@@ -152,9 +138,6 @@ class CompletionAppRunner(AppRunner):
|
||||
if hosting_moderation_result:
|
||||
return
|
||||
|
||||
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
|
||||
self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
|
||||
|
||||
# Invoke model
|
||||
model_instance = ModelInstance(
|
||||
provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
|
||||
|
||||
@@ -26,7 +26,7 @@ class TokenBufferMemory:
        self.model_instance = model_instance

    def get_history_prompt_messages(
        self, max_token_limit: int = 2000, message_limit: Optional[int] = None
        self, max_token_limit: int = 100000, message_limit: Optional[int] = None
    ) -> Sequence[PromptMessage]:
        """
        Get history prompt messages.
@@ -0,0 +1,115 @@
|
||||
model: us.anthropic.claude-3-7-sonnet-20250219-v1:0
|
||||
label:
|
||||
en_US: Claude 3.7 Sonnet(US.Cross Region Inference)
|
||||
icon: icon_s_en.svg
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- vision
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 200000
|
||||
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
|
||||
parameter_rules:
|
||||
- name: enable_cache
|
||||
label:
|
||||
zh_Hans: 启用提示缓存
|
||||
en_US: Enable Prompt Cache
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
help:
|
||||
zh_Hans: 启用提示缓存可以提高性能并降低成本。Claude 3.7 Sonnet支持在system、messages和tools字段中使用缓存检查点。
|
||||
en_US: Enable prompt caching to improve performance and reduce costs. Claude 3.7 Sonnet supports cache checkpoints in system, messages, and tools fields.
|
||||
- name: reasoning_type
|
||||
label:
|
||||
zh_Hans: 推理配置
|
||||
en_US: Reasoning Type
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
placeholder:
|
||||
zh_Hans: 设置推理配置
|
||||
en_US: Set reasoning configuration
|
||||
help:
|
||||
zh_Hans: 控制模型的推理能力。启用时,temperature将固定为1且top_p将被禁用。
|
||||
en_US: Controls the model's reasoning capability. When enabled, temperature will be fixed to 1 and top_p will be disabled.
|
||||
- name: reasoning_budget
|
||||
show_on:
|
||||
- variable: reasoning_type
|
||||
value: true
|
||||
label:
|
||||
zh_Hans: 推理预算
|
||||
en_US: Reasoning Budget
|
||||
type: int
|
||||
default: 1024
|
||||
min: 0
|
||||
max: 128000
|
||||
help:
|
||||
zh_Hans: 推理的预算限制(最小1024),必须小于max_tokens。仅在推理类型为enabled时可用。
|
||||
en_US: Budget limit for reasoning (minimum 1024), must be less than max_tokens. Only available when reasoning type is enabled.
|
||||
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
label:
|
||||
zh_Hans: 最大token数
|
||||
en_US: Max Tokens
|
||||
type: int
|
||||
default: 8192
|
||||
min: 1
|
||||
max: 128000
|
||||
help:
|
||||
zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
|
||||
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
required: false
|
||||
label:
|
||||
zh_Hans: 模型温度
|
||||
en_US: Model Temperature
|
||||
type: float
|
||||
default: 1
|
||||
min: 0.0
|
||||
max: 1.0
|
||||
help:
|
||||
zh_Hans: 生成内容的随机性。当推理功能启用时,该值将被固定为1。
|
||||
en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
|
||||
- name: top_p
|
||||
show_on:
|
||||
- variable: reasoning_type
|
||||
value: disabled
|
||||
use_template: top_p
|
||||
label:
|
||||
zh_Hans: Top P
|
||||
en_US: Top P
|
||||
required: false
|
||||
type: float
|
||||
default: 0.999
|
||||
min: 0.000
|
||||
max: 1.000
|
||||
help:
|
||||
zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
|
||||
en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
|
||||
- name: top_k
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
required: false
|
||||
type: int
|
||||
default: 0
|
||||
min: 0
|
||||
# tip docs from aws has error, max value is 500
|
||||
max: 500
|
||||
help:
|
||||
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
|
||||
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
|
||||
- name: response_format
|
||||
use_template: response_format
|
||||
pricing:
|
||||
input: '0.003'
|
||||
output: '0.015'
|
||||
unit: '0.001'
|
||||
currency: USD
|
||||
@@ -58,6 +58,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
    # TODO There is invoke issue: context limit on Cohere Model, will add them after fixed.
    CONVERSE_API_ENABLED_MODEL_INFO = [
        {"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "us.deepseek", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
        {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
@@ -0,0 +1,63 @@
|
||||
model: us.deepseek.r1-v1:0
|
||||
label:
|
||||
en_US: DeepSeek-R1(US.Cross Region Inference)
|
||||
icon: icon_s_en.svg
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- vision
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
label:
|
||||
zh_Hans: 最大token数
|
||||
en_US: Max Tokens
|
||||
type: int
|
||||
default: 8192
|
||||
min: 1
|
||||
max: 128000
|
||||
help:
|
||||
zh_Hans: 停止前生成的最大令牌数。
|
||||
en_US: The maximum number of tokens to generate before stopping.
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
required: false
|
||||
label:
|
||||
zh_Hans: 模型温度
|
||||
en_US: Model Temperature
|
||||
type: float
|
||||
default: 1
|
||||
min: 0.0
|
||||
max: 1.0
|
||||
help:
|
||||
zh_Hans: 生成内容的随机性。当推理功能启用时,该值将被固定为1。
|
||||
en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
|
||||
- name: top_p
|
||||
show_on:
|
||||
- variable: reasoning_type
|
||||
value: disabled
|
||||
use_template: top_p
|
||||
label:
|
||||
zh_Hans: Top P
|
||||
en_US: Top P
|
||||
required: false
|
||||
type: float
|
||||
default: 0.999
|
||||
min: 0.000
|
||||
max: 1.000
|
||||
help:
|
||||
zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
|
||||
en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
|
||||
- name: response_format
|
||||
use_template: response_format
|
||||
pricing:
|
||||
input: '0.001'
|
||||
output: '0.005'
|
||||
unit: '0.001'
|
||||
currency: USD
|
||||
@@ -19,8 +19,8 @@ class GoogleProvider(ModelProvider):
        try:
            model_instance = self.get_model_instance(ModelType.LLM)

            # Use `gemini-pro` model for validate,
            model_instance.validate_credentials(model="gemini-pro", credentials=credentials)
            # Use `gemini-2.0-flash` model for validate,
            model_instance.validate_credentials(model="gemini-2.0-flash", credentials=credentials)
        except CredentialsValidateFailedError as ex:
            raise ex
        except Exception as ex:

@@ -19,5 +19,3 @@
- gemini-exp-1206
- gemini-exp-1121
- gemini-exp-1114
- gemini-pro
- gemini-pro-vision
@@ -1,35 +0,0 @@
|
||||
model: gemini-pro-vision
|
||||
label:
|
||||
en_US: Gemini Pro Vision
|
||||
model_type: llm
|
||||
features:
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 12288
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
- name: top_k
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
type: int
|
||||
help:
|
||||
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
|
||||
en_US: Only sample from the top K options for each subsequent token.
|
||||
required: false
|
||||
- name: max_tokens_to_sample
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
default: 4096
|
||||
min: 1
|
||||
max: 4096
|
||||
pricing:
|
||||
input: '0.00'
|
||||
output: '0.00'
|
||||
unit: '0.000001'
|
||||
currency: USD
|
||||
deprecated: true
|
||||
@@ -1,39 +0,0 @@
|
||||
model: gemini-pro
|
||||
label:
|
||||
en_US: Gemini Pro
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 30720
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
- name: top_k
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
type: int
|
||||
help:
|
||||
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
|
||||
en_US: Only sample from the top K options for each subsequent token.
|
||||
required: false
|
||||
- name: max_tokens_to_sample
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
default: 2048
|
||||
min: 1
|
||||
max: 2048
|
||||
- name: response_format
|
||||
use_template: response_format
|
||||
pricing:
|
||||
input: '0.00'
|
||||
output: '0.00'
|
||||
unit: '0.000001'
|
||||
currency: USD
|
||||
deprecated: true
|
||||
@@ -1,3 +1,4 @@
- gpt-4.1
- o1
- o1-2024-12-17
- o1-mini
@@ -0,0 +1,60 @@
|
||||
model: gpt-4.1
|
||||
label:
|
||||
zh_Hans: gpt-4.1
|
||||
en_US: gpt-4.1
|
||||
model_type: llm
|
||||
features:
|
||||
- multi-tool-call
|
||||
- agent-thought
|
||||
- stream-tool-call
|
||||
- vision
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 1047576
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
- name: presence_penalty
|
||||
use_template: presence_penalty
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
default: 512
|
||||
min: 1
|
||||
max: 32768
|
||||
- name: reasoning_effort
|
||||
label:
|
||||
zh_Hans: 推理工作
|
||||
en_US: Reasoning Effort
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 限制推理模型的推理工作
|
||||
en_US: Constrains effort on reasoning for reasoning models
|
||||
required: false
|
||||
options:
|
||||
- low
|
||||
- medium
|
||||
- high
|
||||
- name: response_format
|
||||
label:
|
||||
zh_Hans: 回复格式
|
||||
en_US: Response Format
|
||||
type: string
|
||||
help:
|
||||
zh_Hans: 指定模型必须输出的格式
|
||||
en_US: specifying the format that the model must output
|
||||
required: false
|
||||
options:
|
||||
- text
|
||||
- json_object
|
||||
- json_schema
|
||||
- name: json_schema
|
||||
use_template: json_schema
|
||||
pricing:
|
||||
input: '2.00'
|
||||
output: '8.00'
|
||||
unit: '0.000001'
|
||||
currency: USD
|
||||
@@ -1057,7 +1057,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
            model = "gpt-4o"

        try:
            encoding = tiktoken.encoding_for_model(model)
            encoding = tiktoken.get_encoding(model)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
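For reference, the usual tiktoken pattern behind this hunk is to resolve an encoding per model name and fall back to `cl100k_base` when the model is unknown. A small, self-contained sketch of that fallback (the helper name is an assumption, not Dify's code):

```python
import tiktoken


def encoding_for(model: str) -> tiktoken.Encoding:
    try:
        # Maps a model name (e.g. "gpt-4o") to its tokenizer.
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the cl100k_base tokenizer.
        return tiktoken.get_encoding("cl100k_base")


enc = encoding_for("gpt-4o")
print(len(enc.encode("hello world")))  # token count for the string
```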
@@ -5,11 +5,6 @@ model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
- vision
|
||||
- tool-call
|
||||
- stream-tool-call
|
||||
- document
|
||||
- video
|
||||
- audio
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 1048576
|
||||
@@ -20,20 +15,21 @@ parameter_rules:
|
||||
use_template: top_p
|
||||
- name: top_k
|
||||
label:
|
||||
zh_Hans: 取样数量
|
||||
en_US: Top k
|
||||
type: int
|
||||
help:
|
||||
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
|
||||
en_US: Only sample from the top K options for each subsequent token.
|
||||
required: false
|
||||
- name: presence_penalty
|
||||
use_template: presence_penalty
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
- name: max_output_tokens
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
default: 8192
|
||||
min: 1
|
||||
max: 8192
|
||||
- name: json_schema
|
||||
use_template: json_schema
|
||||
pricing:
|
||||
input: '0.00'
|
||||
output: '0.00'
|
||||
|
||||
@@ -77,5 +77,4 @@
- onebot
- regex
- trello
- vanna
- fal

Binary file not shown. (Before size: 4.5 KiB)
@@ -1,134 +0,0 @@
|
||||
from typing import Any, Union
|
||||
|
||||
from vanna.remote import VannaDefault # type: ignore
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.errors import ToolProviderCredentialValidationError
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class VannaTool(BuiltinTool):
|
||||
def _invoke(
|
||||
self, user_id: str, tool_parameters: dict[str, Any]
|
||||
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
|
||||
"""
|
||||
invoke tools
|
||||
"""
|
||||
# Ensure runtime and credentials
|
||||
if not self.runtime or not self.runtime.credentials:
|
||||
raise ToolProviderCredentialValidationError("Tool runtime or credentials are missing")
|
||||
api_key = self.runtime.credentials.get("api_key", None)
|
||||
if not api_key:
|
||||
raise ToolProviderCredentialValidationError("Please input api key")
|
||||
|
||||
model = tool_parameters.get("model", "")
|
||||
if not model:
|
||||
return self.create_text_message("Please input RAG model")
|
||||
|
||||
prompt = tool_parameters.get("prompt", "")
|
||||
if not prompt:
|
||||
return self.create_text_message("Please input prompt")
|
||||
|
||||
url = tool_parameters.get("url", "")
|
||||
if not url:
|
||||
return self.create_text_message("Please input URL/Host/DSN")
|
||||
|
||||
db_name = tool_parameters.get("db_name", "")
|
||||
username = tool_parameters.get("username", "")
|
||||
password = tool_parameters.get("password", "")
|
||||
port = tool_parameters.get("port", 0)
|
||||
|
||||
base_url = self.runtime.credentials.get("base_url", None)
|
||||
vn = VannaDefault(model=model, api_key=api_key, config={"endpoint": base_url})
|
||||
|
||||
db_type = tool_parameters.get("db_type", "")
|
||||
if db_type in {"Postgres", "MySQL", "Hive", "ClickHouse"}:
|
||||
if not db_name:
|
||||
return self.create_text_message("Please input database name")
|
||||
if not username:
|
||||
return self.create_text_message("Please input username")
|
||||
if port < 1:
|
||||
return self.create_text_message("Please input port")
|
||||
|
||||
schema_sql = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS"
|
||||
match db_type:
|
||||
case "SQLite":
|
||||
schema_sql = "SELECT type, sql FROM sqlite_master WHERE sql is not null"
|
||||
vn.connect_to_sqlite(url)
|
||||
case "Postgres":
|
||||
vn.connect_to_postgres(host=url, dbname=db_name, user=username, password=password, port=port)
|
||||
case "DuckDB":
|
||||
vn.connect_to_duckdb(url=url)
|
||||
case "SQLServer":
|
||||
vn.connect_to_mssql(url)
|
||||
case "MySQL":
|
||||
vn.connect_to_mysql(host=url, dbname=db_name, user=username, password=password, port=port)
|
||||
case "Oracle":
|
||||
vn.connect_to_oracle(user=username, password=password, dsn=url)
|
||||
case "Hive":
|
||||
vn.connect_to_hive(host=url, dbname=db_name, user=username, password=password, port=port)
|
||||
case "ClickHouse":
|
||||
vn.connect_to_clickhouse(host=url, dbname=db_name, user=username, password=password, port=port)
|
||||
|
||||
enable_training = tool_parameters.get("enable_training", False)
|
||||
reset_training_data = tool_parameters.get("reset_training_data", False)
|
||||
if enable_training:
|
||||
if reset_training_data:
|
||||
existing_training_data = vn.get_training_data()
|
||||
if len(existing_training_data) > 0:
|
||||
for _, training_data in existing_training_data.iterrows():
|
||||
vn.remove_training_data(training_data["id"])
|
||||
|
||||
ddl = tool_parameters.get("ddl", "")
|
||||
question = tool_parameters.get("question", "")
|
||||
sql = tool_parameters.get("sql", "")
|
||||
memos = tool_parameters.get("memos", "")
|
||||
training_metadata = tool_parameters.get("training_metadata", False)
|
||||
|
||||
if training_metadata:
|
||||
if db_type == "SQLite":
|
||||
df_ddl = vn.run_sql(schema_sql)
|
||||
for ddl in df_ddl["sql"].to_list():
|
||||
vn.train(ddl=ddl)
|
||||
else:
|
||||
df_information_schema = vn.run_sql(schema_sql)
|
||||
plan = vn.get_training_plan_generic(df_information_schema)
|
||||
vn.train(plan=plan)
|
||||
|
||||
if ddl:
|
||||
vn.train(ddl=ddl)
|
||||
|
||||
if sql:
|
||||
if question:
|
||||
vn.train(question=question, sql=sql)
|
||||
else:
|
||||
vn.train(sql=sql)
|
||||
if memos:
|
||||
vn.train(documentation=memos)
|
||||
|
||||
#########################################################################################
|
||||
# Due to CVE-2024-5565, we have to disable the chart generation feature
|
||||
# The Vanna library uses a prompt function to present the user with visualized results,
|
||||
# it is possible to alter the prompt using prompt injection and run arbitrary Python code
|
||||
# instead of the intended visualization code.
|
||||
# Specifically - allowing external input to the library’s “ask” method
|
||||
# with "visualize" set to True (default behavior) leads to remote code execution.
|
||||
# Affected versions: <= 0.5.5
|
||||
#########################################################################################
|
||||
allow_llm_to_see_data = tool_parameters.get("allow_llm_to_see_data", False)
|
||||
res = vn.ask(
|
||||
prompt, print_results=False, auto_train=True, visualize=False, allow_llm_to_see_data=allow_llm_to_see_data
|
||||
)
|
||||
|
||||
result = []
|
||||
|
||||
if res is not None:
|
||||
result.append(self.create_text_message(res[0]))
|
||||
if len(res) > 1 and res[1] is not None:
|
||||
result.append(self.create_text_message(res[1].to_markdown()))
|
||||
if len(res) > 2 and res[2] is not None:
|
||||
result.append(
|
||||
self.create_blob_message(blob=res[2].to_image(format="svg"), meta={"mime_type": "image/svg+xml"})
|
||||
)
|
||||
|
||||
return result
|
||||
@@ -1,213 +0,0 @@
|
||||
identity:
|
||||
name: vanna
|
||||
author: QCTC
|
||||
label:
|
||||
en_US: Vanna.AI
|
||||
zh_Hans: Vanna.AI
|
||||
description:
|
||||
human:
|
||||
en_US: The fastest way to get actionable insights from your database just by asking questions.
|
||||
zh_Hans: 一个基于大模型和RAG的Text2SQL工具。
|
||||
llm: A tool for converting text to SQL.
|
||||
parameters:
|
||||
- name: prompt
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Prompt
|
||||
zh_Hans: 提示词
|
||||
pt_BR: Prompt
|
||||
human_description:
|
||||
en_US: used for generating SQL
|
||||
zh_Hans: 用于生成SQL
|
||||
llm_description: key words for generating SQL
|
||||
form: llm
|
||||
- name: model
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: RAG Model
|
||||
zh_Hans: RAG模型
|
||||
human_description:
|
||||
en_US: RAG Model for your database DDL
|
||||
zh_Hans: 存储数据库训练数据的RAG模型
|
||||
llm_description: RAG Model for generating SQL
|
||||
form: llm
|
||||
- name: db_type
|
||||
type: select
|
||||
required: true
|
||||
options:
|
||||
- value: SQLite
|
||||
label:
|
||||
en_US: SQLite
|
||||
zh_Hans: SQLite
|
||||
- value: Postgres
|
||||
label:
|
||||
en_US: Postgres
|
||||
zh_Hans: Postgres
|
||||
- value: DuckDB
|
||||
label:
|
||||
en_US: DuckDB
|
||||
zh_Hans: DuckDB
|
||||
- value: SQLServer
|
||||
label:
|
||||
en_US: Microsoft SQL Server
|
||||
zh_Hans: 微软 SQL Server
|
||||
- value: MySQL
|
||||
label:
|
||||
en_US: MySQL
|
||||
zh_Hans: MySQL
|
||||
- value: Oracle
|
||||
label:
|
||||
en_US: Oracle
|
||||
zh_Hans: Oracle
|
||||
- value: Hive
|
||||
label:
|
||||
en_US: Hive
|
||||
zh_Hans: Hive
|
||||
- value: ClickHouse
|
||||
label:
|
||||
en_US: ClickHouse
|
||||
zh_Hans: ClickHouse
|
||||
default: SQLite
|
||||
label:
|
||||
en_US: DB Type
|
||||
zh_Hans: 数据库类型
|
||||
human_description:
|
||||
en_US: Database type.
|
||||
zh_Hans: 选择要链接的数据库类型。
|
||||
form: form
|
||||
- name: url
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: URL/Host/DSN
|
||||
zh_Hans: URL/Host/DSN
|
||||
human_description:
|
||||
en_US: Please input depending on DB type, visit https://vanna.ai/docs/ for more specification
|
||||
zh_Hans: 请根据数据库类型,填入对应值,详情参考https://vanna.ai/docs/
|
||||
form: form
|
||||
- name: db_name
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: DB name
|
||||
zh_Hans: 数据库名
|
||||
human_description:
|
||||
en_US: Database name
|
||||
zh_Hans: 数据库名
|
||||
form: form
|
||||
- name: username
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: Username
|
||||
zh_Hans: 用户名
|
||||
human_description:
|
||||
en_US: Username
|
||||
zh_Hans: 用户名
|
||||
form: form
|
||||
- name: password
|
||||
type: secret-input
|
||||
required: false
|
||||
label:
|
||||
en_US: Password
|
||||
zh_Hans: 密码
|
||||
human_description:
|
||||
en_US: Password
|
||||
zh_Hans: 密码
|
||||
form: form
|
||||
- name: port
|
||||
type: number
|
||||
required: false
|
||||
label:
|
||||
en_US: Port
|
||||
zh_Hans: 端口
|
||||
human_description:
|
||||
en_US: Port
|
||||
zh_Hans: 端口
|
||||
form: form
|
||||
- name: ddl
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: Training DDL
|
||||
zh_Hans: 训练DDL
|
||||
human_description:
|
||||
en_US: DDL statements for training data
|
||||
zh_Hans: 用于训练RAG Model的建表语句
|
||||
form: llm
|
||||
- name: question
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: Training Question
|
||||
zh_Hans: 训练问题
|
||||
human_description:
|
||||
en_US: Question-SQL Pairs
|
||||
zh_Hans: Question-SQL中的问题
|
||||
form: llm
|
||||
- name: sql
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: Training SQL
|
||||
zh_Hans: 训练SQL
|
||||
human_description:
|
||||
en_US: SQL queries to your training data
|
||||
zh_Hans: 用于训练RAG Model的SQL语句
|
||||
form: llm
|
||||
- name: memos
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: Training Memos
|
||||
zh_Hans: 训练说明
|
||||
human_description:
|
||||
en_US: Sometimes you may want to add documentation about your business terminology or definitions
|
||||
zh_Hans: 添加更多关于数据库的业务说明
|
||||
form: llm
|
||||
- name: enable_training
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Training Data
|
||||
zh_Hans: 训练数据
|
||||
human_description:
|
||||
en_US: You only need to train once. Do not train again unless you want to add more training data
|
||||
zh_Hans: 训练数据无更新时,训练一次即可
|
||||
form: form
|
||||
- name: reset_training_data
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Reset Training Data
|
||||
zh_Hans: 重置训练数据
|
||||
human_description:
|
||||
en_US: Remove all training data in the current RAG Model
|
||||
zh_Hans: 删除当前RAG Model中的所有训练数据
|
||||
form: form
|
||||
- name: training_metadata
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Training Metadata
|
||||
zh_Hans: 训练元数据
|
||||
human_description:
|
||||
en_US: If enabled, it will attempt to train on the metadata of that database
|
||||
zh_Hans: 是否自动从数据库获取元数据来训练
|
||||
form: form
|
||||
- name: allow_llm_to_see_data
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
label:
|
||||
en_US: Whether to allow the LLM to see the data
|
||||
zh_Hans: 是否允许LLM查看数据
|
||||
human_description:
|
||||
en_US: Whether to allow the LLM to see the data
|
||||
zh_Hans: 是否允许LLM查看数据
|
||||
form: form
|
||||
@@ -1,46 +0,0 @@
|
||||
import re
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from core.tools.errors import ToolProviderCredentialValidationError
|
||||
from core.tools.provider.builtin.vanna.tools.vanna import VannaTool
|
||||
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
|
||||
|
||||
|
||||
class VannaProvider(BuiltinToolProviderController):
|
||||
def _get_protocol_and_main_domain(self, url):
|
||||
parsed_url = urlparse(url)
|
||||
protocol = parsed_url.scheme
|
||||
hostname = parsed_url.hostname
|
||||
port = f":{parsed_url.port}" if parsed_url.port else ""
|
||||
|
||||
# Check if the hostname is an IP address
|
||||
is_ip = re.match(r"^\d{1,3}(\.\d{1,3}){3}$", hostname) is not None
|
||||
|
||||
# Return the full hostname (with port if present) for IP addresses, otherwise return the main domain
|
||||
main_domain = f"{hostname}{port}" if is_ip else ".".join(hostname.split(".")[-2:]) + port
|
||||
return f"{protocol}://{main_domain}"
|
||||
|
||||
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
|
||||
base_url = credentials.get("base_url")
|
||||
if not base_url:
|
||||
base_url = "https://ask.vanna.ai/rpc"
|
||||
else:
|
||||
base_url = base_url.removesuffix("/")
|
||||
credentials["base_url"] = base_url
|
||||
try:
|
||||
VannaTool().fork_tool_runtime(
|
||||
runtime={
|
||||
"credentials": credentials,
|
||||
}
|
||||
).invoke(
|
||||
user_id="",
|
||||
tool_parameters={
|
||||
"model": "chinook",
|
||||
"db_type": "SQLite",
|
||||
"url": f"{self._get_protocol_and_main_domain(credentials['base_url'])}/Chinook.sqlite",
|
||||
"query": "What are the top 10 customers by sales?",
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
raise ToolProviderCredentialValidationError(str(e))
|
||||
@@ -1,35 +0,0 @@
|
||||
identity:
|
||||
author: QCTC
|
||||
name: vanna
|
||||
label:
|
||||
en_US: Vanna.AI
|
||||
zh_Hans: Vanna.AI
|
||||
description:
|
||||
en_US: The fastest way to get actionable insights from your database just by asking questions.
|
||||
zh_Hans: 一个基于大模型和RAG的Text2SQL工具。
|
||||
icon: icon.png
|
||||
tags:
|
||||
- utilities
|
||||
- productivity
|
||||
credentials_for_provider:
|
||||
api_key:
|
||||
type: secret-input
|
||||
required: true
|
||||
label:
|
||||
en_US: API key
|
||||
zh_Hans: API key
|
||||
placeholder:
|
||||
en_US: Please input your API key
|
||||
zh_Hans: 请输入你的 API key
|
||||
pt_BR: Please input your API key
|
||||
help:
|
||||
en_US: Get your API key from Vanna.AI
|
||||
zh_Hans: 从 Vanna.AI 获取你的 API key
|
||||
url: https://vanna.ai/account/profile
|
||||
base_url:
|
||||
type: text-input
|
||||
required: false
|
||||
label:
|
||||
en_US: Vanna.AI Endpoint Base URL
|
||||
placeholder:
|
||||
en_US: https://ask.vanna.ai/rpc
|
||||
@@ -195,7 +195,7 @@ class CodeNode(BaseNode[CodeNodeData]):
            if output_config.type == "object":
                # check if output is object
                if not isinstance(result.get(output_name), dict):
                    if isinstance(result.get(output_name), type(None)):
                    if result.get(output_name) is None:
                        transformed_result[output_name] = None
                    else:
                        raise OutputValidationError(

@@ -223,7 +223,7 @@ class CodeNode(BaseNode[CodeNodeData]):
            elif output_config.type == "array[number]":
                # check if array of number available
                if not isinstance(result[output_name], list):
                    if isinstance(result[output_name], type(None)):
                    if result[output_name] is None:
                        transformed_result[output_name] = None
                    else:
                        raise OutputValidationError(

@@ -244,7 +244,7 @@ class CodeNode(BaseNode[CodeNodeData]):
            elif output_config.type == "array[string]":
                # check if array of string available
                if not isinstance(result[output_name], list):
                    if isinstance(result[output_name], type(None)):
                    if result[output_name] is None:
                        transformed_result[output_name] = None
                    else:
                        raise OutputValidationError(

@@ -265,7 +265,7 @@ class CodeNode(BaseNode[CodeNodeData]):
            elif output_config.type == "array[object]":
                # check if array of object available
                if not isinstance(result[output_name], list):
                    if isinstance(result[output_name], type(None)):
                    if result[output_name] is None:
                        transformed_result[output_name] = None
                    else:
                        raise OutputValidationError(
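The four hunks above replace `isinstance(value, type(None))` with the idiomatic `value is None`. Both checks accept only `None`, but the identity test is clearer and is what linters recommend; a quick illustration:

```python
value = None

# Old style: works, but roundabout.
print(isinstance(value, type(None)))  # True

# New style: identity check against the None singleton.
print(value is None)  # True

zero, empty = 0, ""
print(zero is None, empty is None)  # False False -- falsy values are not None
```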
@@ -968,14 +968,12 @@ def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
    model_config: ModelConfigWithCredentialsEntity,  # TODO(-LAN-): Needs to remove
) -> Sequence[PromptMessage]:
    memory_messages: Sequence[PromptMessage] = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages
66 api/poetry.lock (generated)
@@ -10473,44 +10473,44 @@ client = ["SQLAlchemy (>=1.4,<3)"]
|
||||
|
||||
[[package]]
|
||||
name = "tiktoken"
|
||||
version = "0.8.0"
|
||||
version = "0.9.0"
|
||||
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
|
||||
files = [
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"},
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"},
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"},
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"},
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"},
|
||||
{file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"},
|
||||
{file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"},
|
||||
{file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"},
|
||||
{file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"},
|
||||
{file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"},
|
||||
{file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990"},
|
||||
{file = "tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336"},
|
||||
{file = "tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2"},
|
||||
{file = "tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a"},
|
||||
{file = "tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c6386ca815e7d96ef5b4ac61e0048cd32ca5a92d5781255e13b31381d28667dc"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75f6d5db5bc2c6274b674ceab1615c1778e6416b14705827d19b40e6355f03e0"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e15b16f61e6f4625a57a36496d28dd182a8a60ec20a534c5343ba3cafa156ac7"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebcec91babf21297022882344c3f7d9eed855931466c3311b1ad6b64befb3df"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5fd49e7799579240f03913447c0cdfa1129625ebd5ac440787afc4345990427"},
|
||||
{file = "tiktoken-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:26242ca9dc8b58e875ff4ca078b9a94d2f0813e6a535dcd2205df5d49d927cc7"},
|
||||
{file = "tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -12389,4 +12389,4 @@ cffi = ["cffi (>=1.11)"]

[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "d197cdff507a70323c1d6aca11609188f54970f67715af744fe6def15b7776fd"
content-hash = "0df8aef68385b6596306fd18af317a835023d648eb5028cd57ec463f176e4c0f"
@@ -85,7 +85,7 @@ sentry-sdk = { version = "~1.44.1", extras = ["flask"] }
sqlalchemy = "~2.0.29"
starlette = "0.41.0"
tencentcloud-sdk-python-hunyuan = "~3.0.1294"
tiktoken = "~0.8.0"
tiktoken = "^0.9.0"
tokenizers = "~0.15.0"
transformers = "~4.35.0"
unstructured = { version = "~0.16.1", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] }
@@ -406,10 +406,8 @@ class AccountService:

            raise PasswordResetRateLimitExceededError()

        code = "".join([str(random.randint(0, 9)) for _ in range(6)])
        token = TokenManager.generate_token(
            account=account, email=email, token_type="reset_password", additional_data={"code": code}
        )
        code, token = cls.generate_reset_password_token(account_email, account)

        send_reset_password_mail_task.delay(
            language=language,
            to=account_email,

@@ -418,6 +416,22 @@ class AccountService:
        cls.reset_password_rate_limiter.increment_rate_limit(account_email)
        return token

    @classmethod
    def generate_reset_password_token(
        cls,
        email: str,
        account: Optional[Account] = None,
        code: Optional[str] = None,
        additional_data: dict[str, Any] = {},
    ):
        if not code:
            code = "".join([str(random.randint(0, 9)) for _ in range(6)])
        additional_data["code"] = code
        token = TokenManager.generate_token(
            account=account, email=email, token_type="reset_password", additional_data=additional_data
        )
        return code, token

    @classmethod
    def revoke_reset_password_token(cls, token: str):
        TokenManager.revoke_token(token, "reset_password")
@@ -55,13 +55,19 @@ def _check_version_compatibility(imported_version: str) -> ImportStatus:
|
||||
except version.InvalidVersion:
|
||||
return ImportStatus.FAILED
|
||||
|
||||
# Compare major version and minor version
|
||||
if current_ver.major != imported_ver.major or current_ver.minor != imported_ver.minor:
|
||||
# If imported version is newer than current, always return PENDING
|
||||
if imported_ver > current_ver:
|
||||
return ImportStatus.PENDING
|
||||
|
||||
if current_ver.micro != imported_ver.micro:
|
||||
# If imported version is older than current's major, return PENDING
|
||||
if imported_ver.major < current_ver.major:
|
||||
return ImportStatus.PENDING
|
||||
|
||||
# If imported version is older than current's minor, return COMPLETED_WITH_WARNINGS
|
||||
if imported_ver.minor < current_ver.minor:
|
||||
return ImportStatus.COMPLETED_WITH_WARNINGS
|
||||
|
||||
# If imported version equals or is older than current's micro, return COMPLETED
|
||||
return ImportStatus.COMPLETED
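For reference, the refactored `_check_version_compatibility` above reduces to a small decision table: an imported DSL newer than the current version is always PENDING, an older major version is PENDING, an older minor version is COMPLETED_WITH_WARNINGS, and anything else (same major.minor, equal or older patch) is COMPLETED. Below is a minimal TypeScript sketch of that decision table; the helper names and the plain `major.minor.patch` parsing are illustrative only, the backend itself relies on Python's `packaging.version`.

```ts
type ImportStatus = 'completed' | 'completed-with-warnings' | 'pending' | 'failed'

// Parse a plain "major.minor.patch" string; anything else is treated as invalid.
function parseVersion(v: string): [number, number, number] | null {
  const parts = v.split('.').map(Number)
  if (parts.length !== 3 || parts.some(n => Number.isNaN(n)))
    return null
  return [parts[0], parts[1], parts[2]]
}

// Mirrors the decision order of _check_version_compatibility above.
function checkVersionCompatibility(imported: string, current: string): ImportStatus {
  const imp = parseVersion(imported)
  const cur = parseVersion(current)
  if (!imp || !cur)
    return 'failed'
  const importedIsNewer
    = imp[0] > cur[0]
    || (imp[0] === cur[0] && imp[1] > cur[1])
    || (imp[0] === cur[0] && imp[1] === cur[1] && imp[2] > cur[2])
  if (importedIsNewer)
    return 'pending'                 // newer DSL: ask the user to confirm
  if (imp[0] < cur[0])
    return 'pending'                 // older major version: ask the user to confirm
  if (imp[1] < cur[1])
    return 'completed-with-warnings' // older minor version: import, but warn
  return 'completed'                 // same major.minor, equal or older patch
}

// checkVersionCompatibility('0.16.0', '0.15.7') -> 'pending'
// checkVersionCompatibility('0.14.2', '0.15.7') -> 'completed-with-warnings'
// checkVersionCompatibility('0.15.3', '0.15.7') -> 'completed'
```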
|
||||
|
||||
|
||||
|
||||
@@ -932,3 +932,6 @@ MAX_SUBMIT_COUNT=100
|
||||
|
||||
# The maximum number of top-k value for RAG.
|
||||
TOP_K_MAX_VALUE=10
|
||||
|
||||
# Prevent Clickjacking
|
||||
ALLOW_EMBED=false
|
||||
@@ -1,8 +1,8 @@
|
||||
x-shared-env: &shared-api-worker-env
|
||||
x-shared-env: &shared-api-worker-env
|
||||
services:
|
||||
# API service
|
||||
api:
|
||||
image: langgenius/dify-api:0.15.3
|
||||
image: langgenius/dify-api:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@@ -25,7 +25,7 @@ services:
|
||||
# worker service
|
||||
# The Celery worker for processing the queue.
|
||||
worker:
|
||||
image: langgenius/dify-api:0.15.3
|
||||
image: langgenius/dify-api:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@@ -47,7 +47,7 @@ services:
|
||||
|
||||
# Frontend web application.
|
||||
web:
|
||||
image: langgenius/dify-web:0.15.3
|
||||
image: langgenius/dify-web:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|
||||
@@ -56,6 +56,7 @@ services:
|
||||
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
ALLOW_EMBED: ${ALLOW_EMBED:-false}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
|
||||
|
||||
@@ -98,7 +99,7 @@ services:
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.10
|
||||
image: langgenius/dify-sandbox:0.2.11
|
||||
restart: always
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
|
||||
@@ -43,7 +43,7 @@ services:
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.10
|
||||
image: langgenius/dify-sandbox:0.2.11
|
||||
restart: always
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
|
||||
@@ -389,11 +389,12 @@ x-shared-env: &shared-api-worker-env
|
||||
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
|
||||
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
|
||||
ALLOW_EMBED: ${ALLOW_EMBED:-false}
|
||||
|
||||
services:
|
||||
# API service
|
||||
api:
|
||||
image: langgenius/dify-api:0.15.3
|
||||
image: langgenius/dify-api:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@@ -416,7 +417,7 @@ services:
|
||||
# worker service
|
||||
# The Celery worker for processing the queue.
|
||||
worker:
|
||||
image: langgenius/dify-api:0.15.3
|
||||
image: langgenius/dify-api:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
# Use the shared environment variables.
|
||||
@@ -438,7 +439,7 @@ services:
|
||||
|
||||
# Frontend web application.
|
||||
web:
|
||||
image: langgenius/dify-web:0.15.3
|
||||
image: langgenius/dify-web:0.15.7
|
||||
restart: always
|
||||
environment:
|
||||
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|
||||
@@ -447,6 +448,7 @@ services:
|
||||
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
ALLOW_EMBED: ${ALLOW_EMBED:-false}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
|
||||
|
||||
@@ -489,7 +491,7 @@ services:
|
||||
|
||||
# The DifySandbox
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.10
|
||||
image: langgenius/dify-sandbox:0.2.11
|
||||
restart: always
|
||||
environment:
|
||||
# The DifySandbox configurations
|
||||
|
||||
@@ -31,3 +31,6 @@ NEXT_PUBLIC_TOP_K_MAX_VALUE=10
|
||||
|
||||
# The maximum number of tokens for segmentation
|
||||
NEXT_PUBLIC_INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
|
||||
|
||||
# Default is not allow to embed into iframe to prevent Clickjacking: https://owasp.org/www-community/attacks/Clickjacking
|
||||
NEXT_PUBLIC_ALLOW_EMBED=
|
||||
|
||||
@@ -186,15 +186,17 @@ const Apps = ({
|
||||
<div className='w-[180px] h-8'></div>
|
||||
</div>
|
||||
<div className='relative flex flex-1 overflow-y-auto'>
|
||||
{!searchKeywords && <div className='w-[200px] h-full p-4'>
|
||||
<Sidebar current={currCategory as AppCategories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
|
||||
{!searchKeywords && <div className='h-full w-[200px] p-4'>
|
||||
<Sidebar current={currCategory as AppCategories} categories={categories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
|
||||
</div>}
|
||||
<div className='flex-1 h-full overflow-auto shrink-0 grow p-6 pt-2 border-l border-divider-burn'>
|
||||
{searchFilteredList && searchFilteredList.length > 0 && <>
|
||||
<div className='pt-4 pb-1'>
|
||||
{searchKeywords
|
||||
? <p className='title-md-semi-bold text-text-tertiary'>{searchFilteredList.length > 1 ? t('app.newApp.foundResults', { count: searchFilteredList.length }) : t('app.newApp.foundResult', { count: searchFilteredList.length })}</p>
|
||||
: <AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />}
|
||||
: <div className='flex h-[22px] items-center'>
|
||||
<AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />
|
||||
</div>}
|
||||
</div>
|
||||
<div
|
||||
className={cn(
|
||||
|
||||
@@ -1,39 +1,29 @@
|
||||
'use client'
|
||||
import { RiAppsFill, RiChatSmileAiFill, RiExchange2Fill, RiPassPendingFill, RiQuillPenAiFill, RiSpeakAiFill, RiStickyNoteAddLine, RiTerminalBoxFill, RiThumbUpFill } from '@remixicon/react'
|
||||
import { RiStickyNoteAddLine, RiThumbUpLine } from '@remixicon/react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import classNames from '@/utils/classnames'
|
||||
import Divider from '@/app/components/base/divider'
|
||||
|
||||
export enum AppCategories {
|
||||
RECOMMENDED = 'Recommended',
|
||||
ASSISTANT = 'Assistant',
|
||||
AGENT = 'Agent',
|
||||
HR = 'HR',
|
||||
PROGRAMMING = 'Programming',
|
||||
WORKFLOW = 'Workflow',
|
||||
WRITING = 'Writing',
|
||||
}
|
||||
|
||||
type SidebarProps = {
|
||||
current: AppCategories
|
||||
onClick?: (category: AppCategories) => void
|
||||
current: AppCategories | string
|
||||
categories: string[]
|
||||
onClick?: (category: AppCategories | string) => void
|
||||
onCreateFromBlank?: () => void
|
||||
}
|
||||
|
||||
export default function Sidebar({ current, onClick, onCreateFromBlank }: SidebarProps) {
|
||||
export default function Sidebar({ current, categories, onClick, onCreateFromBlank }: SidebarProps) {
|
||||
const { t } = useTranslation()
|
||||
return <div className="w-full h-full flex flex-col">
|
||||
<ul>
|
||||
return <div className="flex h-full w-full flex-col">
|
||||
<ul className='pt-0.5'>
|
||||
<CategoryItem category={AppCategories.RECOMMENDED} active={current === AppCategories.RECOMMENDED} onClick={onClick} />
|
||||
</ul>
|
||||
<div className='px-3 pt-2 pb-1 system-xs-medium-uppercase text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
|
||||
<ul className='flex-grow flex flex-col gap-0.5'>
|
||||
<CategoryItem category={AppCategories.ASSISTANT} active={current === AppCategories.ASSISTANT} onClick={onClick} />
|
||||
<CategoryItem category={AppCategories.AGENT} active={current === AppCategories.AGENT} onClick={onClick} />
|
||||
<CategoryItem category={AppCategories.HR} active={current === AppCategories.HR} onClick={onClick} />
|
||||
<CategoryItem category={AppCategories.PROGRAMMING} active={current === AppCategories.PROGRAMMING} onClick={onClick} />
|
||||
<CategoryItem category={AppCategories.WORKFLOW} active={current === AppCategories.WORKFLOW} onClick={onClick} />
|
||||
<CategoryItem category={AppCategories.WRITING} active={current === AppCategories.WRITING} onClick={onClick} />
|
||||
<div className='system-xs-medium-uppercase mb-0.5 mt-3 px-3 pb-1 pt-2 text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
|
||||
<ul className='flex grow flex-col gap-0.5'>
|
||||
{categories.map(category => (<CategoryItem key={category} category={category} active={current === category} onClick={onClick} />))}
|
||||
</ul>
|
||||
<Divider bgStyle='gradient' />
|
||||
<div className='px-3 py-1 flex items-center gap-1 text-text-tertiary cursor-pointer' onClick={onCreateFromBlank}>
|
||||
@@ -45,47 +35,26 @@ export default function Sidebar({ current, onClick, onCreateFromBlank }: Sidebar
|
||||
|
||||
type CategoryItemProps = {
|
||||
active: boolean
|
||||
category: AppCategories
|
||||
onClick?: (category: AppCategories) => void
|
||||
category: AppCategories | string
|
||||
onClick?: (category: AppCategories | string) => void
|
||||
}
|
||||
function CategoryItem({ category, active, onClick }: CategoryItemProps) {
|
||||
return <li
|
||||
className={classNames('p-1 pl-3 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
|
||||
className={classNames('p-1 pl-3 h-8 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
|
||||
onClick={() => { onClick?.(category) }}>
|
||||
<div className='w-5 h-5 inline-flex items-center justify-center rounded-md border border-divider-regular bg-components-icon-bg-midnight-solid group-[.active]:bg-components-icon-bg-blue-solid'>
|
||||
<AppCategoryIcon category={category} />
|
||||
</div>
|
||||
{category === AppCategories.RECOMMENDED && <div className='inline-flex h-5 w-5 items-center justify-center rounded-md'>
|
||||
<RiThumbUpLine className='h-4 w-4 text-components-menu-item-text group-[.active]:text-components-menu-item-text-active' />
|
||||
</div>}
|
||||
<AppCategoryLabel category={category}
|
||||
className={classNames('system-sm-medium text-components-menu-item-text group-[.active]:text-components-menu-item-text-active group-hover:text-components-menu-item-text-hover', active && 'system-sm-semibold')} />
|
||||
</li >
|
||||
}
|
||||
|
||||
type AppCategoryLabelProps = {
|
||||
category: AppCategories
|
||||
category: AppCategories | string
|
||||
className?: string
|
||||
}
|
||||
export function AppCategoryLabel({ category, className }: AppCategoryLabelProps) {
|
||||
const { t } = useTranslation()
|
||||
return <span className={className}>{t(`app.newAppFromTemplate.sidebar.${category}`)}</span>
|
||||
}
|
||||
|
||||
type AppCategoryIconProps = {
|
||||
category: AppCategories
|
||||
}
|
||||
function AppCategoryIcon({ category }: AppCategoryIconProps) {
|
||||
if (category === AppCategories.AGENT)
|
||||
return <RiSpeakAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.ASSISTANT)
|
||||
return <RiChatSmileAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.HR)
|
||||
return <RiPassPendingFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.PROGRAMMING)
|
||||
return <RiTerminalBoxFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.RECOMMENDED)
|
||||
return <RiThumbUpFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.WRITING)
|
||||
return <RiQuillPenAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
if (category === AppCategories.WORKFLOW)
|
||||
return <RiExchange2Fill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
return <RiAppsFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
|
||||
return <span className={className}>{category === AppCategories.RECOMMENDED ? t('app.newAppFromTemplate.sidebar.Recommended') : category}</span>
|
||||
}
|
||||
|
||||
@@ -312,7 +312,7 @@ function AppPreview({ mode }: { mode: AppMode }) {
|
||||
'chat': {
|
||||
title: t('app.types.chatbot'),
|
||||
description: t('app.newApp.chatbotUserDescription'),
|
||||
link: 'https://docs.dify.ai/guides/application-orchestrate/conversation-application?fallback=true',
|
||||
link: 'https://docs.dify.ai/guides/application-orchestrate#application_type',
|
||||
},
|
||||
'advanced-chat': {
|
||||
title: t('app.types.advanced'),
|
||||
|
||||
@@ -0,0 +1,46 @@
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import Modal from '@/app/components/base/modal'
|
||||
import Button from '@/app/components/base/button'
|
||||
|
||||
type DSLConfirmModalProps = {
|
||||
versions?: {
|
||||
importedVersion: string
|
||||
systemVersion: string
|
||||
}
|
||||
onCancel: () => void
|
||||
onConfirm: () => void
|
||||
confirmDisabled?: boolean
|
||||
}
|
||||
const DSLConfirmModal = ({
|
||||
versions = { importedVersion: '', systemVersion: '' },
|
||||
onCancel,
|
||||
onConfirm,
|
||||
confirmDisabled = false,
|
||||
}: DSLConfirmModalProps) => {
|
||||
const { t } = useTranslation()
|
||||
|
||||
return (
|
||||
<Modal
|
||||
isShow
|
||||
onClose={() => onCancel()}
|
||||
className='w-[480px]'
|
||||
>
|
||||
<div className='flex flex-col items-start gap-2 self-stretch pb-4'>
|
||||
<div className='title-2xl-semi-bold text-text-primary'>{t('app.newApp.appCreateDSLErrorTitle')}</div>
|
||||
<div className='system-md-regular flex grow flex-col text-text-secondary'>
|
||||
<div>{t('app.newApp.appCreateDSLErrorPart1')}</div>
|
||||
<div>{t('app.newApp.appCreateDSLErrorPart2')}</div>
|
||||
<br />
|
||||
<div>{t('app.newApp.appCreateDSLErrorPart3')}<span className='system-md-medium'>{versions.importedVersion}</span></div>
|
||||
<div>{t('app.newApp.appCreateDSLErrorPart4')}<span className='system-md-medium'>{versions.systemVersion}</span></div>
|
||||
</div>
|
||||
</div>
|
||||
<div className='flex items-start justify-end gap-2 self-stretch pt-6'>
|
||||
<Button variant='secondary' onClick={() => onCancel()}>{t('app.newApp.Cancel')}</Button>
|
||||
<Button variant='primary' destructive onClick={onConfirm} disabled={confirmDisabled}>{t('app.newApp.Confirm')}</Button>
|
||||
</div>
|
||||
</Modal>
|
||||
)
|
||||
}
|
||||
|
||||
export default DSLConfirmModal
|
||||
@@ -24,7 +24,7 @@ const OPTION_MAP = {
|
||||
iframe: {
|
||||
getContent: (url: string, token: string) =>
|
||||
`<iframe
|
||||
src="${url}/chatbot/${token}"
|
||||
src="${url}/chat/${token}"
|
||||
style="width: 100%; height: 100%; min-height: 700px"
|
||||
frameborder="0"
|
||||
allow="microphone">
|
||||
@@ -35,12 +35,12 @@ const OPTION_MAP = {
|
||||
`<script>
|
||||
window.difyChatbotConfig = {
|
||||
token: '${token}'${isTestEnv
|
||||
? `,
|
||||
? `,
|
||||
isDev: true`
|
||||
: ''}${IS_CE_EDITION
|
||||
? `,
|
||||
: ''}${IS_CE_EDITION
|
||||
? `,
|
||||
baseUrl: '${url}'`
|
||||
: ''}
|
||||
: ''}
|
||||
}
|
||||
</script>
|
||||
<script
|
||||
|
||||
@@ -11,10 +11,12 @@ import { useLocalStorageState } from 'ahooks'
|
||||
import produce from 'immer'
|
||||
import type {
|
||||
ChatConfig,
|
||||
ChatItem,
|
||||
Feedback,
|
||||
} from '../types'
|
||||
import { CONVERSATION_ID_INFO } from '../constants'
|
||||
import { getPrevChatList, getProcessedInputsFromUrlParams } from '../utils'
|
||||
import { buildChatItemTree, getProcessedInputsFromUrlParams } from '../utils'
|
||||
import { getProcessedFilesFromResponse } from '../../file-uploader/utils'
|
||||
import {
|
||||
fetchAppInfo,
|
||||
fetchAppMeta,
|
||||
@@ -32,6 +34,33 @@ import { useToastContext } from '@/app/components/base/toast'
|
||||
import { changeLanguage } from '@/i18n/i18next-config'
|
||||
import { InputVarType } from '@/app/components/workflow/types'
|
||||
import { TransferMethod } from '@/types/app'
|
||||
import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils'
|
||||
|
||||
function getFormattedChatList(messages: any[]) {
|
||||
const newChatList: ChatItem[] = []
|
||||
messages.forEach((item) => {
|
||||
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
|
||||
newChatList.push({
|
||||
id: `question-${item.id}`,
|
||||
content: item.query,
|
||||
isAnswer: false,
|
||||
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
parentMessageId: item.parent_message_id || undefined,
|
||||
})
|
||||
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
|
||||
newChatList.push({
|
||||
id: item.id,
|
||||
content: item.answer,
|
||||
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
|
||||
feedback: item.feedback,
|
||||
isAnswer: true,
|
||||
citation: item.retriever_resources,
|
||||
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
parentMessageId: `question-${item.id}`,
|
||||
})
|
||||
})
|
||||
return newChatList
|
||||
}
|
||||
|
||||
export const useEmbeddedChatbot = () => {
|
||||
const isInstalledApp = false
|
||||
@@ -77,7 +106,7 @@ export const useEmbeddedChatbot = () => {
|
||||
|
||||
const appPrevChatList = useMemo(
|
||||
() => (currentConversationId && appChatListData?.data.length)
|
||||
? getPrevChatList(appChatListData.data)
|
||||
? buildChatItemTree(getFormattedChatList(appChatListData.data))
|
||||
: [],
|
||||
[appChatListData, currentConversationId],
|
||||
)
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { UUID_NIL } from './constants'
|
||||
import type { IChatItem } from './chat/type'
|
||||
import type { ChatItem, ChatItemInTree } from './types'
|
||||
import { addFileInfos, sortAgentSorts } from '../../tools/utils'
|
||||
import { getProcessedFilesFromResponse } from '../file-uploader/utils'
|
||||
|
||||
async function decodeBase64AndDecompress(base64String: string) {
|
||||
const binaryString = atob(base64String)
|
||||
@@ -19,6 +21,60 @@ function getProcessedInputsFromUrlParams(): Record<string, any> {
|
||||
return inputs
|
||||
}
|
||||
|
||||
function appendQAToChatList(chatList: ChatItem[], item: any) {
|
||||
// we append the answer first and then the question, since we will reverse the whole chatList later
|
||||
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
|
||||
chatList.push({
|
||||
id: item.id,
|
||||
content: item.answer,
|
||||
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
|
||||
feedback: item.feedback,
|
||||
isAnswer: true,
|
||||
citation: item.retriever_resources,
|
||||
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
})
|
||||
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
|
||||
chatList.push({
|
||||
id: `question-${item.id}`,
|
||||
content: item.query,
|
||||
isAnswer: false,
|
||||
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the latest thread messages from all messages of the conversation.
|
||||
* Same logic as the backend module `api/core/prompt/utils/extract_thread_messages.py`
|
||||
*
|
||||
* @param fetchedMessages - The history chat list data from the backend, sorted by created_at in descending order. This includes all flattened history messages of the conversation.
|
||||
* @returns An array of ChatItems representing the latest thread.
|
||||
*/
|
||||
|
||||
|
||||
function getPrevChatList(fetchedMessages: any[]) {
|
||||
const ret: ChatItem[] = []
|
||||
let nextMessageId = null
|
||||
|
||||
for (const item of fetchedMessages) {
|
||||
if (!item.parent_message_id) {
|
||||
appendQAToChatList(ret, item)
|
||||
break
|
||||
}
|
||||
|
||||
if (!nextMessageId) {
|
||||
appendQAToChatList(ret, item)
|
||||
nextMessageId = item.parent_message_id
|
||||
}
|
||||
else {
|
||||
if (item.id === nextMessageId || nextMessageId === UUID_NIL) {
|
||||
appendQAToChatList(ret, item)
|
||||
nextMessageId = item.parent_message_id
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret.reverse()
|
||||
}
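The walk above keeps only the newest thread of a branching conversation: messages arrive newest-first, and each kept message's `parent_message_id` tells the loop which earlier message to keep next. A small worked example with toy data (the ids and the simplified helper are illustrative; the `UUID_NIL` special case is omitted for brevity):

```ts
type Msg = { id: string; parent_message_id: string | null }

// Simplified version of the walk in getPrevChatList, operating on ids only.
function latestThreadIds(fetchedMessages: Msg[]): string[] {
  const kept: string[] = []
  let nextMessageId: string | null = null
  for (const item of fetchedMessages) {
    if (!item.parent_message_id) {
      kept.push(item.id)
      break
    }
    if (!nextMessageId || item.id === nextMessageId) {
      kept.push(item.id)
      nextMessageId = item.parent_message_id
    }
  }
  return kept.reverse()
}

// Two answers were regenerated from m1: m2a was abandoned, m2b is the branch that continued with m3.
const newestFirst: Msg[] = [
  { id: 'm3', parent_message_id: 'm2b' },
  { id: 'm2b', parent_message_id: 'm1' },
  { id: 'm2a', parent_message_id: 'm1' },
  { id: 'm1', parent_message_id: null },
]
console.log(latestThreadIds(newestFirst)) // ['m1', 'm2b', 'm3'] -- the abandoned m2a branch is skipped
```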
|
||||
|
||||
function isValidGeneratedAnswer(item?: ChatItem | ChatItemInTree): boolean {
|
||||
return !!item && item.isAnswer && !item.id.startsWith('answer-placeholder-') && !item.isOpeningStatement
|
||||
}
|
||||
@@ -164,6 +220,7 @@ function getThreadMessages(tree: ChatItemInTree[], targetMessageId?: string): Ch
|
||||
export {
|
||||
getProcessedInputsFromUrlParams,
|
||||
isValidGeneratedAnswer,
|
||||
getPrevChatList,
|
||||
getLastAnswer,
|
||||
buildChatItemTree,
|
||||
getThreadMessages,
|
||||
|
||||
@@ -10,6 +10,7 @@ import SyntaxHighlighter from 'react-syntax-highlighter'
|
||||
import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
|
||||
import { Component, memo, useMemo, useRef, useState } from 'react'
|
||||
import type { CodeComponent } from 'react-markdown/lib/ast-to-react'
|
||||
import SVGRenderer from './svg-gallery'
|
||||
import cn from '@/utils/classnames'
|
||||
import CopyBtn from '@/app/components/base/copy-btn'
|
||||
import SVGBtn from '@/app/components/base/svg'
|
||||
@@ -18,7 +19,7 @@ import ImageGallery from '@/app/components/base/image-gallery'
|
||||
import { useChatContext } from '@/app/components/base/chat/chat/context'
|
||||
import VideoGallery from '@/app/components/base/video-gallery'
|
||||
import AudioGallery from '@/app/components/base/audio-gallery'
|
||||
import SVGRenderer from '@/app/components/base/svg-gallery'
|
||||
// import SVGRenderer from '@/app/components/base/svg-gallery'
|
||||
import MarkdownButton from '@/app/components/base/markdown-blocks/button'
|
||||
import MarkdownForm from '@/app/components/base/markdown-blocks/form'
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { SVG } from '@svgdotjs/svg.js'
|
||||
import DOMPurify from 'dompurify'
|
||||
import ImagePreview from '@/app/components/base/image-uploader/image-preview'
|
||||
|
||||
export const SVGRenderer = ({ content }: { content: string }) => {
|
||||
@@ -44,7 +45,7 @@ export const SVGRenderer = ({ content }: { content: string }) => {
|
||||
|
||||
svgRef.current.style.width = `${Math.min(originalWidth, 298)}px`
|
||||
|
||||
const rootElement = draw.svg(content)
|
||||
const rootElement = draw.svg(DOMPurify.sanitize(content))
|
||||
|
||||
rootElement.click(() => {
|
||||
setImagePreview(svgToDataURL(svgElement as Element))
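The change above routes the model-generated SVG markup through DOMPurify before handing it to svg.js, so script payloads and event-handler attributes are stripped instead of executed. A standalone illustration (not repo code) of what the sanitization does:

```ts
import DOMPurify from 'dompurify'

// Untrusted SVG as it might come back from a model response.
const untrusted = `<svg xmlns="http://www.w3.org/2000/svg" onload="alert(1)">
  <script>alert(document.cookie)</script>
  <rect width="10" height="10" fill="red" />
</svg>`

// sanitize() keeps the <rect> but drops the <script> element and the onload handler,
// so draw.svg(safe) only ever receives inert markup.
const safe = DOMPurify.sanitize(untrusted)
console.log(safe)
```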
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
'use client'
|
||||
|
||||
import React, { useMemo, useState } from 'react'
|
||||
import { useRouter } from 'next/navigation'
|
||||
import React, { useCallback, useMemo, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { useContext } from 'use-context-selector'
|
||||
import useSWR from 'swr'
|
||||
import { useDebounceFn } from 'ahooks'
|
||||
import Toast from '../../base/toast'
|
||||
import s from './style.module.css'
|
||||
import cn from '@/utils/classnames'
|
||||
import ExploreContext from '@/context/explore-context'
|
||||
@@ -14,17 +12,17 @@ import type { App } from '@/models/explore'
|
||||
import Category from '@/app/components/explore/category'
|
||||
import AppCard from '@/app/components/explore/app-card'
|
||||
import { fetchAppDetail, fetchAppList } from '@/service/explore'
|
||||
import { importDSL } from '@/service/apps'
|
||||
import { useTabSearchParams } from '@/hooks/use-tab-searchparams'
|
||||
import CreateAppModal from '@/app/components/explore/create-app-modal'
|
||||
import AppTypeSelector from '@/app/components/app/type-selector'
|
||||
import type { CreateAppModalProps } from '@/app/components/explore/create-app-modal'
|
||||
import Loading from '@/app/components/base/loading'
|
||||
import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
|
||||
import { useAppContext } from '@/context/app-context'
|
||||
import { getRedirection } from '@/utils/app-redirection'
|
||||
import Input from '@/app/components/base/input'
|
||||
import { DSLImportMode } from '@/models/app'
|
||||
import {
|
||||
DSLImportMode,
|
||||
} from '@/models/app'
|
||||
import { useImportDSL } from '@/hooks/use-import-dsl'
|
||||
import DSLConfirmModal from '@/app/components/app/create-from-dsl-modal/dsl-confirm-modal'
|
||||
|
||||
type AppsProps = {
|
||||
pageType?: PageType
|
||||
@@ -41,8 +39,6 @@ const Apps = ({
|
||||
onSuccess,
|
||||
}: AppsProps) => {
|
||||
const { t } = useTranslation()
|
||||
const { isCurrentWorkspaceEditor } = useAppContext()
|
||||
const { push } = useRouter()
|
||||
const { hasEditPermission } = useContext(ExploreContext)
|
||||
const allCategoriesEn = t('explore.apps.allCategories', { lng: 'en' })
|
||||
|
||||
@@ -117,6 +113,14 @@ const Apps = ({
|
||||
|
||||
const [currApp, setCurrApp] = React.useState<App | null>(null)
|
||||
const [isShowCreateModal, setIsShowCreateModal] = React.useState(false)
|
||||
|
||||
const {
|
||||
handleImportDSL,
|
||||
handleImportDSLConfirm,
|
||||
versions,
|
||||
isFetching,
|
||||
} = useImportDSL()
|
||||
const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false)
|
||||
const onCreate: CreateAppModalProps['onConfirm'] = async ({
|
||||
name,
|
||||
icon_type,
|
||||
@@ -127,31 +131,31 @@ const Apps = ({
|
||||
const { export_data } = await fetchAppDetail(
|
||||
currApp?.app.id as string,
|
||||
)
|
||||
try {
|
||||
const app = await importDSL({
|
||||
mode: DSLImportMode.YAML_CONTENT,
|
||||
yaml_content: export_data,
|
||||
name,
|
||||
icon_type,
|
||||
icon,
|
||||
icon_background,
|
||||
description,
|
||||
})
|
||||
setIsShowCreateModal(false)
|
||||
Toast.notify({
|
||||
type: 'success',
|
||||
message: t('app.newApp.appCreated'),
|
||||
})
|
||||
if (onSuccess)
|
||||
onSuccess()
|
||||
localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
|
||||
getRedirection(isCurrentWorkspaceEditor, { id: app.app_id }, push)
|
||||
}
|
||||
catch (e) {
|
||||
Toast.notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
|
||||
const payload = {
|
||||
mode: DSLImportMode.YAML_CONTENT,
|
||||
yaml_content: export_data,
|
||||
name,
|
||||
icon_type,
|
||||
icon,
|
||||
icon_background,
|
||||
description,
|
||||
}
|
||||
await handleImportDSL(payload, {
|
||||
onSuccess: () => {
|
||||
setIsShowCreateModal(false)
|
||||
},
|
||||
onPending: () => {
|
||||
setShowDSLConfirmModal(true)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
const onConfirmDSL = useCallback(async () => {
|
||||
await handleImportDSLConfirm({
|
||||
onSuccess,
|
||||
})
|
||||
}, [handleImportDSLConfirm, onSuccess])
|
||||
|
||||
if (!categories || categories.length === 0) {
|
||||
return (
|
||||
<div className="flex h-full items-center">
|
||||
@@ -234,9 +238,20 @@ const Apps = ({
|
||||
appDescription={currApp?.app.description || ''}
|
||||
show={isShowCreateModal}
|
||||
onConfirm={onCreate}
|
||||
confirmDisabled={isFetching}
|
||||
onHide={() => setIsShowCreateModal(false)}
|
||||
/>
|
||||
)}
|
||||
{
|
||||
showDSLConfirmModal && (
|
||||
<DSLConfirmModal
|
||||
versions={versions}
|
||||
onCancel={() => setShowDSLConfirmModal(false)}
|
||||
onConfirm={onConfirmDSL}
|
||||
confirmDisabled={isFetching}
|
||||
/>
|
||||
)
|
||||
}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ export type CreateAppModalProps = {
|
||||
description: string
|
||||
use_icon_as_answer_icon?: boolean
|
||||
}) => Promise<void>
|
||||
confirmDisabled?: boolean
|
||||
onHide: () => void
|
||||
}
|
||||
|
||||
@@ -48,6 +49,7 @@ const CreateAppModal = ({
|
||||
appMode,
|
||||
appUseIconAsAnswerIcon,
|
||||
onConfirm,
|
||||
confirmDisabled,
|
||||
onHide,
|
||||
}: CreateAppModalProps) => {
|
||||
const { t } = useTranslation()
|
||||
@@ -145,7 +147,7 @@ const CreateAppModal = ({
|
||||
{!isEditModal && isAppsFull && <AppsFull loc='app-explore-create' />}
|
||||
</div>
|
||||
<div className='flex flex-row-reverse'>
|
||||
<Button disabled={!isEditModal && isAppsFull} className='w-24 ml-2' variant='primary' onClick={submit}>{!isEditModal ? t('common.operation.create') : t('common.operation.save')}</Button>
|
||||
<Button disabled={(!isEditModal && isAppsFull) || !name.trim() || confirmDisabled} className='w-24 ml-2' variant='primary' onClick={submit}>{!isEditModal ? t('common.operation.create') : t('common.operation.save')}</Button>
|
||||
<Button className='w-24' onClick={onHide}>{t('common.operation.cancel')}</Button>
|
||||
</div>
|
||||
</Modal>
|
||||
|
||||
@@ -39,7 +39,11 @@ export default function CheckCode() {
|
||||
}
|
||||
setIsLoading(true)
|
||||
const ret = await verifyResetPasswordCode({ email, code, token })
|
||||
ret.is_valid && router.push(`/reset-password/set-password?${searchParams.toString()}`)
|
||||
if (ret.is_valid) {
|
||||
const params = new URLSearchParams(searchParams)
|
||||
params.set('token', encodeURIComponent(ret.token))
|
||||
router.push(`/reset-password/set-password?${params.toString()}`)
|
||||
}
|
||||
}
|
||||
catch (error) { console.error(error) }
|
||||
finally {
|
||||
|
||||
@@ -23,6 +23,7 @@ export NEXT_TELEMETRY_DISABLED=${NEXT_TELEMETRY_DISABLED}
|
||||
|
||||
export NEXT_PUBLIC_TEXT_GENERATION_TIMEOUT_MS=${TEXT_GENERATION_TIMEOUT_MS}
|
||||
export NEXT_PUBLIC_CSP_WHITELIST=${CSP_WHITELIST}
|
||||
export NEXT_PUBLIC_ALLOW_EMBED=${ALLOW_EMBED}
|
||||
export NEXT_PUBLIC_TOP_K_MAX_VALUE=${TOP_K_MAX_VALUE}
|
||||
export NEXT_PUBLIC_INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH}
|
||||
|
||||
|
||||
158
web/hooks/use-import-dsl.ts
Normal file
@@ -0,0 +1,158 @@
|
||||
import {
|
||||
useCallback,
|
||||
useRef,
|
||||
useState,
|
||||
} from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { useRouter } from 'next/navigation'
|
||||
import type {
|
||||
DSLImportMode,
|
||||
DSLImportResponse,
|
||||
} from '@/models/app'
|
||||
import { DSLImportStatus } from '@/models/app'
|
||||
import {
|
||||
importDSL,
|
||||
importDSLConfirm,
|
||||
} from '@/service/apps'
|
||||
import type { AppIconType } from '@/types/app'
|
||||
import { useToastContext } from '@/app/components/base/toast'
|
||||
import { getRedirection } from '@/utils/app-redirection'
|
||||
import { useSelector } from '@/context/app-context'
|
||||
import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
|
||||
|
||||
type DSLPayload = {
|
||||
mode: DSLImportMode
|
||||
yaml_content?: string
|
||||
yaml_url?: string
|
||||
name?: string
|
||||
icon_type?: AppIconType
|
||||
icon?: string
|
||||
icon_background?: string
|
||||
description?: string
|
||||
}
|
||||
type ResponseCallback = {
|
||||
onSuccess?: () => void
|
||||
onPending?: (payload: DSLImportResponse) => void
|
||||
onFailed?: () => void
|
||||
}
|
||||
export const useImportDSL = () => {
|
||||
const { t } = useTranslation()
|
||||
const { notify } = useToastContext()
|
||||
const [isFetching, setIsFetching] = useState(false)
|
||||
const isCurrentWorkspaceEditor = useSelector(s => s.isCurrentWorkspaceEditor)
|
||||
const { push } = useRouter()
|
||||
const [versions, setVersions] = useState<{ importedVersion: string; systemVersion: string }>()
|
||||
const importIdRef = useRef<string>('')
|
||||
|
||||
const handleImportDSL = useCallback(async (
|
||||
payload: DSLPayload,
|
||||
{
|
||||
onSuccess,
|
||||
onPending,
|
||||
onFailed,
|
||||
}: ResponseCallback,
|
||||
) => {
|
||||
if (isFetching)
|
||||
return
|
||||
setIsFetching(true)
|
||||
|
||||
try {
|
||||
const response = await importDSL(payload)
|
||||
|
||||
if (!response)
|
||||
return
|
||||
|
||||
const {
|
||||
id,
|
||||
status,
|
||||
app_id,
|
||||
imported_dsl_version,
|
||||
current_dsl_version,
|
||||
} = response
|
||||
|
||||
if (status === DSLImportStatus.COMPLETED || status === DSLImportStatus.COMPLETED_WITH_WARNINGS) {
|
||||
if (!app_id)
|
||||
return
|
||||
|
||||
notify({
|
||||
type: status === DSLImportStatus.COMPLETED ? 'success' : 'warning',
|
||||
message: t(status === DSLImportStatus.COMPLETED ? 'app.newApp.appCreated' : 'app.newApp.caution'),
|
||||
children: status === DSLImportStatus.COMPLETED_WITH_WARNINGS && t('app.newApp.appCreateDSLWarning'),
|
||||
})
|
||||
onSuccess?.()
|
||||
localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
|
||||
getRedirection(isCurrentWorkspaceEditor, { id: app_id }, push)
|
||||
}
|
||||
else if (status === DSLImportStatus.PENDING) {
|
||||
setVersions({
|
||||
importedVersion: imported_dsl_version ?? '',
|
||||
systemVersion: current_dsl_version ?? '',
|
||||
})
|
||||
importIdRef.current = id
|
||||
onPending?.(response)
|
||||
}
|
||||
else {
|
||||
notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
|
||||
onFailed?.()
|
||||
}
|
||||
}
|
||||
catch {
|
||||
notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
|
||||
onFailed?.()
|
||||
}
|
||||
finally {
|
||||
setIsFetching(false)
|
||||
}
|
||||
}, [t, notify, isCurrentWorkspaceEditor, push, isFetching])
|
||||
|
||||
const handleImportDSLConfirm = useCallback(async (
|
||||
{
|
||||
onSuccess,
|
||||
onFailed,
|
||||
}: Pick<ResponseCallback, 'onSuccess' | 'onFailed'>,
|
||||
) => {
|
||||
if (isFetching)
|
||||
return
|
||||
setIsFetching(true)
|
||||
if (!importIdRef.current)
|
||||
return
|
||||
|
||||
try {
|
||||
const response = await importDSLConfirm({
|
||||
import_id: importIdRef.current,
|
||||
})
|
||||
|
||||
const { status, app_id } = response
|
||||
if (!app_id)
|
||||
return
|
||||
|
||||
if (status === DSLImportStatus.COMPLETED) {
|
||||
onSuccess?.()
|
||||
notify({
|
||||
type: 'success',
|
||||
message: t('app.newApp.appCreated'),
|
||||
})
|
||||
localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
|
||||
getRedirection(isCurrentWorkspaceEditor, { id: app_id! }, push)
|
||||
}
|
||||
else if (status === DSLImportStatus.FAILED) {
|
||||
notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
|
||||
onFailed?.()
|
||||
}
|
||||
}
|
||||
catch {
|
||||
notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
|
||||
onFailed?.()
|
||||
}
|
||||
finally {
|
||||
setIsFetching(false)
|
||||
}
|
||||
}, [t, notify, isCurrentWorkspaceEditor, push, isFetching])
|
||||
|
||||
return {
|
||||
handleImportDSL,
|
||||
handleImportDSLConfirm,
|
||||
versions,
|
||||
isFetching,
|
||||
}
|
||||
}
|
||||
@@ -3,10 +3,26 @@ import { NextResponse } from 'next/server'
|
||||
|
||||
const NECESSARY_DOMAIN = '*.sentry.io http://localhost:* http://127.0.0.1:* https://analytics.google.com googletagmanager.com *.googletagmanager.com https://www.google-analytics.com https://api.github.com'
|
||||
|
||||
const wrapResponseWithXFrameOptions = (response: NextResponse, pathname: string) => {
|
||||
// prevent clickjacking: https://owasp.org/www-community/attacks/Clickjacking
|
||||
// The chatbot page is intentionally embeddable in an iframe; that's a feature, so it is exempted from the DENY header
|
||||
if (process.env.NEXT_PUBLIC_ALLOW_EMBED !== 'true' && !pathname.startsWith('/chat'))
|
||||
response.headers.set('X-Frame-Options', 'DENY')
|
||||
|
||||
return response
|
||||
}
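In effect the wrapper above gives three outcomes, sketched below with a standalone predicate (not repo code) so the new default is easy to check at a glance: every page is denied framing unless the operator sets ALLOW_EMBED=true, while the embeddable chatbot routes stay iframe-able either way.

```ts
// Standalone sketch of the decision wrapResponseWithXFrameOptions makes.
function shouldDenyFraming(pathname: string, allowEmbed?: string): boolean {
  return allowEmbed !== 'true' && !pathname.startsWith('/chat')
}

console.log(shouldDenyFraming('/apps'))         // true  -> X-Frame-Options: DENY
console.log(shouldDenyFraming('/chat/abc123'))  // false -> chatbot page stays embeddable
console.log(shouldDenyFraming('/apps', 'true')) // false -> operator opted in via ALLOW_EMBED=true
```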
|
||||
export function middleware(request: NextRequest) {
|
||||
const { pathname } = request.nextUrl
|
||||
const requestHeaders = new Headers(request.headers)
|
||||
const response = NextResponse.next({
|
||||
request: {
|
||||
headers: requestHeaders,
|
||||
},
|
||||
})
|
||||
|
||||
const isWhiteListEnabled = !!process.env.NEXT_PUBLIC_CSP_WHITELIST && process.env.NODE_ENV === 'production'
|
||||
if (!isWhiteListEnabled)
|
||||
return NextResponse.next()
|
||||
return wrapResponseWithXFrameOptions(response, pathname)
|
||||
|
||||
const whiteList = `${process.env.NEXT_PUBLIC_CSP_WHITELIST} ${NECESSARY_DOMAIN}`
|
||||
const nonce = Buffer.from(crypto.randomUUID()).toString('base64')
|
||||
@@ -33,7 +49,6 @@ export function middleware(request: NextRequest) {
|
||||
.replace(/\s{2,}/g, ' ')
|
||||
.trim()
|
||||
|
||||
const requestHeaders = new Headers(request.headers)
|
||||
requestHeaders.set('x-nonce', nonce)
|
||||
|
||||
requestHeaders.set(
|
||||
@@ -41,17 +56,12 @@ export function middleware(request: NextRequest) {
|
||||
contentSecurityPolicyHeaderValue,
|
||||
)
|
||||
|
||||
const response = NextResponse.next({
|
||||
request: {
|
||||
headers: requestHeaders,
|
||||
},
|
||||
})
|
||||
response.headers.set(
|
||||
'Content-Security-Policy',
|
||||
contentSecurityPolicyHeaderValue,
|
||||
)
|
||||
|
||||
return response
|
||||
return wrapResponseWithXFrameOptions(response, pathname)
|
||||
}
|
||||
|
||||
export const config = {
|
||||
@@ -73,4 +83,4 @@ export const config = {
|
||||
// ],
|
||||
},
|
||||
],
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "dify-web",
|
||||
"version": "0.15.3",
|
||||
"version": "0.15.7",
|
||||
"private": true,
|
||||
"engines": {
|
||||
"node": ">=18.17.0"
|
||||
@@ -51,6 +51,7 @@
|
||||
"crypto-js": "^4.2.0",
|
||||
"dayjs": "^1.11.7",
|
||||
"decimal.js": "^10.4.3",
|
||||
"dompurify": "^3.2.4",
|
||||
"echarts": "^5.5.1",
|
||||
"echarts-for-react": "^3.0.2",
|
||||
"elkjs": "^0.9.3",
|
||||
@@ -70,7 +71,7 @@
|
||||
"mermaid": "11.4.1",
|
||||
"mime": "^4.0.4",
|
||||
"negotiator": "^0.6.3",
|
||||
"next": "^14.2.10",
|
||||
"next": "^14.2.25",
|
||||
"pinyin-pro": "^3.23.0",
|
||||
"qrcode.react": "^3.1.0",
|
||||
"qs": "^6.11.1",
|
||||
|
||||
@@ -40,7 +40,7 @@ import type { SystemFeatures } from '@/types/feature'
|
||||
|
||||
type LoginSuccess = {
|
||||
result: 'success'
|
||||
data: { access_token: string;refresh_token: string }
|
||||
data: { access_token: string; refresh_token: string }
|
||||
}
|
||||
type LoginFail = {
|
||||
result: 'fail'
|
||||
@@ -331,20 +331,20 @@ export const uploadRemoteFileInfo = (url: string, isPublic?: boolean) => {
|
||||
export const sendEMailLoginCode = (email: string, language = 'en-US') =>
|
||||
post<CommonResponse & { data: string }>('/email-code-login', { body: { email, language } })
|
||||
|
||||
export const emailLoginWithCode = (data: { email: string;code: string;token: string }) =>
|
||||
export const emailLoginWithCode = (data: { email: string; code: string; token: string }) =>
|
||||
post<LoginResponse>('/email-code-login/validity', { body: data })
|
||||
|
||||
export const sendResetPasswordCode = (email: string, language = 'en-US') =>
|
||||
post<CommonResponse & { data: string;message?: string ;code?: string }>('/forgot-password', { body: { email, language } })
|
||||
post<CommonResponse & { data: string; message?: string; code?: string }>('/forgot-password', { body: { email, language } })
|
||||
|
||||
export const verifyResetPasswordCode = (body: { email: string;code: string;token: string }) =>
|
||||
post<CommonResponse & { is_valid: boolean }>('/forgot-password/validity', { body })
|
||||
export const verifyResetPasswordCode = (body: { email: string; code: string; token: string }) =>
|
||||
post<CommonResponse & { is_valid: boolean; token: string }>('/forgot-password/validity', { body })
|
||||
|
||||
export const sendDeleteAccountCode = () =>
|
||||
get<CommonResponse & { data: string }>('/account/delete/verify')
|
||||
|
||||
export const verifyDeleteAccountCode = (body: { code: string;token: string }) =>
|
||||
export const verifyDeleteAccountCode = (body: { code: string; token: string }) =>
|
||||
post<CommonResponse & { is_valid: boolean }>('/account/delete', { body })
|
||||
|
||||
export const submitDeleteAccountFeedback = (body: { feedback: string;email: string }) =>
|
||||
export const submitDeleteAccountFeedback = (body: { feedback: string; email: string }) =>
|
||||
post<CommonResponse>('/account/delete/feedback', { body })
|
||||
|
||||
115
web/yarn.lock
@@ -2066,10 +2066,10 @@
|
||||
dependencies:
|
||||
"@monaco-editor/loader" "^1.4.0"
|
||||
|
||||
"@next/env@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.npmjs.org/@next/env/-/env-14.2.17.tgz"
|
||||
integrity sha512-MCgO7VHxXo8sYR/0z+sk9fGyJJU636JyRmkjc7ZJY8Hurl8df35qG5hoAh5KMs75FLjhlEo9bb2LGe89Y/scDA==
|
||||
"@next/env@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/env/-/env-14.2.25.tgz#936d10b967e103e49a4bcea1e97292d5605278dd"
|
||||
integrity sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==
|
||||
|
||||
"@next/eslint-plugin-next@14.0.4":
|
||||
version "14.0.4"
|
||||
@@ -2085,50 +2085,50 @@
|
||||
dependencies:
|
||||
source-map "^0.7.0"
|
||||
|
||||
"@next/swc-darwin-arm64@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.17.tgz"
|
||||
integrity sha512-WiOf5nElPknrhRMTipXYTJcUz7+8IAjOYw3vXzj3BYRcVY0hRHKWgTgQ5439EvzQyHEko77XK+yN9x9OJ0oOog==
|
||||
"@next/swc-darwin-arm64@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz#7bcccfda0c0ff045c45fbe34c491b7368e373e3d"
|
||||
integrity sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==
|
||||
|
||||
"@next/swc-darwin-x64@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.17.tgz#e29a17ef28d97c347c7d021f391e13b6c8e4c813"
|
||||
integrity sha512-29y425wYnL17cvtxrDQWC3CkXe/oRrdt8ie61S03VrpwpPRI0XsnTvtKO06XCisK4alaMnZlf8riwZIbJTaSHQ==
|
||||
"@next/swc-darwin-x64@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz#b489e209d7b405260b73f69a38186ed150fb7a08"
|
||||
integrity sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==
|
||||
|
||||
"@next/swc-linux-arm64-gnu@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.17.tgz#10e99c7aa60cc33f8b7633e045f74be9a43e7b0c"
|
||||
integrity sha512-SSHLZls3ZwNEHsc+d0ynKS+7Af0Nr8+KTUBAy9pm6xz9SHkJ/TeuEg6W3cbbcMSh6j4ITvrjv3Oi8n27VR+IPw==
|
||||
"@next/swc-linux-arm64-gnu@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz#ba064fabfdce0190d9859493d8232fffa84ef2e2"
|
||||
integrity sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==
|
||||
|
||||
"@next/swc-linux-arm64-musl@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.17.tgz#9a5bb809d3c6aef96c409959aedae28b4e5db53d"
|
||||
integrity sha512-VFge37us5LNPatB4F7iYeuGs9Dprqe4ZkW7lOEJM91r+Wf8EIdViWHLpIwfdDXinvCdLl6b4VyLpEBwpkctJHA==
|
||||
"@next/swc-linux-arm64-musl@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz#bf0018267e4e0fbfa1524750321f8cae855144a3"
|
||||
integrity sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==
|
||||
|
||||
"@next/swc-linux-x64-gnu@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.17.tgz#64e0ce01870e6dc45ae48f676d7cce82aedcdc62"
|
||||
integrity sha512-aaQlpxUVb9RZ41adlTYVQ3xvYEfBPUC8+6rDgmQ/0l7SvK8S1YNJzPmDPX6a4t0jLtIoNk7j+nroS/pB4nx7vQ==
|
||||
"@next/swc-linux-x64-gnu@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz#64f5a6016a7148297ee80542e0fd788418a32472"
|
||||
integrity sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==
|
||||
|
||||
"@next/swc-linux-x64-musl@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.17.tgz#93114164b6ccfc533908193ab9065f0c3970abc3"
|
||||
integrity sha512-HSyEiFaEY3ay5iATDqEup5WAfrhMATNJm8dYx3ZxL+e9eKv10XKZCwtZByDoLST7CyBmyDz+OFJL1wigyXeaoA==
|
||||
"@next/swc-linux-x64-musl@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz#58dc636d7c55828478159546f7b95ab1e902301c"
|
||||
integrity sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==
|
||||
|
||||
"@next/swc-win32-arm64-msvc@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.17.tgz#4b99dea02178c112e5c33c742f9ff2a49b3b2939"
|
||||
integrity sha512-h5qM9Btqv87eYH8ArrnLoAHLyi79oPTP2vlGNSg4CDvUiXgi7l0+5KuEGp5pJoMhjuv9ChRdm7mRlUUACeBt4w==
|
||||
"@next/swc-win32-arm64-msvc@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz#93562d447c799bded1e89c1a62d5195a2a8c6c0d"
|
||||
integrity sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==
|
||||
|
||||
"@next/swc-win32-ia32-msvc@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.17.tgz#f1c23955405a259b6d45c65f918575b01bcf0106"
|
||||
integrity sha512-BD/G++GKSLexQjdyoEUgyo5nClU7er5rK0sE+HlEqnldJSm96CIr/+YOTT063LVTT/dUOeQsNgp5DXr86/K7/A==
|
||||
"@next/swc-win32-ia32-msvc@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz#ad85a33466be1f41d083211ea21adc0d2c6e6554"
|
||||
integrity sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==
|
||||
|
||||
"@next/swc-win32-x64-msvc@14.2.17":
|
||||
version "14.2.17"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.17.tgz#44f5a4fcd8df1396a8d4326510ca2d92fb809cb3"
|
||||
integrity sha512-vkQfN1+4V4KqDibkW2q0sJ6CxQuXq5l2ma3z0BRcfIqkAMZiiW67T9yCpwqJKP68QghBtPEFjPAlaqe38O6frw==
|
||||
"@next/swc-win32-x64-msvc@14.2.25":
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz#3969c66609e683ec63a6a9f320a855f7be686a08"
|
||||
integrity sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==
|
||||
|
||||
"@nodelib/fs.scandir@2.1.5":
|
||||
version "2.1.5"
|
||||
@@ -5864,6 +5864,13 @@ dompurify@^3.2.1:
|
||||
optionalDependencies:
|
||||
"@types/trusted-types" "^2.0.7"
|
||||
|
||||
dompurify@^3.2.4:
|
||||
version "3.2.4"
|
||||
resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.2.4.tgz#af5a5a11407524431456cf18836c55d13441cd8e"
|
||||
integrity sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==
|
||||
optionalDependencies:
|
||||
"@types/trusted-types" "^2.0.7"
|
||||
|
||||
domutils@^2.5.2, domutils@^2.8.0:
|
||||
version "2.8.0"
|
||||
resolved "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz"
|
||||
@@ -9911,12 +9918,12 @@ neo-async@^2.6.2:
|
||||
resolved "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz"
|
||||
integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==
|
||||
|
||||
next@^14.2.10:
|
||||
version "14.2.17"
|
||||
resolved "https://registry.npmjs.org/next/-/next-14.2.17.tgz"
|
||||
integrity sha512-hNo/Zy701DDO3nzKkPmsLRlDfNCtb1OJxFUvjGEl04u7SFa3zwC6hqsOUzMajcaEOEV8ey1GjvByvrg0Qr5AiQ==
|
||||
next@^14.2.25:
|
||||
version "14.2.25"
|
||||
resolved "https://registry.yarnpkg.com/next/-/next-14.2.25.tgz#0657551fde6a97f697cf9870e9ccbdaa465c6008"
|
||||
integrity sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==
|
||||
dependencies:
|
||||
"@next/env" "14.2.17"
|
||||
"@next/env" "14.2.25"
|
||||
"@swc/helpers" "0.5.5"
|
||||
busboy "1.6.0"
|
||||
caniuse-lite "^1.0.30001579"
|
||||
@@ -9924,15 +9931,15 @@ next@^14.2.10:
|
||||
postcss "8.4.31"
|
||||
styled-jsx "5.1.1"
|
||||
optionalDependencies:
|
||||
"@next/swc-darwin-arm64" "14.2.17"
|
||||
"@next/swc-darwin-x64" "14.2.17"
|
||||
"@next/swc-linux-arm64-gnu" "14.2.17"
|
||||
"@next/swc-linux-arm64-musl" "14.2.17"
|
||||
"@next/swc-linux-x64-gnu" "14.2.17"
|
||||
"@next/swc-linux-x64-musl" "14.2.17"
|
||||
"@next/swc-win32-arm64-msvc" "14.2.17"
|
||||
"@next/swc-win32-ia32-msvc" "14.2.17"
|
||||
"@next/swc-win32-x64-msvc" "14.2.17"
|
||||
"@next/swc-darwin-arm64" "14.2.25"
|
||||
"@next/swc-darwin-x64" "14.2.25"
|
||||
"@next/swc-linux-arm64-gnu" "14.2.25"
|
||||
"@next/swc-linux-arm64-musl" "14.2.25"
|
||||
"@next/swc-linux-x64-gnu" "14.2.25"
|
||||
"@next/swc-linux-x64-musl" "14.2.25"
|
||||
"@next/swc-win32-arm64-msvc" "14.2.25"
|
||||
"@next/swc-win32-ia32-msvc" "14.2.25"
|
||||
"@next/swc-win32-x64-msvc" "14.2.25"
|
||||
|
||||
no-case@^3.0.4:
|
||||
version "3.0.4"
|
||||
|
||||