Mirror of https://github.com/langgenius/dify.git (synced 2026-01-08 07:14:14 +00:00)

Compare commits (21 commits)
| SHA1 |
|---|
| 4b938ab18d |
| 88356de923 |
| 5f09900dca |
| 9ac99abf20 |
| 32588f562e |
| 36f8bd3f1a |
| c919074e06 |
| 88cd9aedb7 |
| 16a4f77fb4 |
| 3401c52665 |
| 4fa3d78ed8 |
| 5f7f851b17 |
| 559ab46ee1 |
| df98223c8c |
| 144f9507f8 |
| 2e097a1ac0 |
| 9f7d8a981f |
| 40b31bafd5 |
| d38a2c95fb |
| 7d18e2a0ef |
| 024f242251 |
.markdownlint.json (new file, +4)

{
  "MD024": false,
  "MD013": false
}
CHANGELOG.md (+27)

@@ -5,6 +5,33 @@ All notable changes to Dify will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.15.8] - 2025-05-30
+
+### Added
+
+- Added gunicorn keepalive setting (#19537)
+
+### Fixed
+
+- Fixed database configuration to allow DB_EXTRAS to set search_path via options (#16a4f77)
+- Fixed frontend third-party package security issues (#19655)
+- Updated dependencies: huggingface-hub (~0.16.4 to ~0.31.0), transformers (~4.35.0 to ~4.39.0), and resend (~0.7.0 to ~2.9.0) (#19563)
+- Downgraded boto3 from 1.36 to 1.35 (#19736)
+
+## [0.15.7] - 2025-04-27
+
+### Added
+
+- Added support for GPT-4.1 in model providers (#18912)
+- Added support for Amazon Bedrock DeepSeek-R1 model (#18908)
+- Added support for Amazon Bedrock Claude Sonnet 3.7 model (#18788)
+- Refined version compatibility logic in app DSL service
+
+### Fixed
+
+- Fixed issue with creating apps from template categories (#18807, #18868)
+- Fixed DSL version check when creating apps from explore templates (#18872, #18878)
+
 ## [0.15.6] - 2025-04-22
 
 ### Security
DatabaseConfig

@@ -1,5 +1,5 @@
 from typing import Any, Literal, Optional
-from urllib.parse import quote_plus
+from urllib.parse import parse_qsl, quote_plus
 
 from pydantic import Field, NonNegativeInt, PositiveFloat, PositiveInt, computed_field
 from pydantic_settings import BaseSettings
@@ -166,14 +166,28 @@ class DatabaseConfig(BaseSettings):
         default=False,
     )
 
-    @computed_field
+    @computed_field  # type: ignore[misc]
+    @property
     def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
+        # Parse DB_EXTRAS for 'options'
+        db_extras_dict = dict(parse_qsl(self.DB_EXTRAS))
+        options = db_extras_dict.get("options", "")
+        # Always include timezone
+        timezone_opt = "-c timezone=UTC"
+        if options:
+            # Merge user options and timezone
+            merged_options = f"{options} {timezone_opt}"
+        else:
+            merged_options = timezone_opt
+
+        connect_args = {"options": merged_options}
+
         return {
             "pool_size": self.SQLALCHEMY_POOL_SIZE,
             "max_overflow": self.SQLALCHEMY_MAX_OVERFLOW,
             "pool_recycle": self.SQLALCHEMY_POOL_RECYCLE,
             "pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING,
-            "connect_args": {"options": "-c timezone=UTC"},
+            "connect_args": connect_args,
         }
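This lets DB_EXTRAS carry extra PostgreSQL startup options (for example a search_path) that are merged with the always-applied UTC timezone option. A minimal standalone sketch of the merge behavior (the helper name is hypothetical; the real logic lives inside the pydantic settings class above):

    from urllib.parse import parse_qsl

    def merged_pg_options(db_extras: str) -> str:
        # DB_EXTRAS is query-string-like, e.g. "options=-c search_path=myschema"
        options = dict(parse_qsl(db_extras)).get("options", "")
        timezone_opt = "-c timezone=UTC"
        return f"{options} {timezone_opt}" if options else timezone_opt

    assert merged_pg_options("options=-c search_path=myschema") == "-c search_path=myschema -c timezone=UTC"
    assert merged_pg_options("") == "-c timezone=UTC"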
PackagingInfo

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
 
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.15.6",
+        default="0.15.8",
     )
 
     COMMIT_SHA: str = Field(
Console auth controller (forgot password)

@@ -6,13 +6,9 @@ from flask_restful import Resource, reqparse  # type: ignore
 
 from constants.languages import languages
 from controllers.console import api
-from controllers.console.auth.error import (EmailCodeError, InvalidEmailError,
-                                            InvalidTokenError,
-                                            PasswordMismatchError)
-from controllers.console.error import (AccountInFreezeError, AccountNotFound,
-                                       EmailSendIpLimitError)
-from controllers.console.wraps import (email_password_login_enabled,
-                                       setup_required)
+from controllers.console.auth.error import EmailCodeError, InvalidEmailError, InvalidTokenError, PasswordMismatchError
+from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError
+from controllers.console.wraps import email_password_login_enabled, setup_required
 from events.tenant_event import tenant_was_created
 from extensions.ext_database import db
 from libs.helper import email, extract_remote_ip
Console wraps

@@ -11,8 +11,7 @@ from models.model import DifySetup
 from services.feature_service import FeatureService, LicenseStatus
 from services.operation_service import OperationService
 
-from .error import (NotInitValidateError, NotSetupError,
-                    UnauthorizedAndForceLogout)
+from .error import NotInitValidateError, NotSetupError, UnauthorizedAndForceLogout
 
 
 def account_initialization_required(view):
CotAgentRunner

@@ -104,7 +104,6 @@ class CotAgentRunner(BaseAgentRunner, ABC):
 
             # recalc llm max tokens
             prompt_messages = self._organize_prompt_messages()
-            self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks = model_instance.invoke_llm(
                 prompt_messages=prompt_messages,
FunctionCallAgentRunner

@@ -84,7 +84,6 @@ class FunctionCallAgentRunner(BaseAgentRunner):
 
             # recalc llm max tokens
             prompt_messages = self._organize_prompt_messages()
-            self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
                 prompt_messages=prompt_messages,
AgentChatAppRunner

@@ -55,20 +55,6 @@ class AgentChatAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
 
-        # Pre-calculate the number of tokens of the prompt messages,
-        # and return the rest number of tokens by model context token size limit and max token size limit.
-        # If the rest number of tokens is not enough, raise exception.
-        # Include: prompt template, inputs, query(optional), files(optional)
-        # Not Include: memory, external data, dataset context
-        self.get_pre_calculate_rest_tokens(
-            app_record=app_record,
-            model_config=application_generate_entity.model_conf,
-            prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
-        )
-
         memory = None
         if application_generate_entity.conversation_id:
             # get memory of conversation (read-only)
AppRunner (base)

@@ -15,10 +15,8 @@ from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature
 from core.app.features.hosting_moderation.hosting_moderation import HostingModerationFeature
 from core.external_data_tool.external_data_fetch import ExternalDataFetch
 from core.memory.token_buffer_memory import TokenBufferMemory
-from core.model_manager import ModelInstance
 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
 from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
-from core.model_runtime.entities.model_entities import ModelPropertyKey
 from core.model_runtime.errors.invoke import InvokeBadRequestError
 from core.moderation.input_moderation import InputModeration
 from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
@@ -31,106 +29,6 @@ if TYPE_CHECKING:
 
 
 class AppRunner:
-    def get_pre_calculate_rest_tokens(
-        self,
-        app_record: App,
-        model_config: ModelConfigWithCredentialsEntity,
-        prompt_template_entity: PromptTemplateEntity,
-        inputs: Mapping[str, str],
-        files: Sequence["File"],
-        query: Optional[str] = None,
-    ) -> int:
-        """
-        Get pre calculate rest tokens
-        :param app_record: app record
-        :param model_config: model config entity
-        :param prompt_template_entity: prompt template entity
-        :param inputs: inputs
-        :param files: files
-        :param query: query
-        :return:
-        """
-        # Invoke model
-        model_instance = ModelInstance(
-            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
-        )
-
-        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
-
-        max_tokens = 0
-        for parameter_rule in model_config.model_schema.parameter_rules:
-            if parameter_rule.name == "max_tokens" or (
-                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
-            ):
-                max_tokens = (
-                    model_config.parameters.get(parameter_rule.name)
-                    or model_config.parameters.get(parameter_rule.use_template or "")
-                ) or 0
-
-        if model_context_tokens is None:
-            return -1
-
-        if max_tokens is None:
-            max_tokens = 0
-
-        # get prompt messages without memory and context
-        prompt_messages, stop = self.organize_prompt_messages(
-            app_record=app_record,
-            model_config=model_config,
-            prompt_template_entity=prompt_template_entity,
-            inputs=inputs,
-            files=files,
-            query=query,
-        )
-
-        prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
-
-        rest_tokens: int = model_context_tokens - max_tokens - prompt_tokens
-        if rest_tokens < 0:
-            raise InvokeBadRequestError(
-                "Query or prefix prompt is too long, you can reduce the prefix prompt, "
-                "or shrink the max token, or switch to a llm with a larger token limit size."
-            )
-
-        return rest_tokens
-
-    def recalc_llm_max_tokens(
-        self, model_config: ModelConfigWithCredentialsEntity, prompt_messages: list[PromptMessage]
-    ):
-        # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
-        model_instance = ModelInstance(
-            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
-        )
-
-        model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
-
-        max_tokens = 0
-        for parameter_rule in model_config.model_schema.parameter_rules:
-            if parameter_rule.name == "max_tokens" or (
-                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
-            ):
-                max_tokens = (
-                    model_config.parameters.get(parameter_rule.name)
-                    or model_config.parameters.get(parameter_rule.use_template or "")
-                ) or 0
-
-        if model_context_tokens is None:
-            return -1
-
-        if max_tokens is None:
-            max_tokens = 0
-
-        prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
-
-        if prompt_tokens + max_tokens > model_context_tokens:
-            max_tokens = max(model_context_tokens - prompt_tokens, 16)
-
-            for parameter_rule in model_config.model_schema.parameter_rules:
-                if parameter_rule.name == "max_tokens" or (
-                    parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
-                ):
-                    model_config.parameters[parameter_rule.name] = max_tokens
-
     def organize_prompt_messages(
         self,
         app_record: App,
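The deleted recalc_llm_max_tokens boiled down to one clamp: if the prompt plus the requested completion budget exceeded the model's context window, the completion budget was shrunk, with a floor of 16 tokens. A distilled sketch of that arithmetic (plain ints stand in for the model-config objects):

    def clamp_max_tokens(context_size: int, prompt_tokens: int, max_tokens: int) -> int:
        # shrink the completion budget when prompt + budget overflows the window
        if prompt_tokens + max_tokens > context_size:
            return max(context_size - prompt_tokens, 16)
        return max_tokens

    assert clamp_max_tokens(8192, 8000, 1024) == 192
    assert clamp_max_tokens(8192, 8190, 1024) == 16
    assert clamp_max_tokens(8192, 1000, 1024) == 1024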
ChatAppRunner

@@ -50,20 +50,6 @@ class ChatAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
 
-        # Pre-calculate the number of tokens of the prompt messages,
-        # and return the rest number of tokens by model context token size limit and max token size limit.
-        # If the rest number of tokens is not enough, raise exception.
-        # Include: prompt template, inputs, query(optional), files(optional)
-        # Not Include: memory, external data, dataset context
-        self.get_pre_calculate_rest_tokens(
-            app_record=app_record,
-            model_config=application_generate_entity.model_conf,
-            prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
-        )
-
         memory = None
         if application_generate_entity.conversation_id:
             # get memory of conversation (read-only)
@@ -194,9 +180,6 @@ class ChatAppRunner(AppRunner):
         if hosting_moderation_result:
             return
 
-        # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
-
         # Invoke model
         model_instance = ModelInstance(
             provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
CompletionAppRunner

@@ -43,20 +43,6 @@ class CompletionAppRunner(AppRunner):
         query = application_generate_entity.query
         files = application_generate_entity.files
 
-        # Pre-calculate the number of tokens of the prompt messages,
-        # and return the rest number of tokens by model context token size limit and max token size limit.
-        # If the rest number of tokens is not enough, raise exception.
-        # Include: prompt template, inputs, query(optional), files(optional)
-        # Not Include: memory, external data, dataset context
-        self.get_pre_calculate_rest_tokens(
-            app_record=app_record,
-            model_config=application_generate_entity.model_conf,
-            prompt_template_entity=app_config.prompt_template,
-            inputs=inputs,
-            files=files,
-            query=query,
-        )
-
         # organize all inputs and template to prompt messages
         # Include: prompt template, inputs, query(optional), files(optional)
         prompt_messages, stop = self.organize_prompt_messages(
@@ -152,9 +138,6 @@ class CompletionAppRunner(AppRunner):
         if hosting_moderation_result:
             return
 
-        # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)
-
         # Invoke model
         model_instance = ModelInstance(
             provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
TokenBufferMemory

@@ -26,7 +26,7 @@ class TokenBufferMemory:
         self.model_instance = model_instance
 
     def get_history_prompt_messages(
-        self, max_token_limit: int = 2000, message_limit: Optional[int] = None
+        self, max_token_limit: int = 100000, message_limit: Optional[int] = None
     ) -> Sequence[PromptMessage]:
         """
         Get history prompt messages.
Bedrock model config: Claude 3.7 Sonnet (new file, +115)

model: us.anthropic.claude-3-7-sonnet-20250219-v1:0
label:
  en_US: Claude 3.7 Sonnet(US.Cross Region Inference)
icon: icon_s_en.svg
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
  - name: enable_cache
    label:
      zh_Hans: 启用提示缓存
      en_US: Enable Prompt Cache
    type: boolean
    required: false
    default: true
    help:
      zh_Hans: 启用提示缓存可以提高性能并降低成本。Claude 3.7 Sonnet支持在system、messages和tools字段中使用缓存检查点。
      en_US: Enable prompt caching to improve performance and reduce costs. Claude 3.7 Sonnet supports cache checkpoints in system, messages, and tools fields.
  - name: reasoning_type
    label:
      zh_Hans: 推理配置
      en_US: Reasoning Type
    type: boolean
    required: false
    default: false
    placeholder:
      zh_Hans: 设置推理配置
      en_US: Set reasoning configuration
    help:
      zh_Hans: 控制模型的推理能力。启用时,temperature将固定为1且top_p将被禁用。
      en_US: Controls the model's reasoning capability. When enabled, temperature will be fixed to 1 and top_p will be disabled.
  - name: reasoning_budget
    show_on:
      - variable: reasoning_type
        value: true
    label:
      zh_Hans: 推理预算
      en_US: Reasoning Budget
    type: int
    default: 1024
    min: 0
    max: 128000
    help:
      zh_Hans: 推理的预算限制(最小1024),必须小于max_tokens。仅在推理类型为enabled时可用。
      en_US: Budget limit for reasoning (minimum 1024), must be less than max_tokens. Only available when reasoning type is enabled.

  - name: max_tokens
    use_template: max_tokens
    required: true
    label:
      zh_Hans: 最大token数
      en_US: Max Tokens
    type: int
    default: 8192
    min: 1
    max: 128000
    help:
      zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
      en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
  - name: temperature
    use_template: temperature
    required: false
    label:
      zh_Hans: 模型温度
      en_US: Model Temperature
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。当推理功能启用时,该值将被固定为1。
      en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
  - name: top_p
    show_on:
      - variable: reasoning_type
        value: disabled
    use_template: top_p
    label:
      zh_Hans: Top P
      en_US: Top P
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
      en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    required: false
    type: int
    default: 0
    min: 0
    # tip docs from aws has error, max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.015'
  unit: '0.001'
  currency: USD
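The show_on entries gate a parameter's visibility on another parameter's value, so reasoning_budget only appears when reasoning_type is enabled. A hypothetical evaluator illustrating the declared semantics (not Dify's actual implementation):

    def is_visible(rule: dict, settings: dict) -> bool:
        # a rule is shown only when every show_on condition matches the current settings
        return all(settings.get(cond["variable"]) == cond["value"] for cond in rule.get("show_on", []))

    rule = {"name": "reasoning_budget", "show_on": [{"variable": "reasoning_type", "value": True}]}
    assert is_visible(rule, {"reasoning_type": True})
    assert not is_visible(rule, {"reasoning_type": False})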
BedrockLargeLanguageModel

@@ -58,6 +58,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
     # TODO There is invoke issue: context limit on Cohere Model, will add them after fixed.
     CONVERSE_API_ENABLED_MODEL_INFO = [
         {"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "us.deepseek", "support_system_prompts": True, "support_tool_use": False},
         {"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False},
         {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
         {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
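CONVERSE_API_ENABLED_MODEL_INFO entries are matched by model-ID prefix, so the new us.deepseek entry routes DeepSeek cross-region model IDs through the Converse API without tool use. A small sketch of that prefix convention (the lookup helper is hypothetical; the entries are copied from the diff):

    MODEL_INFO = [
        {"prefix": "us.deepseek", "support_system_prompts": True, "support_tool_use": False},
        {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True},
    ]

    def converse_info(model_id: str) -> dict | None:
        # first entry whose prefix matches the Bedrock model ID wins
        return next((info for info in MODEL_INFO if model_id.startswith(info["prefix"])), None)

    assert converse_info("us.deepseek.r1-v1:0")["support_tool_use"] is False
    assert converse_info("us.anthropic.claude-3-7-sonnet-20250219-v1:0")["support_tool_use"] is True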
Bedrock model config: DeepSeek-R1 (new file, +63)

model: us.deepseek.r1-v1:0
label:
  en_US: DeepSeek-R1(US.Cross Region Inference)
icon: icon_s_en.svg
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 32768
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    required: true
    label:
      zh_Hans: 最大token数
      en_US: Max Tokens
    type: int
    default: 8192
    min: 1
    max: 128000
    help:
      zh_Hans: 停止前生成的最大令牌数。
      en_US: The maximum number of tokens to generate before stopping.
  - name: temperature
    use_template: temperature
    required: false
    label:
      zh_Hans: 模型温度
      en_US: Model Temperature
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。当推理功能启用时,该值将被固定为1。
      en_US: The amount of randomness injected into the response. When reasoning is enabled, this value will be fixed to 1.
  - name: top_p
    show_on:
      - variable: reasoning_type
        value: disabled
    use_template: top_p
    label:
      zh_Hans: Top P
      en_US: Top P
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中的概率阈值。当推理功能启用时,该参数将被禁用。
      en_US: The probability threshold in nucleus sampling. When reasoning is enabled, this parameter will be disabled.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.001'
  output: '0.005'
  unit: '0.001'
  currency: USD
OpenAI model position list

@@ -1,3 +1,4 @@
+- gpt-4.1
 - o1
 - o1-2024-12-17
 - o1-mini
OpenAI model config: gpt-4.1 (new file, +60)

model: gpt-4.1
label:
  zh_Hans: gpt-4.1
  en_US: gpt-4.1
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 1047576
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 32768
  - name: reasoning_effort
    label:
      zh_Hans: 推理工作
      en_US: Reasoning Effort
    type: string
    help:
      zh_Hans: 限制推理模型的推理工作
      en_US: Constrains effort on reasoning for reasoning models
    required: false
    options:
      - low
      - medium
      - high
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
      - json_schema
  - name: json_schema
    use_template: json_schema
pricing:
  input: '2.00'
  output: '8.00'
  unit: '0.000001'
  currency: USD
OpenAILargeLanguageModel

@@ -1049,6 +1049,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
        """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
 
        Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
+        if not messages and not tools:
+            return 0
+
        if model.startswith("ft:"):
            model = model.split(":")[1]
 
@@ -1057,18 +1060,18 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
            model = "gpt-4o"
 
        try:
-            encoding = tiktoken.encoding_for_model(model)
-        except KeyError:
-            logger.warning("Warning: model not found. Using cl100k_base encoding.")
-            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
+        except (KeyError, ValueError) as e:
+            logger.warning("Warning: model not found. Using cl100k_base encoding.")
+            encoding_name = "cl100k_base"
+            encoding = tiktoken.get_encoding(encoding_name)
 
        if model.startswith("gpt-3.5-turbo-0301"):
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
-        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith(("o1", "o3")):
+        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith(("o1", "o3", "o4")):
            tokens_per_message = 3
            tokens_per_name = 1
        else:
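The reworked lookup passes the model string straight to tiktoken.get_encoding and falls back to cl100k_base; get_encoding raises ValueError for names it does not know, which is why ValueError joins KeyError in the except clause. A standalone sketch of the fallback (the helper name is hypothetical):

    import logging

    import tiktoken

    logger = logging.getLogger(__name__)

    def encoding_for(model: str) -> tiktoken.Encoding:
        try:
            return tiktoken.get_encoding(model)
        except (KeyError, ValueError):
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            return tiktoken.get_encoding("cl100k_base")

    enc = encoding_for("gpt-4.1")  # not an encoding name, so this falls back to cl100k_base
    print(len(enc.encode("hello world")))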
Web reader tool

@@ -1,21 +1,13 @@
-import hashlib
-import json
 import mimetypes
-import os
 import re
-import site
-import subprocess
-import tempfile
-import unicodedata
-from contextlib import contextmanager
-from pathlib import Path
-from typing import Any, Literal, Optional, cast
+from collections.abc import Sequence
+from dataclasses import dataclass
+from typing import Any, Optional, cast
 from urllib.parse import unquote
 
 import chardet
 import cloudscraper  # type: ignore
-from bs4 import BeautifulSoup, CData, Comment, NavigableString  # type: ignore
-from regex import regex  # type: ignore
+from readabilipy import simple_json_from_html_string  # type: ignore
 
 from core.helper import ssrf_proxy
 from core.rag.extractor import extract_processor
@@ -23,9 +15,7 @@ from core.rag.extractor.extract_processor import ExtractProcessor
 
 FULL_TEMPLATE = """
 TITLE: {title}
-AUTHORS: {authors}
-PUBLISH DATE: {publish_date}
-TOP_IMAGE_URL: {top_image}
+AUTHOR: {author}
 TEXT:
 
 {text}
@@ -73,8 +63,8 @@ def get_url(url: str, user_agent: Optional[str] = None) -> str:
         response = ssrf_proxy.get(url, headers=headers, follow_redirects=True, timeout=(120, 300))
     elif response.status_code == 403:
         scraper = cloudscraper.create_scraper()
-        scraper.perform_request = ssrf_proxy.make_request
-        response = scraper.get(url, headers=headers, follow_redirects=True, timeout=(120, 300))
+        scraper.perform_request = ssrf_proxy.make_request  # type: ignore
+        response = scraper.get(url, headers=headers, follow_redirects=True, timeout=(120, 300))  # type: ignore
 
     if response.status_code != 200:
         return "URL returned status code {}.".format(response.status_code)
@@ -90,273 +80,36 @@ def get_url(url: str, user_agent: Optional[str] = None) -> str:
     else:
         content = response.text
 
-    a = extract_using_readabilipy(content)
+    article = extract_using_readabilipy(content)
 
-    if not a["plain_text"] or not a["plain_text"].strip():
+    if not article.text:
         return ""
 
     res = FULL_TEMPLATE.format(
-        title=a["title"],
-        authors=a["byline"],
-        publish_date=a["date"],
-        top_image="",
-        text=a["plain_text"] or "",
+        title=article.title,
+        author=article.auther,
+        text=article.text,
     )
 
     return res
 
 
+@dataclass
+class Article:
+    title: str
+    auther: str
+    text: Sequence[dict]
+
+
+def extract_using_readabilipy(html: str):
+    json_article: dict[str, Any] = simple_json_from_html_string(html, use_readability=True)
+    article = Article(
+        title=json_article.get("title") or "",
+        auther=json_article.get("byline") or "",
+        text=json_article.get("plain_text") or [],
+    )
+
+    return article
+
+
-def extract_using_readabilipy(html):
-    with tempfile.NamedTemporaryFile(delete=False, mode="w+") as f_html:
-        f_html.write(html)
-        f_html.close()
-    html_path = f_html.name
-
-    # Call Mozilla's Readability.js Readability.parse() function via node, writing output to a temporary file
-    article_json_path = html_path + ".json"
-    jsdir = os.path.join(find_module_path("readabilipy"), "javascript")
-    with chdir(jsdir):
-        subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path])
-
-    # Read output of call to Readability.parse() from JSON file and return as Python dictionary
-    input_json = json.loads(Path(article_json_path).read_text(encoding="utf-8"))
-
-    # Deleting files after processing
-    os.unlink(article_json_path)
-    os.unlink(html_path)
-
-    article_json: dict[str, Any] = {
-        "title": None,
-        "byline": None,
-        "date": None,
-        "content": None,
-        "plain_content": None,
-        "plain_text": None,
-    }
-    # Populate article fields from readability fields where present
-    if input_json:
-        if input_json.get("title"):
-            article_json["title"] = input_json["title"]
-        if input_json.get("byline"):
-            article_json["byline"] = input_json["byline"]
-        if input_json.get("date"):
-            article_json["date"] = input_json["date"]
-        if input_json.get("content"):
-            article_json["content"] = input_json["content"]
-            article_json["plain_content"] = plain_content(article_json["content"], False, False)
-            article_json["plain_text"] = extract_text_blocks_as_plain_text(article_json["plain_content"])
-        if input_json.get("textContent"):
-            article_json["plain_text"] = input_json["textContent"]
-            article_json["plain_text"] = re.sub(r"\n\s*\n", "\n", article_json["plain_text"])
-
-    return article_json
-
-
-def find_module_path(module_name):
-    for package_path in site.getsitepackages():
-        potential_path = os.path.join(package_path, module_name)
-        if os.path.exists(potential_path):
-            return potential_path
-
-    return None
-
-
-@contextmanager
-def chdir(path):
-    """Change directory in context and return to original on exit"""
-    # From https://stackoverflow.com/a/37996581, couldn't find a built-in
-    original_path = os.getcwd()
-    os.chdir(path)
-    try:
-        yield
-    finally:
-        os.chdir(original_path)
-
-
-def extract_text_blocks_as_plain_text(paragraph_html):
-    # Load article as DOM
-    soup = BeautifulSoup(paragraph_html, "html.parser")
-    # Select all lists
-    list_elements = soup.find_all(["ul", "ol"])
-    # Prefix text in all list items with "* " and make lists paragraphs
-    for list_element in list_elements:
-        plain_items = "".join(
-            list(filter(None, [plain_text_leaf_node(li)["text"] for li in list_element.find_all("li")]))
-        )
-        list_element.string = plain_items
-        list_element.name = "p"
-    # Select all text blocks
-    text_blocks = [s.parent for s in soup.find_all(string=True)]
-    text_blocks = [plain_text_leaf_node(block) for block in text_blocks]
-    # Drop empty paragraphs
-    text_blocks = list(filter(lambda p: p["text"] is not None, text_blocks))
-    return text_blocks
-
-
-def plain_text_leaf_node(element):
-    # Extract all text, stripped of any child HTML elements and normalize it
-    plain_text = normalize_text(element.get_text())
-    if plain_text != "" and element.name == "li":
-        plain_text = "* {}, ".format(plain_text)
-    if plain_text == "":
-        plain_text = None
-    if "data-node-index" in element.attrs:
-        plain = {"node_index": element["data-node-index"], "text": plain_text}
-    else:
-        plain = {"text": plain_text}
-    return plain
-
-
-def plain_content(readability_content, content_digests, node_indexes):
-    # Load article as DOM
-    soup = BeautifulSoup(readability_content, "html.parser")
-    # Make all elements plain
-    elements = plain_elements(soup.contents, content_digests, node_indexes)
-    if node_indexes:
-        # Add node index attributes to nodes
-        elements = [add_node_indexes(element) for element in elements]
-    # Replace article contents with plain elements
-    soup.contents = elements
-    return str(soup)
-
-
-def plain_elements(elements, content_digests, node_indexes):
-    # Get plain content versions of all elements
-    elements = [plain_element(element, content_digests, node_indexes) for element in elements]
-    if content_digests:
-        # Add content digest attribute to nodes
-        elements = [add_content_digest(element) for element in elements]
-    return elements
-
-
-def plain_element(element, content_digests, node_indexes):
-    # For lists, we make each item plain text
-    if is_leaf(element):
-        # For leaf node elements, extract the text content, discarding any HTML tags
-        # 1. Get element contents as text
-        plain_text = element.get_text()
-        # 2. Normalize the extracted text string to a canonical representation
-        plain_text = normalize_text(plain_text)
-        # 3. Update element content to be plain text
-        element.string = plain_text
-    elif is_text(element):
-        if is_non_printing(element):
-            # The simplified HTML may have come from Readability.js so might
-            # have non-printing text (e.g. Comment or CData). In this case, we
-            # keep the structure, but ensure that the string is empty.
-            element = type(element)("")
-        else:
-            plain_text = element.string
-            plain_text = normalize_text(plain_text)
-            element = type(element)(plain_text)
-    else:
-        # If not a leaf node or leaf type call recursively on child nodes, replacing
-        element.contents = plain_elements(element.contents, content_digests, node_indexes)
-    return element
-
-
-def add_node_indexes(element, node_index="0"):
-    # Can't add attributes to string types
-    if is_text(element):
-        return element
-    # Add index to current element
-    element["data-node-index"] = node_index
-    # Add index to child elements
-    for local_idx, child in enumerate([c for c in element.contents if not is_text(c)], start=1):
-        # Can't add attributes to leaf string types
-        child_index = "{stem}.{local}".format(stem=node_index, local=local_idx)
-        add_node_indexes(child, node_index=child_index)
-    return element
-
-
-def normalize_text(text):
-    """Normalize unicode and whitespace."""
-    # Normalize unicode first to try and standardize whitespace characters as much as possible before normalizing them
-    text = strip_control_characters(text)
-    text = normalize_unicode(text)
-    text = normalize_whitespace(text)
-    return text
-
-
-def strip_control_characters(text):
-    """Strip out unicode control characters which might break the parsing."""
-    # Unicode control characters
-    #   [Cc]: Other, Control [includes new lines]
-    #   [Cf]: Other, Format
-    #   [Cn]: Other, Not Assigned
-    #   [Co]: Other, Private Use
-    #   [Cs]: Other, Surrogate
-    control_chars = {"Cc", "Cf", "Cn", "Co", "Cs"}
-    retained_chars = ["\t", "\n", "\r", "\f"]
-
-    # Remove non-printing control characters
-    return "".join(
-        [
-            "" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char
-            for char in text
-        ]
-    )
-
-
-def normalize_unicode(text):
-    """Normalize unicode such that things that are visually equivalent map to the same unicode string where possible."""
-    normal_form: Literal["NFC", "NFD", "NFKC", "NFKD"] = "NFKC"
-    text = unicodedata.normalize(normal_form, text)
-    return text
-
-
-def normalize_whitespace(text):
-    """Replace runs of whitespace characters with a single space as this is what happens when HTML text is displayed."""
-    text = regex.sub(r"\s+", " ", text)
-    # Remove leading and trailing whitespace
-    text = text.strip()
-    return text
-
-
-def is_leaf(element):
-    return element.name in {"p", "li"}
-
-
-def is_text(element):
-    return isinstance(element, NavigableString)
-
-
-def is_non_printing(element):
-    return any(isinstance(element, _e) for _e in [Comment, CData])
-
-
-def add_content_digest(element):
-    if not is_text(element):
-        element["data-content-digest"] = content_digest(element)
-    return element
-
-
-def content_digest(element):
-    digest: Any
-    if is_text(element):
-        # Hash
-        trimmed_string = element.string.strip()
-        if trimmed_string == "":
-            digest = ""
-        else:
-            digest = hashlib.sha256(trimmed_string.encode("utf-8")).hexdigest()
-    else:
-        contents = element.contents
-        num_contents = len(contents)
-        if num_contents == 0:
-            # No hash when no child elements exist
-            digest = ""
-        elif num_contents == 1:
-            # If single child, use digest of child
-            digest = content_digest(contents[0])
-        else:
-            # Build content digest from the "non-empty" digests of child nodes
-            digest = hashlib.sha256()
-            child_digests = list(filter(lambda x: x != "", [content_digest(content) for content in contents]))
-            for child in child_digests:
-                digest.update(child.encode("utf-8"))
-            digest = digest.hexdigest()
-    return digest
 
 
 def get_image_upload_file_ids(content):
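The rewrite drops the Node.js subprocess pipeline and delegates extraction to readabilipy's Python API, which returns a dict with title, byline, and plain_text keys. A minimal usage sketch (with use_readability=False, readabilipy's pure-Python parser is used, so no Node installation is needed):

    from readabilipy import simple_json_from_html_string

    html = "<html><body><h1>Hello</h1><p>World</p></body></html>"
    result = simple_json_from_html_string(html, use_readability=False)
    print(result.get("title"), result.get("byline"))
    print(result.get("plain_text"))  # a list of {"text": ...} blocks, matching the Article dataclass above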
CodeNode

@@ -195,7 +195,7 @@ class CodeNode(BaseNode[CodeNodeData]):
             if output_config.type == "object":
                 # check if output is object
                 if not isinstance(result.get(output_name), dict):
-                    if isinstance(result.get(output_name), type(None)):
+                    if result.get(output_name) is None:
                         transformed_result[output_name] = None
                     else:
                         raise OutputValidationError(
@@ -223,7 +223,7 @@ class CodeNode(BaseNode[CodeNodeData]):
             elif output_config.type == "array[number]":
                 # check if array of number available
                 if not isinstance(result[output_name], list):
-                    if isinstance(result[output_name], type(None)):
+                    if result[output_name] is None:
                         transformed_result[output_name] = None
                     else:
                         raise OutputValidationError(
@@ -244,7 +244,7 @@ class CodeNode(BaseNode[CodeNodeData]):
             elif output_config.type == "array[string]":
                 # check if array of string available
                 if not isinstance(result[output_name], list):
-                    if isinstance(result[output_name], type(None)):
+                    if result[output_name] is None:
                         transformed_result[output_name] = None
                     else:
                         raise OutputValidationError(
@@ -265,7 +265,7 @@ class CodeNode(BaseNode[CodeNodeData]):
             elif output_config.type == "array[object]":
                 # check if array of object available
                 if not isinstance(result[output_name], list):
-                    if isinstance(result[output_name], type(None)):
+                    if result[output_name] is None:
                         transformed_result[output_name] = None
                     else:
                         raise OutputValidationError(
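isinstance(x, type(None)) and x is None are equivalent tests, since None is the only instance of NoneType; the identity check is simply the idiomatic spelling. A quick demonstration:

    def is_none_old(value) -> bool:
        return isinstance(value, type(None))

    def is_none_new(value) -> bool:
        return value is None

    for value in (None, 0, "", [], {"a": 1}):
        assert is_none_old(value) == is_none_new(value)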
Workflow LLM node (_handle_memory_chat_mode)

@@ -968,14 +968,12 @@ def _handle_memory_chat_mode(
     *,
     memory: TokenBufferMemory | None,
     memory_config: MemoryConfig | None,
-    model_config: ModelConfigWithCredentialsEntity,
+    model_config: ModelConfigWithCredentialsEntity,  # TODO(-LAN-): Needs to remove
 ) -> Sequence[PromptMessage]:
     memory_messages: Sequence[PromptMessage] = []
     # Get messages from memory for chat model
     if memory and memory_config:
-        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
         memory_messages = memory.get_history_prompt_messages(
-            max_token_limit=rest_tokens,
             message_limit=memory_config.window.size if memory_config.window.enabled else None,
         )
     return memory_messages
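With the rest-token precalculation gone, history trimming falls back to get_history_prompt_messages' own default max_token_limit, which the TokenBufferMemory change earlier in this set raises from 2000 to 100000. A stub sketch of the resulting call shape (StubMemory is a stand-in, not the real class):

    from typing import Optional, Sequence

    class StubMemory:
        # mirrors the new signature: max_token_limit now defaults to 100000
        def get_history_prompt_messages(
            self, max_token_limit: int = 100000, message_limit: Optional[int] = None
        ) -> Sequence[str]:
            history = ["turn-1", "turn-2", "turn-3"]
            return history[-message_limit:] if message_limit else history

    memory = StubMemory()
    # window disabled -> message_limit=None, so only the token default bounds the history
    assert len(memory.get_history_prompt_messages(message_limit=None)) == 3
    assert len(memory.get_history_prompt_messages(message_limit=2)) == 2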
Docker entrypoint script

@@ -35,6 +35,7 @@ else
       --worker-class ${SERVER_WORKER_CLASS:-gevent} \
       --worker-connections ${SERVER_WORKER_CONNECTIONS:-10} \
       --timeout ${GUNICORN_TIMEOUT:-200} \
+      --keep-alive ${GUNICORN_KEEP_ALIVE:-2} \
       app:app
   fi
 fi
api/poetry.lock (generated; 6289 lines changed, diff suppressed because it is too large)
pyproject.toml

@@ -21,7 +21,7 @@ azure-ai-inference = "~1.0.0b8"
 azure-ai-ml = "~1.20.0"
 azure-identity = "1.16.1"
 beautifulsoup4 = "4.12.2"
-boto3 = "1.36.12"
+boto3 = "~1.35.0"
 bs4 = "~0.0.1"
 cachetools = "~5.3.0"
 celery = "~5.4.0"
@@ -48,7 +48,7 @@ google-generativeai = "0.8.1"
 googleapis-common-protos = "1.63.0"
 gunicorn = "~23.0.0"
 httpx = { version = "~0.27.0", extras = ["socks"] }
-huggingface-hub = "~0.16.4"
+huggingface-hub = "~0.31.0"
 jieba = "0.42.1"
 langfuse = "~2.51.3"
 langsmith = "~0.1.77"
@@ -78,18 +78,18 @@ pyyaml = "~6.0.1"
 readabilipy = "0.2.0"
 redis = { version = "~5.0.3", extras = ["hiredis"] }
 replicate = "~0.22.0"
-resend = "~0.7.0"
+resend = "~2.9.0"
 sagemaker = "~2.231.0"
 scikit-learn = "~1.5.1"
 sentry-sdk = { version = "~1.44.1", extras = ["flask"] }
 sqlalchemy = "~2.0.29"
 starlette = "0.41.0"
 tencentcloud-sdk-python-hunyuan = "~3.0.1294"
-tiktoken = "~0.8.0"
+tiktoken = "^0.9.0"
 tokenizers = "~0.15.0"
-transformers = "~4.35.0"
+transformers = "~4.39.0"
 unstructured = { version = "~0.16.1", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] }
-validators = "0.21.0"
+validators = "0.22.0"
 volcengine-python-sdk = {extras = ["ark"], version = "~1.0.98"}
 websocket-client = "~1.7.0"
 xinference-client = "0.15.2"
@@ -112,7 +112,7 @@ safetensors = "~0.4.3"
 # [ Tools ] dependency group
 ############################################################
 [tool.poetry.group.tools.dependencies]
-arxiv = "2.1.0"
+arxiv = "2.2.0"
 cloudscraper = "1.2.71"
 duckduckgo-search = "~6.3.0"
 jsonpath-ng = "1.6.1"
@@ -166,7 +166,7 @@ tcvectordb = "1.3.2"
 tidb-vector = "0.0.9"
 upstash-vector = "0.6.0"
 volcengine-compat = "~1.0.156"
-weaviate-client = "~3.21.0"
+weaviate-client = "~3.26.0"
 
 ############################################################
 # [ Dev ] dependency group
App DSL import service (_check_version_compatibility)

@@ -55,13 +55,19 @@ def _check_version_compatibility(imported_version: str) -> ImportStatus:
     except version.InvalidVersion:
         return ImportStatus.FAILED
 
-    # Compare major version and minor version
-    if current_ver.major != imported_ver.major or current_ver.minor != imported_ver.minor:
+    # If imported version is newer than current, always return PENDING
+    if imported_ver > current_ver:
         return ImportStatus.PENDING
 
-    if current_ver.micro != imported_ver.micro:
+    # If imported version is older than current's major, return PENDING
+    if imported_ver.major < current_ver.major:
+        return ImportStatus.PENDING
+
+    # If imported version is older than current's minor, return COMPLETED_WITH_WARNINGS
+    if imported_ver.minor < current_ver.minor:
         return ImportStatus.COMPLETED_WITH_WARNINGS
 
+    # If imported version equals or is older than current's micro, return COMPLETED
     return ImportStatus.COMPLETED
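The new ordering-based checks replace strict component equality: anything newer than the current version is PENDING, an older major is PENDING, an older minor imports with warnings, and everything else imports cleanly. A self-contained sketch of that ladder (enum values assumed from the diff):

    from enum import Enum

    from packaging import version

    class ImportStatus(Enum):
        PENDING = "pending"
        COMPLETED_WITH_WARNINGS = "completed-with-warnings"
        COMPLETED = "completed"

    def check(imported: str, current: str) -> ImportStatus:
        imported_ver, current_ver = version.parse(imported), version.parse(current)
        if imported_ver > current_ver:
            return ImportStatus.PENDING
        if imported_ver.major < current_ver.major:
            return ImportStatus.PENDING
        if imported_ver.minor < current_ver.minor:
            return ImportStatus.COMPLETED_WITH_WARNINGS
        return ImportStatus.COMPLETED

    assert check("0.16.0", "0.15.8") is ImportStatus.PENDING
    assert check("0.14.9", "0.15.8") is ImportStatus.COMPLETED_WITH_WARNINGS
    assert check("0.15.6", "0.15.8") is ImportStatus.COMPLETED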
Docker environment template (.env)

@@ -142,6 +142,9 @@ CELERY_WORKER_CLASS=
 # it is recommended to set it to 360 to support a longer sse connection time.
 GUNICORN_TIMEOUT=360
 
+# The number of seconds to wait for requests on a Keep-Alive connection, default to 2
+GUNICORN_KEEP_ALIVE=2
+
 # The number of Celery workers. The default is 1, and can be set as needed.
 CELERY_WORKER_AMOUNT=
docker-compose.yaml

@@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.15.6
+    image: langgenius/dify-api:0.15.8
     restart: always
     environment:
       # Use the shared environment variables.
@@ -25,7 +25,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.15.6
+    image: langgenius/dify-api:0.15.8
     restart: always
     environment:
       # Use the shared environment variables.
@@ -47,7 +47,7 @@ services:
 
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.15.6
+    image: langgenius/dify-web:0.15.8
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|||||||
@@ -37,6 +37,7 @@ x-shared-env: &shared-api-worker-env
   SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
   CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
   GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
+  GUNICORN_KEEP_ALIVE: ${GUNICORN_KEEP_ALIVE:-2}
   CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
   CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
   CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
@@ -394,7 +395,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.15.6
+    image: langgenius/dify-api:0.15.8
     restart: always
     environment:
       # Use the shared environment variables.
@@ -417,7 +418,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.15.6
+    image: langgenius/dify-api:0.15.8
     restart: always
     environment:
       # Use the shared environment variables.
@@ -439,7 +440,7 @@ services:
 
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.15.6
+    image: langgenius/dify-web:0.15.8
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -186,15 +186,17 @@ const Apps = ({
           <div className='w-[180px] h-8'></div>
         </div>
         <div className='relative flex flex-1 overflow-y-auto'>
-          {!searchKeywords && <div className='w-[200px] h-full p-4'>
-            <Sidebar current={currCategory as AppCategories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
+          {!searchKeywords && <div className='h-full w-[200px] p-4'>
+            <Sidebar current={currCategory as AppCategories} categories={categories} onClick={(category) => { setCurrCategory(category) }} onCreateFromBlank={onCreateFromBlank} />
           </div>}
           <div className='flex-1 h-full overflow-auto shrink-0 grow p-6 pt-2 border-l border-divider-burn'>
             {searchFilteredList && searchFilteredList.length > 0 && <>
               <div className='pt-4 pb-1'>
                 {searchKeywords
                   ? <p className='title-md-semi-bold text-text-tertiary'>{searchFilteredList.length > 1 ? t('app.newApp.foundResults', { count: searchFilteredList.length }) : t('app.newApp.foundResult', { count: searchFilteredList.length })}</p>
-                  : <AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />}
+                  : <div className='flex h-[22px] items-center'>
+                    <AppCategoryLabel category={currCategory as AppCategories} className='title-md-semi-bold text-text-primary' />
+                  </div>}
               </div>
               <div
                 className={cn(

@@ -1,39 +1,29 @@
 'use client'
-import { RiAppsFill, RiChatSmileAiFill, RiExchange2Fill, RiPassPendingFill, RiQuillPenAiFill, RiSpeakAiFill, RiStickyNoteAddLine, RiTerminalBoxFill, RiThumbUpFill } from '@remixicon/react'
+import { RiStickyNoteAddLine, RiThumbUpLine } from '@remixicon/react'
 import { useTranslation } from 'react-i18next'
 import classNames from '@/utils/classnames'
 import Divider from '@/app/components/base/divider'
 
 export enum AppCategories {
   RECOMMENDED = 'Recommended',
-  ASSISTANT = 'Assistant',
-  AGENT = 'Agent',
-  HR = 'HR',
-  PROGRAMMING = 'Programming',
-  WORKFLOW = 'Workflow',
-  WRITING = 'Writing',
 }
 
 type SidebarProps = {
-  current: AppCategories
-  onClick?: (category: AppCategories) => void
+  current: AppCategories | string
+  categories: string[]
+  onClick?: (category: AppCategories | string) => void
   onCreateFromBlank?: () => void
 }
 
-export default function Sidebar({ current, onClick, onCreateFromBlank }: SidebarProps) {
+export default function Sidebar({ current, categories, onClick, onCreateFromBlank }: SidebarProps) {
   const { t } = useTranslation()
-  return <div className="w-full h-full flex flex-col">
-    <ul>
+  return <div className="flex h-full w-full flex-col">
+    <ul className='pt-0.5'>
       <CategoryItem category={AppCategories.RECOMMENDED} active={current === AppCategories.RECOMMENDED} onClick={onClick} />
     </ul>
-    <div className='px-3 pt-2 pb-1 system-xs-medium-uppercase text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
-    <ul className='flex-grow flex flex-col gap-0.5'>
-      <CategoryItem category={AppCategories.ASSISTANT} active={current === AppCategories.ASSISTANT} onClick={onClick} />
-      <CategoryItem category={AppCategories.AGENT} active={current === AppCategories.AGENT} onClick={onClick} />
-      <CategoryItem category={AppCategories.HR} active={current === AppCategories.HR} onClick={onClick} />
-      <CategoryItem category={AppCategories.PROGRAMMING} active={current === AppCategories.PROGRAMMING} onClick={onClick} />
-      <CategoryItem category={AppCategories.WORKFLOW} active={current === AppCategories.WORKFLOW} onClick={onClick} />
-      <CategoryItem category={AppCategories.WRITING} active={current === AppCategories.WRITING} onClick={onClick} />
+    <div className='system-xs-medium-uppercase mb-0.5 mt-3 px-3 pb-1 pt-2 text-text-tertiary'>{t('app.newAppFromTemplate.byCategories')}</div>
+    <ul className='flex grow flex-col gap-0.5'>
+      {categories.map(category => (<CategoryItem key={category} category={category} active={current === category} onClick={onClick} />))}
     </ul>
     <Divider bgStyle='gradient' />
     <div className='px-3 py-1 flex items-center gap-1 text-text-tertiary cursor-pointer' onClick={onCreateFromBlank}>
@@ -45,47 +35,26 @@ export default function Sidebar({ current, onClick, onCreateFromBlank }: Sidebar
 
 type CategoryItemProps = {
   active: boolean
-  category: AppCategories
-  onClick?: (category: AppCategories) => void
+  category: AppCategories | string
+  onClick?: (category: AppCategories | string) => void
 }
 function CategoryItem({ category, active, onClick }: CategoryItemProps) {
   return <li
-    className={classNames('p-1 pl-3 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
+    className={classNames('p-1 pl-3 h-8 rounded-lg flex items-center gap-2 group cursor-pointer hover:bg-state-base-hover [&.active]:bg-state-base-active', active && 'active')}
     onClick={() => { onClick?.(category) }}>
-    <div className='w-5 h-5 inline-flex items-center justify-center rounded-md border border-divider-regular bg-components-icon-bg-midnight-solid group-[.active]:bg-components-icon-bg-blue-solid'>
-      <AppCategoryIcon category={category} />
-    </div>
+    {category === AppCategories.RECOMMENDED && <div className='inline-flex h-5 w-5 items-center justify-center rounded-md'>
+      <RiThumbUpLine className='h-4 w-4 text-components-menu-item-text group-[.active]:text-components-menu-item-text-active' />
+    </div>}
     <AppCategoryLabel category={category}
       className={classNames('system-sm-medium text-components-menu-item-text group-[.active]:text-components-menu-item-text-active group-hover:text-components-menu-item-text-hover', active && 'system-sm-semibold')} />
   </li >
 }
 
 type AppCategoryLabelProps = {
-  category: AppCategories
+  category: AppCategories | string
   className?: string
 }
 export function AppCategoryLabel({ category, className }: AppCategoryLabelProps) {
   const { t } = useTranslation()
-  return <span className={className}>{t(`app.newAppFromTemplate.sidebar.${category}`)}</span>
-}
-
-type AppCategoryIconProps = {
-  category: AppCategories
-}
-function AppCategoryIcon({ category }: AppCategoryIconProps) {
-  if (category === AppCategories.AGENT)
-    return <RiSpeakAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.ASSISTANT)
-    return <RiChatSmileAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.HR)
-    return <RiPassPendingFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.PROGRAMMING)
-    return <RiTerminalBoxFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.RECOMMENDED)
-    return <RiThumbUpFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.WRITING)
-    return <RiQuillPenAiFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  if (category === AppCategories.WORKFLOW)
-    return <RiExchange2Fill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
-  return <RiAppsFill className='w-3.5 h-3.5 text-components-avatar-shape-fill-stop-100' />
+  return <span className={className}>{category === AppCategories.RECOMMENDED ? t('app.newAppFromTemplate.sidebar.Recommended') : category}</span>
 }

web/app/components/app/create-from-dsl-modal/dsl-confirm-modal.tsx (new file)
@@ -0,0 +1,46 @@
+import { useTranslation } from 'react-i18next'
+import Modal from '@/app/components/base/modal'
+import Button from '@/app/components/base/button'
+
+type DSLConfirmModalProps = {
+  versions?: {
+    importedVersion: string
+    systemVersion: string
+  }
+  onCancel: () => void
+  onConfirm: () => void
+  confirmDisabled?: boolean
+}
+const DSLConfirmModal = ({
+  versions = { importedVersion: '', systemVersion: '' },
+  onCancel,
+  onConfirm,
+  confirmDisabled = false,
+}: DSLConfirmModalProps) => {
+  const { t } = useTranslation()
+
+  return (
+    <Modal
+      isShow
+      onClose={() => onCancel()}
+      className='w-[480px]'
+    >
+      <div className='flex flex-col items-start gap-2 self-stretch pb-4'>
+        <div className='title-2xl-semi-bold text-text-primary'>{t('app.newApp.appCreateDSLErrorTitle')}</div>
+        <div className='system-md-regular flex grow flex-col text-text-secondary'>
+          <div>{t('app.newApp.appCreateDSLErrorPart1')}</div>
+          <div>{t('app.newApp.appCreateDSLErrorPart2')}</div>
+          <br />
+          <div>{t('app.newApp.appCreateDSLErrorPart3')}<span className='system-md-medium'>{versions.importedVersion}</span></div>
+          <div>{t('app.newApp.appCreateDSLErrorPart4')}<span className='system-md-medium'>{versions.systemVersion}</span></div>
+        </div>
+      </div>
+      <div className='flex items-start justify-end gap-2 self-stretch pt-6'>
+        <Button variant='secondary' onClick={() => onCancel()}>{t('app.newApp.Cancel')}</Button>
+        <Button variant='primary' destructive onClick={onConfirm} disabled={confirmDisabled}>{t('app.newApp.Confirm')}</Button>
+      </div>
+    </Modal>
+  )
+}
+
+export default DSLConfirmModal

@@ -1,12 +1,10 @@
 'use client'
 
-import React, { useMemo, useState } from 'react'
-import { useRouter } from 'next/navigation'
+import React, { useCallback, useMemo, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import { useContext } from 'use-context-selector'
 import useSWR from 'swr'
 import { useDebounceFn } from 'ahooks'
-import Toast from '../../base/toast'
 import s from './style.module.css'
 import cn from '@/utils/classnames'
 import ExploreContext from '@/context/explore-context'
@@ -14,17 +12,17 @@ import type { App } from '@/models/explore'
 import Category from '@/app/components/explore/category'
 import AppCard from '@/app/components/explore/app-card'
 import { fetchAppDetail, fetchAppList } from '@/service/explore'
-import { importDSL } from '@/service/apps'
 import { useTabSearchParams } from '@/hooks/use-tab-searchparams'
 import CreateAppModal from '@/app/components/explore/create-app-modal'
 import AppTypeSelector from '@/app/components/app/type-selector'
 import type { CreateAppModalProps } from '@/app/components/explore/create-app-modal'
 import Loading from '@/app/components/base/loading'
-import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
-import { useAppContext } from '@/context/app-context'
-import { getRedirection } from '@/utils/app-redirection'
 import Input from '@/app/components/base/input'
-import { DSLImportMode } from '@/models/app'
+import {
+  DSLImportMode,
+} from '@/models/app'
+import { useImportDSL } from '@/hooks/use-import-dsl'
+import DSLConfirmModal from '@/app/components/app/create-from-dsl-modal/dsl-confirm-modal'
 
 type AppsProps = {
   pageType?: PageType
@@ -41,8 +39,6 @@ const Apps = ({
   onSuccess,
 }: AppsProps) => {
   const { t } = useTranslation()
-  const { isCurrentWorkspaceEditor } = useAppContext()
-  const { push } = useRouter()
   const { hasEditPermission } = useContext(ExploreContext)
   const allCategoriesEn = t('explore.apps.allCategories', { lng: 'en' })
 
@@ -117,6 +113,14 @@ const Apps = ({
 
   const [currApp, setCurrApp] = React.useState<App | null>(null)
   const [isShowCreateModal, setIsShowCreateModal] = React.useState(false)
+
+  const {
+    handleImportDSL,
+    handleImportDSLConfirm,
+    versions,
+    isFetching,
+  } = useImportDSL()
+  const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false)
   const onCreate: CreateAppModalProps['onConfirm'] = async ({
     name,
     icon_type,
@@ -127,31 +131,31 @@ const Apps = ({
     const { export_data } = await fetchAppDetail(
       currApp?.app.id as string,
     )
-    try {
-      const app = await importDSL({
-        mode: DSLImportMode.YAML_CONTENT,
-        yaml_content: export_data,
-        name,
-        icon_type,
-        icon,
-        icon_background,
-        description,
-      })
-      setIsShowCreateModal(false)
-      Toast.notify({
-        type: 'success',
-        message: t('app.newApp.appCreated'),
-      })
-      if (onSuccess)
-        onSuccess()
-      localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
-      getRedirection(isCurrentWorkspaceEditor, { id: app.app_id }, push)
-    }
-    catch (e) {
-      Toast.notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
+    const payload = {
+      mode: DSLImportMode.YAML_CONTENT,
+      yaml_content: export_data,
+      name,
+      icon_type,
+      icon,
+      icon_background,
+      description,
     }
+    await handleImportDSL(payload, {
+      onSuccess: () => {
+        setIsShowCreateModal(false)
+      },
+      onPending: () => {
+        setShowDSLConfirmModal(true)
+      },
+    })
   }
 
+  const onConfirmDSL = useCallback(async () => {
+    await handleImportDSLConfirm({
+      onSuccess,
+    })
+  }, [handleImportDSLConfirm, onSuccess])
+
   if (!categories || categories.length === 0) {
     return (
       <div className="flex h-full items-center">
@@ -234,9 +238,20 @@ const Apps = ({
             appDescription={currApp?.app.description || ''}
             show={isShowCreateModal}
             onConfirm={onCreate}
+            confirmDisabled={isFetching}
             onHide={() => setIsShowCreateModal(false)}
           />
         )}
+        {
+          showDSLConfirmModal && (
+            <DSLConfirmModal
+              versions={versions}
+              onCancel={() => setShowDSLConfirmModal(false)}
+              onConfirm={onConfirmDSL}
+              confirmDisabled={isFetching}
+            />
+          )
+        }
       </div>
     )
 }

@@ -33,6 +33,7 @@ export type CreateAppModalProps = {
     description: string
     use_icon_as_answer_icon?: boolean
   }) => Promise<void>
+  confirmDisabled?: boolean
   onHide: () => void
 }
 
@@ -48,6 +49,7 @@ const CreateAppModal = ({
   appMode,
   appUseIconAsAnswerIcon,
   onConfirm,
+  confirmDisabled,
   onHide,
 }: CreateAppModalProps) => {
   const { t } = useTranslation()
@@ -145,7 +147,7 @@ const CreateAppModal = ({
         {!isEditModal && isAppsFull && <AppsFull loc='app-explore-create' />}
       </div>
       <div className='flex flex-row-reverse'>
-        <Button disabled={!isEditModal && isAppsFull} className='w-24 ml-2' variant='primary' onClick={submit}>{!isEditModal ? t('common.operation.create') : t('common.operation.save')}</Button>
+        <Button disabled={(!isEditModal && isAppsFull) || !name.trim() || confirmDisabled} className='w-24 ml-2' variant='primary' onClick={submit}>{!isEditModal ? t('common.operation.create') : t('common.operation.save')}</Button>
         <Button className='w-24' onClick={onHide}>{t('common.operation.cancel')}</Button>
       </div>
     </Modal>

web/hooks/use-import-dsl.ts (new file, 158 lines)
@@ -0,0 +1,158 @@
+import {
+  useCallback,
+  useRef,
+  useState,
+} from 'react'
+import { useTranslation } from 'react-i18next'
+import { useRouter } from 'next/navigation'
+import type {
+  DSLImportMode,
+  DSLImportResponse,
+} from '@/models/app'
+import { DSLImportStatus } from '@/models/app'
+import {
+  importDSL,
+  importDSLConfirm,
+} from '@/service/apps'
+import type { AppIconType } from '@/types/app'
+import { useToastContext } from '@/app/components/base/toast'
+import { getRedirection } from '@/utils/app-redirection'
+import { useSelector } from '@/context/app-context'
+import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
+
+type DSLPayload = {
+  mode: DSLImportMode
+  yaml_content?: string
+  yaml_url?: string
+  name?: string
+  icon_type?: AppIconType
+  icon?: string
+  icon_background?: string
+  description?: string
+}
+type ResponseCallback = {
+  onSuccess?: () => void
+  onPending?: (payload: DSLImportResponse) => void
+  onFailed?: () => void
+}
+export const useImportDSL = () => {
+  const { t } = useTranslation()
+  const { notify } = useToastContext()
+  const [isFetching, setIsFetching] = useState(false)
+  const isCurrentWorkspaceEditor = useSelector(s => s.isCurrentWorkspaceEditor)
+  const { push } = useRouter()
+  const [versions, setVersions] = useState<{ importedVersion: string; systemVersion: string }>()
+  const importIdRef = useRef<string>('')
+
+  const handleImportDSL = useCallback(async (
+    payload: DSLPayload,
+    {
+      onSuccess,
+      onPending,
+      onFailed,
+    }: ResponseCallback,
+  ) => {
+    if (isFetching)
+      return
+    setIsFetching(true)
+
+    try {
+      const response = await importDSL(payload)
+
+      if (!response)
+        return
+
+      const {
+        id,
+        status,
+        app_id,
+        imported_dsl_version,
+        current_dsl_version,
+      } = response
+
+      if (status === DSLImportStatus.COMPLETED || status === DSLImportStatus.COMPLETED_WITH_WARNINGS) {
+        if (!app_id)
+          return
+
+        notify({
+          type: status === DSLImportStatus.COMPLETED ? 'success' : 'warning',
+          message: t(status === DSLImportStatus.COMPLETED ? 'app.newApp.appCreated' : 'app.newApp.caution'),
+          children: status === DSLImportStatus.COMPLETED_WITH_WARNINGS && t('app.newApp.appCreateDSLWarning'),
+        })
+        onSuccess?.()
+        localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
+        getRedirection(isCurrentWorkspaceEditor, { id: app_id }, push)
+      }
+      else if (status === DSLImportStatus.PENDING) {
+        setVersions({
+          importedVersion: imported_dsl_version ?? '',
+          systemVersion: current_dsl_version ?? '',
+        })
+        importIdRef.current = id
+        onPending?.(response)
+      }
+      else {
+        notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
+        onFailed?.()
+      }
+    }
+    catch {
+      notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
+      onFailed?.()
+    }
+    finally {
+      setIsFetching(false)
+    }
+  }, [t, notify, isCurrentWorkspaceEditor, push, isFetching])
+
+  const handleImportDSLConfirm = useCallback(async (
+    {
+      onSuccess,
+      onFailed,
+    }: Pick<ResponseCallback, 'onSuccess' | 'onFailed'>,
+  ) => {
+    if (isFetching)
+      return
+    setIsFetching(true)
+    if (!importIdRef.current)
+      return
+
+    try {
+      const response = await importDSLConfirm({
+        import_id: importIdRef.current,
+      })
+
+      const { status, app_id } = response
+      if (!app_id)
+        return
+
+      if (status === DSLImportStatus.COMPLETED) {
+        onSuccess?.()
+        notify({
+          type: 'success',
+          message: t('app.newApp.appCreated'),
+        })
+        localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
+        getRedirection(isCurrentWorkspaceEditor, { id: app_id! }, push)
+      }
+      else if (status === DSLImportStatus.FAILED) {
+        notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
+        onFailed?.()
+      }
+    }
+    catch {
+      notify({ type: 'error', message: t('app.newApp.appCreateFailed') })
+      onFailed?.()
+    }
+    finally {
+      setIsFetching(false)
+    }
+  }, [t, notify, isCurrentWorkspaceEditor, push, isFetching])
+
+  return {
+    handleImportDSL,
+    handleImportDSLConfirm,
+    versions,
+    isFetching,
+  }
+}
@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.15.6",
+  "version": "0.15.8",
   "private": true,
   "engines": {
     "node": ">=18.17.0"
@@ -161,7 +161,7 @@
     "jest-environment-jsdom": "^29.7.0",
     "lint-staged": "^13.2.2",
     "magicast": "^0.3.4",
-    "postcss": "^8.4.31",
+    "postcss": "^8.4.47",
     "sass": "^1.61.0",
     "storybook": "^8.3.5",
     "tailwindcss": "^3.4.4",
@@ -172,7 +172,10 @@
   "resolutions": {
     "@types/react": "~18.2.0",
     "@types/react-dom": "~18.2.0",
-    "string-width": "4.2.3"
+    "string-width": "4.2.3",
+    "nanoid": "~3.3.8",
+    "esbuild": "~0.25.0",
+    "serialize-javascript": "~6.0.2"
   },
   "lint-staged": {
     "**/*.js?(x)": [

web/yarn.lock (9371 lines): file diff suppressed because it is too large.