Compare commits

...

15 Commits

Author SHA1 Message Date
AkaraChen
5f93e28d75 fix: gen-icon script phantom deps 2024-10-21 10:41:43 +08:00
AkaraChen
6ffedcd8f0 build: switch to pnpm 2024-10-21 10:41:43 +08:00
非法操作
26358e971d fix: draft run workflow node with image will raise error (#9406) 2024-10-21 10:41:43 +08:00
chzphoenix
effd5449bc refactor wenxin rerank (#9486)
Co-authored-by: cuihz <cuihz@knowbox.cn>
2024-10-21 10:41:43 +08:00
kurokobo
a608e23613 fix: ignore all files except for .gitkeep under docker/nginx/ssl by gitignore (#9518) 2024-10-21 10:41:43 +08:00
ice yao
be122ac974 feat: Add custom username and avatar define in discord tool (#9514) 2024-10-21 10:41:43 +08:00
Ziyu Huang
1fe585fcdd Resolve 9508 openai compatible rerank (#9511) 2024-10-21 10:41:43 +08:00
zhuhao
2d034c57da fix: resolve the error of docker-compose startup when the storage is baidu-obs (#9502) 2024-10-21 10:41:43 +08:00
Chenhe Gu
2c102c9b34 refine wording in license (#9505) 2024-10-21 10:41:43 +08:00
Zven
4f3113febb chore: update the description for storage_type (#9492) 2024-10-21 10:41:43 +08:00
ice yao
dfd85c6d11 feat: Add volcengine tos storage test (#9495) 2024-10-21 10:41:43 +08:00
Oliver Lee
3f330c0b80 add yuqye(https://www.yuque.com)tools (#8960)
Co-authored-by: 佐井 <chengwu.lcw@alibaba-inc.com>
2024-10-21 10:41:43 +08:00
Tao Wang
c379448673 Added Llama 3.2 Vision Models Speech2Text Models for Groq (#9479) 2024-10-21 10:41:43 +08:00
zhuhao
05ed049cc8 feat: add yi custom llm intergration (#9482) 2024-10-21 10:41:43 +08:00
zhuhao
37d47983a1 fix: resolve the error with the db-pool-stat endpoint (#9478) 2024-10-21 10:41:43 +08:00
57 changed files with 18659 additions and 14279 deletions

View File

@@ -1,11 +1,12 @@
#!/bin/bash
cd web && npm install
npm add -g pnpm@9.12.2
cd web && pnpm install
pipx install poetry
echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
source /home/vscode/.bashrc
source /home/vscode/.bashrc

View File

@@ -42,7 +42,7 @@ jobs:
- name: Run npm script
if: env.FILES_CHANGED == 'true'
run: npm run auto-gen-i18n
run: pnpm run auto-gen-i18n
- name: Create Pull Request
if: env.FILES_CHANGED == 'true'

.gitignore (vendored, 7 lines changed)
View File

@@ -175,6 +175,8 @@ docker/volumes/pgvector/data/*
docker/volumes/pgvecto_rs/data/*
docker/nginx/conf.d/default.conf
docker/nginx/ssl/*
!docker/nginx/ssl/.gitkeep
docker/middleware.env
sdks/python-client/build
@@ -187,4 +189,7 @@ pyrightconfig.json
api/.vscode
.idea/
.vscode
.vscode
# pnpm
/.pnpm-store

View File

@@ -6,8 +6,9 @@ Dify is licensed under the Apache License 2.0, with the following additional con
a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
- Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend components, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend components.
b. LOGO and copyright information: In the process of using Dify's frontend, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend.
- Frontend Definition: For the purposes of this license, the "frontend" of Dify includes all components located in the `web/` directory when running Dify from the raw source code, or the "web" image when running Dify with Docker.
Please contact business@dify.ai by email to inquire about licensing matters.

View File

@@ -42,7 +42,7 @@ DB_DATABASE=dify
# Storage configuration
# used to store uploaded files, private keys, ...
# storage type: local, s3, azure-blob, google-storage, tencent-cos, huawei-obs, volcengine-tos, baidu-obs, supabase
# storage type: local, s3, aliyun-oss, azure-blob, baidu-obs, google-storage, huawei-obs, oci-storage, tencent-cos, volcengine-tos, supabase
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
S3_USE_AWS_MANAGED_IAM=false

View File

@@ -20,6 +20,7 @@ from app_factory import create_app
# DO NOT REMOVE BELOW
from events import event_handlers # noqa: F401
from extensions.ext_database import db
# TODO: Find a way to avoid importing models here
from models import account, dataset, model, source, task, tool, tools, web # noqa: F401

View File

@@ -35,7 +35,8 @@ from configs.middleware.vdb.weaviate_config import WeaviateConfig
class StorageConfig(BaseSettings):
STORAGE_TYPE: str = Field(
description="Type of storage to use."
" Options: 'local', 's3', 'azure-blob', 'aliyun-oss', 'google-storage'. Default is 'local'.",
" Options: 'local', 's3', 'aliyun-oss', 'azure-blob', 'baidu-obs', 'google-storage', 'huawei-obs', "
"'oci-storage', 'tencent-cos', 'volcengine-tos', 'supabase'. Default is 'local'.",
default="local",
)

View File

@@ -18,6 +18,7 @@ help:
en_US: https://console.groq.com/
supported_model_types:
- llm
- speech2text
configurate_methods:
- predefined-model
provider_credential_schema:

View File

@@ -0,0 +1,26 @@
model: llama-3.2-11b-vision-preview
label:
zh_Hans: Llama 3.2 11B Vision (Preview)
en_US: Llama 3.2 11B Vision (Preview)
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.1'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,26 @@
model: llama-3.2-90b-vision-preview
label:
zh_Hans: Llama 3.2 90B Vision (Preview)
en_US: Llama 3.2 90B Vision (Preview)
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.1'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,5 @@
model: distil-whisper-large-v3-en
model_type: speech2text
model_properties:
file_upload_limit: 1
supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm

View File

@@ -0,0 +1,30 @@
from typing import IO, Optional
from core.model_runtime.model_providers.openai_api_compatible.speech2text.speech2text import OAICompatSpeech2TextModel
class GroqSpeech2TextModel(OAICompatSpeech2TextModel):
"""
Model class for Groq Speech to text model.
"""
def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str:
"""
Invoke speech2text model
:param model: model name
:param credentials: model credentials
:param file: audio file
:param user: unique user id
:return: text for given audio file
"""
self._add_custom_parameters(credentials)
return super()._invoke(model, credentials, file)
def validate_credentials(self, model: str, credentials: dict) -> None:
self._add_custom_parameters(credentials)
return super().validate_credentials(model, credentials)
@classmethod
def _add_custom_parameters(cls, credentials: dict) -> None:
credentials["endpoint_url"] = "https://api.groq.com/openai/v1"

View File

@@ -0,0 +1,5 @@
model: whisper-large-v3-turbo
model_type: speech2text
model_properties:
file_upload_limit: 1
supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm

View File

@@ -0,0 +1,5 @@
model: whisper-large-v3
model_type: speech2text
model_properties:
file_upload_limit: 1
supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm

View File

@@ -8,6 +8,7 @@ supported_model_types:
- llm
- text-embedding
- speech2text
- rerank
configurate_methods:
- customizable-model
model_credential_schema:
@@ -83,6 +84,19 @@ model_credential_schema:
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
show_on:
- variable: __model_type
value: rerank
type: text-input
default: '4096'
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: max_tokens_to_sample
label:
zh_Hans: 最大 token 上限

View File

@@ -0,0 +1,159 @@
from json import dumps
from typing import Optional
import httpx
from requests import post
from yarl import URL
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
InvokeConnectionError,
InvokeError,
InvokeRateLimitError,
InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel
class OAICompatRerankModel(RerankModel):
"""
The rerank API is compatible with the Jina rerank API, so the JinaRerankModel implementation is copied here.
We normalize scores for llama.cpp, which returns raw scores rather than the 0~1 range that Dify expects.
"""
def _invoke(
self,
model: str,
credentials: dict,
query: str,
docs: list[str],
score_threshold: Optional[float] = None,
top_n: Optional[int] = None,
user: Optional[str] = None,
) -> RerankResult:
"""
Invoke rerank model
:param model: model name
:param credentials: model credentials
:param query: search query
:param docs: docs for reranking
:param score_threshold: score threshold
:param top_n: top n documents to return
:param user: unique user id
:return: rerank result
"""
if len(docs) == 0:
return RerankResult(model=model, docs=[])
server_url = credentials["endpoint_url"]
model_name = model
if not server_url:
raise CredentialsValidateFailedError("server_url is required")
if not model_name:
raise CredentialsValidateFailedError("model_name is required")
url = server_url
headers = {"Authorization": f"Bearer {credentials.get('api_key')}", "Content-Type": "application/json"}
# TODO: Do we need to truncate docs to avoid llama.cpp returning an error?
data = {"model": model_name, "query": query, "documents": docs, "top_n": top_n}
try:
response = post(str(URL(url) / "rerank"), headers=headers, data=dumps(data), timeout=60)
response.raise_for_status()
results = response.json()
rerank_documents = []
scores = [result["relevance_score"] for result in results["results"]]
# Min-Max Normalization: Normalize scores to 0 ~ 1.0 range
min_score = min(scores)
max_score = max(scores)
score_range = max_score - min_score if max_score != min_score else 1.0 # Avoid division by zero
for result in results["results"]:
index = result["index"]
# Retrieve document text (fallback if llama.cpp rerank doesn't return it)
text = result.get("document", {}).get("text", docs[index])
# Normalize the score
normalized_score = (result["relevance_score"] - min_score) / score_range
# Create RerankDocument object with normalized score
rerank_document = RerankDocument(
index=index,
text=text,
score=normalized_score,
)
# Apply threshold (if defined)
if score_threshold is None or normalized_score >= score_threshold:
rerank_documents.append(rerank_document)
# Sort rerank_documents by normalized score in descending order
rerank_documents.sort(key=lambda doc: doc.score, reverse=True)
return RerankResult(model=model, docs=rerank_documents)
except httpx.HTTPStatusError as e:
raise InvokeServerUnavailableError(str(e))
def validate_credentials(self, model: str, credentials: dict) -> None:
"""
Validate model credentials
:param model: model name
:param credentials: model credentials
:return:
"""
try:
self._invoke(
model=model,
credentials=credentials,
query="What is the capital of the United States?",
docs=[
"Carson City is the capital city of the American state of Nevada. At the 2010 United States "
"Census, Carson City had a population of 55,274.",
"The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
"are a political division controlled by the United States. Its capital is Saipan.",
],
score_threshold=0.8,
)
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
@property
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
"""
Map model invoke error to unified error
"""
return {
InvokeConnectionError: [httpx.ConnectError],
InvokeServerUnavailableError: [httpx.RemoteProtocolError],
InvokeRateLimitError: [],
InvokeAuthorizationError: [httpx.HTTPStatusError],
InvokeBadRequestError: [httpx.RequestError],
}
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity:
"""
generate custom model entities from credentials
"""
entity = AIModelEntity(
model=model,
label=I18nObject(en_US=model),
model_type=ModelType.RERANK,
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={},
)
return entity
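The new model targets any OpenAI/Jina-compatible /rerank endpoint. A minimal sketch of the request it sends and of the min-max normalization it applies, assuming a hypothetical llama.cpp-style server at http://localhost:8080 and placeholder model name and API key:

```python
# Illustrative only: the rerank call OAICompatRerankModel performs. Server URL,
# model name, and API key are placeholders.
import requests

endpoint_url = "http://localhost:8080"  # credentials["endpoint_url"]
headers = {"Authorization": "Bearer sk-placeholder", "Content-Type": "application/json"}
payload = {
    "model": "bge-reranker",  # hypothetical model name
    "query": "What is the capital of the United States?",
    "documents": [
        "Carson City is the capital city of the American state of Nevada.",
        "Washington, D.C. is the capital of the United States.",
    ],
    "top_n": 2,
}
results = requests.post(f"{endpoint_url}/rerank", headers=headers, json=payload, timeout=60).json()["results"]

# llama.cpp may return raw, unbounded relevance scores, so min-max normalize to 0..1
scores = [r["relevance_score"] for r in results]
score_range = (max(scores) - min(scores)) or 1.0
for r in sorted(results, key=lambda r: r["relevance_score"], reverse=True):
    print(r["index"], round((r["relevance_score"] - min(scores)) / score_range, 3))
```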

View File

@@ -2,20 +2,15 @@ from typing import Optional
import httpx
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType
from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
InvokeConnectionError,
InvokeError,
InvokeRateLimitError,
InvokeServerUnavailableError,
)
from core.model_runtime.errors.invoke import InvokeError
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel
from core.model_runtime.model_providers.wenxin._common import _CommonWenxin
from core.model_runtime.model_providers.wenxin.wenxin_errors import (
InternalServerError,
invoke_error_mapping,
)
class WenxinRerank(_CommonWenxin):
@@ -32,7 +27,7 @@ class WenxinRerank(_CommonWenxin):
response.raise_for_status()
return response.json()
except httpx.HTTPStatusError as e:
raise InvokeServerUnavailableError(str(e))
raise InternalServerError(str(e))
class WenxinRerankModel(RerankModel):
@@ -93,7 +88,7 @@ class WenxinRerankModel(RerankModel):
return RerankResult(model=model, docs=rerank_documents)
except httpx.HTTPStatusError as e:
raise InvokeServerUnavailableError(str(e))
raise InternalServerError(str(e))
def validate_credentials(self, model: str, credentials: dict) -> None:
"""
@@ -124,24 +119,4 @@ class WenxinRerankModel(RerankModel):
"""
Map model invoke error to unified error
"""
return {
InvokeConnectionError: [httpx.ConnectError],
InvokeServerUnavailableError: [httpx.RemoteProtocolError],
InvokeRateLimitError: [],
InvokeAuthorizationError: [httpx.HTTPStatusError],
InvokeBadRequestError: [httpx.RequestError],
}
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity:
"""
generate custom model entities from credentials
"""
entity = AIModelEntity(
model=model,
label=I18nObject(en_US=model),
model_type=ModelType.RERANK,
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
)
return entity
return invoke_error_mapping()

View File

@@ -4,12 +4,22 @@ from urllib.parse import urlparse
import tiktoken
from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.message_entities import (
PromptMessage,
PromptMessageTool,
SystemPromptMessage,
)
from core.model_runtime.entities.model_entities import (
AIModelEntity,
FetchFrom,
ModelFeature,
ModelPropertyKey,
ModelType,
ParameterRule,
ParameterType,
)
from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel
@@ -125,3 +135,58 @@ class YiLargeLanguageModel(OpenAILargeLanguageModel):
else:
parsed_url = urlparse(credentials["endpoint_url"])
credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}"
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
return AIModelEntity(
model=model,
label=I18nObject(en_US=model, zh_Hans=model),
model_type=ModelType.LLM,
features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL]
if credentials.get("function_calling_type") == "tool_call"
else [],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)),
ModelPropertyKey.MODE: LLMMode.CHAT.value,
},
parameter_rules=[
ParameterRule(
name="temperature",
use_template="temperature",
label=I18nObject(en_US="Temperature", zh_Hans="温度"),
type=ParameterType.FLOAT,
),
ParameterRule(
name="max_tokens",
use_template="max_tokens",
default=512,
min=1,
max=int(credentials.get("max_tokens", 8192)),
label=I18nObject(
en_US="Max Tokens", zh_Hans="指定生成结果长度的上限。如果生成结果截断,可以调大该参数"
),
type=ParameterType.INT,
),
ParameterRule(
name="top_p",
use_template="top_p",
label=I18nObject(
en_US="Top P",
zh_Hans="控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。",
),
type=ParameterType.FLOAT,
),
ParameterRule(
name="top_k",
use_template="top_k",
label=I18nObject(en_US="Top K", zh_Hans="取样数量"),
type=ParameterType.FLOAT,
),
ParameterRule(
name="frequency_penalty",
use_template="frequency_penalty",
label=I18nObject(en_US="Frequency Penalty", zh_Hans="重复惩罚"),
type=ParameterType.FLOAT,
),
],
)

View File

@@ -20,6 +20,7 @@ supported_model_types:
- llm
configurate_methods:
- predefined-model
- customizable-model
provider_credential_schema:
credential_form_schemas:
- variable: api_key
@@ -39,3 +40,57 @@ provider_credential_schema:
placeholder:
zh_Hans: Base URL, e.g. https://api.lingyiwanwu.com/v1
en_US: Base URL, e.g. https://api.lingyiwanwu.com/v1
model_credential_schema:
model:
label:
en_US: Model Name
zh_Hans: 模型名称
placeholder:
en_US: Enter your model name
zh_Hans: 输入模型名称
credential_form_schemas:
- variable: api_key
label:
en_US: API Key
type: secret-input
required: true
placeholder:
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
type: text-input
default: '4096'
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: max_tokens
label:
zh_Hans: 最大 token 上限
en_US: Upper bound for max tokens
default: '4096'
type: text-input
show_on:
- variable: __model_type
value: llm
- variable: function_calling_type
label:
en_US: Function calling
type: select
required: false
default: no_call
options:
- value: no_call
label:
en_US: Not Support
zh_Hans: 不支持
- value: function_call
label:
en_US: Support
zh_Hans: 支持
show_on:
- variable: __model_type
value: llm

View File

@@ -61,6 +61,7 @@
- vectorizer
- qrcode
- tianditu
- aliyuque
- google_translate
- hap
- json_process

File diff suppressed because one or more lines are too long

New image file added (Size: 7.1 KiB).

View File

@@ -0,0 +1,19 @@
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class AliYuqueProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict) -> None:
token = credentials.get("token")
if not token:
raise ToolProviderCredentialValidationError("token is required")
try:
resp = AliYuqueTool.auth(token)
if resp and resp.get("data", {}).get("id"):
return
raise ToolProviderCredentialValidationError(resp)
except Exception as e:
raise ToolProviderCredentialValidationError(str(e))

View File

@@ -0,0 +1,29 @@
identity:
author: 佐井
name: aliyuque
label:
en_US: yuque
zh_Hans: 语雀
pt_BR: yuque
description:
en_US: Yuque, https://www.yuque.com.
zh_Hans: 语雀https://www.yuque.com。
pt_BR: Yuque, https://www.yuque.com.
icon: icon.svg
tags:
- productivity
- search
credentials_for_provider:
token:
type: secret-input
required: true
label:
en_US: Yuque Team Token
zh_Hans: 语雀团队Token
placeholder:
en_US: Please input your Yuque team token
zh_Hans: 请输入你的语雀团队Token
help:
en_US: Get Alibaba Yuque team token
zh_Hans: 先获取语雀团队Token
url: https://www.yuque.com/settings/tokens

View File

@@ -0,0 +1,50 @@
"""
Yuque client
"""
__author__ = "佐井"
__created__ = "2024-06-01 09:45:20"
from typing import Any
import requests
class AliYuqueTool:
# yuque service url
server_url = "https://www.yuque.com"
@staticmethod
def auth(token):
session = requests.Session()
session.headers.update({"Accept": "application/json", "X-Auth-Token": token})
login = session.request("GET", AliYuqueTool.server_url + "/api/v2/user")
login.raise_for_status()
resp = login.json()
return resp
def request(self, method: str, token, tool_parameters: dict[str, Any], path: str) -> str:
if not token:
raise Exception("token is required")
session = requests.Session()
session.headers.update({"accept": "application/json", "X-Auth-Token": token})
new_params = {**tool_parameters}
# Find the parameters that need to be substituted into the path
replacements = {k: v for k, v in new_params.items() if f"{{{k}}}" in path}
# Substitute the placeholders in the path
for key, value in replacements.items():
path = path.replace(f"{{{key}}}", str(value))
del new_params[key] # remove the substituted keys from the request params
# Call the API
if method.upper() in {"POST", "PUT"}:
session.headers.update(
{
"Content-Type": "application/json",
}
)
response = session.request(method.upper(), self.server_url + path, json=new_params)
else:
response = session.request(method, self.server_url + path, params=new_params)
response.raise_for_status()
return response.text
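For reference, a minimal sketch of the path templating request() performs: placeholders such as {book_id} are filled from tool_parameters and the remaining keys are sent as query parameters or JSON body. The book ID and fields below are hypothetical:

```python
# Illustrative only: the placeholder substitution AliYuqueTool.request performs
# before calling the Yuque API at https://www.yuque.com.
tool_parameters = {"book_id": 12345, "title": "Weekly report", "body": "Hello"}
path = "/api/v2/repos/{book_id}/docs"

new_params = {**tool_parameters}
replacements = {k: v for k, v in new_params.items() if f"{{{k}}}" in path}
for key, value in replacements.items():
    path = path.replace(f"{{{key}}}", str(value))
    del new_params[key]

print(path)        # /api/v2/repos/12345/docs  (POST target)
print(new_params)  # {'title': 'Weekly report', 'body': 'Hello'}  (sent as JSON body)
```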

View File

@@ -0,0 +1,22 @@
"""
Create a document
"""
__author__ = "佐井"
__created__ = "2024-06-01 10:45:20"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueCreateDocumentTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(self.request("POST", token, tool_parameters, "/api/v2/repos/{book_id}/docs"))

View File

@@ -0,0 +1,99 @@
identity:
name: aliyuque_create_document
author: 佐井
label:
en_US: Create Document
zh_Hans: 创建文档
icon: icon.svg
description:
human:
en_US: Creates a new document within a knowledge base without automatic addition to the table of contents. Requires a subsequent call to the "knowledge base directory update API". Supports setting visibility, format, and content. # English description of the API
zh_Hans: 在知识库中创建新文档,但不会自动加入目录,需额外调用“知识库目录更新接口”。允许设置公开性、格式及正文内容。
llm: Creates docs in a KB.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Knowledge Base ID
zh_Hans: 知识库ID
human_description:
en_US: The unique identifier of the knowledge base where the document will be created.
zh_Hans: 文档将被创建的知识库的唯一标识。
llm_description: ID of the target knowledge base.
- name: title
type: string
required: false
form: llm
label:
en_US: Title
zh_Hans: 标题
human_description:
en_US: The title of the document, defaults to 'Untitled' if not provided.
zh_Hans: 文档标题,默认为'无标题'如未提供。
llm_description: Title of the document, defaults to 'Untitled'.
- name: public
type: select
required: false
form: llm
options:
- value: 0
label:
en_US: Private
zh_Hans: 私密
- value: 1
label:
en_US: Public
zh_Hans: 公开
- value: 2
label:
en_US: Enterprise-only
zh_Hans: 企业内公开
label:
en_US: Visibility
zh_Hans: 公开性
human_description:
en_US: Document visibility (0 Private, 1 Public, 2 Enterprise-only).
zh_Hans: 文档可见性0 私密, 1 公开, 2 企业内公开)。
llm_description: Doc visibility options, 0-private, 1-public, 2-enterprise.
- name: format
type: select
required: false
form: llm
options:
- value: markdown
label:
en_US: markdown
zh_Hans: markdown
- value: html
label:
en_US: html
zh_Hans: html
- value: lake
label:
en_US: lake
zh_Hans: lake
label:
en_US: Content Format
zh_Hans: 内容格式
human_description:
en_US: Format of the document content (markdown, HTML, Lake).
zh_Hans: 文档内容格式markdown, HTML, Lake
llm_description: Content format choices, markdown, HTML, Lake.
- name: body
type: string
required: true
form: llm
label:
en_US: Body Content
zh_Hans: 正文内容
human_description:
en_US: The actual content of the document.
zh_Hans: 文档的实际内容。
llm_description: Content of the document.

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env python3
"""
Delete a document
"""
__author__ = "佐井"
__created__ = "2024-09-17 22:04"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueDeleteDocumentTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(
self.request("DELETE", token, tool_parameters, "/api/v2/repos/{book_id}/docs/{id}")
)

View File

@@ -0,0 +1,37 @@
identity:
name: aliyuque_delete_document
author: 佐井
label:
en_US: Delete Document
zh_Hans: 删除文档
icon: icon.svg
description:
human:
en_US: Delete Document
zh_Hans: 根据id删除文档
llm: Delete document.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Knowledge Base ID
zh_Hans: 知识库ID
human_description:
en_US: The unique identifier of the knowledge base where the document will be created.
zh_Hans: 文档将被创建的知识库的唯一标识。
llm_description: ID of the target knowledge base.
- name: id
type: string
required: true
form: llm
label:
en_US: Document ID or Path
zh_Hans: 文档 ID or 路径
human_description:
en_US: Document ID or path.
zh_Hans: 文档 ID or 路径。
llm_description: Document ID or path.

View File

@@ -0,0 +1,24 @@
"""
Get the knowledge base index page
"""
__author__ = "佐井"
__created__ = "2024-06-01 22:57:14"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueDescribeBookIndexPageTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(
self.request("GET", token, tool_parameters, "/api/v2/repos/{group_login}/{book_slug}/index_page")
)

View File

@@ -0,0 +1,38 @@
identity:
name: aliyuque_describe_book_index_page
author: 佐井
label:
en_US: Get Repo Index Page
zh_Hans: 获取知识库首页
icon: icon.svg
description:
human:
en_US: Retrieves the homepage of a knowledge base within a group, supporting both book ID and group login with book slug access.
zh_Hans: 获取团队中知识库的首页信息可通过书籍ID或团队登录名与书籍路径访问。
llm: Fetches the knowledge base homepage using group and book identifiers with support for alternate access paths.
parameters:
- name: group_login
type: string
required: true
form: llm
label:
en_US: Group Login
zh_Hans: 团队登录名
human_description:
en_US: The login name of the group that owns the knowledge base.
zh_Hans: 拥有该知识库的团队登录名。
llm_description: Team login identifier for the knowledge base owner.
- name: book_slug
type: string
required: true
form: llm
label:
en_US: Book Slug
zh_Hans: 知识库路径
human_description:
en_US: The unique slug representing the path of the knowledge base.
zh_Hans: 知识库的唯一路径标识。
llm_description: Unique path identifier for the knowledge base.

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env python3
"""
Get the knowledge base table of contents
"""
__author__ = "佐井"
__created__ = "2024-09-17 15:17:11"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class YuqueDescribeBookTableOfContentsTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(self.request("GET", token, tool_parameters, "/api/v2/repos/{book_id}/toc"))

View File

@@ -0,0 +1,25 @@
identity:
name: aliyuque_describe_book_table_of_contents
author: 佐井
label:
en_US: Get Book's Table of Contents
zh_Hans: 获取知识库的目录
icon: icon.svg
description:
human:
en_US: Get Book's Table of Contents.
zh_Hans: 获取知识库的目录。
llm: Get Book's Table of Contents.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Book ID
zh_Hans: 知识库 ID
human_description:
en_US: Book ID.
zh_Hans: 知识库 ID。
llm_description: Book ID.

View File

@@ -0,0 +1,61 @@
"""
Fetch document content
"""
__author__ = "佐井"
__created__ = "2024-06-02 07:11:45"
import json
from typing import Any, Union
from urllib.parse import urlparse
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueDescribeDocumentContentTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
new_params = {**tool_parameters}
token = new_params.pop("token")
if not token or token.lower() == "none":
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
new_params = {**tool_parameters}
url = new_params.pop("url")
if not url or not url.startswith("http"):
raise Exception("url is not valid")
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) < 3:
raise Exception("url is not correct")
doc_id = path_parts[-1]
book_slug = path_parts[-2]
group_id = path_parts[-3]
# 1. Request the index page to obtain book_id
new_params["group_login"] = group_id
new_params["book_slug"] = book_slug
index_page = json.loads(
self.request("GET", token, new_params, "/api/v2/repos/{group_login}/{book_slug}/index_page")
)
book_id = index_page.get("data", {}).get("book", {}).get("id")
if not book_id:
raise Exception(f"can not parse book_id from {index_page}")
# 2. Fetch the document content
new_params["book_id"] = book_id
new_params["id"] = doc_id
data = self.request("GET", token, new_params, "/api/v2/repos/{book_id}/docs/{id}")
data = json.loads(data)
body_only = tool_parameters.get("body_only") or ""
if body_only.lower() == "true":
return self.create_text_message(data.get("data").get("body"))
else:
raw = data.get("data")
del raw["body_lake"]
del raw["body_html"]
return self.create_text_message(json.dumps(data))
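For reference, a minimal sketch of how the tool splits a document URL into the identifiers used by the two API calls above; the URL is hypothetical:

```python
# Illustrative only: deriving group_login, book_slug and the doc id from a Yuque
# document URL before calling the index_page and docs endpoints.
from urllib.parse import urlparse

url = "https://www.yuque.com/dify-team/handbook/getting-started"
path_parts = urlparse(url).path.strip("/").split("/")

doc_id = path_parts[-1]       # "getting-started" -> /api/v2/repos/{book_id}/docs/{id}
book_slug = path_parts[-2]    # "handbook"        -> index_page lookup
group_login = path_parts[-3]  # "dify-team"       -> index_page lookup
print(group_login, book_slug, doc_id)
```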

View File

@@ -0,0 +1,50 @@
identity:
name: aliyuque_describe_document_content
author: 佐井
label:
en_US: Fetch Document Content
zh_Hans: 获取文档内容
icon: icon.svg
description:
human:
en_US: Retrieves document content from Yuque based on the provided document URL, which can be a normal or shared link.
zh_Hans: 根据提供的语雀文档地址(支持正常链接或分享链接)获取文档内容。
llm: Fetches Yuque document content given a URL.
parameters:
- name: url
type: string
required: true
form: llm
label:
en_US: Document URL
zh_Hans: 文档地址
human_description:
en_US: The URL of the document to retrieve content from, can be normal or shared.
zh_Hans: 需要获取内容的文档地址,可以是正常链接或分享链接。
llm_description: URL of the Yuque document to fetch content.
- name: body_only
type: string
required: false
form: llm
label:
en_US: return body content only
zh_Hans: 仅返回body内容
human_description:
en_US: true:Body content only, false:Full response with metadata.
zh_Hans: true:仅返回body内容不返回其他元数据false:返回所有元数据。
llm_description: true:Body content only, false:Full response with metadata.
- name: token
type: secret-input
required: false
form: llm
label:
en_US: Yuque API Token
zh_Hans: 语雀接口Token
human_description:
en_US: The token for calling the Yuque API defaults to the Yuque token bound to the current tool if not provided.
zh_Hans: 调用语雀接口的token如果不传则默认为当前工具绑定的语雀Token。
llm_description: If the token for calling the Yuque API is not provided, it will default to the Yuque token bound to the current tool.

View File

@@ -0,0 +1,24 @@
"""
Get document details
"""
__author__ = "佐井"
__created__ = "2024-06-01 10:45:20"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueDescribeDocumentsTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(
self.request("GET", token, tool_parameters, "/api/v2/repos/{book_id}/docs/{id}")
)

View File

@@ -0,0 +1,38 @@
identity:
name: aliyuque_describe_documents
author: 佐井
label:
en_US: Get Doc Detail
zh_Hans: 获取文档详情
icon: icon.svg
description:
human:
en_US: Retrieves detailed information of a specific document identified by its ID or path within a knowledge base.
zh_Hans: 根据知识库ID和文档ID或路径获取文档详细信息。
llm: Fetches detailed doc info using ID/path from a knowledge base; supports doc lookup in Yuque.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Knowledge Base ID
zh_Hans: 知识库 ID
human_description:
en_US: Identifier for the knowledge base where the document resides.
zh_Hans: 文档所属知识库的唯一标识。
llm_description: ID of the knowledge base holding the document.
- name: id
type: string
required: true
form: llm
label:
en_US: Document ID or Path
zh_Hans: 文档 ID 或路径
human_description:
en_US: The unique identifier or path of the document to retrieve.
zh_Hans: 需要获取的文档的ID或其在知识库中的路径。
llm_description: Unique doc ID or its path for retrieval.

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env python3
"""
Update the knowledge base table of contents
"""
__author__ = "佐井"
__created__ = "2024-09-17 15:17:11"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class YuqueDescribeBookTableOfContentsTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
doc_ids = tool_parameters.get("doc_ids")
if doc_ids:
doc_ids = [int(doc_id.strip()) for doc_id in doc_ids.split(",")]
tool_parameters["doc_ids"] = doc_ids
return self.create_text_message(self.request("PUT", token, tool_parameters, "/api/v2/repos/{book_id}/toc"))

View File

@@ -0,0 +1,222 @@
identity:
name: aliyuque_update_book_table_of_contents
author: 佐井
label:
en_US: Update Book's Table of Contents
zh_Hans: 更新知识库目录
icon: icon.svg
description:
human:
en_US: Update Book's Table of Contents.
zh_Hans: 更新知识库目录。
llm: Update Book's Table of Contents.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Book ID
zh_Hans: 知识库 ID
human_description:
en_US: Book ID.
zh_Hans: 知识库 ID。
llm_description: Book ID.
- name: action
type: select
required: true
form: llm
options:
- value: appendNode
label:
en_US: appendNode
zh_Hans: appendNode
pt_BR: appendNode
- value: prependNode
label:
en_US: prependNode
zh_Hans: prependNode
pt_BR: prependNode
- value: editNode
label:
en_US: editNode
zh_Hans: editNode
pt_BR: editNode
- value: removeNode
label:
en_US: removeNode
zh_Hans: removeNode
pt_BR: removeNode
label:
en_US: Action Type
zh_Hans: 操作
human_description:
en_US: In the operation scenario, sibling node prepending is not supported, deleting a node doesn't remove associated documents, and node deletion has two modes, 'sibling' (delete current node) and 'child' (delete current node and its children).
zh_Hans: 操作,创建场景下不支持同级头插 prependNode删除节点不会删除关联文档删除节点时action_mode=sibling (删除当前节点), action_mode=child (删除当前节点及子节点)
llm_description: In the operation scenario, sibling node prepending is not supported, deleting a node doesn't remove associated documents, and node deletion has two modes, 'sibling' (delete current node) and 'child' (delete current node and its children).
- name: action_mode
type: select
required: false
form: llm
options:
- value: sibling
label:
en_US: sibling
zh_Hans: 同级
pt_BR: sibling
- value: child
label:
en_US: child
zh_Hans: 子集
pt_BR: child
label:
en_US: Action Mode
zh_Hans: 操作模式
human_description:
en_US: Operation mode (sibling:same level, child:child level).
zh_Hans: 操作模式 (sibling:同级, child:子级)。
llm_description: Operation mode (sibling:same level, child:child level).
- name: target_uuid
type: string
required: false
form: llm
label:
en_US: Target node UUID
zh_Hans: 目标节点 UUID
human_description:
en_US: Target node UUID, defaults to root node if left empty.
zh_Hans: 目标节点 UUID, 不填默认为根节点。
llm_description: Target node UUID, defaults to root node if left empty.
- name: node_uuid
type: string
required: false
form: llm
label:
en_US: Node UUID
zh_Hans: 操作节点 UUID
human_description:
en_US: Operation node UUID [required for move/update/delete].
zh_Hans: 操作节点 UUID [移动/更新/删除必填]。
llm_description: Operation node UUID [required for move/update/delete].
- name: doc_ids
type: string
required: false
form: llm
label:
en_US: Document IDs
zh_Hans: 文档id列表
human_description:
en_US: Document IDs [required for creating documents], separate multiple IDs with ','.
zh_Hans: 文档 IDs [创建文档必填],多个用','分隔。
llm_description: Document IDs [required for creating documents], separate multiple IDs with ','.
- name: type
type: select
required: false
form: llm
default: DOC
options:
- value: DOC
label:
en_US: DOC
zh_Hans: 文档
pt_BR: DOC
- value: LINK
label:
en_US: LINK
zh_Hans: 链接
pt_BR: LINK
- value: TITLE
label:
en_US: TITLE
zh_Hans: 分组
pt_BR: TITLE
label:
en_US: Node type
zh_Hans: 操节点类型
human_description:
en_US: Node type [required for creation] (DOC:document, LINK:external link, TITLE:group).
zh_Hans: 操节点类型 [创建必填] (DOC:文档, LINK:外链, TITLE:分组)。
llm_description: Node type [required for creation] (DOC:document, LINK:external link, TITLE:group).
- name: title
type: string
required: false
form: llm
label:
en_US: Node Name
zh_Hans: 节点名称
human_description:
en_US: Node name [required for creating groups/external links].
zh_Hans: 节点名称 [创建分组/外链必填]。
llm_description: Node name [required for creating groups/external links].
- name: url
type: string
required: false
form: llm
label:
en_US: Node URL
zh_Hans: 节点URL
human_description:
en_US: Node URL [required for creating external links].
zh_Hans: 节点 URL [创建外链必填]。
llm_description: Node URL [required for creating external links].
- name: open_window
type: select
required: false
form: llm
default: 0
options:
- value: 0
label:
en_US: Current Page
zh_Hans: 当前页打开
pt_BR: Current Page
- value: 1
label:
en_US: New Window
zh_Hans: 新窗口打开
pt_BR: New Window
label:
en_US: Open in new window
zh_Hans: 是否新窗口打开
human_description:
en_US: Open in new window [optional for external links] (0:open in current page, 1:open in new window).
zh_Hans: 是否新窗口打开 [外链选填] (0:当前页打开, 1:新窗口打开)。
llm_description: Open in new window [optional for external links] (0:open in current page, 1:open in new window).
- name: visible
type: select
required: false
form: llm
default: 1
options:
- value: 0
label:
en_US: Invisible
zh_Hans: 隐藏
pt_BR: Invisible
- value: 1
label:
en_US: Visible
zh_Hans: 可见
pt_BR: Visible
label:
en_US: Visibility
zh_Hans: 是否可见
human_description:
en_US: Visibility (0:invisible, 1:visible).
zh_Hans: 是否可见 (0:不可见, 1:可见)。
llm_description: Visibility (0:invisible, 1:visible).

View File

@@ -0,0 +1,24 @@
"""
Update a document
"""
__author__ = "佐井"
__created__ = "2024-06-19 16:50:07"
from typing import Any, Union
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.provider.builtin.aliyuque.tools.base import AliYuqueTool
from core.tools.tool.builtin_tool import BuiltinTool
class AliYuqueUpdateDocumentTool(AliYuqueTool, BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
token = self.runtime.credentials.get("token", None)
if not token:
raise Exception("token is required")
return self.create_text_message(
self.request("PUT", token, tool_parameters, "/api/v2/repos/{book_id}/docs/{id}")
)

View File

@@ -0,0 +1,87 @@
identity:
name: aliyuque_update_document
author: 佐井
label:
en_US: Update Document
zh_Hans: 更新文档
icon: icon.svg
description:
human:
en_US: Update an existing document within a specified knowledge base by providing the document ID or path.
zh_Hans: 通过提供文档ID或路径更新指定知识库中的现有文档。
llm: Update doc in a knowledge base via ID/path.
parameters:
- name: book_id
type: number
required: true
form: llm
label:
en_US: Knowledge Base ID
zh_Hans: 知识库 ID
human_description:
en_US: The unique identifier of the knowledge base where the document resides.
zh_Hans: 文档所属知识库的ID。
llm_description: ID of the knowledge base holding the doc.
- name: id
type: string
required: true
form: llm
label:
en_US: Document ID or Path
zh_Hans: 文档 ID 或 路径
human_description:
en_US: The unique identifier or the path of the document to be updated.
zh_Hans: 要更新的文档的唯一ID或路径。
llm_description: Doc's ID or path for update.
- name: title
type: string
required: false
form: llm
label:
en_US: Title
zh_Hans: 标题
human_description:
en_US: The title of the document, defaults to 'Untitled' if not provided.
zh_Hans: 文档标题,默认为'无标题'如未提供。
llm_description: Title of the document, defaults to 'Untitled'.
- name: format
type: select
required: false
form: llm
options:
- value: markdown
label:
en_US: markdown
zh_Hans: markdown
pt_BR: markdown
- value: html
label:
en_US: html
zh_Hans: html
pt_BR: html
- value: lake
label:
en_US: lake
zh_Hans: lake
pt_BR: lake
label:
en_US: Content Format
zh_Hans: 内容格式
human_description:
en_US: Format of the document content (markdown, HTML, Lake).
zh_Hans: 文档内容格式markdown, HTML, Lake
llm_description: Content format choices, markdown, HTML, Lake.
- name: body
type: string
required: true
form: llm
label:
en_US: Body Content
zh_Hans: 正文内容
human_description:
en_US: The actual content of the document.
zh_Hans: 文档的实际内容。
llm_description: Content of the document.

View File

@@ -21,7 +21,6 @@ class DiscordWebhookTool(BuiltinTool):
return self.create_text_message("Invalid parameter content")
webhook_url = tool_parameters.get("webhook_url", "")
if not webhook_url.startswith("https://discord.com/api/webhooks/"):
return self.create_text_message(
f"Invalid parameter webhook_url ${webhook_url}, \
@@ -31,13 +30,14 @@ class DiscordWebhookTool(BuiltinTool):
headers = {
"Content-Type": "application/json",
}
params = {}
payload = {
"username": tool_parameters.get("username") or user_id,
"content": content,
"avatar_url": tool_parameters.get("avatar_url") or None,
}
try:
res = httpx.post(webhook_url, headers=headers, params=params, json=payload)
res = httpx.post(webhook_url, headers=headers, json=payload)
if res.is_success:
return self.create_text_message("Text message was sent successfully")
else:

View File

@@ -38,3 +38,28 @@ parameters:
pt_BR: Content to sent to the channel or person.
llm_description: Content of the message
form: llm
- name: username
type: string
required: false
label:
en_US: Discord Webhook Username
zh_Hans: Discord Webhook用户名
pt_BR: Discord Webhook Username
human_description:
en_US: Discord Webhook Username
zh_Hans: Discord Webhook用户名
pt_BR: Discord Webhook Username
llm_description: Discord Webhook Username
form: llm
- name: avatar_url
type: string
required: false
label:
en_US: Discord Webhook Avatar
zh_Hans: Discord Webhook头像
pt_BR: Discord Webhook Avatar
human_description:
en_US: Discord Webhook Avatar URL
zh_Hans: Discord Webhook头像地址
pt_BR: Discord Webhook Avatar URL
form: form
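With the username and avatar_url parameters above, the tool now posts a payload of this shape to the webhook. A minimal sketch with placeholder values:

```python
# Illustrative payload for the updated DiscordWebhookTool; the webhook URL,
# username and avatar URL are placeholders.
import httpx

webhook_url = "https://discord.com/api/webhooks/000000000/placeholder-token"
payload = {
    "content": "Hello from Dify",
    "username": "dify-bot",                          # new optional override
    "avatar_url": "https://example.com/avatar.png",  # new optional override
}
res = httpx.post(webhook_url, headers={"Content-Type": "application/json"}, json=payload)
print(res.is_success)
```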

View File

@@ -289,7 +289,7 @@ class WorkflowEntry:
new_value.append(file)
if new_value:
value = new_value
input_value = new_value
# append variable and value to variable pool
variable_pool.add([variable_node_id] + variable_key_list, input_value)

View File

@@ -0,0 +1,100 @@
import os
from typing import Union
from unittest.mock import MagicMock
import pytest
from _pytest.monkeypatch import MonkeyPatch
from tos import TosClientV2
from tos.clientv2 import DeleteObjectOutput, GetObjectOutput, HeadObjectOutput, PutObjectOutput
class AttrDict(dict):
def __getattr__(self, item):
return self.get(item)
def get_example_bucket() -> str:
return "dify"
def get_example_filename() -> str:
return "test.txt"
def get_example_data() -> bytes:
return b"test"
def get_example_filepath() -> str:
return "/test"
class MockVolcengineTosClass:
def __init__(self, ak="", sk="", endpoint="", region=""):
self.bucket_name = get_example_bucket()
self.key = get_example_filename()
self.content = get_example_data()
self.filepath = get_example_filepath()
self.resp = AttrDict(
{
"x-tos-server-side-encryption": "kms",
"x-tos-server-side-encryption-kms-key-id": "trn:kms:cn-beijing:****:keyrings/ring-test/keys/key-test",
"x-tos-server-side-encryption-customer-algorithm": "AES256",
"x-tos-version-id": "test",
"x-tos-hash-crc64ecma": 123456,
"request_id": "test",
"headers": {
"x-tos-id-2": "test",
"ETag": "123456",
},
"status": 200,
}
)
def put_object(self, bucket: str, key: str, content=None) -> PutObjectOutput:
assert bucket == self.bucket_name
assert key == self.key
assert content == self.content
return PutObjectOutput(self.resp)
def get_object(self, bucket: str, key: str) -> GetObjectOutput:
assert bucket == self.bucket_name
assert key == self.key
get_object_output = MagicMock(GetObjectOutput)
get_object_output.read.return_value = self.content
return get_object_output
def get_object_to_file(self, bucket: str, key: str, file_path: str):
assert bucket == self.bucket_name
assert key == self.key
assert file_path == self.filepath
def head_object(self, bucket: str, key: str) -> HeadObjectOutput:
assert bucket == self.bucket_name
assert key == self.key
return HeadObjectOutput(self.resp)
def delete_object(self, bucket: str, key: str):
assert bucket == self.bucket_name
assert key == self.key
return DeleteObjectOutput(self.resp)
MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"
@pytest.fixture
def setup_volcengine_tos_mock(monkeypatch: MonkeyPatch):
if MOCK:
monkeypatch.setattr(TosClientV2, "__init__", MockVolcengineTosClass.__init__)
monkeypatch.setattr(TosClientV2, "put_object", MockVolcengineTosClass.put_object)
monkeypatch.setattr(TosClientV2, "get_object", MockVolcengineTosClass.get_object)
monkeypatch.setattr(TosClientV2, "get_object_to_file", MockVolcengineTosClass.get_object_to_file)
monkeypatch.setattr(TosClientV2, "head_object", MockVolcengineTosClass.head_object)
monkeypatch.setattr(TosClientV2, "delete_object", MockVolcengineTosClass.delete_object)
yield
if MOCK:
monkeypatch.undo()

View File

@@ -0,0 +1,67 @@
from collections.abc import Generator
from flask import Flask
from tos import TosClientV2
from tos.clientv2 import GetObjectOutput, HeadObjectOutput, PutObjectOutput
from extensions.storage.volcengine_tos_storage import VolcengineTosStorage
from tests.unit_tests.oss.__mock.volcengine_tos import (
get_example_bucket,
get_example_data,
get_example_filename,
get_example_filepath,
setup_volcengine_tos_mock,
)
class VolcengineTosTest:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
else:
return cls._instance
def __init__(self):
self.storage = VolcengineTosStorage(app=Flask(__name__))
self.storage.bucket_name = get_example_bucket()
self.storage.client = TosClientV2(
ak="dify",
sk="dify",
endpoint="https://xxx.volces.com",
region="cn-beijing",
)
def test_save(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
volc_tos.storage.save(get_example_filename(), get_example_data())
def test_load_once(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
assert volc_tos.storage.load_once(get_example_filename()) == get_example_data()
def test_load_stream(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
generator = volc_tos.storage.load_stream(get_example_filename())
assert isinstance(generator, Generator)
assert next(generator) == get_example_data()
def test_download(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
volc_tos.storage.download(get_example_filename(), get_example_filepath())
def test_exists(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
assert volc_tos.storage.exists(get_example_filename())
def test_delete(setup_volcengine_tos_mock):
volc_tos = VolcengineTosTest()
volc_tos.storage.delete(get_example_filename())

View File

@@ -96,6 +96,10 @@ x-shared-env: &shared-api-worker-env
VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-}
VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-}
VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-}
BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-}
BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-}
BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-}
BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-}
VECTOR_STORE: ${VECTOR_STORE:-weaviate}
WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}

web/.gitignore (vendored, 9 lines changed)
View File

@@ -44,10 +44,9 @@ package-lock.json
.pnp.cjs
.pnp.loader.mjs
.yarn/
.yarnrc.yml
# pmpm
pnpm-lock.yaml
.favorites.json
*storybook.log
# storybook
/storybook-static
*storybook.log

View File

@@ -63,7 +63,7 @@ if $web_modified; then
# check if the test file exists
if [ -f "../$test_file" ]; then
echo "Detected changes in $file, running corresponding unit tests..."
npm run test "../$test_file"
pnpm run test "../$test_file"
if [ $? -ne 0 ]; then
echo "Unit tests failed. Please fix the errors before committing."

View File

@@ -6,14 +6,12 @@ This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next
### Run by source code
To start the web frontend service, you will need [Node.js v18.x (LTS)](https://nodejs.org/en) and [NPM version 8.x.x](https://www.npmjs.com/) or [Yarn](https://yarnpkg.com/).
To start the web frontend service, you will need [Node.js v18.x (LTS)](https://nodejs.org/en) and [pnpm version 9.12.2](https://pnpm.io).
First, install the dependencies:
```bash
npm install
# or
yarn install --frozen-lockfile
pnpm install
```
Then, configure the environment variables. Create a file named `.env.local` in the current directory and copy the contents from `.env.example`. Modify the values of these environment variables according to your requirements:
@@ -43,9 +41,7 @@ NEXT_PUBLIC_SENTRY_DSN=
Finally, run the development server:
```bash
npm run dev
# or
yarn dev
pnpm run dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
@@ -59,19 +55,19 @@ You can start editing the file under folder `app`. The page auto-updates as you
First, build the app for production:
```bash
npm run build
pnpm run build
```
Then, start the server:
```bash
npm run start
pnpm run start
```
If you want to customize the host and port:
```bash
npm run start --port=3001 --host=0.0.0.0
pnpm run start --port=3001 --host=0.0.0.0
```
## Storybook
@@ -99,7 +95,7 @@ You can create a test file with a suffix of `.spec` beside the file that to be t
Run test:
```bash
npm run test
pnpm run test
```
If you are not familiar with writing tests, here is some code to refer to:

View File

@@ -1,8 +1,9 @@
const path = require('node:path')
const { open, readdir, access, mkdir, writeFile, appendFile, rm } = require('node:fs/promises')
const { parseXml } = require('@rgrove/parse-xml')
const camelCase = require('lodash/camelCase')
const template = require('lodash/template')
import path from 'node:path'
import { access, appendFile, mkdir, open, readdir, rm, writeFile } from 'node:fs/promises'
import { parseXml } from '@rgrove/parse-xml'
import { camelCase, template } from 'lodash-es'
const __dirname = path.dirname(new URL(import.meta.url).pathname)
const generateDir = async (currentPath) => {
try {

View File

@@ -13,14 +13,15 @@
"fix": "next lint --fix",
"eslint-fix": "eslint --fix",
"prepare": "cd ../ && node -e \"if (process.env.NODE_ENV !== 'production'){process.exit(1)} \" || husky install ./web/.husky",
"gen-icons": "node ./app/components/base/icons/script.js",
"gen-icons": "node ./app/components/base/icons/script.mjs",
"uglify-embed": "node ./bin/uglify-embed",
"check-i18n": "node ./i18n/check-i18n.js",
"auto-gen-i18n": "node ./i18n/auto-gen-i18n.js",
"test": "jest",
"test:watch": "jest --watch",
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build"
"build-storybook": "storybook build",
"preinstall": "npx only-allow pnpm"
},
"dependencies": {
"@babel/runtime": "^7.22.3",

web/pnpm-lock.yaml (generated, new file, 17061 lines)

File diff suppressed because it is too large.

File diff suppressed because it is too large.