Mirror of https://github.com/langgenius/dify.git
Synced 2026-01-03 13:07:19 +00:00

Compare commits: chore/opti...fix/extra- (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 430657312f |  |
|  | 4aa8c4b261 |  |
|  | be0d92639a |  |
|  | a204eff43c |  |
.github/DISCUSSION_TEMPLATE/general.yml (vendored, 24 lines)

@@ -1,24 +0,0 @@
title: "General Discussion"
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
          required: true
        - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: Content
      placeholder: Please describe the content you would like to discuss.
    validations:
      required: true
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/DISCUSSION_TEMPLATE/help.yml (vendored, 30 lines)

@@ -1,30 +0,0 @@
title: "Help"
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
          required: true
        - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
      placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
    validations:
      required: true
  - type: textarea
    attributes:
      label: 2. Additional context or comments
      placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
    validations:
      required: false
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/DISCUSSION_TEMPLATE/suggestion.yml (vendored, 37 lines)

@@ -1,37 +0,0 @@
title: Suggestions for New Features
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
          required: true
        - label: "请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
      placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
    validations:
      required: true
  - type: textarea
    attributes:
      label: 2. Additional context or comments
      placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
    validations:
      required: false
  - type: checkboxes
    attributes:
      label: 3. Can you help us with this feature?
      description: Let us know! This is not a commitment, but a starting point for collaboration.
      options:
        - label: I am interested in contributing to this feature.
          required: false
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/workflows/api-tests.yml (vendored, 1 line)

@@ -89,5 +89,6 @@ jobs:
            pgvecto-rs
            pgvector
            chroma
            myscale
      - name: Test Vector Stores
        run: poetry run -C api bash dev/pytest/pytest_vdb.sh
.gitignore (vendored, 1 line)

@@ -174,6 +174,5 @@ sdks/python-client/dify_client.egg-info
.vscode/*
!.vscode/launch.json
pyrightconfig.json
api/.vscode

.idea/
@@ -216,6 +216,7 @@ At the same time, please consider supporting Dify by sharing it on social media

* [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Best for: questions you have about using Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
@@ -199,6 +199,7 @@ docker compose up -d

## المجتمع والاتصال

* [مناقشة Github](https://github.com/langgenius/dify/discussions). الأفضل لـ: مشاركة التعليقات وطرح الأسئلة.
* [المشكلات على GitHub](https://github.com/langgenius/dify/issues). الأفضل لـ: الأخطاء التي تواجهها في استخدام Dify.AI، واقتراحات الميزات. انظر [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [البريد الإلكتروني](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). الأفضل لـ: الأسئلة التي تتعلق باستخدام Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
* [تويتر](https://twitter.com/dify_ai). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
@@ -224,6 +224,7 @@ Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en

* [Discusión en GitHub](https://github.com/langgenius/dify/discussions). Lo mejor para: compartir comentarios y hacer preguntas.
* [Reporte de problemas en GitHub](https://github.com/langgenius/dify/issues). Lo mejor para: errores que encuentres usando Dify.AI y propuestas de características. Consulta nuestra [Guía de contribución](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [Correo electrónico](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Lo mejor para: preguntas que tengas sobre el uso de Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
* [Twitter](https://twitter.com/dify_ai). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
@@ -222,6 +222,7 @@ Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur le

* [Discussion GitHub](https://github.com/langgenius/dify/discussions). Meilleur pour: partager des commentaires et poser des questions.
* [Problèmes GitHub](https://github.com/langgenius/dify/issues). Meilleur pour: les bogues que vous rencontrez en utilisant Dify.AI et les propositions de fonctionnalités. Consultez notre [Guide de contribution](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [E-mail](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Meilleur pour: les questions que vous avez sur l'utilisation de Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Meilleur pour: partager vos applications et passer du temps avec la communauté.
* [Twitter](https://twitter.com/dify_ai). Meilleur pour: partager vos applications et passer du temps avec la communauté.
@@ -221,6 +221,7 @@ docker compose up -d

* [Github Discussion](https://github.com/langgenius/dify/discussions). 主に: フィードバックの共有や質問。
* [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AIを使用する際に発生するエラーや問題については、[貢献ガイド](CONTRIBUTING_JA.md)を参照してください
* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). 主に: Dify.AIの使用に関する質問。
* [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。
* [Twitter](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。

@@ -238,7 +239,7 @@ docker compose up -d
<td>無料の30分間のミーティングをスケジュール</td>
</tr>
<tr>
<td><a href='https://github.com/langgenius/dify/issues'>技術サポート</a></td>
<td><a href='mailto:support@dify.ai?subject=[GitHub]Technical%20Support'>技術サポート</a></td>
<td>技術的な問題やサポートに関する質問</td>
</tr>
<tr>
@@ -224,6 +224,7 @@ At the same time, please consider supporting Dify by sharing it on social media

* [Github Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Best for: questions you have about using Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
@@ -214,6 +214,7 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했

* [Github 토론](https://github.com/langgenius/dify/discussions). 피드백 공유 및 질문하기에 적합합니다.
* [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요.
* [이메일](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Dify.AI 사용에 대한 질문하기에 적합합니다.
* [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
* [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
@@ -1,5 +1,7 @@
import os

from configs import dify_config

if os.environ.get("DEBUG", "false").lower() != 'true':
    from gevent import monkey

@@ -21,9 +23,7 @@ from flask import Flask, Response, request
from flask_cors import CORS
from werkzeug.exceptions import Unauthorized

import contexts
from commands import register_commands
from configs import dify_config

# DO NOT REMOVE BELOW
from events import event_handlers

@@ -181,10 +181,7 @@ def load_user_from_request(request_from_flask_login):
    decoded = PassportService().verify(auth_token)
    user_id = decoded.get('user_id')

    account = AccountService.load_logged_in_account(account_id=user_id, token=auth_token)
    if account:
        contexts.tenant_id.set(account.current_tenant_id)
    return account
    return AccountService.load_logged_in_account(account_id=user_id, token=auth_token)


@login_manager.unauthorized_handler
@@ -406,6 +406,7 @@ class DataSetConfig(BaseSettings):
        default=False,
    )


class WorkspaceConfig(BaseSettings):
    """
    Workspace configs
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

    CURRENT_VERSION: str = Field(
        description='Dify version',
        default='0.6.15',
        default='0.6.14',
    )

    COMMIT_SHA: str = Field(
@@ -1,2 +0,0 @@
# TODO: Update all string in code to use this constant
HIDDEN_VALUE = '[__HIDDEN__]'
@@ -1,3 +0,0 @@
from contextvars import ContextVar

tenant_id: ContextVar[str] = ContextVar('tenant_id')
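The deleted module above is the whole of the tenant-context mechanism: a single ContextVar that the login hook sets and downstream code reads. A minimal standalone sketch of that pattern (the request wrapper here is hypothetical; only the ContextVar itself comes from the diff):

```python
from contextvars import ContextVar

# A ContextVar gives each execution context (request, task, thread with a
# copied context) its own value, unlike a plain module-level global.
tenant_id: ContextVar[str] = ContextVar('tenant_id')


def handle_request(account_tenant_id: str) -> str:
    # Set once when the authenticated account is loaded...
    token = tenant_id.set(account_tenant_id)
    try:
        # ...then code deeper in the call stack can read it without the value
        # being threaded through every function signature.
        return current_tenant()
    finally:
        tenant_id.reset(token)  # restore the previous value


def current_tenant() -> str:
    return tenant_id.get()  # raises LookupError if never set


if __name__ == '__main__':
    print(handle_request('tenant-123'))  # -> tenant-123
```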
@@ -212,7 +212,7 @@ class AppCopyApi(Resource):
        parser.add_argument('icon_background', type=str, location='json')
        args = parser.parse_args()

        data = AppDslService.export_dsl(app_model=app_model, include_secret=True)
        data = AppDslService.export_dsl(app_model=app_model)
        app = AppDslService.import_and_create_new_app(
            tenant_id=current_user.current_tenant_id,
            data=data,

@@ -234,13 +234,8 @@ class AppExportApi(Resource):
        if not current_user.is_editor:
            raise Forbidden()

        # Add include_secret params
        parser = reqparse.RequestParser()
        parser.add_argument('include_secret', type=inputs.boolean, default=False, location='args')
        args = parser.parse_args()

        return {
            "data": AppDslService.export_dsl(app_model=app_model, include_secret=args['include_secret'])
            "data": AppDslService.export_dsl(app_model=app_model)
        }
@@ -22,19 +22,17 @@ class RuleGenerateApi(Resource):
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('instruction', type=str, required=True, nullable=False, location='json')
        parser.add_argument('model_config', type=dict, required=True, nullable=False, location='json')
        parser.add_argument('no_variable', type=bool, required=True, default=False, location='json')
        parser.add_argument('audiences', type=str, required=True, nullable=False, location='json')
        parser.add_argument('hoping_to_solve', type=str, required=True, nullable=False, location='json')
        args = parser.parse_args()

        account = current_user

        try:
            rules = LLMGenerator.generate_rule_config(
                tenant_id=account.current_tenant_id,
                instruction=args['instruction'],
                model_config=args['model_config'],
                no_variable=args['no_variable']
                account.current_tenant_id,
                args['audiences'],
                args['hoping_to_solve']
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
@@ -281,7 +281,7 @@ class UserSatisfactionRateStatistic(Resource):
        SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
            COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count
        FROM messages m
        LEFT JOIN message_feedbacks mf on mf.message_id=m.id and mf.rating='like'
        LEFT JOIN message_feedbacks mf on mf.message_id=m.id
        WHERE m.app_id = :app_id
        '''
        arg_dict = {'tz': account.timezone, 'app_id': app_model.id}
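One variant of this query filters the feedback rating inside the LEFT JOIN condition, the other joins unfiltered. The placement matters for a satisfaction rate: a predicate in the ON clause keeps feedback-less messages in the denominator, while the same predicate in a WHERE clause would drop those rows entirely. A self-contained illustration with sqlite3 and toy tables mirroring the query:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript('''
    CREATE TABLE messages (id INTEGER PRIMARY KEY);
    CREATE TABLE message_feedbacks (message_id INTEGER, rating TEXT);
    INSERT INTO messages VALUES (1), (2);
    INSERT INTO message_feedbacks VALUES (1, 'like'), (2, 'dislike');
''')

# Filter in ON: both messages survive; only the 'like' counts as feedback.
row = conn.execute('''
    SELECT COUNT(m.id), COUNT(mf.message_id)
    FROM messages m
    LEFT JOIN message_feedbacks mf
        ON mf.message_id = m.id AND mf.rating = 'like'
''').fetchone()
print(row)  # (2, 1) -> all messages stay in the denominator

# Filter in WHERE: message 2 disappears from the result set.
row = conn.execute('''
    SELECT COUNT(m.id), COUNT(mf.message_id)
    FROM messages m
    LEFT JOIN message_feedbacks mf ON mf.message_id = m.id
    WHERE mf.rating = 'like'
''').fetchone()
print(row)  # (1, 1)
```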
@@ -13,7 +13,6 @@ from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.segments import factory
from core.errors.error import AppInvokeQuotaExceededError
from fields.workflow_fields import workflow_fields
from fields.workflow_run_fields import workflow_run_node_execution_fields

@@ -42,7 +41,7 @@ class DraftWorkflowApi(Resource):
        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        # fetch draft workflow by app_model
        workflow_service = WorkflowService()
        workflow = workflow_service.get_draft_workflow(app_model=app_model)

@@ -65,15 +64,13 @@ class DraftWorkflowApi(Resource):
        if not current_user.is_editor:
            raise Forbidden()

        content_type = request.headers.get('Content-Type', '')
        content_type = request.headers.get('Content-Type')

        if 'application/json' in content_type:
            parser = reqparse.RequestParser()
            parser.add_argument('graph', type=dict, required=True, nullable=False, location='json')
            parser.add_argument('features', type=dict, required=True, nullable=False, location='json')
            parser.add_argument('hash', type=str, required=False, location='json')
            # TODO: set this to required=True after frontend is updated
            parser.add_argument('environment_variables', type=list, required=False, location='json')
            args = parser.parse_args()
        elif 'text/plain' in content_type:
            try:

@@ -87,8 +84,7 @@ class DraftWorkflowApi(Resource):
                args = {
                    'graph': data.get('graph'),
                    'features': data.get('features'),
                    'hash': data.get('hash'),
                    'environment_variables': data.get('environment_variables')
                    'hash': data.get('hash')
                }
            except json.JSONDecodeError:
                return {'message': 'Invalid JSON data'}, 400

@@ -98,15 +94,12 @@ class DraftWorkflowApi(Resource):
        workflow_service = WorkflowService()

        try:
            environment_variables_list = args.get('environment_variables') or []
            environment_variables = [factory.build_variable_from_mapping(obj) for obj in environment_variables_list]
            workflow = workflow_service.sync_draft_workflow(
                app_model=app_model,
                graph=args['graph'],
                features=args['features'],
                graph=args.get('graph'),
                features=args.get('features'),
                unique_hash=args.get('hash'),
                account=current_user,
                environment_variables=environment_variables,
                account=current_user
            )
        except WorkflowHashNotEqualError:
            raise DraftWorkflowNotSync()
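The Content-Type change in this section is load-bearing: `request.headers.get('Content-Type')` returns None when the header is absent, and a substring test on None raises. The failure mode in isolation, with a plain dict standing in for Flask's request.headers:

```python
headers: dict[str, str] = {}  # simulates a request with no Content-Type header

content_type = headers.get('Content-Type')
try:
    'application/json' in content_type
except TypeError as exc:  # 'in' on None is not allowed
    print(f'without a default: {exc}')

content_type = headers.get('Content-Type', '')
print('application/json' in content_type)  # False -- falls through to the next branch
```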
@@ -1,11 +1,10 @@
import flask_restful
from flask import request
from flask import current_app, request
from flask_login import current_user
from flask_restful import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError

@@ -531,7 +530,7 @@ class DatasetApiBaseUrlApi(Resource):
    @account_initialization_required
    def get(self):
        return {
            'api_base_url': (dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL
            'api_base_url': (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
                             else request.host_url.rstrip('/')) + '/v1'
        }
@@ -541,15 +540,15 @@ class DatasetRetrievalSettingApi(Resource):
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        vector_type = current_app.config['VECTOR_STORE']
        match vector_type:
            case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT:
            case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT | VectorType.ORACLE:
                return {
                    'retrieval_method': [
                        RetrievalMethod.SEMANTIC_SEARCH.value
                    ]
                }
            case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE:
            case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE:
                return {
                    'retrieval_method': [
                        RetrievalMethod.SEMANTIC_SEARCH.value,

@@ -567,13 +566,13 @@ class DatasetRetrievalSettingMockApi(Resource):
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT:
            case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT | VectorType.ORACLE:
                return {
                    'retrieval_method': [
                        RetrievalMethod.SEMANTIC_SEARCH.value
                    ]
                }
            case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE:
            case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE:
                return {
                    'retrieval_method': [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
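Both endpoints dispatch on the store type with structural pattern matching, where several enum members share one arm via `|` or-patterns. A trimmed, self-contained sketch of the same dispatch (the enum here is hypothetical and far smaller than the real VectorType):

```python
from enum import Enum


class VectorType(str, Enum):
    MILVUS = 'milvus'
    ORACLE = 'oracle'
    QDRANT = 'qdrant'


def retrieval_methods(vector_type: VectorType) -> list[str]:
    match vector_type:
        case VectorType.MILVUS | VectorType.ORACLE:
            # stores limited to semantic search
            return ['semantic_search']
        case VectorType.QDRANT:
            # stores that also support full-text and hybrid retrieval
            return ['semantic_search', 'full_text_search', 'hybrid_search']
        case _:
            raise ValueError(f'unsupported vector store {vector_type}')


print(retrieval_methods(VectorType.ORACLE))  # ['semantic_search']
```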
@@ -1,9 +1,8 @@
from flask import request
from flask import current_app, request
from flask_login import current_user
from flask_restful import Resource, marshal_with

import services
from configs import dify_config
from controllers.console import api
from controllers.console.datasets.error import (
    FileTooLargeError,

@@ -27,9 +26,9 @@ class FileApi(Resource):
    @account_initialization_required
    @marshal_with(upload_config_fields)
    def get(self):
        file_size_limit = dify_config.UPLOAD_FILE_SIZE_LIMIT
        batch_count_limit = dify_config.UPLOAD_FILE_BATCH_LIMIT
        image_file_size_limit = dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT
        file_size_limit = current_app.config.get("UPLOAD_FILE_SIZE_LIMIT")
        batch_count_limit = current_app.config.get("UPLOAD_FILE_BATCH_LIMIT")
        image_file_size_limit = current_app.config.get("UPLOAD_IMAGE_FILE_SIZE_LIMIT")
        return {
            'file_size_limit': file_size_limit,
            'batch_count_limit': batch_count_limit,

@@ -77,7 +76,7 @@ class FileSupportTypeApi(Resource):
    @login_required
    @account_initialization_required
    def get(self):
        etl_type = dify_config.ETL_TYPE
        etl_type = current_app.config['ETL_TYPE']
        allowed_extensions = UNSTRUCTURED_ALLOWED_EXTENSIONS if etl_type == 'Unstructured' else ALLOWED_EXTENSIONS
        return {'allowed_extensions': allowed_extensions}
@@ -1,7 +1,7 @@

from flask import current_app
from flask_restful import fields, marshal_with

from configs import dify_config
from controllers.console import api
from controllers.console.app.error import AppUnavailableError
from controllers.console.explore.wraps import InstalledAppResource

@@ -78,7 +78,7 @@ class AppParameterApi(InstalledAppResource):
                "transfer_methods": ["remote_url", "local_file"]
            }}),
            'system_parameters': {
                'image_file_size_limit': dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT
                'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
            }
        }
@@ -1,9 +1,8 @@
import os

from flask import session
from flask import current_app, session
from flask_restful import Resource, reqparse

from configs import dify_config
from libs.helper import str_len
from models.model import DifySetup
from services.account_service import TenantService

@@ -41,7 +40,7 @@ class InitValidateAPI(Resource):
        return {'result': 'success'}, 201


def get_init_validate_status():
    if dify_config.EDITION == 'SELF_HOSTED':
    if current_app.config['EDITION'] == 'SELF_HOSTED':
        if os.environ.get('INIT_PASSWORD'):
            return session.get('is_init_validated') or DifySetup.query.first()
@@ -1,9 +1,8 @@
from functools import wraps

from flask import request
from flask import current_app, request
from flask_restful import Resource, reqparse

from configs import dify_config
from libs.helper import email, get_remote_ip, str_len
from libs.password import valid_password
from models.model import DifySetup

@@ -18,7 +17,7 @@ from .wraps import only_edition_self_hosted
class SetupApi(Resource):

    def get(self):
        if dify_config.EDITION == 'SELF_HOSTED':
        if current_app.config['EDITION'] == 'SELF_HOSTED':
            setup_status = get_setup_status()
            if setup_status:
                return {

@@ -78,7 +77,7 @@ def setup_required(view):


def get_setup_status():
    if dify_config.EDITION == 'SELF_HOSTED':
    if current_app.config['EDITION'] == 'SELF_HOSTED':
        return DifySetup.query.first()
    else:
        return True
@@ -3,10 +3,9 @@ import json
import logging

import requests
from flask import current_app
from flask_restful import Resource, reqparse

from configs import dify_config

from . import api

@@ -16,16 +15,16 @@ class VersionApi(Resource):
        parser = reqparse.RequestParser()
        parser.add_argument('current_version', type=str, required=True, location='args')
        args = parser.parse_args()
        check_update_url = dify_config.CHECK_UPDATE_URL
        check_update_url = current_app.config['CHECK_UPDATE_URL']

        result = {
            'version': dify_config.CURRENT_VERSION,
            'version': current_app.config['CURRENT_VERSION'],
            'release_date': '',
            'release_notes': '',
            'can_auto_update': False,
            'features': {
                'can_replace_logo': dify_config.CAN_REPLACE_LOGO,
                'model_load_balancing_enabled': dify_config.MODEL_LB_ENABLED
                'can_replace_logo': current_app.config['CAN_REPLACE_LOGO'],
                'model_load_balancing_enabled': current_app.config['MODEL_LB_ENABLED']
            }
        }
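The recurring substitution through these controller hunks swaps Flask's `current_app.config[...]` lookups for attribute access on a typed `dify_config` settings object. A minimal sketch of that pattern with pydantic-settings (the field names mirror the PackagingInfo hunk; whether BaseSettings comes from pydantic v1 or the separate pydantic-settings package depends on the pydantic major version, v2 assumed here; the real dify_config aggregates many such classes):

```python
from pydantic import Field
from pydantic_settings import BaseSettings


class PackagingInfo(BaseSettings):
    # Values are read from the environment at construction time, falling
    # back to the declared defaults; types are validated by pydantic.
    CURRENT_VERSION: str = Field(description='Dify version', default='0.6.15')
    COMMIT_SHA: str = Field(description='SHA-1 checksum of the git commit', default='')


dify_config = PackagingInfo()

# Attribute access works anywhere, with no Flask application context needed:
print(dify_config.CURRENT_VERSION)
```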
@@ -1,11 +1,10 @@
import datetime

import pytz
from flask import request
from flask import current_app, request
from flask_login import current_user
from flask_restful import Resource, fields, marshal_with, reqparse

from configs import dify_config
from constants.languages import supported_language
from controllers.console import api
from controllers.console.setup import setup_required

@@ -37,7 +36,7 @@ class AccountInitApi(Resource):

        parser = reqparse.RequestParser()

        if dify_config.EDITION == 'CLOUD':
        if current_app.config['EDITION'] == 'CLOUD':
            parser.add_argument('invitation_code', type=str, location='json')

        parser.add_argument(

@@ -46,7 +45,7 @@ class AccountInitApi(Resource):
            required=True, location='json')
        args = parser.parse_args()

        if dify_config.EDITION == 'CLOUD':
        if current_app.config['EDITION'] == 'CLOUD':
            if not args['invitation_code']:
                raise ValueError('invitation_code is required')
@@ -1,8 +1,8 @@
from flask import current_app
from flask_login import current_user
from flask_restful import Resource, abort, marshal_with, reqparse

import services
from configs import dify_config
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check

@@ -48,7 +48,7 @@ class MemberInviteEmailApi(Resource):

        inviter = current_user
        invitation_results = []
        console_web_url = dify_config.CONSOLE_WEB_URL
        console_web_url = current_app.config.get("CONSOLE_WEB_URL")
        for invitee_email in invitee_emails:
            try:
                token = RegisterService.invite_new_member(inviter.current_tenant, invitee_email, interface_language, role=invitee_role, inviter=inviter)
@@ -1,11 +1,10 @@
import io

from flask import send_file
from flask import current_app, send_file
from flask_login import current_user
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden

from configs import dify_config
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required

@@ -105,7 +104,7 @@ class ToolBuiltinProviderIconApi(Resource):
    @setup_required
    def get(self, provider):
        icon_bytes, mimetype = BuiltinToolManageService.get_builtin_tool_provider_icon(provider)
        icon_cache_max_age = dify_config.TOOL_ICON_CACHE_MAX_AGE
        icon_cache_max_age = current_app.config.get('TOOL_ICON_CACHE_MAX_AGE')
        return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age)


class ToolApiProviderAddApi(Resource):
@@ -1,10 +1,9 @@
import json
from functools import wraps

from flask import abort, request
from flask import abort, current_app, request
from flask_login import current_user

from configs import dify_config
from controllers.console.workspace.error import AccountNotInitializedError
from services.feature_service import FeatureService
from services.operation_service import OperationService

@@ -27,7 +26,7 @@ def account_initialization_required(view):
def only_edition_cloud(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        if dify_config.EDITION != 'CLOUD':
        if current_app.config['EDITION'] != 'CLOUD':
            abort(404)

        return view(*args, **kwargs)

@@ -38,7 +37,7 @@ def only_edition_cloud(view):
def only_edition_self_hosted(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        if dify_config.EDITION != 'SELF_HOSTED':
        if current_app.config['EDITION'] != 'SELF_HOSTED':
            abort(404)

        return view(*args, **kwargs)
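A stripped-down sketch of the edition-gating decorators above, runnable without Flask (a module constant stands in for the config lookup and a plain exception for flask.abort):

```python
from functools import wraps

EDITION = 'SELF_HOSTED'  # stands in for dify_config.EDITION


def only_edition_self_hosted(view):
    @wraps(view)  # preserves the view's __name__, which Flask uses for endpoint names
    def decorated(*args, **kwargs):
        if EDITION != 'SELF_HOSTED':
            raise PermissionError(404)  # the real decorator calls flask.abort(404)
        return view(*args, **kwargs)
    return decorated


@only_edition_self_hosted
def setup_view():
    return 'ok'


print(setup_view())  # -> ok
```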
@@ -342,14 +342,10 @@ class FunctionCallAgentRunner(BaseAgentRunner):
        """
        tool_calls = []
        for prompt_message in llm_result_chunk.delta.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != '':
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append((
                prompt_message.id,
                prompt_message.function.name,
                args,
                json.loads(prompt_message.function.arguments),
            ))

        return tool_calls

@@ -363,14 +359,10 @@ class FunctionCallAgentRunner(BaseAgentRunner):
        """
        tool_calls = []
        for prompt_message in llm_result.message.tool_calls:
            args = {}
            if prompt_message.function.arguments != '':
                args = json.loads(prompt_message.function.arguments)

            tool_calls.append((
                prompt_message.id,
                prompt_message.function.name,
                args,
                json.loads(prompt_message.function.arguments),
            ))

        return tool_calls
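The guarded variant in these two hunks avoids a crash on providers that emit a tool call whose arguments payload has not arrived yet. The guard in isolation:

```python
import json


def parse_tool_call_arguments(raw_arguments: str) -> dict:
    # json.loads('') raises json.JSONDecodeError, so an empty payload --
    # common mid-stream -- must fall back to an empty dict.
    args: dict = {}
    if raw_arguments != '':
        args = json.loads(raw_arguments)
    return args


print(parse_tool_call_arguments(''))                    # {}
print(parse_tool_call_arguments('{"city": "Berlin"}'))  # {'city': 'Berlin'}
```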
@@ -1,7 +1,6 @@
from collections.abc import Mapping
from typing import Any
from typing import Optional, Union

from core.app.app_config.entities import AppAdditionalFeatures
from core.app.app_config.entities import AppAdditionalFeatures, EasyUIBasedAppModelConfigFrom
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.app_config.features.more_like_this.manager import MoreLikeThisConfigManager
from core.app.app_config.features.opening_statement.manager import OpeningStatementConfigManager

@@ -11,19 +10,37 @@ from core.app.app_config.features.suggested_questions_after_answer.manager import (
    SuggestedQuestionsAfterAnswerConfigManager,
)
from core.app.app_config.features.text_to_speech.manager import TextToSpeechConfigManager
from models.model import AppMode
from models.model import AppMode, AppModelConfig


class BaseAppConfigManager:

    @classmethod
    def convert_features(cls, config_dict: Mapping[str, Any], app_mode: AppMode) -> AppAdditionalFeatures:
    def convert_to_config_dict(cls, config_from: EasyUIBasedAppModelConfigFrom,
                               app_model_config: Union[AppModelConfig, dict],
                               config_dict: Optional[dict] = None) -> dict:
        """
        Convert app model config to config dict
        :param config_from: app model config from
        :param app_model_config: app model config
        :param config_dict: app model config dict
        :return:
        """
        if config_from != EasyUIBasedAppModelConfigFrom.ARGS:
            app_model_config_dict = app_model_config.to_dict()
            config_dict = app_model_config_dict.copy()

        return config_dict

    @classmethod
    def convert_features(cls, config_dict: dict, app_mode: AppMode) -> AppAdditionalFeatures:
        """
        Convert app config to app model config

        :param config_dict: app config
        :param app_mode: app mode
        """
        config_dict = dict(config_dict.items())
        config_dict = config_dict.copy()

        additional_features = AppAdditionalFeatures()
        additional_features.show_retrieve_source = RetrievalResourceConfigManager.convert(
@@ -62,12 +62,7 @@ class DatasetConfigManager:
            return None

        # dataset configs
        if 'dataset_configs' in config and config.get('dataset_configs'):
            dataset_configs = config.get('dataset_configs')
        else:
            dataset_configs = {
                'retrieval_model': 'multiple'
            }
        dataset_configs = config.get('dataset_configs', {'retrieval_model': 'single'})
        query_variable = config.get('dataset_query_variable')

        if dataset_configs['retrieval_model'] == 'single':

@@ -88,10 +83,9 @@ class DatasetConfigManager:
                retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
                    dataset_configs['retrieval_model']
                ),
                top_k=dataset_configs.get('top_k', 4),
                top_k=dataset_configs.get('top_k'),
                score_threshold=dataset_configs.get('score_threshold'),
                reranking_model=dataset_configs.get('reranking_model'),
                weights=dataset_configs.get('weights')
                reranking_model=dataset_configs.get('reranking_model')
            )
        )
@@ -159,11 +159,7 @@ class DatasetRetrieveConfigEntity(BaseModel):
    retrieve_strategy: RetrieveStrategy
    top_k: Optional[int] = None
    score_threshold: Optional[float] = None
    rerank_mode: Optional[str] = 'reranking_model'
    reranking_model: Optional[dict] = None
    weights: Optional[dict] = None


class DatasetEntity(BaseModel):
@@ -1,12 +1,11 @@
from collections.abc import Mapping
from typing import Any, Optional
from typing import Optional

from core.app.app_config.entities import FileExtraConfig


class FileUploadConfigManager:
    @classmethod
    def convert(cls, config: Mapping[str, Any], is_vision: bool = True) -> Optional[FileExtraConfig]:
    def convert(cls, config: dict, is_vision: bool = True) -> Optional[FileExtraConfig]:
        """
        Convert model config to model config
@@ -3,13 +3,13 @@ from core.app.app_config.entities import TextToSpeechEntity

class TextToSpeechConfigManager:
    @classmethod
    def convert(cls, config: dict):
    def convert(cls, config: dict) -> bool:
        """
        Convert model config to model config

        :param config: model config args
        """
        text_to_speech = None
        text_to_speech = False
        text_to_speech_dict = config.get('text_to_speech')
        if text_to_speech_dict:
            if text_to_speech_dict.get('enabled'):
@@ -1,4 +1,3 @@
import contextvars
import logging
import os
import threading

@@ -9,7 +8,6 @@ from typing import Union
from flask import Flask, current_app
from pydantic import ValidationError

import contexts
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager
from core.app.apps.advanced_chat.app_runner import AdvancedChatAppRunner

@@ -109,7 +107,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
            extras=extras,
            trace_manager=trace_manager
        )
        contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)

        return self._generate(
            app_model=app_model,

@@ -176,7 +173,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
                inputs=args['inputs']
            )
        )
        contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)

        return self._generate(
            app_model=app_model,

@@ -229,8 +225,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
            'queue_manager': queue_manager,
            'conversation_id': conversation.id,
            'message_id': message.id,
            'user': user,
            'context': contextvars.copy_context()
        })

        worker_thread.start()

@@ -255,9 +249,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
                         application_generate_entity: AdvancedChatAppGenerateEntity,
                         queue_manager: AppQueueManager,
                         conversation_id: str,
                         message_id: str,
                         user: Account,
                         context: contextvars.Context) -> None:
                         message_id: str) -> None:
        """
        Generate worker in a new thread.
        :param flask_app: Flask app

@@ -267,8 +259,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
        :param message_id: message ID
        :return:
        """
        for var, val in context.items():
            var.set(val)
        with flask_app.app_context():
            try:
                runner = AdvancedChatAppRunner()
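The `context: contextvars.Context` parameter and the `for var, val in context.items(): var.set(val)` loop in this file implement cross-thread context propagation: a new thread starts with an empty context, so the request thread snapshots its context with `copy_context()` and the worker re-applies every captured variable. A minimal standalone sketch:

```python
import contextvars
import threading

request_id: contextvars.ContextVar[str] = contextvars.ContextVar('request_id')


def worker(context: contextvars.Context) -> None:
    # Re-apply every ContextVar captured in the spawning thread; without this
    # loop, request_id.get() here would raise LookupError.
    for var, val in context.items():
        var.set(val)
    print('worker sees', request_id.get())


request_id.set('req-42')
thread = threading.Thread(target=worker,
                          kwargs={'context': contextvars.copy_context()})
thread.start()
thread.join()  # prints: worker sees req-42
```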
@@ -1,8 +1,7 @@
import logging
import os
import time
from collections.abc import Mapping
from typing import Any, Optional, cast
from typing import Optional, cast

from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfig
from core.app.apps.advanced_chat.workflow_event_trigger_callback import WorkflowEventTriggerCallback

@@ -15,7 +14,6 @@ from core.app.entities.app_invoke_entities import (
)
from core.app.entities.queue_entities import QueueAnnotationReplyEvent, QueueStopEvent, QueueTextChunkEvent
from core.moderation.base import ModerationException
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.entities.node_entities import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.workflow_engine_manager import WorkflowEngineManager

@@ -89,7 +87,7 @@ class AdvancedChatAppRunner(AppRunner):

        db.session.close()

        workflow_callbacks: list[WorkflowCallback] = [WorkflowEventTriggerCallback(
        workflow_callbacks = [WorkflowEventTriggerCallback(
            queue_manager=queue_manager,
            workflow=workflow
        )]

@@ -163,7 +161,7 @@ class AdvancedChatAppRunner(AppRunner):
        self, queue_manager: AppQueueManager,
        app_record: App,
        app_generate_entity: AdvancedChatAppGenerateEntity,
        inputs: Mapping[str, Any],
        inputs: dict,
        query: str,
        message_id: str
    ) -> bool:
@@ -1,11 +1,9 @@
import json
from collections.abc import Generator
from typing import Any, cast
from typing import cast

from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.task_entities import (
    AppBlockingResponse,
    AppStreamResponse,
    ChatbotAppBlockingResponse,
    ChatbotAppStreamResponse,
    ErrorStreamResponse,

@@ -20,13 +18,12 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
    _blocking_response_type = ChatbotAppBlockingResponse

    @classmethod
    def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]:
    def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse) -> dict:
        """
        Convert blocking full response.
        :param blocking_response: blocking response
        :return:
        """
        blocking_response = cast(ChatbotAppBlockingResponse, blocking_response)
        response = {
            'event': 'message',
            'task_id': blocking_response.task_id,

@@ -42,7 +39,7 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
        return response

    @classmethod
    def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]:
    def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse) -> dict:
        """
        Convert blocking simple response.
        :param blocking_response: blocking response

@@ -56,7 +53,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
        return response

    @classmethod
    def convert_stream_full_response(cls, stream_response: Generator[AppStreamResponse, None, None]) -> Generator[str, Any, None]:
    def convert_stream_full_response(cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]) \
            -> Generator[str, None, None]:
        """
        Convert stream full response.
        :param stream_response: stream response

@@ -85,7 +83,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
            yield json.dumps(response_chunk)

    @classmethod
    def convert_stream_simple_response(cls, stream_response: Generator[AppStreamResponse, None, None]) -> Generator[str, Any, None]:
    def convert_stream_simple_response(cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]) \
            -> Generator[str, None, None]:
        """
        Convert stream simple response.
        :param stream_response: stream response
@@ -118,7 +118,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
        self._stream_generate_routes = self._get_stream_generate_routes()
        self._conversation_name_generate_thread = None

    def process(self):
    def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]:
        """
        Process generate task pipeline.
        :return:

@@ -141,7 +141,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
        else:
            return self._to_blocking_response(generator)

    def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) -> ChatbotAppBlockingResponse:
    def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) \
            -> ChatbotAppBlockingResponse:
        """
        Process blocking response.
        :return:

@@ -171,7 +172,8 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc

        raise Exception('Queue listening stopped unexpectedly.')

    def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) -> Generator[ChatbotAppStreamResponse, Any, None]:
    def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) \
            -> Generator[ChatbotAppStreamResponse, None, None]:
        """
        To stream response.
        :return:
@@ -14,13 +14,13 @@ from core.app.entities.queue_entities import (
    QueueWorkflowStartedEvent,
    QueueWorkflowSucceededEvent,
)
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.callbacks.base_workflow_callback import BaseWorkflowCallback
from core.workflow.entities.base_node_data_entities import BaseNodeData
from core.workflow.entities.node_entities import NodeType
from models.workflow import Workflow


class WorkflowEventTriggerCallback(WorkflowCallback):
class WorkflowEventTriggerCallback(BaseWorkflowCallback):

    def __init__(self, queue_manager: AppQueueManager, workflow: Workflow):
        self._queue_manager = queue_manager
@@ -1,7 +1,7 @@
import logging
from abc import ABC, abstractmethod
from collections.abc import Generator
from typing import Any, Union
from typing import Union

from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.entities.task_entities import AppBlockingResponse, AppStreamResponse

@@ -15,41 +15,44 @@ class AppGenerateResponseConverter(ABC):
    @classmethod
    def convert(cls, response: Union[
        AppBlockingResponse,
        Generator[AppStreamResponse, Any, None]
    ], invoke_from: InvokeFrom):
        Generator[AppStreamResponse, None, None]
    ], invoke_from: InvokeFrom) -> Union[
        dict,
        Generator[str, None, None]
    ]:
        if invoke_from in [InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API]:
            if isinstance(response, AppBlockingResponse):
            if isinstance(response, cls._blocking_response_type):
                return cls.convert_blocking_full_response(response)
            else:
                def _generate_full_response() -> Generator[str, Any, None]:
                def _generate():
                    for chunk in cls.convert_stream_full_response(response):
                        if chunk == 'ping':
                            yield f'event: {chunk}\n\n'
                        else:
                            yield f'data: {chunk}\n\n'

                return _generate_full_response()
                return _generate()
        else:
            if isinstance(response, AppBlockingResponse):
            if isinstance(response, cls._blocking_response_type):
                return cls.convert_blocking_simple_response(response)
            else:
                def _generate_simple_response() -> Generator[str, Any, None]:
                def _generate():
                    for chunk in cls.convert_stream_simple_response(response):
                        if chunk == 'ping':
                            yield f'event: {chunk}\n\n'
                        else:
                            yield f'data: {chunk}\n\n'

                return _generate_simple_response()
                return _generate()

    @classmethod
    @abstractmethod
    def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]:
    def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict:
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]:
    def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict:
        raise NotImplementedError

    @classmethod

@@ -65,7 +68,7 @@ class AppGenerateResponseConverter(ABC):
        raise NotImplementedError

    @classmethod
    def _get_simple_metadata(cls, metadata: dict[str, Any]):
    def _get_simple_metadata(cls, metadata: dict) -> dict:
        """
        Get simple metadata.
        :param metadata: metadata
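The nested `_generate` helpers above frame chunks as server-sent events. The framing itself is small enough to isolate (the payload below is illustrative):

```python
import json
from collections.abc import Generator, Iterable


def to_sse(chunks: Iterable[str]) -> Generator[str, None, None]:
    for chunk in chunks:
        if chunk == 'ping':
            # keep-alive frames carry an event name instead of data
            yield f'event: {chunk}\n\n'
        else:
            # every SSE frame ends with a blank line
            yield f'data: {chunk}\n\n'


payload = json.dumps({'event': 'message', 'answer': 'hi'})
for frame in to_sse([payload, 'ping']):
    print(repr(frame))
# 'data: {"event": "message", "answer": "hi"}\n\n'
# 'event: ping\n\n'
```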
@@ -1,4 +1,3 @@
import contextvars
import logging
import os
import threading

@@ -9,7 +8,6 @@ from typing import Union
from flask import Flask, current_app
from pydantic import ValidationError

import contexts
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.base_app_generator import BaseAppGenerator
from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedException, PublishFrom

@@ -40,7 +38,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
                 invoke_from: InvokeFrom,
                 stream: bool = True,
                 call_depth: int = 0,
                 ):
                 ) -> Union[dict, Generator[dict, None, None]]:
        """
        Generate App response.

@@ -88,7 +86,6 @@ class WorkflowAppGenerator(BaseAppGenerator):
            call_depth=call_depth,
            trace_manager=trace_manager
        )
        contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)

        return self._generate(
            app_model=app_model,

@@ -129,8 +126,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
        worker_thread = threading.Thread(target=self._generate_worker, kwargs={
            'flask_app': current_app._get_current_object(),
            'application_generate_entity': application_generate_entity,
            'queue_manager': queue_manager,
            'context': contextvars.copy_context()
            'queue_manager': queue_manager
        })

        worker_thread.start()

@@ -154,7 +150,8 @@ class WorkflowAppGenerator(BaseAppGenerator):
                            node_id: str,
                            user: Account,
                            args: dict,
                            stream: bool = True):
                            stream: bool = True) \
            -> Union[dict, Generator[dict, None, None]]:
        """
        Generate App response.

@@ -196,7 +193,6 @@ class WorkflowAppGenerator(BaseAppGenerator):
                inputs=args['inputs']
            )
        )
        contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)

        return self._generate(
            app_model=app_model,

@@ -209,8 +205,7 @@ class WorkflowAppGenerator(BaseAppGenerator):

    def _generate_worker(self, flask_app: Flask,
                         application_generate_entity: WorkflowAppGenerateEntity,
                         queue_manager: AppQueueManager,
                         context: contextvars.Context) -> None:
                         queue_manager: AppQueueManager) -> None:
        """
        Generate worker in a new thread.
        :param flask_app: Flask app

@@ -218,8 +213,6 @@ class WorkflowAppGenerator(BaseAppGenerator):
        :param queue_manager: queue manager
        :return:
        """
        for var, val in context.items():
            var.set(val)
        with flask_app.app_context():
            try:
                # workflow app
@@ -10,7 +10,6 @@ from core.app.entities.app_invoke_entities import (
    InvokeFrom,
    WorkflowAppGenerateEntity,
)
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.entities.node_entities import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.workflow_engine_manager import WorkflowEngineManager

@@ -58,7 +57,7 @@ class WorkflowAppRunner:

        db.session.close()

        workflow_callbacks: list[WorkflowCallback] = [WorkflowEventTriggerCallback(
        workflow_callbacks = [WorkflowEventTriggerCallback(
            queue_manager=queue_manager,
            workflow=workflow
        )]
@@ -14,13 +14,13 @@ from core.app.entities.queue_entities import (
    QueueWorkflowStartedEvent,
    QueueWorkflowSucceededEvent,
)
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.callbacks.base_workflow_callback import BaseWorkflowCallback
from core.workflow.entities.base_node_data_entities import BaseNodeData
from core.workflow.entities.node_entities import NodeType
from models.workflow import Workflow


class WorkflowEventTriggerCallback(WorkflowCallback):
class WorkflowEventTriggerCallback(BaseWorkflowCallback):

    def __init__(self, queue_manager: AppQueueManager, workflow: Workflow):
        self._queue_manager = queue_manager
@@ -2,7 +2,7 @@ from typing import Optional

from core.app.entities.queue_entities import AppQueueEvent
from core.model_runtime.utils.encoders import jsonable_encoder
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.callbacks.base_workflow_callback import BaseWorkflowCallback
from core.workflow.entities.base_node_data_entities import BaseNodeData
from core.workflow.entities.node_entities import NodeType

@@ -15,7 +15,7 @@ _TEXT_COLOR_MAPPING = {
}


class WorkflowLoggingCallback(WorkflowCallback):
class WorkflowLoggingCallback(BaseWorkflowCallback):

    def __init__(self) -> None:
        self.current_node_id = None
@@ -1,4 +1,3 @@
from collections.abc import Mapping
from enum import Enum
from typing import Any, Optional

@@ -77,7 +76,7 @@ class AppGenerateEntity(BaseModel):
    # app config
    app_config: AppConfig

    inputs: Mapping[str, Any]
    inputs: dict[str, Any]
    files: list[FileVar] = []
    user_id: str

@@ -141,7 +140,7 @@ class AdvancedChatAppGenerateEntity(AppGenerateEntity):
    app_config: WorkflowUIBasedAppConfig

    conversation_id: Optional[str] = None
    query: str
    query: Optional[str] = None

    class SingleIterationRunEntity(BaseModel):
        """
@@ -1,30 +0,0 @@
from .segment_group import SegmentGroup
from .segments import NoneSegment, Segment
from .types import SegmentType
from .variables import (
    ArrayVariable,
    FileVariable,
    FloatVariable,
    IntegerVariable,
    NoneVariable,
    ObjectVariable,
    SecretVariable,
    StringVariable,
    Variable,
)

__all__ = [
    'IntegerVariable',
    'FloatVariable',
    'ObjectVariable',
    'SecretVariable',
    'FileVariable',
    'StringVariable',
    'ArrayVariable',
    'Variable',
    'SegmentType',
    'SegmentGroup',
    'Segment',
    'NoneSegment',
    'NoneVariable',
]
@@ -1,67 +0,0 @@
from collections.abc import Mapping
from typing import Any

from core.file.file_obj import FileVar

from .segments import Segment, StringSegment
from .types import SegmentType
from .variables import (
    ArrayVariable,
    FileVariable,
    FloatVariable,
    IntegerVariable,
    NoneVariable,
    ObjectVariable,
    SecretVariable,
    StringVariable,
    Variable,
)


def build_variable_from_mapping(m: Mapping[str, Any], /) -> Variable:
    if (value_type := m.get('value_type')) is None:
        raise ValueError('missing value type')
    if not m.get('name'):
        raise ValueError('missing name')
    if (value := m.get('value')) is None:
        raise ValueError('missing value')
    match value_type:
        case SegmentType.STRING:
            return StringVariable.model_validate(m)
        case SegmentType.NUMBER if isinstance(value, int):
            return IntegerVariable.model_validate(m)
        case SegmentType.NUMBER if isinstance(value, float):
            return FloatVariable.model_validate(m)
        case SegmentType.SECRET:
            return SecretVariable.model_validate(m)
        case SegmentType.NUMBER if not isinstance(value, float | int):
            raise ValueError(f'invalid number value {value}')
    raise ValueError(f'not supported value type {value_type}')


def build_anonymous_variable(value: Any, /) -> Variable:
    if value is None:
        return NoneVariable(name='anonymous')
    if isinstance(value, str):
        return StringVariable(name='anonymous', value=value)
    if isinstance(value, int):
        return IntegerVariable(name='anonymous', value=value)
    if isinstance(value, float):
        return FloatVariable(name='anonymous', value=value)
    if isinstance(value, dict):
        # TODO: Limit the depth of the object
        obj = {k: build_anonymous_variable(v) for k, v in value.items()}
        return ObjectVariable(name='anonymous', value=obj)
    if isinstance(value, list):
        # TODO: Limit the depth of the array
        elements = [build_anonymous_variable(v) for v in value]
        return ArrayVariable(name='anonymous', value=elements)
    if isinstance(value, FileVar):
        return FileVariable(name='anonymous', value=value)
    raise ValueError(f'not supported value {value}')


def build_segment(value: Any, /) -> Segment:
    if isinstance(value, str):
        return StringSegment(value=value)
    raise ValueError(f'not supported value {value}')
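
A quick usage sketch of the removed factory helpers above (illustrative only, not part of the diff; the 'greeting' mapping and the nested dict are hypothetical values):

    var = build_variable_from_mapping({'value_type': SegmentType.STRING, 'name': 'greeting', 'value': 'hello'})
    assert isinstance(var, StringVariable) and var.to_object() == 'hello'

    nested = build_anonymous_variable({'count': 1, 'tags': ['a', 'b']})
    assert nested.to_object() == {'count': 1, 'tags': ['a', 'b']}
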
@@ -1,17 +0,0 @@
import re

from core.app.segments import SegmentGroup, factory
from core.workflow.entities.variable_pool import VariablePool

VARIABLE_PATTERN = re.compile(r'\{\{#([a-zA-Z0-9_]{1,50}(?:\.[a-zA-Z_][a-zA-Z0-9_]{0,29}){1,10})#\}\}')


def convert_template(*, template: str, variable_pool: VariablePool):
    parts = re.split(VARIABLE_PATTERN, template)
    segments = []
    for part in parts:
        if '.' in part and (value := variable_pool.get(part.split('.'))):
            segments.append(value)
        else:
            segments.append(factory.build_segment(part))
    return SegmentGroup(segments=segments)
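
A minimal sketch of how VARIABLE_PATTERN above behaves (the 'sys.query' selector is just an example): re.split keeps the capturing group, so literal text and variable selectors alternate in the parts list that convert_template walks.

    parts = re.split(VARIABLE_PATTERN, 'Hello {{#sys.query#}}!')
    assert parts == ['Hello ', 'sys.query', '!']
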
@@ -1,19 +0,0 @@
from pydantic import BaseModel

from .segments import Segment


class SegmentGroup(BaseModel):
    segments: list[Segment]

    @property
    def text(self):
        return ''.join([segment.text for segment in self.segments])

    @property
    def log(self):
        return ''.join([segment.log for segment in self.segments])

    @property
    def markdown(self):
        return ''.join([segment.markdown for segment in self.segments])
@@ -1,65 +0,0 @@
from typing import Any

from pydantic import BaseModel, ConfigDict, field_validator

from .types import SegmentType


class Segment(BaseModel):
    model_config = ConfigDict(frozen=True)

    value_type: SegmentType
    value: Any

    @field_validator('value_type')
    def validate_value_type(cls, value):
        """
        This validator checks if the provided value is equal to the default value of the 'value_type' field.
        If the value is different, a ValueError is raised.
        """
        if value != cls.model_fields['value_type'].default:
            raise ValueError("Cannot modify 'value_type'")
        return value

    @property
    def text(self) -> str:
        return str(self.value)

    @property
    def log(self) -> str:
        return str(self.value)

    @property
    def markdown(self) -> str:
        return str(self.value)

    def to_object(self) -> Any:
        if isinstance(self.value, Segment):
            return self.value.to_object()
        if isinstance(self.value, list):
            return [v.to_object() for v in self.value]
        if isinstance(self.value, dict):
            return {k: v.to_object() for k, v in self.value.items()}
        return self.value


class NoneSegment(Segment):
    value_type: SegmentType = SegmentType.NONE
    value: None = None

    @property
    def text(self) -> str:
        return 'null'

    @property
    def log(self) -> str:
        return 'null'

    @property
    def markdown(self) -> str:
        return 'null'


class StringSegment(Segment):
    value_type: SegmentType = SegmentType.STRING
    value: str
@@ -1,11 +0,0 @@
from enum import Enum


class SegmentType(str, Enum):
    NONE = 'none'
    NUMBER = 'number'
    STRING = 'string'
    SECRET = 'secret'
    ARRAY = 'array'
    OBJECT = 'object'
    FILE = 'file'
@@ -1,88 +0,0 @@
import json
from collections.abc import Mapping, Sequence

from pydantic import Field

from core.file.file_obj import FileVar
from core.helper import encrypter

from .segments import NoneSegment, Segment, StringSegment
from .types import SegmentType


class Variable(Segment):
    """
    A variable is a segment that has a name.
    """

    id: str = Field(
        default='',
        description="Unique identity for variable. It's only used by environment variables now.",
    )
    name: str


class StringVariable(StringSegment, Variable):
    pass


class FloatVariable(Variable):
    value_type: SegmentType = SegmentType.NUMBER
    value: float


class IntegerVariable(Variable):
    value_type: SegmentType = SegmentType.NUMBER
    value: int


class ObjectVariable(Variable):
    value_type: SegmentType = SegmentType.OBJECT
    value: Mapping[str, Variable]

    @property
    def text(self) -> str:
        # TODO: Process variables.
        return json.dumps(self.model_dump()['value'], ensure_ascii=False)

    @property
    def log(self) -> str:
        # TODO: Process variables.
        return json.dumps(self.model_dump()['value'], ensure_ascii=False, indent=2)

    @property
    def markdown(self) -> str:
        # TODO: Use markdown code block
        return json.dumps(self.model_dump()['value'], ensure_ascii=False, indent=2)


class ArrayVariable(Variable):
    value_type: SegmentType = SegmentType.ARRAY
    value: Sequence[Variable]

    @property
    def markdown(self) -> str:
        return '\n'.join(['- ' + item.markdown for item in self.value])


class FileVariable(Variable):
    value_type: SegmentType = SegmentType.FILE
    # TODO: embed FileVar in this model.
    value: FileVar

    @property
    def markdown(self) -> str:
        return self.value.to_markdown()


class SecretVariable(StringVariable):
    value_type: SegmentType = SegmentType.SECRET

    @property
    def log(self) -> str:
        return encrypter.obfuscated_token(self.value)


class NoneVariable(NoneSegment, Variable):
    value_type: SegmentType = SegmentType.NONE
    value: None = None
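
A small sketch against the classes above (values are hypothetical): nested variables unwrap recursively through Segment.to_object(), while SecretVariable masks its value in logs via encrypter.obfuscated_token.

    obj = ObjectVariable(name='cfg', value={'n': IntegerVariable(name='n', value=1)})
    assert obj.to_object() == {'n': 1}
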
@@ -1,11 +1,9 @@
import os
from collections.abc import Mapping, Sequence
from typing import Any, Optional, TextIO, Union

from pydantic import BaseModel

from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
from core.tools.entities.tool_entities import ToolInvokeMessage

_TEXT_COLOR_MAPPING = {
    "blue": "36;1",
@@ -45,7 +43,7 @@ class DifyAgentCallbackHandler(BaseModel):
    def on_tool_start(
        self,
        tool_name: str,
        tool_inputs: Mapping[str, Any],
        tool_inputs: dict[str, Any],
    ) -> None:
        """Do nothing."""
        print_text("\n[on_tool_start] ToolCall:" + tool_name + "\n" + str(tool_inputs) + "\n", color=self.color)
@@ -53,8 +51,8 @@ class DifyAgentCallbackHandler(BaseModel):
    def on_tool_end(
        self,
        tool_name: str,
        tool_inputs: Mapping[str, Any],
        tool_outputs: Sequence[ToolInvokeMessage],
        tool_inputs: dict[str, Any],
        tool_outputs: str,
        message_id: Optional[str] = None,
        timer: Optional[Any] = None,
        trace_manager: Optional[TraceQueueManager] = None

@@ -1,5 +1,4 @@
from collections.abc import Mapping, Sequence
from typing import Any, Union
from typing import Union

import requests

@@ -17,7 +16,7 @@ class MessageFileParser:
        self.tenant_id = tenant_id
        self.app_id = app_id

    def validate_and_transform_files_arg(self, files: Sequence[Mapping[str, Any]], file_extra_config: FileExtraConfig,
    def validate_and_transform_files_arg(self, files: list[dict], file_extra_config: FileExtraConfig,
                                         user: Union[Account, EndUser]) -> list[FileVar]:
        """
        validate and transform files arg

@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
CODE_EXECUTION_ENDPOINT = dify_config.CODE_EXECUTION_ENDPOINT
CODE_EXECUTION_API_KEY = dify_config.CODE_EXECUTION_API_KEY

CODE_EXECUTION_TIMEOUT = (10, 60)
CODE_EXECUTION_TIMEOUT= (10, 60)

class CodeExecutionException(Exception):
    pass
@@ -64,7 +64,7 @@ class CodeExecutor:

    @classmethod
    def execute_code(cls,
                     language: CodeLanguage,
                     language: Literal['python3', 'javascript', 'jinja2'],
                     preload: str,
                     code: str,
                     dependencies: Optional[list[CodeDependency]] = None) -> str:
@@ -119,7 +119,7 @@ class CodeExecutor:
        return response.data.stdout

    @classmethod
    def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: dict, dependencies: Optional[list[CodeDependency]] = None) -> dict:
    def execute_workflow_code_template(cls, language: Literal['python3', 'javascript', 'jinja2'], code: str, inputs: dict, dependencies: Optional[list[CodeDependency]] = None) -> dict:
        """
        Execute code
        :param language: code language

@@ -6,16 +6,11 @@ from models.account import Tenant


def obfuscated_token(token: str):
    if not token:
        return token
    if len(token) <= 8:
        return '*' * 20
    return token[:6] + '*' * 12 + token[-2:]
    return token[:6] + '*' * (len(token) - 8) + token[-2:]


def encrypt_token(tenant_id: str, token: str):
    if not (tenant := db.session.query(Tenant).filter(Tenant.id == tenant_id).first()):
        raise ValueError(f'Tenant with id {tenant_id} not found')
    tenant = db.session.query(Tenant).filter(Tenant.id == tenant_id).first()
    encrypted_token = rsa.encrypt(token, tenant.encrypt_public_key)
    return base64.b64encode(encrypted_token).decode()

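
A quick check of the two masking variants in the hunk above, using a hypothetical 14-character token:

    token = 'sk-abcdefghijk'
    assert token[:6] + '*' * (len(token) - 8) + token[-2:] == 'sk-abc******jk'  # length-preserving mask
    assert token[:6] + '*' * 12 + token[-2:] == 'sk-abc************jk'  # fixed 12-star mask
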
@@ -14,9 +14,6 @@ def get_position_map(folder_path: str, *, file_name: str = "_position.yaml") ->
    :return: a dict with name as key and index as value
    """
    position_file_name = os.path.join(folder_path, file_name)
    if not position_file_name or not os.path.exists(position_file_name):
        return {}

    positions = load_yaml_file(position_file_name, ignore_error=True)
    position_map = {}
    index = 0

@@ -3,13 +3,10 @@ import logging
import re
from typing import Optional

from core.llm_generator.output_parser.errors import OutputParserException
from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.llm_generator.prompts import (
    CONVERSATION_TITLE_PROMPT,
    GENERATOR_QA_PROMPT,
    WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
)
from core.llm_generator.prompts import CONVERSATION_TITLE_PROMPT, GENERATOR_QA_PROMPT
from core.model_manager import ModelManager
from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType
@@ -118,158 +115,55 @@ class LLMGenerator:
        return questions

    @classmethod
    def generate_rule_config(cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool) -> dict:
    def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
        output_parser = RuleConfigGeneratorOutputParser()

        error = ""
        error_step = ""
        rule_config = {
            "prompt": "",
            "variables": [],
            "opening_statement": "",
            "error": ""
        }
        model_parameters = {
            "max_tokens": 512,
            "temperature": 0.01
        }

        if no_variable:
            prompt_template = PromptTemplateParser(
                WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE
            )

            prompt_generate = prompt_template.format(
                inputs={
                    "TASK_DESCRIPTION": instruction,
                },
                remove_template_variables=False
            )

            prompt_messages = [UserPromptMessage(content=prompt_generate)]

            model_manager = ModelManager()

            model_instance = model_manager.get_default_model_instance(
                tenant_id=tenant_id,
                model_type=ModelType.LLM,
            )

            try:
                response = model_instance.invoke_llm(
                    prompt_messages=prompt_messages,
                    model_parameters=model_parameters,
                    stream=False
                )

                rule_config["prompt"] = response.message.content

            except InvokeError as e:
                error = str(e)
                error_step = "generate rule config"
            except Exception as e:
                logging.exception(e)
                rule_config["error"] = str(e)

            rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""

            return rule_config

        # get rule config prompt, parameter and statement
        prompt_generate, parameter_generate, statement_generate = output_parser.get_format_instructions()

        prompt_template = PromptTemplateParser(
            prompt_generate
            template=output_parser.get_format_instructions()
        )

        parameter_template = PromptTemplateParser(
            parameter_generate
        )

        statement_template = PromptTemplateParser(
            statement_generate
        )

        # format the prompt_generate_prompt
        prompt_generate_prompt = prompt_template.format(
        prompt = prompt_template.format(
            inputs={
                "TASK_DESCRIPTION": instruction,
                "audiences": audiences,
                "hoping_to_solve": hoping_to_solve,
                "variable": "{{variable}}",
                "lanA": "{{lanA}}",
                "lanB": "{{lanB}}",
                "topic": "{{topic}}"
            },
            remove_template_variables=False
        )
        prompt_messages = [UserPromptMessage(content=prompt_generate_prompt)]

        # get model instance
        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
        model_instance = model_manager.get_default_model_instance(
            tenant_id=tenant_id,
            model_type=ModelType.LLM,
            provider=model_config.get("provider") if model_config else None,
            model=model_config.get("name") if model_config else None,
        )

        prompt_messages = [UserPromptMessage(content=prompt)]

        try:
            try:
                # the first step to generate the task prompt
                prompt_content = model_instance.invoke_llm(
                    prompt_messages=prompt_messages,
                    model_parameters=model_parameters,
                    stream=False
                )
            except InvokeError as e:
                error = str(e)
                error_step = "generate prefix prompt"
                rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""

                return rule_config

            rule_config["prompt"] = prompt_content.message.content

            parameter_generate_prompt = parameter_template.format(
                inputs={
                    "INPUT_TEXT": prompt_content.message.content,
            response = model_instance.invoke_llm(
                prompt_messages=prompt_messages,
                model_parameters={
                    "max_tokens": 512,
                    "temperature": 0
                },
                remove_template_variables=False
                stream=False
            )
            parameter_messages = [UserPromptMessage(content=parameter_generate_prompt)]

            # the second step to generate the task_parameter and task_statement
            statement_generate_prompt = statement_template.format(
                inputs={
                    "TASK_DESCRIPTION": instruction,
                    "INPUT_TEXT": prompt_content.message.content,
                },
                remove_template_variables=False
            )
            statement_messages = [UserPromptMessage(content=statement_generate_prompt)]

            try:
                parameter_content = model_instance.invoke_llm(
                    prompt_messages=parameter_messages,
                    model_parameters=model_parameters,
                    stream=False
                )
                rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.content)
            except InvokeError as e:
                error = str(e)
                error_step = "generate variables"

            try:
                statement_content = model_instance.invoke_llm(
                    prompt_messages=statement_messages,
                    model_parameters=model_parameters,
                    stream=False
                )
                rule_config["opening_statement"] = statement_content.message.content
            except InvokeError as e:
                error = str(e)
                error_step = "generate conversation opener"

            rule_config = output_parser.parse(response.message.content)
        except InvokeError as e:
            raise e
        except OutputParserException:
            raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
        except Exception as e:
            logging.exception(e)
            rule_config["error"] = str(e)

        rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
            rule_config = {
                "prompt": "",
                "variables": [],
                "opening_statement": ""
            }

        return rule_config


@@ -1,18 +1,14 @@
from typing import Any

from core.llm_generator.output_parser.errors import OutputParserException
from core.llm_generator.prompts import (
    RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE,
    RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
    RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE,
)
from core.llm_generator.prompts import RULE_CONFIG_GENERATE_TEMPLATE
from libs.json_in_md_parser import parse_and_check_json_markdown


class RuleConfigGeneratorOutputParser:

    def get_format_instructions(self) -> tuple[str, str, str]:
        return RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE, RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE
    def get_format_instructions(self) -> str:
        return RULE_CONFIG_GENERATE_TEMPLATE

    def parse(self, text: str) -> Any:
        try:

@@ -81,73 +81,65 @@ GENERATOR_QA_PROMPT = (
    '<QA Pairs>'
)

WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
Here is a task description for which I would like you to create a high-quality prompt template for:
<task_description>
{{TASK_DESCRIPTION}}
</task_description>
Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
- Do not include <input> or <output> section and variables in the prompt, assume user will add them at their own will.
- Clear instructions for the AI that will be using this prompt, demarcated with <instructions> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also specify in the instructions that the output should not contain any xml tag.
- Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not include variables in the prompt. Give three pairs of input and output examples.
- Include other relevant sections demarcated with appropriate XML tags like <examples>, <instructions>.
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template with at least 300 words and output only the prompt template.
"""
RULE_CONFIG_GENERATE_TEMPLATE = """Given MY INTENDED AUDIENCES and HOPING TO SOLVE using a language model, please select \
the model prompt that best suits the input.
You will be provided with the prompt, variables, and an opening statement.
Only the content enclosed in double curly braces, such as {{variable}}, in the prompt can be considered as a variable; \
otherwise, it cannot exist as a variable in the variables.
If you believe revising the original input will result in a better response from the language model, you may \
suggest revisions.

RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
Here is a task description for which I would like you to create a high-quality prompt template for:
<task_description>
{{TASK_DESCRIPTION}}
</task_description>
Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
- Descriptive variable names surrounded by {{ }} (two curly brackets) to indicate where the actual values will be substituted in. Choose variable names that clearly indicate the type of value expected. Variable names have to be composed of number, english alphabets and underline and nothing else.
- Clear instructions for the AI that will be using this prompt, demarcated with <instructions> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also specify in the instructions that the output should not contain any xml tag.
- Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not use curly brackets any other than in <instruction> section.
- Any other relevant sections demarcated with appropriate XML tags like <input>, <output>, etc.
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template and output only the prompt template.
"""
<<PRINCIPLES OF GOOD PROMPT>>
Integrate the intended audience in the prompt e.g. the audience is an expert in the field.
Break down complex tasks into a sequence of simpler prompts in an interactive conversation.
Implement example-driven prompting (Use few-shot prompting).
When formatting your prompt start with Instruction followed by either Example if relevant. \
Subsequently present your content. Use one or more line breaks to separate instructions examples questions context and input data.
Incorporate the following phrases: “Your task is” and “You MUST”.
Incorporate the following phrases: “You will be penalized”.
Use leading words like writing “think step by step”.
Add to your prompt the following phrase “Ensure that your answer is unbiased and does not rely on stereotypes”.
Assign a role to the large language models.
Use Delimiters.
To write an essay /text /paragraph /article or any type of text that should be detailed: “Write a detailed [essay/text/paragraph] for me on [topic] in detail by adding all the information necessary”.
Clearly state the requirements that the model must follow in order to produce content in the form of the keywords regulations hint or instructions

RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """
I need to extract the following information from the input text. The <information to be extracted> tag specifies the 'type', 'description' and 'required' of the information to be extracted.
<information to be extracted>
variable names bounded by two double curly brackets. Variable name has to be composed of number, english alphabets and underline and nothing else.
</information to be extracted>
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like, \
no any other string out of markdown code snippet:
```json
{{{{
    "prompt": string \\ generated prompt
    "variables": list of string \\ variables
    "opening_statement": string \\ an opening statement to guide users on how to ask questions with generated prompt \
and fill in variables, with a welcome sentence, and keep TLDR.
}}}}
```

Step 1: Carefully read the input and understand the structure of the expected output.
Step 2: Extract relevant parameters from the provided text based on the name and description of object.
Step 3: Structure the extracted parameters to JSON object as specified in <structure>.
Step 4: Ensure that the list of variable_names is properly formatted and valid. The output should not contain any XML tags. Output an empty list if there is no valid variable name in input text.
<< EXAMPLES >>
[EXAMPLE A]
```json
{
    "prompt": "I need your help to translate the following {{Input_language}} paper paragraph into {{Target_language}}, in a style similar to a popular science magazine in {{Target_language}}. #### Rules Ensure accurate conveyance of the original text's facts and context during translation. Maintain the original paragraph format and retain technical terms and company abbreviations ",
    "variables": ["Input_language", "Target_language"],
    "opening_statement": " Hi. I am your translation assistant. I can help you with any translation and ensure accurate conveyance of information. "
}
```

### Structure
Here is the structure of the expected output, I should always follow the output structure.
["variable_name_1", "variable_name_2"]
[EXAMPLE B]
```json
{
    "prompt": "Your task is to review the provided meeting notes and create a concise summary that captures the essential information, focusing on key takeaways and action items assigned to specific individuals or departments during the meeting. Use clear and professional language, and organize the summary in a logical manner using appropriate formatting such as headings, subheadings, and bullet points. Ensure that the summary is easy to understand and provides a comprehensive but succinct overview of the meeting's content, with a particular focus on clearly indicating who is responsible for each action item.",
    "variables": ["meeting_notes"],
    "opening_statement": "Hi! I'm your meeting notes summarizer AI. I can help you with any meeting notes and ensure accurate conveyance of information."
}
```

### Input Text
Inside <text></text> XML tags, there is a text that I should extract parameters and convert to a JSON object.
<text>
{{INPUT_TEXT}}
</text>
<< MY INTENDED AUDIENCES >>
{{audiences}}

### Answer
I should always output a valid list. Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text.
"""
<< HOPING TO SOLVE >>
{{hoping_to_solve}}

RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """
<instruction>
Step 1: Identify the purpose of the chatbot from the variable {{TASK_DESCRIPTION}} and infer chatbot's tone (e.g., friendly, professional, etc.) to add personality traits.
Step 2: Create a coherent and engaging opening statement.
Step 3: Ensure the output is welcoming and clearly explains what the chatbot is designed to do. Do not include any XML tags in the output.
Please use the same language as the user's input language. If user uses chinese then generate opening statement in chinese, if user uses english then generate opening statement in english.
Example Input:
Provide customer support for an e-commerce website
Example Output:
Welcome! I'm here to assist you with any questions or issues you might have with your shopping experience. Whether you're looking for product information, need help with your order, or have any other inquiries, feel free to ask. I'm friendly, helpful, and ready to support you in any way I can.
<Task>
Here is the task description: {{INPUT_TEXT}}

You just need to generate the output
"""
<< OUTPUT >>
"""
@@ -410,7 +410,7 @@ class LBModelManager:
        self._model = model
        self._load_balancing_configs = load_balancing_configs

        for load_balancing_config in self._load_balancing_configs[:]:  # Iterate over a shallow copy of the list
        for load_balancing_config in self._load_balancing_configs:
            if load_balancing_config.name == "__inherit__":
                if not managed_credentials:
                    # remove __inherit__ if managed credentials is not provided

@@ -23,7 +23,6 @@
- tongyi
- wenxin
- moonshot
- tencent
- jina
- chatglm
- yi

@@ -501,7 +501,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
                sub_messages.append(sub_message_dict)
            message_dict = {"role": "user", "content": sub_messages}
        elif isinstance(message, AssistantPromptMessage):
            # message = cast(AssistantPromptMessage, message)
            message = cast(AssistantPromptMessage, message)
            message_dict = {"role": "assistant", "content": message.content}
            if message.tool_calls:
                message_dict["tool_calls"] = [helper.dump_model(tool_call) for tool_call in message.tool_calls]

@@ -1,7 +0,0 @@
- llama-3.1-405b-reasoning
- llama-3.1-70b-versatile
- llama-3.1-8b-instant
- llama3-70b-8192
- llama3-8b-8192
- mixtral-8x7b-32768
- llama2-70b-4096
@@ -1,25 +0,0 @@
model: llama-3.1-405b-reasoning
label:
  zh_Hans: Llama-3.1-405b-reasoning
  en_US: Llama-3.1-405b-reasoning
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 8192
pricing:
  input: '0.05'
  output: '0.1'
  unit: '0.000001'
  currency: USD
@@ -1,25 +0,0 @@
model: llama-3.1-70b-versatile
label:
  zh_Hans: Llama-3.1-70b-versatile
  en_US: Llama-3.1-70b-versatile
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 8192
pricing:
  input: '0.05'
  output: '0.1'
  unit: '0.000001'
  currency: USD
@@ -1,25 +0,0 @@
model: llama-3.1-8b-instant
label:
  zh_Hans: Llama-3.1-8b-instant
  en_US: Llama-3.1-8b-instant
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 8192
pricing:
  input: '0.05'
  output: '0.1'
  unit: '0.000001'
  currency: USD
@@ -12,9 +12,6 @@
- google/gemini-pro
- cohere/command-r-plus
- cohere/command-r
- meta-llama/llama-3.1-405b-instruct
- meta-llama/llama-3.1-70b-instruct
- meta-llama/llama-3.1-8b-instruct
- meta-llama/llama-3-70b-instruct
- meta-llama/llama-3-8b-instruct
- mistralai/mixtral-8x22b-instruct

@@ -1,23 +0,0 @@
model: meta-llama/llama-3.1-405b-instruct
label:
  en_US: llama-3.1-405b-instruct
model_type: llm
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 512
    min: 1
    max: 128000
pricing:
  input: "3"
  output: "3"
  unit: "0.000001"
  currency: USD
@@ -1,23 +0,0 @@
model: meta-llama/llama-3.1-70b-instruct
label:
  en_US: llama-3.1-70b-instruct
model_type: llm
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 512
    min: 1
    max: 128000
pricing:
  input: "0.9"
  output: "0.9"
  unit: "0.000001"
  currency: USD
@@ -1,23 +0,0 @@
model: meta-llama/llama-3.1-8b-instruct
label:
  en_US: llama-3.1-8b-instruct
model_type: llm
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 512
    min: 1
    max: 128000
pricing:
  input: "0.2"
  output: "0.2"
  unit: "0.000001"
  currency: USD
@@ -1,13 +0,0 @@
<svg viewBox="0 83.15545000000002 402.45098039215685 76.44" data-name="图层 1" id="图层_1"
xmlns="http://www.w3.org/2000/svg" style="max-height: 500px" width="402.45098039215685"
height="76.44">
<defs>
<style>.cls-1{fill:#4999d4}</style>
</defs>
<title>tencent-cloud</title>
<path
d="M27.569 113.353a17.56 17.56 0 0 1 33.148-3.743.158.158 0 0 0 .194.105 21.267 21.267 0 0 1 7.008-.729c.235.018.327-.116.25-.33a24.828 24.828 0 0 0-47.933 4.444.082.082 0 0 0 .016 0 18.537 18.537 0 0 0-9.85 31.533 18.007 18.007 0 0 0 10.325 5h-.001a43.066 43.066 0 0 0 5.266.282c1.68.011 33.725.008 35.067.008 2.7 0 4.457-.002 6.345-.14a18.245 18.245 0 0 0 11.723-5.15 18.532 18.532 0 0 0-12.901-31.789 18.06 18.06 0 0 0-11.704 4.285c-1.467 1.196-3.006 2.626-4.944 4.508-.642.625-13.336 12.94-21.67 21.028-1.16-.005-2.828-.021-4.306-.07a11.704 11.704 0 0 1-8.125-3.148A11.275 11.275 0 0 1 23.33 120.1a11.706 11.706 0 0 1 7.646 3.062c1.44 1.192 4.633 4 6.035 5.263a.17.17 0 0 0 .24.002l4.945-4.825a.176.176 0 0 0-.004-.27c-2.378-2.15-5.749-5.158-7.778-6.669a18.874 18.874 0 0 0-6.844-3.31zm46.482 26.094a11.704 11.704 0 0 1-8.125 3.147 168.92 168.92 0 0 1-5.204.073h-22.38c8.142-7.91 15.245-14.808 16.051-15.59.738-.717 2.398-2.306 3.83-3.595 3.145-2.831 5.974-3.4 7.976-3.382a11.275 11.275 0 0 1 7.852 19.347z"
class="cls-1" />
<path
d="M302.794 129.138a.24.24 0 0 0-.419-.163 16.062 16.062 0 0 1-11.961 5.469c-7.7 0-12.674-5.32-12.674-13.552 0-8.388 4.74-13.599 12.37-13.599a17.274 17.274 0 0 1 11.828 4.996.24.24 0 0 0 .414-.168v-4.245a18.595 18.595 0 0 0-12.243-4.502 15.358 15.358 0 0 0-11.733 4.845c-2.857 3.138-4.366 7.52-4.366 12.674 0 10.478 6.592 17.518 16.404 17.518a18.517 18.517 0 0 0 12.38-4.624zM93.47 104.248v3.638h11.506v29.657h3.982v-29.657h11.506v-3.638H93.47zM390.972 115.625c-2.059-2.232-5.501-3.043-7.978-3.043a11.24 11.24 0 0 0-8.363 3.475 13.26 13.26 0 0 0-3.277 9.243c0 8.883 5.894 12.86 11.735 12.86 3.412 0 6.228-1.22 7.883-3.38v2.812h3.971V104.3h-3.97zm-.105 9.769c0 4.545-2.639 9.147-7.683 9.147-3.788 0-7.865-2.803-7.865-8.958 0-4.522 2.388-9.389 7.63-9.389 5.844 0 7.918 4.956 7.918 9.2zM308.064 104.3h4.031v33.292h-4.031zM192.033 131.427a.24.24 0 0 0-.403-.18 13.772 13.772 0 0 1-8.006 3.255c-5.344 0-8.796-3.583-8.796-9.128 0-5.546 3.452-9.129 8.796-9.129a12.973 12.973 0 0 1 8.01 2.8.24.24 0 0 0 .399-.183v-3.668a17.567 17.567 0 0 0-8.621-2.615c-7.38 0-12.34 5.142-12.34 12.795 0 7.652 4.96 12.794 12.34 12.794a14.903 14.903 0 0 0 8.62-2.624zM139.984 130.642a.241.241 0 0 0-.436-.143 12.896 12.896 0 0 1-8.605 4.214 8.725 8.725 0 0 1-6.104-2.141 8.634 8.634 0 0 1-2.372-6.07h18.629v-.285c0-5.254-.896-8.233-3.194-10.625a10.42 10.42 0 0 0-7.849-3.01c-6.969 0-11.472 4.987-11.471 12.715.254 8.04 4.822 12.84 12.218 12.84a13.447 13.447 0 0 0 9.184-3.52zm-2.653-7.564h-14.872l.006-.075a7.37 7.37 0 0 1 7.481-6.93c4.341 0 7.17 2.657 7.38 6.933zM362.877 125.584c0 6.632-3.825 8.985-7.1 8.985a6.287 6.287 0 0 1-6.549-6.263V113.15h-3.79v15.088c0 5.842 4.172 9.922 10.145 9.922 3.308 0 5.89-1.114 7.53-3.235v2.66h3.614V113.15h-3.85zM156.747 112.583a9.79 9.79 0 0 0-7.364 2.887v-2.334h-3.762v24.456h3.846v-13.935a7.592 7.592 0 0 1 1.982-5.51 6.75 6.75 0 0 1 5.197-1.91 5.994 5.994 0 0 1 6.055 6.339v15.016h3.847v-15.197a9.756 9.756 0 0 0-2.767-7.26 9.907 9.907 0 0 0-7.034-2.552zM217.156 130.642a.241.241 0 0 0-.436-.143 12.896 12.896 0 0 1-8.605 4.214 8.725 8.725 0 0 1-6.104-2.141 8.634 8.634 0 0 1-2.372-6.07h18.629v-.285c0-5.254-.895-8.233-3.193-10.625a10.42 10.42 0 0 0-7.85-3.01c-6.968 0-11.471 4.987-11.471 12.715.254 8.04 4.822 12.84 12.218 12.84a13.447 13.447 0 0 0 9.184-3.52zm-2.653-7.564h-14.871l.005-.075a7.37 7.37 0 0 1 7.481-6.93c4.342 0 7.17 2.657 7.381 6.933zM233.857 112.583a9.79 9.79 0 0 0-7.365 2.887v-2.334h-3.762v24.456h3.847v-13.935a7.592 7.592 0 0 1 1.982-5.51 6.75 6.75 0 0 1 5.196-1.91 5.994 5.994 0 0 1 6.056 6.339v15.016h3.846v-15.197a9.756 9.756 0 0 0-2.767-7.26 9.907 9.907 0 0 0-7.033-2.552zM256.236 137.917a19.963 19.963 0 0 0 5.009-1.138v-3.683a.241.241 0 0 0-.321-.229A29.455 29.455 0 0 1 256.8 134c-.402.064-.756.12-1.02-.104a.897.897 0 0 1-.263-.777V116.69h7.04v-3.559h-7.04v-6.516h-3.997v6.516h-4.012v3.558h4.012v16.815a4.207 4.207 0 0 0 1.309 3.327 5.088 5.088 0 0 0 3.406 1.085zM329.224 112.63c-7.093 0-11.859 5.13-11.859 12.764s4.766 12.764 11.859 12.764 11.858-5.13 11.858-12.764-4.765-12.764-11.858-12.764zm-8.18 12.739l-.001-.01c.014-5.41 3.299-9.043 8.18-9.043 4.893 0 8.18 3.648 8.182 9.078-.002 5.429-3.29 9.078-8.181 9.078-4.89 0-8.175-3.678-8.18-9.103z" />
</svg>

Before Width: | Height: | Size: 4.5 KiB
@@ -1,13 +0,0 @@
<svg viewBox="0 83.15545000000002 402.45098039215685 76.44" data-name="图层 1" id="图层_1"
xmlns="http://www.w3.org/2000/svg" style="max-height: 500px" width="402.45098039215685"
height="76.44">
<defs>
<style>.cls-1{fill:#4999d4}</style>
</defs>
<title>tencent-cloud</title>
<path
d="M27.569 113.353a17.56 17.56 0 0 1 33.148-3.743.158.158 0 0 0 .194.105 21.267 21.267 0 0 1 7.008-.729c.235.018.327-.116.25-.33a24.828 24.828 0 0 0-47.933 4.444.082.082 0 0 0 .016 0 18.537 18.537 0 0 0-9.85 31.533 18.007 18.007 0 0 0 10.325 5h-.001a43.066 43.066 0 0 0 5.266.282c1.68.011 33.725.008 35.067.008 2.7 0 4.457-.002 6.345-.14a18.245 18.245 0 0 0 11.723-5.15 18.532 18.532 0 0 0-12.901-31.789 18.06 18.06 0 0 0-11.704 4.285c-1.467 1.196-3.006 2.626-4.944 4.508-.642.625-13.336 12.94-21.67 21.028-1.16-.005-2.828-.021-4.306-.07a11.704 11.704 0 0 1-8.125-3.148A11.275 11.275 0 0 1 23.33 120.1a11.706 11.706 0 0 1 7.646 3.062c1.44 1.192 4.633 4 6.035 5.263a.17.17 0 0 0 .24.002l4.945-4.825a.176.176 0 0 0-.004-.27c-2.378-2.15-5.749-5.158-7.778-6.669a18.874 18.874 0 0 0-6.844-3.31zm46.482 26.094a11.704 11.704 0 0 1-8.125 3.147 168.92 168.92 0 0 1-5.204.073h-22.38c8.142-7.91 15.245-14.808 16.051-15.59.738-.717 2.398-2.306 3.83-3.595 3.145-2.831 5.974-3.4 7.976-3.382a11.275 11.275 0 0 1 7.852 19.347z"
class="cls-1" />
<path
d="M302.794 129.138a.24.24 0 0 0-.419-.163 16.062 16.062 0 0 1-11.961 5.469c-7.7 0-12.674-5.32-12.674-13.552 0-8.388 4.74-13.599 12.37-13.599a17.274 17.274 0 0 1 11.828 4.996.24.24 0 0 0 .414-.168v-4.245a18.595 18.595 0 0 0-12.243-4.502 15.358 15.358 0 0 0-11.733 4.845c-2.857 3.138-4.366 7.52-4.366 12.674 0 10.478 6.592 17.518 16.404 17.518a18.517 18.517 0 0 0 12.38-4.624zM93.47 104.248v3.638h11.506v29.657h3.982v-29.657h11.506v-3.638H93.47zM390.972 115.625c-2.059-2.232-5.501-3.043-7.978-3.043a11.24 11.24 0 0 0-8.363 3.475 13.26 13.26 0 0 0-3.277 9.243c0 8.883 5.894 12.86 11.735 12.86 3.412 0 6.228-1.22 7.883-3.38v2.812h3.971V104.3h-3.97zm-.105 9.769c0 4.545-2.639 9.147-7.683 9.147-3.788 0-7.865-2.803-7.865-8.958 0-4.522 2.388-9.389 7.63-9.389 5.844 0 7.918 4.956 7.918 9.2zM308.064 104.3h4.031v33.292h-4.031zM192.033 131.427a.24.24 0 0 0-.403-.18 13.772 13.772 0 0 1-8.006 3.255c-5.344 0-8.796-3.583-8.796-9.128 0-5.546 3.452-9.129 8.796-9.129a12.973 12.973 0 0 1 8.01 2.8.24.24 0 0 0 .399-.183v-3.668a17.567 17.567 0 0 0-8.621-2.615c-7.38 0-12.34 5.142-12.34 12.795 0 7.652 4.96 12.794 12.34 12.794a14.903 14.903 0 0 0 8.62-2.624zM139.984 130.642a.241.241 0 0 0-.436-.143 12.896 12.896 0 0 1-8.605 4.214 8.725 8.725 0 0 1-6.104-2.141 8.634 8.634 0 0 1-2.372-6.07h18.629v-.285c0-5.254-.896-8.233-3.194-10.625a10.42 10.42 0 0 0-7.849-3.01c-6.969 0-11.472 4.987-11.471 12.715.254 8.04 4.822 12.84 12.218 12.84a13.447 13.447 0 0 0 9.184-3.52zm-2.653-7.564h-14.872l.006-.075a7.37 7.37 0 0 1 7.481-6.93c4.341 0 7.17 2.657 7.38 6.933zM362.877 125.584c0 6.632-3.825 8.985-7.1 8.985a6.287 6.287 0 0 1-6.549-6.263V113.15h-3.79v15.088c0 5.842 4.172 9.922 10.145 9.922 3.308 0 5.89-1.114 7.53-3.235v2.66h3.614V113.15h-3.85zM156.747 112.583a9.79 9.79 0 0 0-7.364 2.887v-2.334h-3.762v24.456h3.846v-13.935a7.592 7.592 0 0 1 1.982-5.51 6.75 6.75 0 0 1 5.197-1.91 5.994 5.994 0 0 1 6.055 6.339v15.016h3.847v-15.197a9.756 9.756 0 0 0-2.767-7.26 9.907 9.907 0 0 0-7.034-2.552zM217.156 130.642a.241.241 0 0 0-.436-.143 12.896 12.896 0 0 1-8.605 4.214 8.725 8.725 0 0 1-6.104-2.141 8.634 8.634 0 0 1-2.372-6.07h18.629v-.285c0-5.254-.895-8.233-3.193-10.625a10.42 10.42 0 0 0-7.85-3.01c-6.968 0-11.471 4.987-11.471 12.715.254 8.04 4.822 12.84 12.218 12.84a13.447 13.447 0 0 0 9.184-3.52zm-2.653-7.564h-14.871l.005-.075a7.37 7.37 0 0 1 7.481-6.93c4.342 0 7.17 2.657 7.381 6.933zM233.857 112.583a9.79 9.79 0 0 0-7.365 2.887v-2.334h-3.762v24.456h3.847v-13.935a7.592 7.592 0 0 1 1.982-5.51 6.75 6.75 0 0 1 5.196-1.91 5.994 5.994 0 0 1 6.056 6.339v15.016h3.846v-15.197a9.756 9.756 0 0 0-2.767-7.26 9.907 9.907 0 0 0-7.033-2.552zM256.236 137.917a19.963 19.963 0 0 0 5.009-1.138v-3.683a.241.241 0 0 0-.321-.229A29.455 29.455 0 0 1 256.8 134c-.402.064-.756.12-1.02-.104a.897.897 0 0 1-.263-.777V116.69h7.04v-3.559h-7.04v-6.516h-3.997v6.516h-4.012v3.558h4.012v16.815a4.207 4.207 0 0 0 1.309 3.327 5.088 5.088 0 0 0 3.406 1.085zM329.224 112.63c-7.093 0-11.859 5.13-11.859 12.764s4.766 12.764 11.859 12.764 11.858-5.13 11.858-12.764-4.765-12.764-11.858-12.764zm-8.18 12.739l-.001-.01c.014-5.41 3.299-9.043 8.18-9.043 4.893 0 8.18 3.648 8.182 9.078-.002 5.429-3.29 9.078-8.181 9.078-4.89 0-8.175-3.678-8.18-9.103z" />
</svg>

Before Width: | Height: | Size: 4.5 KiB
@@ -1,11 +0,0 @@
<svg viewBox="0 83.15545000000002 85 76.44" data-name="图层 1" id="图层_1"
xmlns="http://www.w3.org/2000/svg" style="max-height: 500px" width="85"
height="76.44">
<defs>
<style>.cls-1{fill:#4999d4}</style>
</defs>
<title>tencent-cloud</title>
<path
d="M27.569 113.353a17.56 17.56 0 0 1 33.148-3.743.158.158 0 0 0 .194.105 21.267 21.267 0 0 1 7.008-.729c.235.018.327-.116.25-.33a24.828 24.828 0 0 0-47.933 4.444.082.082 0 0 0 .016 0 18.537 18.537 0 0 0-9.85 31.533 18.007 18.007 0 0 0 10.325 5h-.001a43.066 43.066 0 0 0 5.266.282c1.68.011 33.725.008 35.067.008 2.7 0 4.457-.002 6.345-.14a18.245 18.245 0 0 0 11.723-5.15 18.532 18.532 0 0 0-12.901-31.789 18.06 18.06 0 0 0-11.704 4.285c-1.467 1.196-3.006 2.626-4.944 4.508-.642.625-13.336 12.94-21.67 21.028-1.16-.005-2.828-.021-4.306-.07a11.704 11.704 0 0 1-8.125-3.148A11.275 11.275 0 0 1 23.33 120.1a11.706 11.706 0 0 1 7.646 3.062c1.44 1.192 4.633 4 6.035 5.263a.17.17 0 0 0 .24.002l4.945-4.825a.176.176 0 0 0-.004-.27c-2.378-2.15-5.749-5.158-7.778-6.669a18.874 18.874 0 0 0-6.844-3.31zm46.482 26.094a11.704 11.704 0 0 1-8.125 3.147 168.92 168.92 0 0 1-5.204.073h-22.38c8.142-7.91 15.245-14.808 16.051-15.59.738-.717 2.398-2.306 3.83-3.595 3.145-2.831 5.974-3.4 7.976-3.382a11.275 11.275 0 0 1 7.852 19.347z"
class="cls-1" />
</svg>

Before Width: | Height: | Size: 1.3 KiB
@@ -1,156 +0,0 @@
import base64
import hashlib
import hmac
import time

import requests


class Credential:
    def __init__(self, secret_id, secret_key):
        self.secret_id = secret_id
        self.secret_key = secret_key


class FlashRecognitionRequest:
    def __init__(self, voice_format="mp3", engine_type="16k_zh"):
        self.engine_type = engine_type
        self.speaker_diarization = 0
        self.hotword_id = ""
        self.customization_id = ""
        self.filter_dirty = 0
        self.filter_modal = 0
        self.filter_punc = 0
        self.convert_num_mode = 1
        self.word_info = 0
        self.voice_format = voice_format
        self.first_channel_only = 1
        self.reinforce_hotword = 0
        self.sentence_max_length = 0

    def set_first_channel_only(self, first_channel_only):
        self.first_channel_only = first_channel_only

    def set_speaker_diarization(self, speaker_diarization):
        self.speaker_diarization = speaker_diarization

    def set_filter_dirty(self, filter_dirty):
        self.filter_dirty = filter_dirty

    def set_filter_modal(self, filter_modal):
        self.filter_modal = filter_modal

    def set_filter_punc(self, filter_punc):
        self.filter_punc = filter_punc

    def set_convert_num_mode(self, convert_num_mode):
        self.convert_num_mode = convert_num_mode

    def set_word_info(self, word_info):
        self.word_info = word_info

    def set_hotword_id(self, hotword_id):
        self.hotword_id = hotword_id

    def set_customization_id(self, customization_id):
        self.customization_id = customization_id

    def set_voice_format(self, voice_format):
        self.voice_format = voice_format

    def set_sentence_max_length(self, sentence_max_length):
        self.sentence_max_length = sentence_max_length

    def set_reinforce_hotword(self, reinforce_hotword):
        self.reinforce_hotword = reinforce_hotword


class FlashRecognizer:
    """
    response:
        request_id        string
        status            Integer
        message           String
        audio_duration    Integer
        flash_result      Result Array

    Result:
        text              String
        channel_id        Integer
        sentence_list     Sentence Array

    Sentence:
        text              String
        start_time        Integer
        end_time          Integer
        speaker_id        Integer
        word_list         Word Array

    Word:
        word              String
        start_time        Integer
        end_time          Integer
        stable_flag:      Integer
    """

    def __init__(self, appid, credential):
        self.credential = credential
        self.appid = appid

    def _format_sign_string(self, param):
        signstr = "POSTasr.cloud.tencent.com/asr/flash/v1/"
        for t in param:
            if 'appid' in t:
                signstr += str(t[1])
                break
        signstr += "?"
        for x in param:
            tmp = x
            if 'appid' in x:
                continue
            for t in tmp:
                signstr += str(t)
                signstr += "="
            signstr = signstr[:-1]
            signstr += "&"
        signstr = signstr[:-1]
        return signstr

    def _build_header(self):
        header = {"Host": "asr.cloud.tencent.com"}
        return header

    def _sign(self, signstr, secret_key):
        hmacstr = hmac.new(secret_key.encode('utf-8'),
                           signstr.encode('utf-8'), hashlib.sha1).digest()
        s = base64.b64encode(hmacstr)
        s = s.decode('utf-8')
        return s

    def _build_req_with_signature(self, secret_key, params, header):
        query = sorted(params.items(), key=lambda d: d[0])
        signstr = self._format_sign_string(query)
        signature = self._sign(signstr, secret_key)
        header["Authorization"] = signature
        requrl = "https://"
        requrl += signstr[4::]
        return requrl

    def _create_query_arr(self, req):
        return {
            'appid': self.appid, 'secretid': self.credential.secret_id, 'timestamp': str(int(time.time())),
            'engine_type': req.engine_type, 'voice_format': req.voice_format,
            'speaker_diarization': req.speaker_diarization, 'hotword_id': req.hotword_id,
            'customization_id': req.customization_id, 'filter_dirty': req.filter_dirty,
            'filter_modal': req.filter_modal, 'filter_punc': req.filter_punc,
            'convert_num_mode': req.convert_num_mode, 'word_info': req.word_info,
            'first_channel_only': req.first_channel_only, 'reinforce_hotword': req.reinforce_hotword,
            'sentence_max_length': req.sentence_max_length
        }

    def recognize(self, req, data):
        header = self._build_header()
        query_arr = self._create_query_arr(req)
        req_url = self._build_req_with_signature(self.credential.secret_key, query_arr, header)
        r = requests.post(req_url, headers=header, data=data)
        return r.text
@@ -1,92 +0,0 @@
import json
from typing import IO, Optional

import requests

from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeConnectionError,
    InvokeError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
from core.model_runtime.model_providers.tencent.speech2text.flash_recognizer import (
    Credential,
    FlashRecognitionRequest,
    FlashRecognizer,
)


class TencentSpeech2TextModel(Speech2TextModel):
    def _invoke(self, model: str, credentials: dict,
                file: IO[bytes], user: Optional[str] = None) \
            -> str:
        """
        Invoke speech2text model

        :param model: model name
        :param credentials: model credentials
        :param file: audio file
        :param user: unique user id
        :return: text for given audio file
        """
        return self._speech2text_invoke(model, credentials, file)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            audio_file_path = self._get_demo_file_path()

            with open(audio_file_path, 'rb') as audio_file:
                self._speech2text_invoke(model, credentials, audio_file)
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    def _speech2text_invoke(self, model: str, credentials: dict, file: IO[bytes]) -> str:
        """
        Invoke speech2text model

        :param model: model name
        :param credentials: model credentials
        :param file: audio file
        :return: text for given audio file
        """
        app_id = credentials["app_id"]
        secret_id = credentials["secret_id"]
        secret_key = credentials["secret_key"]
        voice_format = file.voice_format if hasattr(file, "voice_format") else "mp3"
        tencent_voice_recognizer = FlashRecognizer(app_id, Credential(secret_id, secret_key))
        resp = tencent_voice_recognizer.recognize(FlashRecognitionRequest(voice_format), file)
        resp = json.loads(resp)
        code = resp["code"]
        message = resp["message"]
        if code == 4002:
            raise CredentialsValidateFailedError(str(message))
        elif code != 0:
            return f"Tencent ASR Recognition failed with code {code} and message {message}"
        return "\n".join(item["text"] for item in resp["flash_result"])

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        The key is the error type thrown to the caller
        The value is the error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [
                requests.exceptions.ConnectionError
            ],
            InvokeAuthorizationError: [
                CredentialsValidateFailedError
            ]
        }
@@ -1,5 +0,0 @@
model: tencent
model_type: speech2text
model_properties:
  file_upload_limit: 25
  supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm
@@ -1,29 +0,0 @@
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class TencentProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: dict) -> None:
        """
        Validate provider credentials

        if validate failed, raise exception

        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
        """
        try:
            model_instance = self.get_model_instance(ModelType.SPEECH2TEXT)
            model_instance.validate_credentials(
                model='tencent',
                credentials=credentials
            )
        except CredentialsValidateFailedError as ex:
            raise ex
        except Exception as ex:
            logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
            raise ex
@@ -1,49 +0,0 @@
provider: tencent
label:
  zh_Hans: 腾讯云
  en_US: Tencent
icon_small:
  en_US: icon_s_en.svg
icon_large:
  zh_Hans: icon_l_zh.svg
  en_US: icon_l_en.svg
background: "#E5E7EB"
help:
  title:
    en_US: Get your API key from Tencent AI
    zh_Hans: 从腾讯云获取 API Key
  url:
    en_US: https://cloud.tencent.com/product/asr
supported_model_types:
  - speech2text
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: app_id
      label:
        zh_Hans: APPID
        en_US: APPID
      type: text-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的腾讯语音识别服务的 APPID
        en_US: Enter the APPID of your Tencent Cloud ASR service
    - variable: secret_id
      label:
        zh_Hans: SecretId
        en_US: SecretId
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的腾讯语音识别服务的 SecretId
        en_US: Enter the SecretId of your Tencent Cloud ASR service
    - variable: secret_key
      label:
        zh_Hans: SecretKey
        en_US: SecretKey
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的腾讯语音识别服务的 SecretKey
        en_US: Enter the SecretKey of your Tencent Cloud ASR service
@@ -262,10 +262,6 @@ You should also complete the text started with ``` but not tell ``` directly.
        :param prompt_messages: prompt messages
        :return: llm response
        """
        if response.status_code != 200 and response.status_code != HTTPStatus.OK:
            raise ServiceUnavailableError(
                response.message
            )
        # transform assistant message to prompt message
        assistant_prompt_message = AssistantPromptMessage(
            content=response.output.choices[0].message.content,
@@ -425,7 +421,7 @@ You should also complete the text started with ``` but not tell ``` directly.
            raise ValueError(f"Got unknown type {message}")

        return message_text


    def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str:
        """
        Format a list of messages into a full prompt for the Anthropic model
@@ -500,9 +496,6 @@ You should also complete the text started with ``` but not tell ``` directly.
            tongyi_messages.append({
                'role': 'assistant',
                'content': content if not rich_content else [{"text": content}],
                'tool_calls': [tool_call.model_dump() for tool_call in
                               prompt_message.tool_calls] if prompt_message.tool_calls else []
            })
        elif isinstance(prompt_message, ToolPromptMessage):
            tongyi_messages.append({
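One detail worth flagging in the first hunk: `HTTPStatus.OK` is an `IntEnum` member equal to `200`, so checking both `!= 200` and `!= HTTPStatus.OK` tests the same thing twice; a single comparison suffices. A quick demonstration:

```python
from http import HTTPStatus

# HTTPStatus is an IntEnum, so both comparisons in the hunk are one test.
assert HTTPStatus.OK == 200

status_code = 503  # illustrative value
if status_code != HTTPStatus.OK:  # equivalent to the doubled condition
    raise RuntimeError("service unavailable")
```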
@@ -25,6 +25,7 @@ def measure_time():
        yield timing_info
    finally:
        timing_info['end'] = datetime.now()
        print(f"Execution time: {timing_info['end'] - timing_info['start']}")


def replace_text_with_content(data):
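The hunk shows only the tail of the `measure_time` context manager. A minimal self-contained sketch of the whole pattern, assuming the `start` timestamp is recorded before the `yield` (that part lies outside the hunk):

```python
from contextlib import contextmanager
from datetime import datetime


@contextmanager
def measure_time():
    # Mutable dict so callers can still read the timings after the block exits.
    timing_info = {'start': datetime.now(), 'end': None}
    try:
        yield timing_info
    finally:
        timing_info['end'] = datetime.now()
        print(f"Execution time: {timing_info['end'] - timing_info['start']}")


# Usage:
with measure_time() as timing:
    sum(range(1_000_000))  # the work being timed
elapsed = timing['end'] - timing['start']
```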
@@ -5,20 +5,15 @@ from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rag.data_post_processor.reorder import ReorderRunner
from core.rag.models.document import Document
from core.rag.rerank.constants.rerank_mode import RerankMode
from core.rag.rerank.entity.weight import KeywordSetting, VectorSetting, Weights
from core.rag.rerank.rerank_model import RerankModelRunner
from core.rag.rerank.weight_rerank import WeightRerankRunner
from core.rag.rerank.rerank import RerankRunner


class DataPostProcessor:
    """Interface for data post-processing document.
    """

    def __init__(self, tenant_id: str, reranking_mode: str,
                 reranking_model: Optional[dict] = None, weights: Optional[dict] = None,
                 reorder_enabled: bool = False):
        self.rerank_runner = self._get_rerank_runner(reranking_mode, tenant_id, reranking_model, weights)
    def __init__(self, tenant_id: str, reranking_model: dict, reorder_enabled: bool = False):
        self.rerank_runner = self._get_rerank_runner(reranking_model, tenant_id)
        self.reorder_runner = self._get_reorder_runner(reorder_enabled)

    def invoke(self, query: str, documents: list[Document], score_threshold: Optional[float] = None,
@@ -31,37 +26,19 @@ class DataPostProcessor:

        return documents

    def _get_rerank_runner(self, reranking_mode: str, tenant_id: str, reranking_model: Optional[dict] = None,
                           weights: Optional[dict] = None) -> Optional[RerankModelRunner | WeightRerankRunner]:
        if reranking_mode == RerankMode.WEIGHTED_SCORE.value and weights:
            return WeightRerankRunner(
                tenant_id,
                Weights(
                    weight_type=weights['weight_type'],
                    vector_setting=VectorSetting(
                        vector_weight=weights['vector_setting']['vector_weight'],
                        embedding_provider_name=weights['vector_setting']['embedding_provider_name'],
                        embedding_model_name=weights['vector_setting']['embedding_model_name'],
                    ),
                    keyword_setting=KeywordSetting(
                        keyword_weight=weights['keyword_setting']['keyword_weight'],
                    )
    def _get_rerank_runner(self, reranking_model: dict, tenant_id: str) -> Optional[RerankRunner]:
        if reranking_model:
            try:
                model_manager = ModelManager()
                rerank_model_instance = model_manager.get_model_instance(
                    tenant_id=tenant_id,
                    provider=reranking_model['reranking_provider_name'],
                    model_type=ModelType.RERANK,
                    model=reranking_model['reranking_model_name']
                )
            )
        elif reranking_mode == RerankMode.RERANKING_MODEL.value:
            if reranking_model:
                try:
                    model_manager = ModelManager()
                    rerank_model_instance = model_manager.get_model_instance(
                        tenant_id=tenant_id,
                        provider=reranking_model['reranking_provider_name'],
                        model_type=ModelType.RERANK,
                        model=reranking_model['reranking_model_name']
                    )
                except InvokeAuthorizationError:
                    return None
                return RerankModelRunner(rerank_model_instance)
            return None
            except InvokeAuthorizationError:
                return None
            return RerankRunner(rerank_model_instance)
        return None

    def _get_reorder_runner(self, reorder_enabled) -> Optional[ReorderRunner]:
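Taken together, the hunk replaces a mode-dispatching factory with a single model-based one. A stripped-down skeleton of the dispatch logic in the larger variant, with tuples standing in for the runner classes (a sketch, not the repo's code):

```python
from enum import Enum
from typing import Optional


class RerankMode(Enum):
    RERANKING_MODEL = 'reranking_model'
    WEIGHTED_SCORE = 'weighted_score'


def pick_runner(reranking_mode: str, reranking_model: Optional[dict], weights: Optional[dict]):
    # Weighted scoring wins when weights are configured; otherwise fall back
    # to a rerank model; otherwise no reranking at all.
    if reranking_mode == RerankMode.WEIGHTED_SCORE.value and weights:
        return ('weighted', weights)          # stands in for WeightRerankRunner
    if reranking_mode == RerankMode.RERANKING_MODEL.value and reranking_model:
        return ('model', reranking_model)     # stands in for RerankModelRunner
    return None


print(pick_runner('weighted_score', None, {'weight_type': 'customized'}))
```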
@@ -1,5 +1,4 @@
import re
from typing import Optional

import jieba
from jieba.analyse import default_tfidf
@@ -12,7 +11,7 @@ class JiebaKeywordTableHandler:
    def __init__(self):
        default_tfidf.stop_words = STOPWORDS

    def extract_keywords(self, text: str, max_keywords_per_chunk: Optional[int] = 10) -> set[str]:
    def extract_keywords(self, text: str, max_keywords_per_chunk: int = 10) -> set[str]:
        """Extract keywords with JIEBA tfidf."""
        keywords = jieba.analyse.extract_tags(
            sentence=text,
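For context, a minimal sketch of the TF-IDF call this handler wraps; `topK` plays the role of `max_keywords_per_chunk`, and the input string is arbitrary:

```python
import jieba.analyse

# Extract the top-N keywords of a text by TF-IDF weight.
text = "Dify 是一个开源的 LLM 应用开发平台"
keywords = set(jieba.analyse.extract_tags(sentence=text, topK=10))
print(keywords)
```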
@@ -38,7 +38,7 @@ class BaseKeyword(ABC):
        raise NotImplementedError

    def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
        for text in texts[:]:
        for text in texts:
            doc_id = text.metadata['doc_id']
            exists_duplicate_node = self.text_exists(doc_id)
            if exists_duplicate_node:
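The one-token difference in this hunk matters: `texts[:]` iterates over a copy, which is the safe way to remove elements from the same list inside the loop. A toy demonstration of the failure mode and the fix:

```python
items = [1, 1, 2, 3]
for x in items:          # buggy: mutating the list being iterated
    if x == 1:
        items.remove(x)
print(items)             # [1, 2, 3] -- the second 1 was skipped

items = [1, 1, 2, 3]
for x in items[:]:       # fixed: iterate over a shallow copy
    if x == 1:
        items.remove(x)
print(items)             # [2, 3]
```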
@@ -6,7 +6,6 @@ from flask import Flask, current_app
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.rerank.constants.rerank_mode import RerankMode
from core.rag.retrieval.retrival_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import Dataset
@@ -27,19 +26,13 @@ class RetrievalService:

    @classmethod
    def retrieve(cls, retrival_method: str, dataset_id: str, query: str,
                 top_k: int, score_threshold: Optional[float] = .0,
                 reranking_model: Optional[dict] = None, reranking_mode: Optional[str] = None,
                 weights: Optional[dict] = None):
                 top_k: int, score_threshold: Optional[float] = .0, reranking_model: Optional[dict] = None):
        dataset = db.session.query(Dataset).filter(
            Dataset.id == dataset_id
        ).first()
        if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
            return []
        all_documents = []
        keyword_search_documents = []
        embedding_search_documents = []
        full_text_search_documents = []
        hybrid_search_documents = []
        threads = []
        exceptions = []
        # retrieval_model source with keyword
@@ -94,8 +87,7 @@ class RetrievalService:
            raise Exception(exception_message)

        if retrival_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_mode,
                                                    reranking_model, weights, False)
            data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
@@ -118,7 +110,7 @@ class RetrievalService:
            )

            documents = keyword.search(
                cls.escape_query_for_search(query),
                query,
                top_k=top_k
            )
            all_documents.extend(documents)
@@ -140,7 +132,7 @@ class RetrievalService:
            )

            documents = vector.search_by_vector(
                cls.escape_query_for_search(query),
                query,
                search_type='similarity_score_threshold',
                top_k=top_k,
                score_threshold=score_threshold,
@@ -151,9 +143,7 @@ class RetrievalService:

            if documents:
                if reranking_model and retrival_method == RetrievalMethod.SEMANTIC_SEARCH.value:
                    data_post_processor = DataPostProcessor(str(dataset.tenant_id),
                                                            RerankMode.RERANKING_MODEL.value,
                                                            reranking_model, None, False)
                    data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
                    all_documents.extend(data_post_processor.invoke(
                        query=query,
                        documents=documents,
@@ -180,14 +170,12 @@ class RetrievalService:
            )

            documents = vector_processor.search_by_full_text(
                cls.escape_query_for_search(query),
                query,
                top_k=top_k
            )
            if documents:
                if reranking_model and retrival_method == RetrievalMethod.FULL_TEXT_SEARCH.value:
                    data_post_processor = DataPostProcessor(str(dataset.tenant_id),
                                                            RerankMode.RERANKING_MODEL.value,
                                                            reranking_model, None, False)
                    data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
                    all_documents.extend(data_post_processor.invoke(
                        query=query,
                        documents=documents,
@@ -198,7 +186,3 @@ class RetrievalService:
            all_documents.extend(documents)
        except Exception as e:
            exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        return query.replace('"', '\\"')
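The `escape_query_for_search` helper that this compare drops from all three search paths is shown in full above; its effect is easy to demonstrate in isolation:

```python
def escape_query_for_search(query: str) -> str:
    # Escape double quotes so the query can sit safely inside search
    # expressions that use " as a delimiter.
    return query.replace('"', '\\"')


print(escape_query_for_search('say "hello"'))  # say \"hello\"
```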
@@ -293,18 +293,15 @@ class AnalyticdbVector(BaseVector):
        return documents

    def delete(self) -> None:
        try:
            from alibabacloud_gpdb20160503 import models as gpdb_20160503_models
            request = gpdb_20160503_models.DeleteCollectionRequest(
                collection=self._collection_name,
                dbinstance_id=self.config.instance_id,
                namespace=self.config.namespace,
                namespace_password=self.config.namespace_password,
                region_id=self.config.region_id,
            )
            self._client.delete_collection(request)
        except Exception as e:
            raise e
        from alibabacloud_gpdb20160503 import models as gpdb_20160503_models
        request = gpdb_20160503_models.DeleteCollectionRequest(
            collection=self._collection_name,
            dbinstance_id=self.config.instance_id,
            namespace=self.config.namespace,
            namespace_password=self.config.namespace_password,
            region_id=self.config.region_id,
        )
        self._client.delete_collection(request)


class AnalyticdbVectorFactory(AbstractVectorFactory):
    def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings):
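A side note on the wrapper being removed here: `except Exception as e: raise e` neither handles nor annotates the error, so the try/except is effectively a no-op. The two forms below behave the same for the caller:

```python
def risky():
    raise ValueError("boom")


def wrapped():
    try:
        risky()
    except Exception as e:
        raise e  # re-raises the same exception unchanged


def unwrapped():
    risky()  # identical outcome: ValueError propagates to the caller
```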
@@ -115,8 +115,7 @@ class MilvusVector(BaseVector):
            uri = "https://" + str(self._client_config.host) + ":" + str(self._client_config.port)
        else:
            uri = "http://" + str(self._client_config.host) + ":" + str(self._client_config.port)
        connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password,
                            db_name=self._client_config.database)
        connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password)

        from pymilvus import utility
        if utility.has_collection(self._collection_name, using=alias):
@@ -131,8 +130,7 @@ class MilvusVector(BaseVector):
            uri = "https://" + str(self._client_config.host) + ":" + str(self._client_config.port)
        else:
            uri = "http://" + str(self._client_config.host) + ":" + str(self._client_config.port)
        connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password,
                            db_name=self._client_config.database)
        connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password)

        from pymilvus import utility
        if utility.has_collection(self._collection_name, using=alias):
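Both hunks drop the `db_name` argument from the connection call. A sketch of the call shape, with placeholder host and credentials; passing `db_name` assumes a pymilvus/Milvus version with multi-database support:

```python
from pymilvus import connections

# Placeholders -- substitute your own deployment's values.
secure = False
host, port = "localhost", 19530
uri = f"{'https' if secure else 'http'}://{host}:{port}"

connections.connect(alias="default", uri=uri,
                    user="root", password="Milvus",
                    db_name="default")  # omit on versions without databases
```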
@@ -1,15 +1,11 @@
import array
import json
import re
import uuid
from contextlib import contextmanager
from typing import Any

import jieba.posseg as pseg
import nltk
import numpy
import oracledb
from nltk.corpus import stopwords
from pydantic import BaseModel, model_validator

from configs import dify_config
@@ -54,11 +50,6 @@ CREATE TABLE IF NOT EXISTS {table_name} (
    ,embedding vector NOT NULL
)
"""
SQL_CREATE_INDEX = """
CREATE INDEX idx_docs_{table_name} ON {table_name}(text)
INDEXTYPE IS CTXSYS.CONTEXT PARAMETERS
('FILTER CTXSYS.NULL_FILTER SECTION GROUP CTXSYS.HTML_SECTION_GROUP LEXER sys.my_chinese_vgram_lexer')
"""


class OracleVector(BaseVector):
@@ -197,53 +188,7 @@ class OracleVector(BaseVector):
        return docs

    def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
        top_k = kwargs.get("top_k", 5)
        # fetching by score_threshold is not implemented yet; maybe later
        score_threshold = kwargs.get("score_threshold") if kwargs.get("score_threshold") else 0.0
        if len(query) > 0:
            # Check which language the query is in
            zh_pattern = re.compile('[\u4e00-\u9fa5]+')
            match = zh_pattern.search(query)
            entities = []
            # if it matches, the query may be a Chinese sentence, so split with jieba; otherwise split with NLTK
            if match:
                words = pseg.cut(query)
                current_entity = ""
                for word, pos in words:
                    if pos == 'nr' or pos == 'Ng' or pos == 'eng' or pos == 'nz' or pos == 'n' or pos == 'ORG' or pos == 'v':  # nr: person name, ns: place name, nt: organization name
                        current_entity += word
                    else:
                        if current_entity:
                            entities.append(current_entity)
                            current_entity = ""
                if current_entity:
                    entities.append(current_entity)
            else:
                try:
                    nltk.data.find('tokenizers/punkt')
                    nltk.data.find('corpora/stopwords')
                except LookupError:
                    nltk.download('punkt')
                    nltk.download('stopwords')
                    print("run download")
                e_str = re.sub(r'[^\w ]', '', query)
                all_tokens = nltk.word_tokenize(e_str)
                stop_words = stopwords.words('english')
                for token in all_tokens:
                    if token not in stop_words:
                        entities.append(token)
            with self._get_cursor() as cur:
                cur.execute(
                    f"select meta, text FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only",
                    [" ACCUM ".join(entities)]
                )
                docs = []
                for record in cur:
                    metadata, text = record
                    docs.append(Document(page_content=text, metadata=metadata))
            return docs
        else:
            return [Document(page_content="", metadata="")]
        # BM25 search is not supported
        return []

    def delete(self) -> None:
@@ -261,8 +206,6 @@ class OracleVector(BaseVector):
        with self._get_cursor() as cur:
            cur.execute(SQL_CREATE_TABLE.format(table_name=self.table_name))
        redis_client.set(collection_exist_cache_key, 1, ex=3600)
        with self._get_cursor() as cur:
            cur.execute(SQL_CREATE_INDEX.format(table_name=self.table_name))


class OracleVectorFactory(AbstractVectorFactory):
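The removed `search_by_full_text` boils down to: tokenize the query (jieba for Chinese, NLTK for everything else), then feed the terms to Oracle Text's `CONTAINS` joined with `ACCUM`. A simplified, self-contained sketch of that tokenization step; it keeps individual matching words rather than merging consecutive runs as the original does, and it assumes the NLTK `punkt` and `stopwords` data are already installed:

```python
import re

import jieba.posseg as pseg
import nltk
from nltk.corpus import stopwords


def tokenize_for_oracle_contains(query: str) -> str:
    """Produce the ' ACCUM '-joined term list fed to CONTAINS()."""
    if re.search('[\u4e00-\u9fa5]+', query):
        # Chinese path: keep words whose POS tag suggests a content word.
        entities = [word for word, pos in pseg.cut(query)
                    if pos in ('nr', 'Ng', 'eng', 'nz', 'n', 'ORG', 'v')]
    else:
        # English path: strip punctuation, tokenize, drop stop words.
        tokens = nltk.word_tokenize(re.sub(r'[^\w ]', '', query))
        stop_words = set(stopwords.words('english'))
        entities = [t for t in tokens if t not in stop_words]
    return " ACCUM ".join(entities)


print(tokenize_for_oracle_contains("vector databases in production"))
# -> "vector ACCUM databases ACCUM production"
```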
@@ -396,11 +396,9 @@ class QdrantVector(BaseVector):
        documents = []
        for result in results:
            if result:
                document = self._document_from_scored_point(
                documents.append(self._document_from_scored_point(
                    result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value
                )
                document.metadata['vector'] = result.vector
                documents.append(document)
                ))

        return documents
@@ -198,6 +198,8 @@ class TencentVector(BaseVector):
        self._db.drop_collection(name=self._collection_name)




class TencentVectorFactory(AbstractVectorFactory):
    def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> TencentVector:
@@ -57,7 +57,7 @@ class BaseVector(ABC):
        raise NotImplementedError

    def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
        for text in texts[:]:
        for text in texts:
            doc_id = text.metadata['doc_id']
            exists_duplicate_node = self.text_exists(doc_id)
            if exists_duplicate_node:
@@ -67,7 +67,3 @@ class BaseVector(ABC):

    def _get_uuids(self, texts: list[Document]) -> list[str]:
        return [text.metadata['doc_id'] for text in texts]

    @property
    def collection_name(self):
        return self._collection_name
@@ -9,7 +9,6 @@ from core.rag.datasource.entity.embedding import Embeddings
from core.rag.datasource.vdb.vector_base import BaseVector
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models.dataset import Dataset


@@ -135,10 +134,6 @@ class Vector:

    def delete(self) -> None:
        self._vector_processor.delete()
        # delete collection redis cache
        if self._vector_processor.collection_name:
            collection_exist_cache_key = 'vector_indexing_{}'.format(self._vector_processor.collection_name)
            redis_client.delete(collection_exist_cache_key)

    def _get_embeddings(self) -> Embeddings:
        model_manager = ModelManager()
@@ -153,7 +148,7 @@ class Vector:
        return CacheEmbedding(embedding_model)

    def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
        for text in texts[:]:
        for text in texts:
            doc_id = text.metadata['doc_id']
            exists_duplicate_node = self.text_exists(doc_id)
            if exists_duplicate_node:
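The deleted block is the counterpart of the `redis_client.set(collection_exist_cache_key, 1, ex=3600)` seen in the Oracle hunk: creation caches a collection-exists flag for an hour, so deletion has to drop that flag or a recreated collection with the same name would be treated as already initialized. A minimal sketch of the pattern, using the `vector_indexing_{}` key format from the hunk (the redis client setup is illustrative):

```python
import redis

redis_client = redis.Redis()  # placeholder connection


def mark_collection_created(collection_name: str) -> None:
    # Cache "collection exists" for an hour to skip repeated existence checks.
    redis_client.set(f"vector_indexing_{collection_name}", 1, ex=3600)


def drop_collection(collection_name: str) -> None:
    # ... drop the collection in the vector store here ...
    # Then invalidate the flag so a future re-creation is not skipped.
    redis_client.delete(f"vector_indexing_{collection_name}")
```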
@@ -1,8 +0,0 @@
from enum import Enum


class RerankMode(Enum):

    RERANKING_MODEL = 'reranking_model'
    WEIGHTED_SCORE = 'weighted_score'
@@ -1,23 +0,0 @@
from pydantic import BaseModel


class VectorSetting(BaseModel):
    vector_weight: float

    embedding_provider_name: str

    embedding_model_name: str


class KeywordSetting(BaseModel):
    keyword_weight: float


class Weights(BaseModel):
    """Model for weighted rerank."""

    weight_type: str

    vector_setting: VectorSetting

    keyword_setting: KeywordSetting
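Since the removed `Weights` model is a nested pydantic `BaseModel`, the field-by-field unpacking seen in `_get_rerank_runner` could be collapsed into one validation call. A sketch assuming pydantic v2 (`model_validate`) and placeholder values:

```python
from pydantic import BaseModel


class VectorSetting(BaseModel):
    vector_weight: float
    embedding_provider_name: str
    embedding_model_name: str


class KeywordSetting(BaseModel):
    keyword_weight: float


class Weights(BaseModel):
    """Model for weighted rerank."""
    weight_type: str
    vector_setting: VectorSetting
    keyword_setting: KeywordSetting


# Nested dicts validate recursively, so one call replaces the manual
# unpacking (all values below are placeholders):
weights = Weights.model_validate({
    'weight_type': 'customized',
    'vector_setting': {
        'vector_weight': 0.7,
        'embedding_provider_name': 'openai',
        'embedding_model_name': 'text-embedding-3-small',
    },
    'keyword_setting': {'keyword_weight': 0.3},
})
print(weights.vector_setting.vector_weight)  # 0.7
```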
Some files were not shown because too many files have changed in this diff.