Mirror of https://github.com/langgenius/dify.git
Synced 2026-01-07 06:48:28 +00:00

Compare commits: feat/suppo… ... feat/paren… (76 Commits)

| SHA1 |
|---|
| 4cea0248c8 |
| e26a963163 |
| 72ca69fb2a |
| db1956ed54 |
| 9aceceda2c |
| 1843f6ccb6 |
| b7d9987953 |
| b528b1e875 |
| c4aa98e609 |
| 4757db1b6b |
| 044e7b63c2 |
| 5b7b328193 |
| 8d5a1be227 |
| 90d5765fb6 |
| 1db14793fa |
| cbb4e95928 |
| 20c091a5e7 |
| e9c098d024 |
| 9f75970347 |
| f1366e8e19 |
| 0f85e3557b |
| 17ee731546 |
| af2461cccc |
| 60c1549771 |
| ab6dcf7032 |
| 8aae235a71 |
| c032574491 |
| 1065917872 |
| 56e361ac44 |
| 2e00829b1e |
| 625aaceb00 |
| 98d85e6b74 |
| 319d49084b |
| 1a6a28f650 |
| eb542067af |
| 04b9a2c605 |
| 8028e75fbb |
| 3eb51d85da |
| 79a35c2fe6 |
| 2dd4c34423 |
| 684f6b2299 |
| b791a80b75 |
| 13006f94e2 |
| 41772c325f |
| a4fc057a1c |
| aae29e72ae |
| 87c831e5dd |
| 40a5f1c80a |
| 04f1e18342 |
| 365a40d11f |
| 60b5dac3ab |
| 8565c18e84 |
| 03ba4bc760 |
| ae3a2cb272 |
| 6c8e208ef3 |
| 0181f1c08c |
| 7f00c5a02e |
| d0648e27e2 |
| 31348af2e3 |
| 096c0ad564 |
| 16c41585e1 |
| 566ab9261d |
| 1cdadfdece |
| 448a19bf54 |
| bba9301788 |
| 7881fb4d22 |
| fdcee1cd45 |
| 13c62f83f4 |
| c5b9a829c0 |
| 6f3a1c9d72 |
| 13bb4aa721 |
| d0ef423e66 |
| 8e1aef6120 |
| 4657df17a9 |
| 27ece2fb52 |
| ca4d0fb4cc |

@@ -1,5 +1,5 @@
FROM mcr.microsoft.com/devcontainers/python:3.10
FROM mcr.microsoft.com/devcontainers/python:3.12
# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>
# && apt-get -y install --no-install-recommends <your-package-list-here>

@@ -1,7 +1,7 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/anaconda
{
"name": "Python 3.10",
"name": "Python 3.12",
"build": {
"context": "..",
"dockerfile": "Dockerfile"

.github/actions/setup-poetry/action.yml (vendored)

@@ -4,7 +4,7 @@ inputs:
python-version:
description: Python version to use and the Poetry installed with
required: true
default: '3.10'
default: '3.11'
poetry-version:
description: Poetry version to set up
required: true

.github/workflows/api-tests.yml (vendored)

@@ -20,7 +20,6 @@ jobs:
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
- "3.12"

.github/workflows/vdb-tests.yml (vendored)

@@ -8,6 +8,8 @@ on:
- api/core/rag/datasource/**
- docker/**
- .github/workflows/vdb-tests.yml
- api/poetry.lock
- api/pyproject.toml

concurrency:
group: vdb-tests-${{ github.head_ref || github.run_id }}

@@ -20,7 +22,6 @@ jobs:
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
- "3.12"

@@ -71,7 +71,7 @@ Dify 依赖以下工具和库:
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.10.x
- [Python](https://www.python.org/) version 3.11.x or 3.12.x

### 4. 安装

@@ -74,7 +74,7 @@ Dify を構築するには次の依存関係が必要です。それらがシス
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.10.x
- [Python](https://www.python.org/) version 3.11.x or 3.12.x

### 4. インストール

@@ -73,7 +73,7 @@ Dify yêu cầu các phụ thuộc sau để build, hãy đảm bảo chúng đ
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) phiên bản 8.x.x hoặc [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) phiên bản 3.10.x
- [Python](https://www.python.org/) phiên bản 3.11.x hoặc 3.12.x

### 4. Cài đặt

@@ -153,4 +153,4 @@ Và thế là xong! Khi PR của bạn được merge, bạn sẽ được giớ
## Nhận trợ giúp
Nếu bạn gặp khó khăn hoặc có câu hỏi cấp bách trong quá trình đóng góp, hãy đặt câu hỏi của bạn trong vấn đề GitHub liên quan, hoặc tham gia [Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi để trò chuyện nhanh chóng.
Nếu bạn gặp khó khăn hoặc có câu hỏi cấp bách trong quá trình đóng góp, hãy đặt câu hỏi của bạn trong vấn đề GitHub liên quan, hoặc tham gia [Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi để trò chuyện nhanh chóng.

@@ -1,6 +1,11 @@
import os
import sys

python_version = sys.version_info
if not ((3, 11) <= python_version < (3, 13)):
print(f"Python 3.11 or 3.12 is required, current version is {python_version.major}.{python_version.minor}")
raise SystemExit(1)

from configs import dify_config

if not dify_config.DEBUG:

@@ -30,9 +35,6 @@ from models import account, dataset, model, source, task, tool, tools, web # no
# DO NOT REMOVE ABOVE
if sys.version_info[:2] == (3, 10):
print("Warning: Python 3.10 will not be supported in the next version.")
warnings.simplefilter("ignore", ResourceWarning)

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.11.2",
default="0.12.0",
)
COMMIT_SHA: str = Field(

@@ -190,7 +190,7 @@ class AppCopyApi(Resource):
)
session.commit()
stmt = select(App).where(App.id == result.app.id)
stmt = select(App).where(App.id == result.app_id)
app = session.scalar(stmt)
return app, 201

@@ -1,4 +1,4 @@
from datetime import datetime, timezone
from datetime import UTC, datetime
import pytz
from flask_login import current_user

@@ -314,7 +314,7 @@ def _get_conversation(app_model, conversation_id):
raise NotFound("Conversation Not Exists.")
if not conversation.read_at:
conversation.read_at = datetime.now(timezone.utc).replace(tzinfo=None)
conversation.read_at = datetime.now(UTC).replace(tzinfo=None)
conversation.read_account_id = current_user.id
db.session.commit()

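The recurring edit in the hunks above and below swaps `datetime.now(timezone.utc)` for `datetime.now(UTC)`: `datetime.UTC` is an alias for `timezone.utc` that only exists on Python 3.11+, which is the minimum version this changeset enforces. A minimal sketch of the equivalence, keeping the codebase's convention of storing naive UTC timestamps:

```python
from datetime import UTC, datetime, timezone

# The pre-3.11 spelling and the 3.11+ alias name the same tzinfo object.
assert UTC is timezone.utc

# Both produce the same aware "now"; the code then drops tzinfo before persisting.
naive_utc_old = datetime.now(timezone.utc).replace(tzinfo=None)
naive_utc_new = datetime.now(UTC).replace(tzinfo=None)
```
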
@@ -1,4 +1,4 @@
from datetime import datetime, timezone
from datetime import UTC, datetime
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse

@@ -75,7 +75,7 @@ class AppSite(Resource):
setattr(site, attr_name, value)
site.updated_by = current_user.id
site.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
site.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
return site

@@ -99,7 +99,7 @@ class AppSiteAccessTokenReset(Resource):
site.code = Site.generate_code(16)
site.updated_by = current_user.id
site.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
site.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
return site

@@ -65,7 +65,7 @@ class ActivateApi(Resource):
account.timezone = args["timezone"]
account.interface_theme = "light"
account.status = AccountStatus.ACTIVE.value
account.initialized_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
account.initialized_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
token_pair = AccountService.login(account, ip_address=extract_remote_ip(request))

@@ -1,5 +1,5 @@
import logging
from datetime import datetime, timezone
from datetime import UTC, datetime
from typing import Optional
import requests

@@ -106,7 +106,7 @@ class OAuthCallback(Resource):
if account.status == AccountStatus.PENDING.value:
account.status = AccountStatus.ACTIVE.value
account.initialized_at = datetime.now(timezone.utc).replace(tzinfo=None)
account.initialized_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
try:

@@ -83,7 +83,7 @@ class DataSourceApi(Resource):
if action == "enable":
if data_source_binding.disabled:
data_source_binding.disabled = False
data_source_binding.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
data_source_binding.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.add(data_source_binding)
db.session.commit()
else:

@@ -92,7 +92,7 @@ class DataSourceApi(Resource):
if action == "disable":
if not data_source_binding.disabled:
data_source_binding.disabled = True
data_source_binding.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
data_source_binding.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.add(data_source_binding)
db.session.commit()
else:

@@ -1,6 +1,6 @@
import logging
from argparse import ArgumentTypeError
from datetime import datetime, timezone
from datetime import UTC, datetime
from flask import request
from flask_login import current_user

@@ -665,7 +665,7 @@ class DocumentProcessingApi(DocumentResource):
raise InvalidActionError("Document not in indexing state.")
document.paused_by = current_user.id
document.paused_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.paused_at = datetime.now(UTC).replace(tzinfo=None)
document.is_paused = True
db.session.commit()

@@ -745,7 +745,7 @@ class DocumentMetadataApi(DocumentResource):
document.doc_metadata[key] = value
document.doc_type = doc_type
document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
return {"result": "success", "message": "Document metadata updated."}, 200

@@ -787,7 +787,7 @@ class DocumentStatusApi(DocumentResource):
document.enabled = True
document.disabled_at = None
document.disabled_by = None
document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
# Set cache to prevent indexing the same document multiple times

@@ -804,9 +804,9 @@ class DocumentStatusApi(DocumentResource):
raise InvalidActionError("Document already disabled.")
document.enabled = False
document.disabled_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.disabled_at = datetime.now(UTC).replace(tzinfo=None)
document.disabled_by = current_user.id
document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
# Set cache to prevent indexing the same document multiple times

@@ -821,9 +821,9 @@ class DocumentStatusApi(DocumentResource):
raise InvalidActionError("Document already archived.")
document.archived = True
document.archived_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.archived_at = datetime.now(UTC).replace(tzinfo=None)
document.archived_by = current_user.id
document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
if document.enabled:

@@ -840,7 +840,7 @@ class DocumentStatusApi(DocumentResource):
document.archived = False
document.archived_at = None
document.archived_by = None
document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
document.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
# Set cache to prevent indexing the same document multiple times

@@ -1,5 +1,5 @@
import uuid
from datetime import datetime, timezone
from datetime import UTC, datetime
import pandas as pd
from flask import request

@@ -188,7 +188,7 @@ class DatasetDocumentSegmentApi(Resource):
raise InvalidActionError("Segment is already disabled.")
segment.enabled = False
segment.disabled_at = datetime.now(timezone.utc).replace(tzinfo=None)
segment.disabled_at = datetime.now(UTC).replace(tzinfo=None)
segment.disabled_by = current_user.id
db.session.commit()

@@ -1,5 +1,5 @@
import logging
from datetime import datetime, timezone
from datetime import UTC, datetime
from flask_login import current_user
from flask_restful import reqparse

@@ -46,7 +46,7 @@ class CompletionApi(InstalledAppResource):
streaming = args["response_mode"] == "streaming"
args["auto_generate_name"] = False
installed_app.last_used_at = datetime.now(timezone.utc).replace(tzinfo=None)
installed_app.last_used_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
try:

@@ -106,7 +106,7 @@ class ChatApi(InstalledAppResource):
args["auto_generate_name"] = False
installed_app.last_used_at = datetime.now(timezone.utc).replace(tzinfo=None)
installed_app.last_used_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
try:

@@ -1,4 +1,4 @@
from datetime import datetime, timezone
from datetime import UTC, datetime
from flask_login import current_user
from flask_restful import Resource, inputs, marshal_with, reqparse

@@ -81,7 +81,7 @@ class InstalledAppsListApi(Resource):
tenant_id=current_tenant_id,
app_owner_tenant_id=app.tenant_id,
is_pinned=False,
last_used_at=datetime.now(timezone.utc).replace(tzinfo=None),
last_used_at=datetime.now(UTC).replace(tzinfo=None),
)
db.session.add(new_installed_app)
db.session.commit()

@@ -60,7 +60,7 @@ class AccountInitApi(Resource):
raise InvalidInvitationCodeError()
invitation_code.status = "used"
invitation_code.used_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
invitation_code.used_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
invitation_code.used_by_tenant_id = account.current_tenant_id
invitation_code.used_by_account_id = account.id

@@ -68,7 +68,7 @@ class AccountInitApi(Resource):
account.timezone = args["timezone"]
account.interface_theme = "light"
account.status = "active"
account.initialized_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
account.initialized_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
return {"result": "success"}

@@ -1,5 +1,5 @@
from collections.abc import Callable
from datetime import datetime, timezone
from datetime import UTC, datetime
from enum import Enum
from functools import wraps
from typing import Optional

@@ -198,7 +198,7 @@ def validate_and_get_api_token(scope=None):
if not api_token:
raise Unauthorized("Access token is invalid")
api_token.last_used_at = datetime.now(timezone.utc).replace(tzinfo=None)
api_token.last_used_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
return api_token

@@ -2,7 +2,7 @@ import json
import logging
import uuid
from collections.abc import Mapping, Sequence
from datetime import datetime, timezone
from datetime import UTC, datetime
from typing import Optional, Union, cast
from core.agent.entities import AgentEntity, AgentToolEntity

@@ -412,7 +412,7 @@ class BaseAgentRunner(AppRunner):
.first()
)
db_variables.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
db_variables.updated_at = datetime.now(UTC).replace(tzinfo=None)
db_variables.variables_str = json.dumps(jsonable_encoder(tool_variables.pool))
db.session.commit()
db.session.close()

@@ -1,3 +1,4 @@
import uuid
from typing import Optional
from core.app.app_config.entities import DatasetEntity, DatasetRetrieveConfigEntity

@@ -1,4 +1,5 @@
from core.app.app_config.entities import (
AdvancedChatMessageEntity,
AdvancedChatPromptTemplateEntity,
AdvancedCompletionPromptTemplateEntity,
PromptTemplateEntity,

@@ -25,7 +26,9 @@ class PromptTemplateConfigManager:
chat_prompt_messages = []
for message in chat_prompt_config.get("prompt", []):
chat_prompt_messages.append(
{"text": message["text"], "role": PromptMessageRole.value_of(message["role"])}
AdvancedChatMessageEntity(
**{"text": message["text"], "role": PromptMessageRole.value_of(message["role"])}
)
)
advanced_chat_prompt_template = AdvancedChatPromptTemplateEntity(messages=chat_prompt_messages)

@@ -1,5 +1,5 @@
from collections.abc import Sequence
from enum import Enum
from enum import Enum, StrEnum
from typing import Any, Optional
from pydantic import BaseModel, Field, field_validator

@@ -88,7 +88,7 @@ class PromptTemplateEntity(BaseModel):
advanced_completion_prompt_template: Optional[AdvancedCompletionPromptTemplateEntity] = None

class VariableEntityType(str, Enum):
class VariableEntityType(StrEnum):
TEXT_INPUT = "text-input"
SELECT = "select"
PARAGRAPH = "paragraph"

@@ -127,7 +127,9 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,

@@ -134,7 +134,9 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,

@@ -1,4 +1,4 @@
from collections.abc import Mapping
from collections.abc import Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional
from core.app.app_config.entities import VariableEntityType

@@ -6,7 +6,7 @@ from core.file import File, FileUploadConfig
from factories import file_factory
if TYPE_CHECKING:
from core.app.app_config.entities import AppConfig, VariableEntity
from core.app.app_config.entities import VariableEntity

class BaseAppGenerator:

@@ -14,23 +14,23 @@ class BaseAppGenerator:
self,
*,
user_inputs: Optional[Mapping[str, Any]],
app_config: "AppConfig",
variables: Sequence["VariableEntity"],
tenant_id: str,
) -> Mapping[str, Any]:
user_inputs = user_inputs or {}
# Filter input variables from form configuration, handle required fields, default values, and option values
variables = app_config.variables
user_inputs = {
var.variable: self._validate_inputs(value=user_inputs.get(var.variable), variable_entity=var)
for var in variables
}
user_inputs = {k: self._sanitize_value(v) for k, v in user_inputs.items()}
# Convert files in inputs to File
entity_dictionary = {item.variable: item for item in app_config.variables}
entity_dictionary = {item.variable: item for item in variables}
# Convert single file to File
files_inputs = {
k: file_factory.build_from_mapping(
mapping=v,
tenant_id=app_config.tenant_id,
tenant_id=tenant_id,
config=FileUploadConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_file_extensions=entity_dictionary[k].allowed_file_extensions,

@@ -44,7 +44,7 @@ class BaseAppGenerator:
file_list_inputs = {
k: file_factory.build_from_mappings(
mappings=v,
tenant_id=app_config.tenant_id,
tenant_id=tenant_id,
config=FileUploadConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_file_extensions=entity_dictionary[k].allowed_file_extensions,

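Across the generator hunks above and below, `_prepare_user_inputs` stops taking the whole `AppConfig` and instead receives the variable definitions and tenant id directly. A hedged before/after sketch of the call shape (names taken from the diff; `raw_inputs` is an illustrative placeholder):

```python
# Old call shape: the helper pulled variables and tenant_id out of app_config itself.
inputs = self._prepare_user_inputs(user_inputs=raw_inputs, app_config=app_config)

# New call shape: the dependencies are passed explicitly, so the helper no longer
# needs to know about AppConfig at all.
inputs = self._prepare_user_inputs(
    user_inputs=raw_inputs,
    variables=app_config.variables,
    tenant_id=app_model.tenant_id,
)
```
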
@@ -132,7 +132,9 @@ class ChatAppGenerator(MessageBasedAppGenerator):
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,

@@ -113,7 +113,9 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
inputs=self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,
files=file_objs,
user_id=user.id,

@@ -1,7 +1,7 @@
import json
import logging
from collections.abc import Generator
from datetime import datetime, timezone
from datetime import UTC, datetime
from typing import Optional, Union
from sqlalchemy import and_

@@ -200,7 +200,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
db.session.commit()
db.session.refresh(conversation)
else:
conversation.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
conversation.updated_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
message = Message(

@@ -96,7 +96,9 @@ class WorkflowAppGenerator(BaseAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
file_upload_config=file_extra_config,
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
inputs=self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
files=system_files,
user_id=user.id,
stream=stream,

@@ -43,7 +43,6 @@ from core.workflow.graph_engine.entities.event import (
)
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.nodes import NodeType
from core.workflow.nodes.iteration import IterationNodeData
from core.workflow.nodes.node_mapping import node_type_classes_mapping
from core.workflow.workflow_entry import WorkflowEntry
from extensions.ext_database import db

@@ -160,8 +159,6 @@ class WorkflowBasedAppRunner(AppRunner):
user_inputs=user_inputs,
variable_pool=variable_pool,
tenant_id=workflow.tenant_id,
node_type=node_type,
node_data=IterationNodeData(**iteration_node_config.get("data", {})),
)
return graph, variable_pool

@@ -1,5 +1,5 @@
from datetime import datetime
from enum import Enum
from enum import Enum, StrEnum
from typing import Any, Optional
from pydantic import BaseModel, field_validator

@@ -11,7 +11,7 @@ from core.workflow.nodes import NodeType
from core.workflow.nodes.base import BaseNodeData

class QueueEvent(str, Enum):
class QueueEvent(StrEnum):
"""
QueueEvent enum
"""

@@ -1,8 +1,9 @@
import json
import time
from collections.abc import Mapping, Sequence
from datetime import datetime, timezone
from datetime import UTC, datetime
from typing import Any, Optional, Union, cast
from uuid import uuid4
from sqlalchemy.orm import Session

@@ -80,38 +81,38 @@ class WorkflowCycleManage:
inputs[f"sys.{key.value}"] = value
inputs = WorkflowEntry.handle_special_values(inputs)
triggered_from = (
WorkflowRunTriggeredFrom.DEBUGGING
if self._application_generate_entity.invoke_from == InvokeFrom.DEBUGGER
else WorkflowRunTriggeredFrom.APP_RUN
)
# init workflow run
workflow_run = WorkflowRun()
workflow_run_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID]
if workflow_run_id:
workflow_run.id = workflow_run_id
workflow_run.tenant_id = self._workflow.tenant_id
workflow_run.app_id = self._workflow.app_id
workflow_run.sequence_number = new_sequence_number
workflow_run.workflow_id = self._workflow.id
workflow_run.type = self._workflow.type
workflow_run.triggered_from = triggered_from.value
workflow_run.version = self._workflow.version
workflow_run.graph = self._workflow.graph
workflow_run.inputs = json.dumps(inputs)
workflow_run.status = WorkflowRunStatus.RUNNING.value
workflow_run.created_by_role = (
CreatedByRole.ACCOUNT.value if isinstance(self._user, Account) else CreatedByRole.END_USER.value
)
workflow_run.created_by = self._user.id
# handle special values
inputs = WorkflowEntry.handle_special_values(inputs)
db.session.add(workflow_run)
db.session.commit()
db.session.refresh(workflow_run)
db.session.close()
# init workflow run
with Session(db.engine, expire_on_commit=False) as session:
workflow_run = WorkflowRun()
system_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID]
workflow_run.id = system_id or str(uuid4())
workflow_run.tenant_id = self._workflow.tenant_id
workflow_run.app_id = self._workflow.app_id
workflow_run.sequence_number = new_sequence_number
workflow_run.workflow_id = self._workflow.id
workflow_run.type = self._workflow.type
workflow_run.triggered_from = triggered_from.value
workflow_run.version = self._workflow.version
workflow_run.graph = self._workflow.graph
workflow_run.inputs = json.dumps(inputs)
workflow_run.status = WorkflowRunStatus.RUNNING
workflow_run.created_by_role = (
CreatedByRole.ACCOUNT if isinstance(self._user, Account) else CreatedByRole.END_USER
)
workflow_run.created_by = self._user.id
workflow_run.created_at = datetime.now(UTC).replace(tzinfo=None)
session.add(workflow_run)
session.commit()
return workflow_run

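The hunk above replaces the module-level `db.session` add/commit/refresh/close sequence with an explicit `Session(db.engine, expire_on_commit=False)` block and falls back to a fresh `uuid4()` when the system variables supply no run id. A minimal sketch of that pattern, assuming a SQLAlchemy engine and any mapped model class passed in by the caller:

```python
from uuid import uuid4

from sqlalchemy.orm import Session

def create_run(engine, run_cls, preset_id=None, **fields):
    """Persist one run row and hand it back in a usable state."""
    # expire_on_commit=False keeps the instance's attributes loaded after commit,
    # so the caller can read them without an extra refresh or an open session.
    with Session(engine, expire_on_commit=False) as session:
        run = run_cls(id=preset_id or str(uuid4()), **fields)
        session.add(run)
        session.commit()
        return run
```
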
@@ -144,7 +145,7 @@ class WorkflowCycleManage:
workflow_run.elapsed_time = time.perf_counter() - start_at
workflow_run.total_tokens = total_tokens
workflow_run.total_steps = total_steps
workflow_run.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow_run.finished_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()
db.session.refresh(workflow_run)

@@ -191,7 +192,7 @@ class WorkflowCycleManage:
workflow_run.elapsed_time = time.perf_counter() - start_at
workflow_run.total_tokens = total_tokens
workflow_run.total_steps = total_steps
workflow_run.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow_run.finished_at = datetime.now(UTC).replace(tzinfo=None)
db.session.commit()

@@ -211,7 +212,7 @@ class WorkflowCycleManage:
for workflow_node_execution in running_workflow_node_executions:
workflow_node_execution.status = WorkflowNodeExecutionStatus.FAILED.value
workflow_node_execution.error = error
workflow_node_execution.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow_node_execution.finished_at = datetime.now(UTC).replace(tzinfo=None)
workflow_node_execution.elapsed_time = (
workflow_node_execution.finished_at - workflow_node_execution.created_at
).total_seconds()

@@ -262,7 +263,7 @@ class WorkflowCycleManage:
NodeRunMetadataKey.ITERATION_ID: event.in_iteration_id,
}
)
workflow_node_execution.created_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow_node_execution.created_at = datetime.now(UTC).replace(tzinfo=None)
session.add(workflow_node_execution)
session.commit()

@@ -285,7 +286,7 @@ class WorkflowCycleManage:
execution_metadata = (
json.dumps(jsonable_encoder(event.execution_metadata)) if event.execution_metadata else None
)
finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
finished_at = datetime.now(UTC).replace(tzinfo=None)
elapsed_time = (finished_at - event.start_at).total_seconds()
db.session.query(WorkflowNodeExecution).filter(WorkflowNodeExecution.id == workflow_node_execution.id).update(

@@ -329,7 +330,7 @@ class WorkflowCycleManage:
inputs = WorkflowEntry.handle_special_values(event.inputs)
process_data = WorkflowEntry.handle_special_values(event.process_data)
outputs = WorkflowEntry.handle_special_values(event.outputs)
finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
finished_at = datetime.now(UTC).replace(tzinfo=None)
elapsed_time = (finished_at - event.start_at).total_seconds()
execution_metadata = (
json.dumps(jsonable_encoder(event.execution_metadata)) if event.execution_metadata else None

@@ -657,7 +658,7 @@ class WorkflowCycleManage:
if event.error is None
else WorkflowNodeExecutionStatus.FAILED,
error=None,
elapsed_time=(datetime.now(timezone.utc).replace(tzinfo=None) - event.start_at).total_seconds(),
elapsed_time=(datetime.now(UTC).replace(tzinfo=None) - event.start_at).total_seconds(),
total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,
execution_metadata=event.metadata,
finished_at=int(time.time()),

@@ -240,7 +240,7 @@ class ProviderConfiguration(BaseModel):
if provider_record:
provider_record.encrypted_config = json.dumps(credentials)
provider_record.is_valid = True
provider_record.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
provider_record.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
provider_record = Provider(

@@ -394,7 +394,7 @@ class ProviderConfiguration(BaseModel):
if provider_model_record:
provider_model_record.encrypted_config = json.dumps(credentials)
provider_model_record.is_valid = True
provider_model_record.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
provider_model_record.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
provider_model_record = ProviderModel(

@@ -468,7 +468,7 @@ class ProviderConfiguration(BaseModel):
if model_setting:
model_setting.enabled = True
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(

@@ -503,7 +503,7 @@ class ProviderConfiguration(BaseModel):
if model_setting:
model_setting.enabled = False
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(

@@ -570,7 +570,7 @@ class ProviderConfiguration(BaseModel):
if model_setting:
model_setting.load_balancing_enabled = True
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(

@@ -605,7 +605,7 @@ class ProviderConfiguration(BaseModel):
if model_setting:
model_setting.load_balancing_enabled = False
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
model_setting.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(

@@ -1,7 +1,7 @@
from enum import Enum
from enum import StrEnum

class FileType(str, Enum):
class FileType(StrEnum):
IMAGE = "image"
DOCUMENT = "document"
AUDIO = "audio"

@@ -16,7 +16,7 @@ class FileType(str, Enum):
raise ValueError(f"No matching enum found for value '{value}'")

class FileTransferMethod(str, Enum):
class FileTransferMethod(StrEnum):
REMOTE_URL = "remote_url"
LOCAL_FILE = "local_file"
TOOL_FILE = "tool_file"

@@ -29,7 +29,7 @@ class FileTransferMethod(str, Enum):
raise ValueError(f"No matching enum found for value '{value}'")

class FileBelongsTo(str, Enum):
class FileBelongsTo(StrEnum):
USER = "user"
ASSISTANT = "assistant"

@@ -41,7 +41,7 @@ class FileBelongsTo(str, Enum):
raise ValueError(f"No matching enum found for value '{value}'")

class FileAttribute(str, Enum):
class FileAttribute(StrEnum):
TYPE = "type"
SIZE = "size"
NAME = "name"

@@ -51,5 +51,5 @@ class FileAttribute(str, Enum):
EXTENSION = "extension"

class ArrayFileAttribute(str, Enum):
class ArrayFileAttribute(StrEnum):
LENGTH = "length"

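The `class X(str, Enum)` to `class X(StrEnum)` swaps repeated throughout this changeset rely on `enum.StrEnum`, which only exists on Python 3.11+ and is one more reason 3.10 support is dropped. Members still compare equal to their string values, but `str()` and f-strings now yield the value rather than the qualified member name. A minimal sketch of the behavioural difference on 3.11+:

```python
from enum import Enum, StrEnum

class OldFileType(str, Enum):      # pre-change pattern
    IMAGE = "image"

class NewFileType(StrEnum):        # post-change pattern
    IMAGE = "image"

assert NewFileType.IMAGE == "image" and OldFileType.IMAGE == "image"  # equality unchanged
assert str(NewFileType.IMAGE) == "image"                # StrEnum: str() is the value
assert str(OldFileType.IMAGE) == "OldFileType.IMAGE"    # str/Enum mixin: str() is the member name
assert f"{NewFileType.IMAGE}" == "image"                # f-strings interpolate the value too
```
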
@@ -1,6 +1,6 @@
import logging
from collections.abc import Mapping
from enum import Enum
from enum import StrEnum
from threading import Lock
from typing import Any, Optional

@@ -31,7 +31,7 @@ class CodeExecutionResponse(BaseModel):
data: Data

class CodeLanguage(str, Enum):
class CodeLanguage(StrEnum):
PYTHON3 = "python3"
JINJA2 = "jinja2"
JAVASCRIPT = "javascript"

@@ -86,7 +86,7 @@ class IndexingRunner:
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = "error"
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
except ObjectDeletedError:
logging.warning("Document deleted, document id: {}".format(dataset_document.id))

@@ -94,7 +94,7 @@ class IndexingRunner:
logging.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):

@@ -142,13 +142,13 @@ class IndexingRunner:
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = "error"
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):

@@ -200,13 +200,13 @@ class IndexingRunner:
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = "error"
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
db.session.commit()
def indexing_estimate(

@@ -372,7 +372,7 @@ class IndexingRunner:
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum(len(text_doc.page_content) for text_doc in text_docs),
DatasetDocument.parsing_completed_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DatasetDocument.parsing_completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
},
)

@@ -464,7 +464,7 @@ class IndexingRunner:
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
cur_time = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",

@@ -479,7 +479,7 @@ class IndexingRunner:
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DocumentSegment.indexing_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
},
)

@@ -680,7 +680,7 @@ class IndexingRunner:
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DatasetDocument.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
DatasetDocument.error: None,
},

@@ -705,7 +705,7 @@ class IndexingRunner:
{
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DocumentSegment.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
}
)

@@ -738,7 +738,7 @@ class IndexingRunner:
{
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DocumentSegment.completed_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
}
)

@@ -849,7 +849,7 @@ class IndexingRunner:
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
cur_time = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",

@@ -864,7 +864,7 @@ class IndexingRunner:
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
DocumentSegment.indexing_at: datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
},
)
pass

@@ -1,6 +1,6 @@
from abc import ABC
from collections.abc import Sequence
from enum import Enum
from enum import Enum, StrEnum
from typing import Literal, Optional
from pydantic import BaseModel, Field, field_validator

@@ -49,7 +49,7 @@ class PromptMessageFunction(BaseModel):
function: PromptMessageTool

class PromptMessageContentType(str, Enum):
class PromptMessageContentType(StrEnum):
"""
Enum class for prompt message content type.
"""

@@ -95,7 +95,7 @@ class ImagePromptMessageContent(PromptMessageContent):
Model class for image prompt message content.
"""
class DETAIL(str, Enum):
class DETAIL(StrEnum):
LOW = "low"
HIGH = "high"

@@ -1,5 +1,5 @@
from decimal import Decimal
from enum import Enum
from enum import Enum, StrEnum
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict

@@ -92,7 +92,7 @@ class ModelFeature(Enum):
AUDIO = "audio"

class DefaultParameterName(str, Enum):
class DefaultParameterName(StrEnum):
"""
Enum class for parameter template variable.
"""

@@ -453,7 +453,7 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
return credentials_kwargs
def _convert_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]:
def _convert_prompt_messages(self, prompt_messages: Sequence[PromptMessage]) -> tuple[str, list[dict]]:
"""
Convert prompt messages to dict list and system
"""

@@ -461,7 +461,15 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
first_loop = True
for message in prompt_messages:
if isinstance(message, SystemPromptMessage):
message.content = message.content.strip()
if isinstance(message.content, str):
message.content = message.content.strip()
elif isinstance(message.content, list):
# System prompt only support text
message.content = "".join(
c.data.strip() for c in message.content if isinstance(c, TextPromptMessageContent)
)
else:
raise ValueError(f"Unknown system prompt message content type {type(message.content)}")
if first_loop:
system = message.content
first_loop = False

@@ -779,7 +779,7 @@ LLM_BASE_MODELS = [
name="frequency_penalty",
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
),
_get_max_tokens(default=512, min_val=1, max_val=4096),
_get_max_tokens(default=512, min_val=1, max_val=16384),
ParameterRule(
name="seed",
label=I18nObject(zh_Hans="种子", en_US="Seed"),

@@ -15,9 +15,9 @@ parameter_rules:
use_template: max_tokens
required: true
type: int
default: 4096
default: 8192
min: 1
max: 4096
max: 8192
help:
zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.

@@ -16,9 +16,9 @@ parameter_rules:
use_template: max_tokens
required: true
type: int
default: 4096
default: 8192
min: 1
max: 4096
max: 8192
help:
zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.

@@ -5,6 +5,7 @@ label:
model_type: llm
features:
- agent-thought
- tool-call
- multi-tool-call
- stream-tool-call
model_properties:

@@ -72,7 +73,7 @@ parameter_rules:
- text
- json_object
pricing:
input: '1'
output: '2'
unit: '0.000001'
input: "1"
output: "2"
unit: "0.000001"
currency: RMB

@@ -5,6 +5,7 @@ label:
model_type: llm
features:
- agent-thought
- tool-call
- multi-tool-call
- stream-tool-call
model_properties:

@@ -1,18 +1,17 @@
from collections.abc import Generator
from typing import Optional, Union
from urllib.parse import urlparse
import tiktoken
from yarl import URL
from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.message_entities import (
PromptMessage,
PromptMessageTool,
)
from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel

class DeepSeekLargeLanguageModel(OpenAILargeLanguageModel):
class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel):
def _invoke(
self,
model: str,

@@ -25,92 +24,15 @@ class DeepSeekLargeLanguageModel(OpenAILargeLanguageModel):
user: Optional[str] = None,
) -> Union[LLMResult, Generator]:
self._add_custom_parameters(credentials)
return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream)
def validate_credentials(self, model: str, credentials: dict) -> None:
self._add_custom_parameters(credentials)
super().validate_credentials(model, credentials)
# refactored from openai model runtime, use cl100k_base for calculate token number
def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int:
"""
Calculate num tokens for text completion model with tiktoken package.
:param model: model name
:param text: prompt text
:param tools: tools for tool calling
:return: number of tokens
"""
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(text))
if tools:
num_tokens += self._num_tokens_for_tools(encoding, tools)
return num_tokens
# refactored from openai model runtime, use cl100k_base for calculate token number
def _num_tokens_from_messages(
self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None
) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
encoding = tiktoken.get_encoding("cl100k_base")
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
# TODO: The current token calculation method for the image type is not implemented,
# which need to download the image and then get the resolution for calculation,
# and will increase the request delay
if isinstance(value, list):
text = ""
for item in value:
if isinstance(item, dict) and item["type"] == "text":
text += item["text"]
value = text
if key == "tool_calls":
for tool_call in value:
for t_key, t_value in tool_call.items():
num_tokens += len(encoding.encode(t_key))
if t_key == "function":
for f_key, f_value in t_value.items():
num_tokens += len(encoding.encode(f_key))
num_tokens += len(encoding.encode(f_value))
else:
num_tokens += len(encoding.encode(t_key))
num_tokens += len(encoding.encode(t_value))
else:
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
if tools:
num_tokens += self._num_tokens_for_tools(encoding, tools)
return num_tokens
@staticmethod
def _add_custom_parameters(credentials: dict) -> None:
credentials["mode"] = "chat"
credentials["openai_api_key"] = credentials["api_key"]
if "endpoint_url" not in credentials or credentials["endpoint_url"] == "":
credentials["openai_api_base"] = "https://api.deepseek.com"
else:
parsed_url = urlparse(credentials["endpoint_url"])
credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}"
def _add_custom_parameters(credentials) -> None:
credentials["endpoint_url"] = str(URL(credentials.get("endpoint_url", "https://api.deepseek.com")))
credentials["mode"] = LLMMode.CHAT.value
credentials["function_calling_type"] = "tool_call"
credentials["stream_function_calling"] = "support"

@@ -18,7 +18,8 @@ class FishAudioProvider(ModelProvider):
"""
try:
model_instance = self.get_model_instance(ModelType.TTS)
model_instance.validate_credentials(credentials=credentials)
# FIXME fish tts do not have model for now, so set it to empty string instead
model_instance.validate_credentials(model="", credentials=credentials)
except CredentialsValidateFailedError as ex:
raise ex
except Exception as ex:

@@ -66,7 +66,7 @@ class FishAudioText2SpeechModel(TTSModel):
voice=voice,
)
def validate_credentials(self, credentials: dict, user: Optional[str] = None) -> None:
def validate_credentials(self, model: str, credentials: dict, user: Optional[str] = None) -> None:
"""
Validate credentials for text2speech model

@@ -76,7 +76,7 @@ class FishAudioText2SpeechModel(TTSModel):
try:
self.get_tts_model_voices(
None,
"",
credentials={
"api_key": credentials["api_key"],
"api_base": credentials["api_base"],

@@ -122,7 +122,7 @@ class GiteeAIRerankModel(RerankModel):
label=I18nObject(en_US=model),
model_type=ModelType.RERANK,
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
)
return entity

@@ -140,7 +140,7 @@ class GPUStackRerankModel(RerankModel):
label=I18nObject(en_US=model),
model_type=ModelType.RERANK,
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
)
return entity

@@ -34,3 +34,11 @@ model_credential_schema:
placeholder:
zh_Hans: 在此输入Text Embedding Inference的服务器地址,如 http://192.168.1.100:8080
en_US: Enter the url of your Text Embedding Inference, e.g. http://192.168.1.100:8080
- variable: api_key
label:
en_US: API Key
type: secret-input
required: false
placeholder:
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key

@@ -51,8 +51,13 @@ class HuggingfaceTeiRerankModel(RerankModel):

        server_url = server_url.removesuffix("/")

        headers = {"Content-Type": "application/json"}
        api_key = credentials.get("api_key")
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"

        try:
            results = TeiHelper.invoke_rerank(server_url, query, docs)
            results = TeiHelper.invoke_rerank(server_url, query, docs, headers)

            rerank_documents = []
            for result in results:
@@ -80,7 +85,11 @@ class HuggingfaceTeiRerankModel(RerankModel):
        """
        try:
            server_url = credentials["server_url"]
            extra_args = TeiHelper.get_tei_extra_parameter(server_url, model)
            headers = {"Content-Type": "application/json"}
            api_key = credentials.get("api_key")
            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"
            extra_args = TeiHelper.get_tei_extra_parameter(server_url, model, headers)
            if extra_args.model_type != "reranker":
                raise CredentialsValidateFailedError("Current model is not a rerank model")

@@ -26,13 +26,15 @@ cache_lock = Lock()

class TeiHelper:
    @staticmethod
    def get_tei_extra_parameter(server_url: str, model_name: str) -> TeiModelExtraParameter:
    def get_tei_extra_parameter(
        server_url: str, model_name: str, headers: Optional[dict] = None
    ) -> TeiModelExtraParameter:
        TeiHelper._clean_cache()
        with cache_lock:
            if model_name not in cache:
                cache[model_name] = {
                    "expires": time() + 300,
                    "value": TeiHelper._get_tei_extra_parameter(server_url),
                    "value": TeiHelper._get_tei_extra_parameter(server_url, headers),
                }
            return cache[model_name]["value"]

@@ -47,7 +49,7 @@ class TeiHelper:
            pass

    @staticmethod
    def _get_tei_extra_parameter(server_url: str) -> TeiModelExtraParameter:
    def _get_tei_extra_parameter(server_url: str, headers: Optional[dict] = None) -> TeiModelExtraParameter:
        """
        get tei model extra parameter like model_type, max_input_length, max_batch_requests
        """
@@ -61,7 +63,7 @@ class TeiHelper:
        session.mount("https://", HTTPAdapter(max_retries=3))

        try:
            response = session.get(url, timeout=10)
            response = session.get(url, headers=headers, timeout=10)
        except (MissingSchema, ConnectionError, Timeout) as e:
            raise RuntimeError(f"get tei model extra parameter failed, url: {url}, error: {e}")
        if response.status_code != 200:
@@ -86,7 +88,7 @@ class TeiHelper:
        )

    @staticmethod
    def invoke_tokenize(server_url: str, texts: list[str]) -> list[list[dict]]:
    def invoke_tokenize(server_url: str, texts: list[str], headers: Optional[dict] = None) -> list[list[dict]]:
        """
        Invoke tokenize endpoint

@@ -114,15 +116,15 @@ class TeiHelper:
        :param server_url: server url
        :param texts: texts to tokenize
        """
        resp = httpx.post(
            f"{server_url}/tokenize",
            json={"inputs": texts},
        )
        url = f"{server_url}/tokenize"
        json_data = {"inputs": texts}
        resp = httpx.post(url, json=json_data, headers=headers)

        resp.raise_for_status()
        return resp.json()

    @staticmethod
    def invoke_embeddings(server_url: str, texts: list[str]) -> dict:
    def invoke_embeddings(server_url: str, texts: list[str], headers: Optional[dict] = None) -> dict:
        """
        Invoke embeddings endpoint

@@ -147,15 +149,14 @@ class TeiHelper:
        :param texts: texts to embed
        """
        # Use OpenAI compatible API here, which has usage tracking
        resp = httpx.post(
            f"{server_url}/v1/embeddings",
            json={"input": texts},
        )
        url = f"{server_url}/v1/embeddings"
        json_data = {"input": texts}
        resp = httpx.post(url, json=json_data, headers=headers)
        resp.raise_for_status()
        return resp.json()

    @staticmethod
    def invoke_rerank(server_url: str, query: str, docs: list[str]) -> list[dict]:
    def invoke_rerank(server_url: str, query: str, docs: list[str], headers: Optional[dict] = None) -> list[dict]:
        """
        Invoke rerank endpoint

@@ -173,10 +174,7 @@ class TeiHelper:
        :param candidates: candidates to rerank
        """
        params = {"query": query, "texts": docs, "return_text": True}

        response = httpx.post(
            server_url + "/rerank",
            json=params,
        )
        url = f"{server_url}/rerank"
        response = httpx.post(url, json=params, headers=headers)
        response.raise_for_status()
        return response.json()

@@ -51,6 +51,10 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):

        server_url = server_url.removesuffix("/")

        headers = {"Content-Type": "application/json"}
        api_key = credentials["api_key"]
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        # get model properties
        context_size = self._get_context_size(model, credentials)
        max_chunks = self._get_max_chunks(model, credentials)
@@ -60,7 +64,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
        used_tokens = 0

        # get tokenized results from TEI
        batched_tokenize_result = TeiHelper.invoke_tokenize(server_url, texts)
        batched_tokenize_result = TeiHelper.invoke_tokenize(server_url, texts, headers)

        for i, (text, tokenize_result) in enumerate(zip(texts, batched_tokenize_result)):
            # Check if the number of tokens is larger than the context size
@@ -97,7 +101,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
            used_tokens = 0
            for i in _iter:
                iter_texts = inputs[i : i + max_chunks]
                results = TeiHelper.invoke_embeddings(server_url, iter_texts)
                results = TeiHelper.invoke_embeddings(server_url, iter_texts, headers)
                embeddings = results["data"]
                embeddings = [embedding["embedding"] for embedding in embeddings]
                batched_embeddings.extend(embeddings)
@@ -127,7 +131,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):

        server_url = server_url.removesuffix("/")

        batch_tokens = TeiHelper.invoke_tokenize(server_url, texts)
        headers = {
            "Authorization": f"Bearer {credentials.get('api_key')}",
        }

        batch_tokens = TeiHelper.invoke_tokenize(server_url, texts, headers)
        num_tokens = sum(len(tokens) for tokens in batch_tokens)
        return num_tokens

@@ -141,7 +149,14 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
        """
        try:
            server_url = credentials["server_url"]
            extra_args = TeiHelper.get_tei_extra_parameter(server_url, model)
            headers = {"Content-Type": "application/json"}

            api_key = credentials.get("api_key")

            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"

            extra_args = TeiHelper.get_tei_extra_parameter(server_url, model, headers)
            print(extra_args)
            if extra_args.model_type != "embedding":
                raise CredentialsValidateFailedError("Current model is not a embedding model")

@@ -128,7 +128,7 @@ class JinaRerankModel(RerankModel):
            label=I18nObject(en_US=model),
            model_type=ModelType.RERANK,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))},
        )

        return entity

@@ -193,7 +193,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
            label=I18nObject(en_US=model),
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))},
        )

        return entity

@@ -139,7 +139,7 @@ class OllamaEmbeddingModel(TextEmbeddingModel):
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[],

@@ -943,6 +943,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
            }
        elif isinstance(message, SystemPromptMessage):
            message = cast(SystemPromptMessage, message)
            if isinstance(message.content, list):
                text_contents = filter(lambda c: isinstance(c, TextPromptMessageContent), message.content)
                message.content = "".join(c.data for c in text_contents)
            message_dict = {"role": "system", "content": message.content}
        elif isinstance(message, ToolPromptMessage):
            message = cast(ToolPromptMessage, message)

@@ -176,7 +176,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[],

@@ -182,7 +182,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[],

@@ -24,4 +24,3 @@
- meta-llama/Meta-Llama-3.1-8B-Instruct
- google/gemma-2-27b-it
- google/gemma-2-9b-it
- deepseek-ai/DeepSeek-V2-Chat

@@ -18,6 +18,7 @@ supported_model_types:
  - text-embedding
  - rerank
  - speech2text
  - tts
configurate_methods:
  - predefined-model
  - customizable-model

@@ -0,0 +1,37 @@
model: fishaudio/fish-speech-1.4
model_type: tts
model_properties:
  default_voice: 'fishaudio/fish-speech-1.4:alex'
  voices:
    - mode: "fishaudio/fish-speech-1.4:alex"
      name: "Alex(男声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:benjamin"
      name: "Benjamin(男声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:charles"
      name: "Charles(男声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:david"
      name: "David(男声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:anna"
      name: "Anna(女声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:bella"
      name: "Bella(女声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:claire"
      name: "Claire(女声)"
      language: [ "zh-Hans", "en-US" ]
    - mode: "fishaudio/fish-speech-1.4:diana"
      name: "Diana(女声)"
      language: [ "zh-Hans", "en-US" ]
  audio_type: 'mp3'
  max_workers: 5
# stream: false
pricing:
  input: '0.015'
  output: '0'
  unit: '0.001'
  currency: RMB

api/core/model_runtime/model_providers/siliconflow/tts/tts.py (new file, 105 lines)
@@ -0,0 +1,105 @@
import concurrent.futures
from typing import Any, Optional

from openai import OpenAI

from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.tts_model import TTSModel
from core.model_runtime.model_providers.openai._common import _CommonOpenAI


class SiliconFlowText2SpeechModel(_CommonOpenAI, TTSModel):
    """
    Model class for SiliconFlow Speech to text model.
    """

    def _invoke(
        self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None
    ) -> Any:
        """
        _invoke text2speech model

        :param model: model name
        :param tenant_id: user tenant id
        :param credentials: model credentials
        :param content_text: text content to be translated
        :param voice: model timbre
        :param user: unique user id
        :return: text translated to audio file
        """
        if not voice or voice not in [
            d["value"] for d in self.get_tts_model_voices(model=model, credentials=credentials)
        ]:
            voice = self._get_model_default_voice(model, credentials)
        # if streaming:
        return self._tts_invoke_streaming(model=model, credentials=credentials, content_text=content_text, voice=voice)

    def validate_credentials(self, model: str, credentials: dict, user: Optional[str] = None) -> None:
        """
        validate credentials text2speech model

        :param model: model name
        :param credentials: model credentials
        :param user: unique user id
        :return: text translated to audio file
        """
        try:
            self._tts_invoke_streaming(
                model=model,
                credentials=credentials,
                content_text="Hello SiliconFlow!",
                voice=self._get_model_default_voice(model, credentials),
            )
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> Any:
        """
        _tts_invoke_streaming text2speech model

        :param model: model name
        :param credentials: model credentials
        :param content_text: text content to be translated
        :param voice: model timbre
        :return: text translated to audio file
        """
        try:
            # doc: https://docs.siliconflow.cn/capabilities/text-to-speech
            self._add_custom_parameters(credentials)
            credentials_kwargs = self._to_credential_kwargs(credentials)
            client = OpenAI(**credentials_kwargs)
            model_support_voice = [
                x.get("value") for x in self.get_tts_model_voices(model=model, credentials=credentials)
            ]
            if not voice or voice not in model_support_voice:
                voice = self._get_model_default_voice(model, credentials)
            if len(content_text) > 4096:
                sentences = self._split_text_into_sentences(content_text, max_length=4096)
                executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences)))
                futures = [
                    executor.submit(
                        client.audio.speech.with_streaming_response.create,
                        model=model,
                        response_format="mp3",
                        input=sentences[i],
                        voice=voice,
                    )
                    for i in range(len(sentences))
                ]
                for future in futures:
                    yield from future.result().__enter__().iter_bytes(1024)  # noqa:PLC2801

            else:
                response = client.audio.speech.with_streaming_response.create(
                    model=model, voice=voice, response_format="mp3", input=content_text.strip()
                )

                yield from response.__enter__().iter_bytes(1024)  # noqa:PLC2801
        except Exception as ex:
            raise InvokeBadRequestError(str(ex))

    @classmethod
    def _add_custom_parameters(cls, credentials: dict) -> None:
        credentials["openai_api_base"] = "https://api.siliconflow.cn"
        credentials["openai_api_key"] = credentials["api_key"]

@@ -173,7 +173,7 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[],

@@ -166,7 +166,7 @@ class VoyageTextEmbeddingModel(TextEmbeddingModel):
            label=I18nObject(en_US=model),
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
        )

        return entity

@@ -1,9 +1,12 @@
model: grok-beta
label:
  en_US: Grok beta
  en_US: Grok Beta
model_type: llm
features:
  - agent-thought
  - tool-call
  - multi-tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 131072

@@ -0,0 +1,64 @@
model: grok-vision-beta
label:
  en_US: Grok Vision Beta
model_type: llm
features:
  - agent-thought
  - vision
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    label:
      en_US: "Temperature"
      zh_Hans: "采样温度"
    type: float
    default: 0.7
    min: 0.0
    max: 2.0
    precision: 1
    required: true
    help:
      en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: top_p
    label:
      en_US: "Top P"
      zh_Hans: "Top P"
    type: float
    default: 0.7
    min: 0.0
    max: 1.0
    precision: 1
    required: true
    help:
      en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time."
      zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。"

  - name: frequency_penalty
    use_template: frequency_penalty
    label:
      en_US: "Frequency Penalty"
      zh_Hans: "频率惩罚"
    type: float
    default: 0
    min: 0
    max: 2.0
    precision: 1
    required: false
    help:
      en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
      zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。"

  - name: user
    use_template: text
    label:
      en_US: "User"
      zh_Hans: "用户"
    type: string
    required: false
    help:
      en_US: "Used to track and differentiate conversation requests from different users."
      zh_Hans: "用于追踪和区分不同用户的对话请求。"

@@ -35,3 +35,5 @@ class XAILargeLanguageModel(OAIAPICompatLargeLanguageModel):
        credentials["endpoint_url"] = str(URL(credentials["endpoint_url"])) or "https://api.x.ai/v1"
        credentials["mode"] = LLMMode.CHAT.value
        credentials["function_calling_type"] = "tool_call"
        credentials["stream_function_calling"] = "support"
        credentials["vision_support"] = "support"

@@ -63,6 +63,9 @@ from core.model_runtime.model_providers.xinference.xinference_helper import (
)
from core.model_runtime.utils import helper

DEFAULT_MAX_RETRIES = 3
DEFAULT_INVOKE_TIMEOUT = 60


class XinferenceAILargeLanguageModel(LargeLanguageModel):
    def _invoke(
@@ -315,7 +318,12 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
            message_dict = {"role": "system", "content": message.content}
        elif isinstance(message, ToolPromptMessage):
            message = cast(ToolPromptMessage, message)
            message_dict = {"tool_call_id": message.tool_call_id, "role": "tool", "content": message.content}
            message_dict = {
                "tool_call_id": message.tool_call_id,
                "role": "tool",
                "content": message.content,
                "name": message.name,
            }
        else:
            raise ValueError(f"Unknown message type {type(message)}")

@@ -466,8 +474,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
        client = OpenAI(
            base_url=f'{credentials["server_url"]}/v1',
            api_key=api_key,
            max_retries=3,
            timeout=60,
            max_retries=int(credentials.get("max_retries") or DEFAULT_MAX_RETRIES),
            timeout=int(credentials.get("invoke_timeout") or DEFAULT_INVOKE_TIMEOUT),
        )

        xinference_client = Client(

@@ -56,3 +56,23 @@ model_credential_schema:
      placeholder:
        zh_Hans: 在此输入您的API密钥
        en_US: Enter the api key
    - variable: invoke_timeout
      label:
        zh_Hans: 调用超时时间 (单位:秒)
        en_US: invoke timeout (unit:second)
      type: text-input
      required: true
      default: '60'
      placeholder:
        zh_Hans: 在此输入调用超时时间
        en_US: Enter invoke timeout value
    - variable: max_retries
      label:
        zh_Hans: 调用重试次数
        en_US: max retries
      type: text-input
      required: true
      default: '3'
      placeholder:
        zh_Hans: 在此输入调用重试次数
        en_US: Enter max retries

@@ -105,17 +105,6 @@ class ZhipuAITextEmbeddingModel(_CommonZhipuaiAI, TextEmbeddingModel):

        return [list(map(float, e)) for e in embeddings], embedding_used_tokens

    def embed_query(self, text: str) -> list[float]:
        """Call out to ZhipuAI's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]

    def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
        """
        Calculate response usage

@@ -1,5 +1,5 @@
from datetime import datetime
from enum import Enum
from enum import StrEnum
from typing import Any, Optional, Union

from pydantic import BaseModel, ConfigDict, field_validator
@@ -122,7 +122,7 @@ trace_info_info_map = {
}


class TraceTaskName(str, Enum):
class TraceTaskName(StrEnum):
    CONVERSATION_TRACE = "conversation"
    WORKFLOW_TRACE = "workflow"
    MESSAGE_TRACE = "message"

@@ -1,5 +1,5 @@
from datetime import datetime
from enum import Enum
from enum import StrEnum
from typing import Any, Optional, Union

from pydantic import BaseModel, ConfigDict, Field, field_validator
@@ -39,7 +39,7 @@ def validate_input_output(v, field_name):
    return v


class LevelEnum(str, Enum):
class LevelEnum(StrEnum):
    DEBUG = "DEBUG"
    WARNING = "WARNING"
    ERROR = "ERROR"
@@ -178,7 +178,7 @@ class LangfuseSpan(BaseModel):
        return validate_input_output(v, field_name)


class UnitEnum(str, Enum):
class UnitEnum(StrEnum):
    CHARACTERS = "CHARACTERS"
    TOKENS = "TOKENS"
    SECONDS = "SECONDS"

@@ -1,5 +1,5 @@
from datetime import datetime
from enum import Enum
from enum import StrEnum
from typing import Any, Optional, Union

from pydantic import BaseModel, Field, field_validator
@@ -8,7 +8,7 @@ from pydantic_core.core_schema import ValidationInfo
from core.ops.utils import replace_text_with_content


class LangSmithRunType(str, Enum):
class LangSmithRunType(StrEnum):
    tool = "tool"
    chain = "chain"
    llm = "llm"

@@ -445,7 +445,7 @@ class TraceTask:
            "ls_provider": message_data.model_provider,
            "ls_model_name": message_data.model_id,
            "status": message_data.status,
            "from_end_user_id": message_data.from_account_id,
            "from_end_user_id": message_data.from_end_user_id,
            "from_account_id": message_data.from_account_id,
            "agent_based": message_data.agent_based,
            "workflow_run_id": message_data.workflow_run_id,
@@ -521,7 +521,7 @@ class TraceTask:
            "ls_provider": message_data.model_provider,
            "ls_model_name": message_data.model_id,
            "status": message_data.status,
            "from_end_user_id": message_data.from_account_id,
            "from_end_user_id": message_data.from_end_user_id,
            "from_account_id": message_data.from_account_id,
            "agent_based": message_data.agent_based,
            "workflow_run_id": message_data.workflow_run_id,
@@ -570,7 +570,7 @@ class TraceTask:
            "ls_provider": message_data.model_provider,
            "ls_model_name": message_data.model_id,
            "status": message_data.status,
            "from_end_user_id": message_data.from_account_id,
            "from_end_user_id": message_data.from_end_user_id,
            "from_account_id": message_data.from_account_id,
            "agent_based": message_data.agent_based,
            "workflow_run_id": message_data.workflow_run_id,

@@ -23,7 +23,7 @@ if TYPE_CHECKING:
    from core.file.models import File


class ModelMode(str, enum.Enum):
class ModelMode(enum.StrEnum):
    COMPLETION = "completion"
    CHAT = "chat"


@@ -1,5 +1,5 @@
from enum import Enum
from enum import StrEnum


class KeyWordType(str, Enum):
class KeyWordType(StrEnum):
    JIEBA = "jieba"

@@ -1,7 +1,7 @@
from enum import Enum
from enum import StrEnum


class VectorType(str, Enum):
class VectorType(StrEnum):
    ANALYTICDB = "analyticdb"
    CHROMA = "chroma"
    MILVUS = "milvus"

@@ -114,10 +114,10 @@ class WordExtractor(BaseExtractor):
            mime_type=mime_type or "",
            created_by=self.user_id,
            created_by_role=CreatedByRole.ACCOUNT,
            created_at=datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
            created_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
            used=True,
            used_by=self.user_id,
            used_at=datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None),
            used_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
        )

        db.session.add(upload_file)

@@ -1,6 +1,6 @@
from enum import Enum
from enum import StrEnum


class RerankMode(str, Enum):
class RerankMode(StrEnum):
    RERANKING_MODEL = "reranking_model"
    WEIGHTED_SCORE = "weighted_score"

@@ -1,4 +1,4 @@
from enum import Enum
from enum import Enum, StrEnum
from typing import Any, Optional, Union, cast

from pydantic import BaseModel, Field, field_validator
@@ -137,7 +137,7 @@ class ToolParameterOption(BaseModel):


class ToolParameter(BaseModel):
    class ToolParameterType(str, Enum):
    class ToolParameterType(StrEnum):
        STRING = "string"
        NUMBER = "number"
        BOOLEAN = "boolean"

@@ -1,3 +1,4 @@
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties, fontManager

@@ -5,7 +6,7 @@ from core.tools.provider.builtin_tool_provider import BuiltinToolProviderControl

def set_chinese_font():
    font_list = [
    to_find_fonts = [
        "PingFang SC",
        "SimHei",
        "Microsoft YaHei",
@@ -15,16 +16,16 @@ def set_chinese_font():
        "Noto Sans CJK SC",
        "Noto Sans CJK JP",
    ]

    for font in font_list:
        if font in fontManager.ttflist:
            chinese_font = FontProperties(font)
            if chinese_font.get_name() == font:
                return chinese_font
    installed_fonts = frozenset(fontInfo.name for fontInfo in fontManager.ttflist)
    for font in to_find_fonts:
        if font in installed_fonts:
            return FontProperties(font)

    return FontProperties()


# use non-interactive backend to prevent `RuntimeError: main thread is not in main loop`
matplotlib.use("Agg")
# use a business theme
plt.style.use("seaborn-v0_8-darkgrid")
plt.rcParams["axes.unicode_minus"] = False

@@ -18,6 +18,12 @@ class DuckDuckGoImageSearchTool(BuiltinTool):
            "size": tool_parameters.get("size"),
            "max_results": tool_parameters.get("max_results"),
        }

        # Add query_prefix handling
        query_prefix = tool_parameters.get("query_prefix", "").strip()
        final_query = f"{query_prefix} {query_dict['keywords']}".strip()
        query_dict["keywords"] = final_query

        response = DDGS().images(**query_dict)
        markdown_result = "\n\n"
        json_result = []

@@ -86,3 +86,14 @@ parameters:
      en_US: The size of the image to be searched.
      zh_Hans: 要搜索的图片的大小
    form: form
  - name: query_prefix
    label:
      en_US: Query Prefix
      zh_Hans: 查询前缀
    type: string
    required: false
    default: ""
    form: form
    human_description:
      en_US: Specific Search e.g. "site:unsplash.com"
      zh_Hans: 定向搜索 e.g. "site:unsplash.com"

@@ -7,7 +7,7 @@ from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool

SUMMARY_PROMPT = """
User's query:
User's query:
{query}

Here are the news results:
@@ -30,6 +30,12 @@ class DuckDuckGoNewsSearchTool(BuiltinTool):
            "safesearch": "moderate",
            "region": "wt-wt",
        }

        # Add query_prefix handling
        query_prefix = tool_parameters.get("query_prefix", "").strip()
        final_query = f"{query_prefix} {query_dict['keywords']}".strip()
        query_dict["keywords"] = final_query

        try:
            response = list(DDGS().news(**query_dict))
            if not response:

@@ -69,3 +69,14 @@ parameters:
      en_US: Whether to pass the news results to llm for summarization.
      zh_Hans: 是否需要将新闻结果传给大模型总结
    form: form
  - name: query_prefix
    label:
      en_US: Query Prefix
      zh_Hans: 查询前缀
    type: string
    required: false
    default: ""
    form: form
    human_description:
      en_US: Specific Search e.g. "site:msn.com"
      zh_Hans: 定向搜索 e.g. "site:msn.com"

@@ -7,7 +7,7 @@ from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool

SUMMARY_PROMPT = """
User's query:
User's query:
{query}

Here is the search engine result:
@@ -26,7 +26,12 @@ class DuckDuckGoSearchTool(BuiltinTool):
        query = tool_parameters.get("query")
        max_results = tool_parameters.get("max_results", 5)
        require_summary = tool_parameters.get("require_summary", False)
        response = DDGS().text(query, max_results=max_results)

        # Add query_prefix handling
        query_prefix = tool_parameters.get("query_prefix", "").strip()
        final_query = f"{query_prefix} {query}".strip()

        response = DDGS().text(final_query, max_results=max_results)
        if require_summary:
            results = "\n".join([res.get("body") for res in response])
            results = self.summary_results(user_id=user_id, content=results, query=query)

@@ -39,3 +39,14 @@ parameters:
      en_US: Whether to pass the search results to llm for summarization.
      zh_Hans: 是否需要将搜索结果传给大模型总结
    form: form
  - name: query_prefix
    label:
      en_US: Query Prefix
      zh_Hans: 查询前缀
    type: string
    required: false
    default: ""
    form: form
    human_description:
      en_US: Specific Search e.g. "site:wikipedia.org"
      zh_Hans: 定向搜索 e.g. "site:wikipedia.org"

@@ -24,7 +24,7 @@ max-width: 100%; border-radius: 8px;">

    def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInvokeMessage]:
        query_dict = {
            "keywords": tool_parameters.get("query"),
            "keywords": tool_parameters.get("query"), # LLM's query
            "region": tool_parameters.get("region", "wt-wt"),
            "safesearch": tool_parameters.get("safesearch", "moderate"),
            "timelimit": tool_parameters.get("timelimit"),
@@ -40,6 +40,12 @@ max-width: 100%; border-radius: 8px;">
        # Get proxy URL from parameters
        proxy_url = tool_parameters.get("proxy_url", "").strip()

        query_prefix = tool_parameters.get("query_prefix", "").strip()
        final_query = f"{query_prefix} {query_dict['keywords']}".strip()

        # Update the keywords in query_dict with the final_query
        query_dict["keywords"] = final_query

        response = DDGS().videos(**query_dict)

        # Create HTML result with embedded iframes
@@ -51,9 +57,13 @@ max-width: 100%; border-radius: 8px;">
            embed_html = res.get("embed_html", "")
            description = res.get("description", "")
            content_url = res.get("content", "")
            transcript_url = None

            # Handle TED.com videos
            if not embed_html and "ted.com/talks" in content_url:
            if "ted.com/talks" in content_url:
                # Create transcript URL
                transcript_url = f"{content_url}/transcript"
                # Create embed URL
                embed_url = content_url.replace("www.ted.com", "embed.ted.com")
                if proxy_url:
                    embed_url = f"{proxy_url}{embed_url}"
@@ -68,8 +78,14 @@ max-width: 100%; border-radius: 8px;">

            markdown_result += f"{title}\n\n"
            markdown_result += f"{embed_html}\n\n"
            if description:
                markdown_result += f"{description}\n\n"
            markdown_result += "---\n\n"

            json_result.append(self.create_json_message(res))
            # Add transcript_url to the JSON result if available
            result_dict = res.copy()
            if transcript_url:
                result_dict["transcript_url"] = transcript_url
            json_result.append(self.create_json_message(result_dict))

        return [self.create_text_message(markdown_result)] + json_result

@@ -95,3 +95,14 @@ parameters:
      en_US: Proxy URL
      zh_Hans: 视频代理地址
    form: form
  - name: query_prefix
    label:
      en_US: Query Prefix
      zh_Hans: 查询前缀
    type: string
    required: false
    default: ""
    form: form
    human_description:
      en_US: Specific Search e.g. "site:www.ted.com"
      zh_Hans: 定向搜索 e.g. "site:www.ted.com"

@@ -69,14 +69,16 @@ class GitlabFilesTool(BuiltinTool):
                        self.fetch_files(site_url, access_token, identifier, branch, item_path, is_repository)
                    )
                else: # It's a file
                    encoded_item_path = urllib.parse.quote(item_path, safe="")
                    if is_repository:
                        file_url = (
                            f"{domain}/api/v4/projects/{encoded_identifier}/repository/files"
                            f"/{item_path}/raw?ref={branch}"
                            f"/{encoded_item_path}/raw?ref={branch}"
                        )
                    else:
                        file_url = (
                            f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}"
                            f"{domain}/api/v4/projects/{project_id}/repository/files"
                            f"{encoded_item_path}/raw?ref={branch}"
                        )

                    file_response = requests.get(file_url, headers=headers)

@@ -45,7 +45,7 @@ class SearchAPI:
    def _process_response(res: dict, type: str) -> str:
        """Process response from SearchAPI."""
        if "error" in res:
            raise ValueError(f"Got error from SearchApi: {res['error']}")
            return res["error"]

        toret = ""
        if type == "text":

@@ -45,7 +45,7 @@ class SearchAPI:
    def _process_response(res: dict, type: str) -> str:
        """Process response from SearchAPI."""
        if "error" in res:
            raise ValueError(f"Got error from SearchApi: {res['error']}")
            return res["error"]

        toret = ""
        if type == "text":

@@ -45,7 +45,7 @@ class SearchAPI:
    def _process_response(res: dict, type: str) -> str:
        """Process response from SearchAPI."""
        if "error" in res:
            raise ValueError(f"Got error from SearchApi: {res['error']}")
            return res["error"]

        toret = ""
        if type == "text":

@@ -45,7 +45,7 @@ class SearchAPI:
    def _process_response(res: dict) -> str:
        """Process response from SearchAPI."""
        if "error" in res:
            raise ValueError(f"Got error from SearchApi: {res['error']}")
            return res["error"]

        toret = ""
        if "transcripts" in res and "text" in res["transcripts"][0]:

Some files were not shown because too many files have changed in this diff.