Compare commits


22 Commits

Author SHA1 Message Date
-LAN-
c2328cb676 chore: update image versions to 0.9.1-fix1 2024-10-11 00:56:37 +08:00
-LAN-
36d3221a05 fix(migrations): correct parent_message_id for service-api records (#9132) 2024-10-10 16:47:35 +08:00
-LAN-
40f2e7d821 release(all): bump version to 0.9.2
- Added new release branch "release/0.9.2" to GitHub workflow.
- Updated the CURRENT_VERSION field in the packaging configuration.
- Updated Docker images for api, worker, and web services to version 0.9.2.
- Updated web application package.json version to 0.9.2.
2024-10-10 16:47:04 +08:00
呆萌闷油瓶
f45042aa8e fix:ddg ratelimit 202 (#9047) 2024-10-07 22:13:41 +08:00
aiscrm
2ab8bc679f fix: Missing model information in llm span of Langfuse #9029 (#9030)
Co-authored-by: corel <corelchen@qq.com>
2024-10-07 18:03:30 +08:00
zhuhao
2571b0c4e3 feat: add baidu obs storage (#9024) 2024-10-07 11:09:27 +08:00
zhuhao
959a81a41b refactor: remove the duplicate definitions across different modules (#9022) 2024-10-07 11:08:06 +08:00
Bowen Liang
4480b469a6 chore: fix the yanked dependency vesion aiohappyeyeballs 2.4.2 (#9020) 2024-10-07 11:07:34 +08:00
zg0d233
fcfa1252a0 fix bug when adding openai or openai-compatible stt model instance (#9006) 2024-10-07 11:06:38 +08:00
zhuhao
e1e2d0b364 fix: failed to open links to images generated by QR code tool when using Huawei OBS (#9034) 2024-10-07 11:06:08 +08:00
crazywoola
9815a0911b fix: tools description is missing (#8999) 2024-10-03 21:53:11 +08:00
Giannis Kepas
dc5839b6bb feat: Update AWS Bedrock supported regions (#8992) 2024-10-03 15:17:28 +08:00
zhaoyi233
4373777871 Update json_in_md_parser.py (#8983)
Co-authored-by: crazywoola <427733928@qq.com>
2024-10-03 10:20:56 +08:00
ice yao
415d27c8bf feat(Tools): add discord incoming webhook for sending messages (#7852) 2024-10-02 13:18:35 +08:00
omr
5366820a2f fix: corrected typo (#8979) 2024-10-02 12:54:22 +08:00
Hash Brown
5f8a27074e fix: chat API is not bringing the conversation/session history (#8965) 2024-10-01 12:10:36 +08:00
zhuhao
24ba9fdf6c feat: enhance stepfun image generation tool (#8954) 2024-10-01 10:55:54 +08:00
zhuhao
824a0dd63e feat: add qwen2.5-72b and llama3.2 for openrouter (#8956) 2024-10-01 10:55:51 +08:00
ice yao
c2d606d587 chore: remove unexecuted scripts to avoid ambiguity (#8946) 2024-10-01 09:15:18 +08:00
omr
2deaece7e2 refactor: remove unnecessary comment (#8949) 2024-10-01 09:14:49 +08:00
CXwudi
0d84221b2c chore: sort Gemini models (#8951) 2024-10-01 09:14:36 +08:00
CXwudi
cdd7e55a88 chore: add missing models from Voyage (#8950) 2024-10-01 09:14:21 +08:00
701 changed files with 10990 additions and 16529 deletions

View File

@@ -125,7 +125,7 @@ jobs:
with:
images: ${{ env[matrix.image_name_env] }}
tags: |
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') && !contains(github.ref, '-') }}
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') && !contains(github.ref, '-beta') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
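Illustrative note, not part of this diff: a rough Python rendering of the two `enable` expressions above, checked against tag names that appear elsewhere in this compare, shows how the change affects which tags get `latest`:

# Sketch only; mirrors the GitHub Actions expressions, it is not the workflow itself.
def latest_enabled_without_dash(ref: str) -> bool:
    # startsWith(github.ref, 'refs/tags/') && !contains(github.ref, '-')
    return ref.startswith("refs/tags/") and "-" not in ref

def latest_enabled_without_beta(ref: str) -> bool:
    # startsWith(github.ref, 'refs/tags/') && !contains(github.ref, '-beta')
    return ref.startswith("refs/tags/") and "-beta" not in ref

for ref in ("refs/tags/0.9.1-fix1", "refs/tags/0.10.0-beta1", "refs/tags/0.9.2"):
    print(ref, latest_enabled_without_dash(ref), latest_enabled_without_beta(ref))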

View File

@@ -68,7 +68,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
プロンプトの作成、モデルパフォーマンスの比較が行え、チャットベースのアプリに音声合成などの機能も追加できます。
**4. RAGパイプライン**:
ドキュメントの取り込みから検索までをカバーする広範なRAG機能ができます。ほかにもPDF、PPT、その他の一般的なドキュメントフォーマットからのテキスト抽出のサーポイントも提供します。
ドキュメントの取り込みから検索までをカバーする広範なRAG機能ができます。ほかにもPDF、PPT、その他の一般的なドキュメントフォーマットからのテキスト抽出のサポートも提供します。
**5. エージェント機能**:
LLM Function CallingやReActに基づくエージェントの定義が可能で、AIエージェント用のプリビルトまたはカスタムツールを追加できます。Difyには、Google検索、DALL·E、Stable Diffusion、WolframAlphaなどのAIエージェント用の50以上の組み込みツールが提供します。

View File

@@ -39,7 +39,7 @@ DB_DATABASE=dify
# Storage configuration
# use for store upload files, private keys...
# storage type: local, s3, azure-blob, google-storage, tencent-cos, huawei-obs, volcengine-tos
# storage type: local, s3, azure-blob, google-storage, tencent-cos, huawei-obs, volcengine-tos, baidu-obs
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
S3_USE_AWS_MANAGED_IAM=false
@@ -79,6 +79,12 @@ HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
# Baidu OBS Storage Configuration
BAIDU_OBS_BUCKET_NAME=your-bucket-name
BAIDU_OBS_SECRET_KEY=your-secret-key
BAIDU_OBS_ACCESS_KEY=your-access-key
BAIDU_OBS_ENDPOINT=your-server-url
# OCI Storage configuration
OCI_ENDPOINT=your-endpoint
OCI_BUCKET_NAME=your-bucket-name
@@ -201,8 +207,6 @@ OPENSEARCH_SECURE=true
UPLOAD_FILE_SIZE_LIMIT=15
UPLOAD_FILE_BATCH_LIMIT=5
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
# Model Configuration
MULTIMODAL_SEND_IMAGE_FORMAT=base64
@@ -277,7 +281,6 @@ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800
# App configuration
APP_MAX_EXECUTION_TIME=1200

View File

@@ -1,15 +1,8 @@
{
"version": "0.2.0",
"compounds": [
{
"name": "Launch Flask and Celery",
"configurations": ["Python: Flask", "Python: Celery"]
}
],
"configurations": [
{
"name": "Python: Flask",
"consoleName": "Flask",
"type": "debugpy",
"request": "launch",
"python": "${workspaceFolder}/.venv/bin/python",
@@ -24,12 +17,12 @@
},
"args": [
"run",
"--host=0.0.0.0",
"--port=5001"
]
},
{
"name": "Python: Celery",
"consoleName": "Celery",
"type": "debugpy",
"request": "launch",
"python": "${workspaceFolder}/.venv/bin/python",
@@ -52,10 +45,10 @@
"-c",
"1",
"--loglevel",
"DEBUG",
"info",
"-Q",
"dataset,generation,mail,ops_trace,app_deletion"
]
}
},
]
}
}

View File

@@ -117,7 +117,7 @@ def create_app() -> Flask:
logging.basicConfig(
level=app.config.get("LOG_LEVEL"),
format=app.config["LOG_FORMAT"],
format=app.config.get("LOG_FORMAT"),
datefmt=app.config.get("LOG_DATEFORMAT"),
handlers=log_handlers,
force=True,
@@ -134,7 +134,6 @@ def create_app() -> Flask:
return datetime.utcfromtimestamp(seconds).astimezone(timezone).timetuple()
for handler in logging.root.handlers:
assert handler.formatter
handler.formatter.converter = time_converter
initialize_extensions(app)
register_blueprints(app)

View File

@@ -19,7 +19,7 @@ from extensions.ext_redis import redis_client
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
from models import Tenant
from models.account import Tenant
from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
@@ -449,14 +449,14 @@ def convert_to_agent_apps():
# fetch first 1000 apps
sql_query = """SELECT a.id AS id FROM apps a
INNER JOIN app_model_configs am ON a.app_model_config_id=am.id
WHERE a.mode = 'chat'
AND am.agent_mode is not null
WHERE a.mode = 'chat'
AND am.agent_mode is not null
AND (
am.agent_mode like '%"strategy": "function_call"%'
am.agent_mode like '%"strategy": "function_call"%'
OR am.agent_mode like '%"strategy": "react"%'
)
)
AND (
am.agent_mode like '{"enabled": true%'
am.agent_mode like '{"enabled": true%'
OR am.agent_mode like '{"max_iteration": %'
) ORDER BY a.created_at DESC LIMIT 1000
"""

View File

@@ -1,4 +1,4 @@
from typing import Annotated, Literal, Optional
from typing import Annotated, Optional
from pydantic import AliasChoices, Field, HttpUrl, NegativeInt, NonNegativeInt, PositiveInt, computed_field
from pydantic_settings import BaseSettings
@@ -11,11 +11,11 @@ class SecurityConfig(BaseSettings):
Security-related configurations for the application
"""
SECRET_KEY: str = Field(
SECRET_KEY: Optional[str] = Field(
description="Secret key for secure session cookie signing."
"Make sure you are changing this key for your deployment with a strong key."
"Generate a strong key using `openssl rand -base64 42` or set via the `SECRET_KEY` environment variable.",
default="",
default=None,
)
RESET_PASSWORD_TOKEN_EXPIRY_HOURS: PositiveInt = Field(
@@ -177,16 +177,6 @@ class FileUploadConfig(BaseSettings):
default=10,
)
UPLOAD_VIDEO_FILE_SIZE_LIMIT: NonNegativeInt = Field(
description="video file size limit in Megabytes for uploading files",
default=100,
)
UPLOAD_AUDIO_FILE_SIZE_LIMIT: NonNegativeInt = Field(
description="audio file size limit in Megabytes for uploading files",
default=50,
)
BATCH_UPLOAD_LIMIT: NonNegativeInt = Field(
description="Maximum number of files allowed in a batch upload operation",
default=20,
@@ -359,8 +349,8 @@ class WorkflowConfig(BaseSettings):
)
MAX_VARIABLE_SIZE: PositiveInt = Field(
description="Maximum size in bytes for a single variable in workflows. Default to 200 KB.",
default=200 * 1024,
description="Maximum size in bytes for a single variable in workflows. Default to 5KB.",
default=5 * 1024,
)
@@ -478,7 +468,6 @@ class RagEtlConfig(BaseSettings):
Configuration for RAG ETL processes
"""
# TODO: This config is not only for rag etl, it is also for file upload, we should move it to file upload config
ETL_TYPE: str = Field(
description="RAG ETL type ('dify' or 'Unstructured'), default to 'dify'",
default="dify",
@@ -540,7 +529,7 @@ class IndexingConfig(BaseSettings):
class ImageFormatConfig(BaseSettings):
MULTIMODAL_SEND_IMAGE_FORMAT: Literal["base64", "url"] = Field(
MULTIMODAL_SEND_IMAGE_FORMAT: str = Field(
description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64",
default="base64",
)
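Side note on the SECRET_KEY field earlier in this file, whose description suggests `openssl rand -base64 42`: a rough Python equivalent (illustrative only, not part of this diff) is:

import base64
import os

# Roughly equivalent to `openssl rand -base64 42`: 42 random bytes, base64-encoded.
print(base64.b64encode(os.urandom(42)).decode())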

View File

@@ -8,6 +8,7 @@ from configs.middleware.cache.redis_config import RedisConfig
from configs.middleware.storage.aliyun_oss_storage_config import AliyunOSSStorageConfig
from configs.middleware.storage.amazon_s3_storage_config import S3StorageConfig
from configs.middleware.storage.azure_blob_storage_config import AzureBlobStorageConfig
from configs.middleware.storage.baidu_obs_storage_config import BaiduOBSStorageConfig
from configs.middleware.storage.google_cloud_storage_config import GoogleCloudStorageConfig
from configs.middleware.storage.huawei_obs_storage_config import HuaweiCloudOBSStorageConfig
from configs.middleware.storage.oci_storage_config import OCIStorageConfig
@@ -200,12 +201,13 @@ class MiddlewareConfig(
StorageConfig,
AliyunOSSStorageConfig,
AzureBlobStorageConfig,
BaiduOBSStorageConfig,
GoogleCloudStorageConfig,
TencentCloudCOSStorageConfig,
HuaweiCloudOBSStorageConfig,
VolcengineTOSStorageConfig,
S3StorageConfig,
OCIStorageConfig,
S3StorageConfig,
TencentCloudCOSStorageConfig,
VolcengineTOSStorageConfig,
# configs of vdb and vdb providers
VectorStoreConfig,
AnalyticdbConfig,

View File

@@ -0,0 +1,29 @@
from typing import Optional
from pydantic import BaseModel, Field
class BaiduOBSStorageConfig(BaseModel):
"""
Configuration settings for Baidu Object Storage Service (OBS)
"""
BAIDU_OBS_BUCKET_NAME: Optional[str] = Field(
description="Name of the Baidu OBS bucket to store and retrieve objects (e.g., 'my-obs-bucket')",
default=None,
)
BAIDU_OBS_ACCESS_KEY: Optional[str] = Field(
description="Access Key ID for authenticating with Baidu OBS",
default=None,
)
BAIDU_OBS_SECRET_KEY: Optional[str] = Field(
description="Secret Access Key for authenticating with Baidu OBS",
default=None,
)
BAIDU_OBS_ENDPOINT: Optional[str] = Field(
description="URL of the Baidu OSS endpoint for your chosen region (e.g., 'https://.bj.bcebos.com')",
default=None,
)
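Illustrative note, not part of this diff: the new settings class above maps one-to-one onto the BAIDU_OBS_* variables added to .env.example earlier in this compare. A hypothetical manual instantiation from the environment might look like the sketch below; in Dify these fields are normally populated through the pydantic-settings based MiddlewareConfig rather than built by hand.

import os

from configs.middleware.storage.baidu_obs_storage_config import BaiduOBSStorageConfig

# Hypothetical example; values come from the BAIDU_OBS_* variables shown in .env.example.
baidu_obs = BaiduOBSStorageConfig(
    BAIDU_OBS_BUCKET_NAME=os.getenv("BAIDU_OBS_BUCKET_NAME"),
    BAIDU_OBS_ACCESS_KEY=os.getenv("BAIDU_OBS_ACCESS_KEY"),
    BAIDU_OBS_SECRET_KEY=os.getenv("BAIDU_OBS_SECRET_KEY"),
    BAIDU_OBS_ENDPOINT=os.getenv("BAIDU_OBS_ENDPOINT"),
)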

View File

@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.10.0-beta1",
default="0.9.1-fix1",
)
COMMIT_SHA: str = Field(

View File

@@ -1,21 +1,2 @@
from configs import dify_config
HIDDEN_VALUE = "[__HIDDEN__]"
UUID_NIL = "00000000-0000-0000-0000-000000000000"
IMAGE_EXTENSIONS = ["jpg", "jpeg", "png", "webp", "gif", "svg"]
IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS])
VIDEO_EXTENSIONS = ["mp4", "mov", "mpeg", "mpga"]
VIDEO_EXTENSIONS.extend([ext.upper() for ext in VIDEO_EXTENSIONS])
AUDIO_EXTENSIONS = ["mp3", "m4a", "wav", "webm", "amr"]
AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS])
DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "pdf", "html", "htm", "xlsx", "xls", "docx", "csv"]
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
if dify_config.ETL_TYPE == "Unstructured":
DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "pdf", "html", "htm", "xlsx", "xls"]
DOCUMENT_EXTENSIONS.extend(("docx", "csv", "eml", "msg", "pptx", "ppt", "xml", "epub"))
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])

View File

@@ -1,9 +1,7 @@
from contextvars import ContextVar
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.entities.variable_pool import VariablePool
tenant_id: ContextVar[str] = ContextVar("tenant_id")
workflow_variable_pool: ContextVar["VariablePool"] = ContextVar("workflow_variable_pool")
workflow_variable_pool: ContextVar[VariablePool] = ContextVar("workflow_variable_pool")

View File

@@ -22,8 +22,7 @@ from fields.conversation_fields import (
)
from libs.helper import DatetimeString
from libs.login import login_required
from models import Conversation, EndUser, Message, MessageAnnotation
from models.model import AppMode
from models.model import AppMode, Conversation, EndUser, Message, MessageAnnotation
class CompletionConversationApi(Resource):

View File

@@ -12,7 +12,7 @@ from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from fields.app_fields import app_site_fields
from libs.login import login_required
from models import Site
from models.model import Site
def parse_app_site_args():

View File

@@ -13,14 +13,14 @@ from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from factories import variable_factory
from core.app.segments import factory
from core.errors.error import AppInvokeQuotaExceededError
from fields.workflow_fields import workflow_fields
from fields.workflow_run_fields import workflow_run_node_execution_fields
from libs import helper
from libs.helper import TimestampField, uuid_value
from libs.login import current_user, login_required
from models import App
from models.model import AppMode
from models.model import App, AppMode
from services.app_dsl_service import AppDslService
from services.app_generate_service import AppGenerateService
from services.errors.app import WorkflowHashNotEqualError
@@ -101,13 +101,9 @@ class DraftWorkflowApi(Resource):
try:
environment_variables_list = args.get("environment_variables") or []
environment_variables = [
variable_factory.build_variable_from_mapping(obj) for obj in environment_variables_list
]
environment_variables = [factory.build_variable_from_mapping(obj) for obj in environment_variables_list]
conversation_variables_list = args.get("conversation_variables") or []
conversation_variables = [
variable_factory.build_variable_from_mapping(obj) for obj in conversation_variables_list
]
conversation_variables = [factory.build_variable_from_mapping(obj) for obj in conversation_variables_list]
workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=args["graph"],
@@ -277,15 +273,17 @@ class DraftWorkflowRunApi(Resource):
parser.add_argument("files", type=list, required=False, location="json")
args = parser.parse_args()
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
invoke_from=InvokeFrom.DEBUGGER,
streaming=True,
)
try:
response = AppGenerateService.generate(
app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.DEBUGGER, streaming=True
)
return helper.compact_generate_response(response)
return helper.compact_generate_response(response)
except (ValueError, AppInvokeQuotaExceededError) as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class WorkflowTaskStopApi(Resource):

View File

@@ -7,8 +7,7 @@ from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
from libs.login import login_required
from models import App
from models.model import AppMode
from models.model import App, AppMode
from services.workflow_app_service import WorkflowAppService

View File

@@ -13,8 +13,7 @@ from fields.workflow_run_fields import (
)
from libs.helper import uuid_value
from libs.login import login_required
from models import App
from models.model import AppMode
from models.model import App, AppMode
from services.workflow_run_service import WorkflowRunService

View File

@@ -10,11 +10,11 @@ from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from enums import WorkflowRunTriggeredFrom
from extensions.ext_database import db
from libs.helper import DatetimeString
from libs.login import login_required
from models.model import AppMode
from models.workflow import WorkflowRunTriggeredFrom
class WorkflowDailyRunsStatistic(Resource):

View File

@@ -5,8 +5,7 @@ from typing import Optional, Union
from controllers.console.app.error import AppNotFoundError
from extensions.ext_database import db
from libs.login import current_user
from models import App
from models.model import AppMode
from models.model import App, AppMode
def get_app_model(view: Optional[Callable] = None, *, mode: Union[AppMode, list[AppMode]] = None):

View File

@@ -15,7 +15,7 @@ from controllers.console.setup import setup_required
from extensions.ext_database import db
from libs.helper import email as email_validate
from libs.password import hash_password, valid_password
from models import Account
from models.account import Account
from services.account_service import AccountService
from services.errors.account import RateLimitExceededError

View File

@@ -9,7 +9,7 @@ from controllers.console import api
from controllers.console.setup import setup_required
from libs.helper import email, get_remote_ip
from libs.password import valid_password
from models import Account
from models.account import Account
from services.account_service import AccountService, TenantService

View File

@@ -11,8 +11,7 @@ from constants.languages import languages
from extensions.ext_database import db
from libs.helper import get_remote_ip
from libs.oauth import GitHubOAuth, GoogleOAuth, OAuthUserInfo
from models import Account
from models.account import AccountStatus
from models.account import Account, AccountStatus
from services.account_service import AccountService, RegisterService, TenantService
from .. import api

View File

@@ -15,7 +15,8 @@ from core.rag.extractor.notion_extractor import NotionExtractor
from extensions.ext_database import db
from fields.data_source_fields import integrate_list_fields, integrate_notion_info_list_fields
from libs.login import login_required
from models import DataSourceOauthBinding, Document
from models.dataset import Document
from models.source import DataSourceOauthBinding
from services.dataset_service import DatasetService, DocumentService
from tasks.document_indexing_sync_task import document_indexing_sync_task

View File

@@ -24,8 +24,8 @@ from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from models.dataset import Dataset, DatasetPermissionEnum, Document, DocumentSegment
from models.model import ApiToken, UploadFile
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService

View File

@@ -46,7 +46,8 @@ from fields.document_fields import (
document_with_segments_fields,
)
from libs.login import login_required
from models import Dataset, DatasetProcessRule, Document, DocumentSegment, UploadFile
from models.dataset import Dataset, DatasetProcessRule, Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DatasetService, DocumentService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task

View File

@@ -24,7 +24,7 @@ from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.segment_fields import segment_fields
from libs.login import login_required
from models import DocumentSegment
from models.dataset import DocumentSegment
from services.dataset_service import DatasetService, DocumentService, SegmentService
from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task
from tasks.disable_segment_from_index_task import disable_segment_from_index_task

View File

@@ -1,12 +1,9 @@
import urllib.parse
from flask import request
from flask_login import current_user
from flask_restful import Resource, marshal_with
import services
from configs import dify_config
from constants import DOCUMENT_EXTENSIONS
from controllers.console import api
from controllers.console.datasets.error import (
FileTooLargeError,
@@ -16,10 +13,9 @@ from controllers.console.datasets.error import (
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.helper import ssrf_proxy
from fields.file_fields import file_fields, remote_file_info_fields, upload_config_fields
from fields.file_fields import file_fields, upload_config_fields
from libs.login import login_required
from services.file_service import FileService
from services.file_service import ALLOWED_EXTENSIONS, UNSTRUCTURED_ALLOWED_EXTENSIONS, FileService
PREVIEW_WORDS_LIMIT = 3000
@@ -55,7 +51,7 @@ class FileApi(Resource):
if len(request.files) > 1:
raise TooManyFilesError()
try:
upload_file = FileService.upload_file(file=file, user=current_user)
upload_file = FileService.upload_file(file, current_user)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
@@ -79,24 +75,11 @@ class FileSupportTypeApi(Resource):
@login_required
@account_initialization_required
def get(self):
return {"allowed_extensions": DOCUMENT_EXTENSIONS}
class RemoteFileInfoApi(Resource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", 0)),
}
except Exception as e:
return {"error": str(e)}, 400
etl_type = dify_config.ETL_TYPE
allowed_extensions = UNSTRUCTURED_ALLOWED_EXTENSIONS if etl_type == "Unstructured" else ALLOWED_EXTENSIONS
return {"allowed_extensions": allowed_extensions}
api.add_resource(FileApi, "/files/upload")
api.add_resource(FilePreviewApi, "/files/<uuid:file_id>/preview")
api.add_resource(FileSupportTypeApi, "/files/support-type")
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")

View File

@@ -11,7 +11,7 @@ from controllers.console.wraps import account_initialization_required, cloud_edi
from extensions.ext_database import db
from fields.installed_app_fields import installed_app_list_fields
from libs.login import login_required
from models import App, InstalledApp, RecommendedApp
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService

View File

@@ -7,7 +7,7 @@ from werkzeug.exceptions import NotFound
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.login import login_required
from models import InstalledApp
from models.model import InstalledApp
def installed_app_required(view=None):

View File

@@ -20,7 +20,7 @@ from extensions.ext_database import db
from fields.member_fields import account_fields
from libs.helper import TimestampField, timezone
from libs.login import login_required
from models import AccountIntegrate, InvitationCode
from models.account import AccountIntegrate, InvitationCode
from services.account_service import AccountService
from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError

View File

@@ -360,15 +360,16 @@ class ToolWorkflowProviderCreateApi(Resource):
args = reqparser.parse_args()
return WorkflowToolManageService.create_workflow_tool(
user_id=user_id,
tenant_id=tenant_id,
workflow_app_id=args["workflow_app_id"],
name=args["name"],
label=args["label"],
icon=args["icon"],
description=args["description"],
parameters=args["parameters"],
privacy_policy=args["privacy_policy"],
user_id,
tenant_id,
args["workflow_app_id"],
args["name"],
args["label"],
args["icon"],
args["description"],
args["parameters"],
args["privacy_policy"],
args.get("labels", []),
)

View File

@@ -198,7 +198,7 @@ class WebappLogoWorkspaceApi(Resource):
raise UnsupportedFileTypeError()
try:
upload_file = FileService.upload_file(file=file, user=current_user)
upload_file = FileService.upload_file(file, current_user, True)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)

View File

@@ -0,0 +1,7 @@
from libs.exception import BaseHTTPException
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415

View File

@@ -4,7 +4,7 @@ from werkzeug.exceptions import NotFound
import services
from controllers.files import api
from libs.exception import BaseHTTPException
from controllers.files.error import UnsupportedFileTypeError
from services.account_service import TenantService
from services.file_service import FileService
@@ -21,36 +21,7 @@ class ImagePreviewApi(Resource):
return {"content": "Invalid request."}, 400
try:
generator, mimetype = FileService.get_image_preview(
file_id=file_id,
timestamp=timestamp,
nonce=nonce,
sign=sign,
)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
return Response(generator, mimetype=mimetype)
class FilePreviewApi(Resource):
def get(self, file_id):
file_id = str(file_id)
timestamp = request.args.get("timestamp")
nonce = request.args.get("nonce")
sign = request.args.get("sign")
if not timestamp or not nonce or not sign:
return {"content": "Invalid request."}, 400
try:
generator, mimetype = FileService.get_signed_file_preview(
file_id=file_id,
timestamp=timestamp,
nonce=nonce,
sign=sign,
)
generator, mimetype = FileService.get_image_preview(file_id, timestamp, nonce, sign)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
@@ -78,11 +49,4 @@ class WorkspaceWebappLogoApi(Resource):
api.add_resource(ImagePreviewApi, "/files/<uuid:file_id>/image-preview")
api.add_resource(FilePreviewApi, "/files/<uuid:file_id>/file-preview")
api.add_resource(WorkspaceWebappLogoApi, "/files/workspaces/<uuid:workspace_id>/webapp-logo")
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415

View File

@@ -3,8 +3,8 @@ from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden, NotFound
from controllers.files import api
from controllers.files.error import UnsupportedFileTypeError
from core.tools.tool_file_manager import ToolFileManager
from libs.exception import BaseHTTPException
class ToolFilePreviewApi(Resource):
@@ -43,9 +43,3 @@ class ToolFilePreviewApi(Resource):
api.add_resource(ToolFilePreviewApi, "/files/tools/<uuid:file_id>.<string:extension>")
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415

View File

@@ -4,6 +4,7 @@ from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from constants import UUID_NIL
from controllers.service_api import api
from controllers.service_api.app.error import (
AppUnavailableError,
@@ -107,6 +108,7 @@ class ChatApi(Resource):
parser.add_argument("conversation_id", type=uuid_value, location="json")
parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")
parser.add_argument("auto_generate_name", type=bool, required=False, default=True, location="json")
parser.add_argument("parent_message_id", type=uuid_value, required=False, default=UUID_NIL, location="json")
args = parser.parse_args()

View File

@@ -1,5 +1,3 @@
import urllib.parse
from flask import request
from flask_restful import marshal_with
@@ -7,8 +5,7 @@ import services
from controllers.web import api
from controllers.web.error import FileTooLargeError, NoFileUploadedError, TooManyFilesError, UnsupportedFileTypeError
from controllers.web.wraps import WebApiResource
from core.helper import ssrf_proxy
from fields.file_fields import file_fields, remote_file_info_fields
from fields.file_fields import file_fields
from services.file_service import FileService
@@ -34,19 +31,4 @@ class FileApi(WebApiResource):
return upload_file, 201
class RemoteFileInfoApi(WebApiResource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", 0)),
}
except Exception as e:
return {"error": str(e)}, 400
api.add_resource(FileApi, "/files/upload")
api.add_resource(RemoteFileInfoApi, "/remote-files/<path:url>")

View File

@@ -22,7 +22,6 @@ from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotIni
from core.model_runtime.errors.invoke import InvokeError
from fields.conversation_fields import message_file_fields
from fields.message_fields import agent_thought_fields
from fields.raws import FilesContainedField
from libs import helper
from libs.helper import TimestampField, uuid_value
from models.model import AppMode
@@ -59,7 +58,7 @@ class MessageListApi(WebApiResource):
"id": fields.String,
"conversation_id": fields.String,
"parent_message_id": fields.String,
"inputs": FilesContainedField,
"inputs": fields.Raw,
"query": fields.String,
"answer": fields.String(attribute="re_sign_file_url_answer"),
"message_files": fields.List(fields.Nested(message_file_fields), attribute="files"),

View File

@@ -16,14 +16,13 @@ from core.app.entities.app_invoke_entities import (
)
from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.file import file_manager
from core.file.message_file_parser import MessageFileParser
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities import (
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
LLMUsage,
PromptMessage,
PromptMessageContent,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
@@ -41,8 +40,8 @@ from core.tools.entities.tool_entities import (
from core.tools.tool.dataset_retriever_tool import DatasetRetrieverTool
from core.tools.tool.tool import Tool
from core.tools.tool_manager import ToolManager
from core.tools.utils.tool_parameter_converter import ToolParameterConverter
from extensions.ext_database import db
from factories import file_factory
from models.model import Conversation, Message, MessageAgentThought
from models.tools import ToolConversationVariables
@@ -67,6 +66,23 @@ class BaseAgentRunner(AppRunner):
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance = None,
) -> None:
"""
Agent runner
:param tenant_id: tenant id
:param application_generate_entity: application generate entity
:param conversation: conversation
:param app_config: app generate entity
:param model_config: model config
:param config: dataset config
:param queue_manager: queue manager
:param message: message
:param user_id: user id
:param memory: memory
:param prompt_messages: prompt messages
:param variables_pool: variables pool
:param db_variables: db variables
:param model_instance: model instance
"""
self.tenant_id = tenant_id
self.application_generate_entity = application_generate_entity
self.conversation = conversation
@@ -164,7 +180,7 @@ class BaseAgentRunner(AppRunner):
if parameter.form != ToolParameter.ToolParameterForm.LLM:
continue
parameter_type = parameter.type.as_normal_type()
parameter_type = ToolParameterConverter.get_parameter_type(parameter.type)
enum = []
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
@@ -249,7 +265,7 @@ class BaseAgentRunner(AppRunner):
if parameter.form != ToolParameter.ToolParameterForm.LLM:
continue
parameter_type = parameter.type.as_normal_type()
parameter_type = ToolParameterConverter.get_parameter_type(parameter.type)
enum = []
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
@@ -495,24 +511,26 @@ class BaseAgentRunner(AppRunner):
return result
def organize_agent_user_prompt(self, message: Message) -> UserPromptMessage:
message_file_parser = MessageFileParser(
tenant_id=self.tenant_id,
app_id=self.app_config.app_id,
)
files = message.message_files
if files:
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
if file_extra_config:
file_objs = file_factory.build_from_message_files(
message_files=files, tenant_id=self.tenant_id, config=file_extra_config
)
file_objs = message_file_parser.transform_message_files(files, file_extra_config)
else:
file_objs = []
if not file_objs:
return UserPromptMessage(content=message.query)
else:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
prompt_message_contents = [TextPromptMessageContent(data=message.query)]
for file_obj in file_objs:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_message_contents.append(file_obj.prompt_message_content)
return UserPromptMessage(content=prompt_message_contents)
else:

View File

@@ -1,11 +1,9 @@
import json
from core.agent.cot_agent_runner import CotAgentRunner
from core.file import file_manager
from core.model_runtime.entities import (
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
PromptMessageContent,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
@@ -34,10 +32,9 @@ class CotChatAgentRunner(CotAgentRunner):
Organize user query
"""
if self.files:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=query))
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file_obj in self.files:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:

View File

@@ -7,15 +7,10 @@ from typing import Any, Union
from core.agent.base_agent_runner import BaseAgentRunner
from core.app.apps.base_app_queue_manager import PublishFrom
from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent
from core.file import file_manager
from core.model_runtime.entities import (
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
LLMResult,
LLMResultChunk,
LLMResultChunkDelta,
LLMUsage,
PromptMessage,
PromptMessageContent,
PromptMessageContentType,
SystemPromptMessage,
TextPromptMessageContent,
@@ -395,10 +390,9 @@ class FunctionCallAgentRunner(BaseAgentRunner):
Organize user query
"""
if self.files:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=query))
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file_obj in self.files:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:

View File

@@ -14,7 +14,7 @@ class CotAgentOutputParser:
) -> Generator[Union[str, AgentScratchpadUnit.Action], None, None]:
def parse_action(json_str):
try:
action = json.loads(json_str)
action = json.loads(json_str, strict=False)
action_name = None
action_input = None
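Side note on the `strict=False` change above (illustrative, not from this diff): Python's default JSON parser rejects raw control characters inside string values, which LLM-produced action inputs often contain, while the non-strict parser accepts them:

import json

# Hypothetical LLM output with a literal newline inside a string value.
raw = '{"action": "search", "action_input": "line one\nline two"}'
try:
    json.loads(raw)  # strict=True by default -> "Invalid control character" error
except json.JSONDecodeError as err:
    print("strict parse failed:", err)
print(json.loads(raw, strict=False)["action_input"])  # tolerant parse succeeds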

View File

@@ -53,11 +53,12 @@ class BasicVariablesConfigManager:
VariableEntity(
type=variable_type,
variable=variable.get("variable"),
description=variable.get("description", ""),
description=variable.get("description"),
label=variable.get("label"),
required=variable.get("required", False),
max_length=variable.get("max_length"),
options=variable.get("options", []),
options=variable.get("options"),
default=variable.get("default"),
)
)

View File

@@ -1,12 +1,11 @@
from collections.abc import Sequence
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel, Field
from pydantic import BaseModel
from core.file import FileExtraConfig, FileTransferMethod, FileType
from core.file.file_obj import FileExtraConfig
from core.model_runtime.entities.message_entities import PromptMessageRole
from models.model import AppMode
from models import AppMode
class ModelConfigEntity(BaseModel):
@@ -70,7 +69,7 @@ class PromptTemplateEntity(BaseModel):
ADVANCED = "advanced"
@classmethod
def value_of(cls, value: str):
def value_of(cls, value: str) -> "PromptType":
"""
Get value of given mode.
@@ -94,8 +93,6 @@ class VariableEntityType(str, Enum):
PARAGRAPH = "paragraph"
NUMBER = "number"
EXTERNAL_DATA_TOOL = "external_data_tool"
FILE = "file"
FILE_LIST = "file-list"
class VariableEntity(BaseModel):
@@ -105,14 +102,13 @@ class VariableEntity(BaseModel):
variable: str
label: str
description: str = ""
description: Optional[str] = None
type: VariableEntityType
required: bool = False
max_length: Optional[int] = None
options: Sequence[str] = Field(default_factory=list)
allowed_file_types: Sequence[FileType] = Field(default_factory=list)
allowed_file_extensions: Sequence[str] = Field(default_factory=list)
allowed_file_upload_methods: Sequence[FileTransferMethod] = Field(default_factory=list)
options: Optional[list[str]] = None
default: Optional[str] = None
hint: Optional[str] = None
class ExternalDataVariableEntity(BaseModel):
@@ -140,7 +136,7 @@ class DatasetRetrieveConfigEntity(BaseModel):
MULTIPLE = "multiple"
@classmethod
def value_of(cls, value: str):
def value_of(cls, value: str) -> "RetrieveStrategy":
"""
Get value of given mode.

View File

@@ -1,13 +1,12 @@
from collections.abc import Mapping
from typing import Any
from typing import Any, Optional
from core.file.models import FileExtraConfig
from models import FileUploadConfig
from core.file.file_obj import FileExtraConfig
class FileUploadConfigManager:
@classmethod
def convert(cls, config: Mapping[str, Any], is_vision: bool = True):
def convert(cls, config: Mapping[str, Any], is_vision: bool = True) -> Optional[FileExtraConfig]:
"""
Convert model config to model config
@@ -16,18 +15,19 @@ class FileUploadConfigManager:
"""
file_upload_dict = config.get("file_upload")
if file_upload_dict:
if file_upload_dict.get("enabled"):
data = {
"image_config": {
"number_limits": file_upload_dict["number_limits"],
"transfer_methods": file_upload_dict["allowed_file_upload_methods"],
if file_upload_dict.get("image"):
if "enabled" in file_upload_dict["image"] and file_upload_dict["image"]["enabled"]:
image_config = {
"number_limits": file_upload_dict["image"]["number_limits"],
"transfer_methods": file_upload_dict["image"]["transfer_methods"],
}
}
if is_vision:
data["image_config"]["detail"] = file_upload_dict.get("image", {}).get("detail", "low")
if is_vision:
image_config["detail"] = file_upload_dict["image"]["detail"]
return FileExtraConfig.model_validate(data)
return FileExtraConfig(image_config=image_config)
return None
@classmethod
def validate_and_set_defaults(cls, config: dict, is_vision: bool = True) -> tuple[dict, list[str]]:
@@ -39,7 +39,29 @@ class FileUploadConfigManager:
"""
if not config.get("file_upload"):
config["file_upload"] = {}
else:
FileUploadConfig.model_validate(config["file_upload"])
if not isinstance(config["file_upload"], dict):
raise ValueError("file_upload must be of dict type")
# check image config
if not config["file_upload"].get("image"):
config["file_upload"]["image"] = {"enabled": False}
if config["file_upload"]["image"]["enabled"]:
number_limits = config["file_upload"]["image"]["number_limits"]
if number_limits < 1 or number_limits > 6:
raise ValueError("number_limits must be in [1, 6]")
if is_vision:
detail = config["file_upload"]["image"]["detail"]
if detail not in {"high", "low"}:
raise ValueError("detail must be in ['high', 'low']")
transfer_methods = config["file_upload"]["image"]["transfer_methods"]
if not isinstance(transfer_methods, list):
raise ValueError("transfer_methods must be of list type")
for method in transfer_methods:
if method not in {"remote_url", "local_file"}:
raise ValueError("transfer_methods must be in ['remote_url', 'local_file']")
return config, ["file_upload"]
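Illustrative only, not part of this diff: under the validation rules above, an app model config shaped like the following hypothetical mapping would pass validate_and_set_defaults:

# Hypothetical values chosen to satisfy the checks above:
# 1 <= number_limits <= 6, detail in {"high", "low"}, known transfer_methods.
config = {
    "file_upload": {
        "image": {
            "enabled": True,
            "number_limits": 3,
            "detail": "low",
            "transfer_methods": ["remote_url", "local_file"],
        }
    }
}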

View File

@@ -17,6 +17,6 @@ class WorkflowVariablesConfigManager:
# variables
for variable in user_input_form:
variables.append(VariableEntity.model_validate(variable))
variables.append(VariableEntity(**variable))
return variables

View File

@@ -20,11 +20,10 @@ from core.app.apps.message_based_app_generator import MessageBasedAppGenerator
from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager
from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom
from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
from enums import CreatedByRole
from extensions.ext_database import db
from factories import file_factory
from models.account import Account
from models.model import App, Conversation, EndUser, Message
from models.workflow import Workflow
@@ -96,16 +95,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
# parse files
files = args["files"] if args.get("files") else []
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
else:
file_objs = []
@@ -113,9 +106,8 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
app_config = AdvancedChatAppConfigManager.get_app_config(app_model=app_model, workflow=workflow)
# get tracing instance
trace_manager = TraceQueueManager(
app_id=app_model.id, user_id=user.id if isinstance(user, Account) else user.session_id
)
user_id = user.id if isinstance(user, Account) else user.session_id
trace_manager = TraceQueueManager(app_model.id, user_id)
if invoke_from == InvokeFrom.DEBUGGER:
# always enable retriever resource in debugger mode
@@ -126,9 +118,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id"),

View File

@@ -1,26 +1,30 @@
import logging
import os
from collections.abc import Mapping
from typing import Any, cast
from sqlalchemy import select
from sqlalchemy.orm import Session
from configs import dify_config
from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfig
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.apps.workflow_app_runner import WorkflowBasedAppRunner
from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom
from core.app.apps.workflow_logging_callback import WorkflowLoggingCallback
from core.app.entities.app_invoke_entities import (
AdvancedChatAppGenerateEntity,
InvokeFrom,
)
from core.app.entities.queue_entities import (
QueueAnnotationReplyEvent,
QueueStopEvent,
QueueTextChunkEvent,
)
from core.moderation.base import ModerationError
from core.workflow.callbacks import WorkflowCallback, WorkflowLoggingCallback
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.workflow_entry import WorkflowEntry
from enums import UserFrom
from extensions.ext_database import db
from models.model import App, Conversation, EndUser, Message
from models.workflow import ConversationVariable, WorkflowType
@@ -40,6 +44,12 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
conversation: Conversation,
message: Message,
) -> None:
"""
:param application_generate_entity: application generate entity
:param queue_manager: application queue manager
:param conversation: conversation
:param message: message
"""
super().__init__(queue_manager)
self.application_generate_entity = application_generate_entity
@@ -47,6 +57,10 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
self.message = message
def run(self) -> None:
"""
Run application
:return:
"""
app_config = self.application_generate_entity.app_config
app_config = cast(AdvancedChatAppConfig, app_config)
@@ -67,7 +81,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
user_id = self.application_generate_entity.user_id
workflow_callbacks: list[WorkflowCallback] = []
if dify_config.DEBUG:
if bool(os.environ.get("DEBUG", "False").lower() == "true"):
workflow_callbacks.append(WorkflowLoggingCallback())
if self.application_generate_entity.single_iteration_run:
@@ -184,6 +198,15 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
query: str,
message_id: str,
) -> bool:
"""
Handle input moderation
:param app_record: app record
:param app_generate_entity: application generate entity
:param inputs: inputs
:param query: query
:param message_id: message id
:return:
"""
try:
# process sensitive_word_avoidance
_, inputs, query = self.moderation_for_inputs(
@@ -203,6 +226,14 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
def handle_annotation_reply(
self, app_record: App, message: Message, query: str, app_generate_entity: AdvancedChatAppGenerateEntity
) -> bool:
"""
Handle annotation reply
:param app_record: app record
:param message: message
:param query: query
:param app_generate_entity: application generate entity
"""
# annotation reply
annotation_reply = self.query_app_annotations_to_reply(
app_record=app_record,
message=message,
@@ -224,6 +255,8 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
def _complete_with_stream_output(self, text: str, stopped_by: QueueStopEvent.StopBy) -> None:
"""
Direct output
:param text: text
:return:
"""
self._publish_event(QueueTextChunkEvent(text=text))

View File

@@ -1,7 +1,7 @@
import json
import logging
import time
from collections.abc import Generator, Mapping
from collections.abc import Generator
from typing import Any, Optional, Union
from constants.tts_auto_play_timeout import TTS_AUTO_PLAY_TIMEOUT, TTS_AUTO_PLAY_YIELD_CPU_TIME
@@ -49,7 +49,6 @@ from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.ops_trace_manager import TraceQueueManager
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from enums.workflow_nodes import NodeType
from events.message_event import message_was_created
from extensions.ext_database import db
from models.account import Account
@@ -113,7 +112,6 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
self._task_state = WorkflowTaskState()
self._conversation_name_generate_thread = None
self._recorded_files: list[Mapping[str, Any]] = []
def process(self):
"""
@@ -292,10 +290,6 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
elif isinstance(event, QueueNodeSucceededEvent):
workflow_node_execution = self._handle_workflow_node_execution_success(event)
# Record files if it's an answer node or end node
if event.node_type in [NodeType.ANSWER, NodeType.END]:
self._recorded_files.extend(self._fetch_files_from_node_outputs(event.outputs or {}))
response = self._workflow_node_finish_to_stream_response(
event=event,
task_id=self._application_generate_entity.task_id,
@@ -362,7 +356,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
start_at=graph_runtime_state.start_at,
total_tokens=graph_runtime_state.total_tokens,
total_steps=graph_runtime_state.node_run_steps,
outputs=event.outputs,
outputs=json.dumps(event.outputs) if event.outputs else None,
conversation_id=self._conversation.id,
trace_manager=trace_manager,
)
@@ -534,7 +528,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
del extras["metadata"]["annotation_reply"]
return MessageEndStreamResponse(
task_id=self._application_generate_entity.task_id, id=self._message.id, files=self._recorded_files, **extras
task_id=self._application_generate_entity.task_id, id=self._message.id, **extras
)
def _handle_output_moderation_chunk(self, text: str) -> bool:

View File

@@ -17,12 +17,12 @@ from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskSt
from core.app.apps.message_based_app_generator import MessageBasedAppGenerator
from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, InvokeFrom
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
from enums import CreatedByRole
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser
from models.account import Account
from models.model import App, EndUser
logger = logging.getLogger(__name__)
@@ -49,12 +49,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
) -> dict: ...
def generate(
self,
app_model: App,
user: Union[Account, EndUser],
args: Any,
invoke_from: InvokeFrom,
stream: bool = True,
self, app_model: App, user: Union[Account, EndUser], args: Any, invoke_from: InvokeFrom, stream: bool = True
) -> Union[dict, Generator[dict, None, None]]:
"""
Generate App response.
@@ -102,19 +97,12 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
# always enable retriever resource in debugger mode
override_model_config_dict["retriever_resource"] = {"enabled": True}
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args.get("files") or []
files = args["files"] if args.get("files") else []
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
else:
file_objs = []
@@ -127,7 +115,8 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
)
# get tracing instance
trace_manager = TraceQueueManager(app_model.id, user.id if isinstance(user, Account) else user.session_id)
user_id = user.id if isinstance(user, Account) else user.session_id
trace_manager = TraceQueueManager(app_model.id, user_id)
# init application generate entity
application_generate_entity = AgentChatAppGenerateEntity(
@@ -135,9 +124,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id"),

View File

@@ -1,92 +1,35 @@
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, Optional
from typing import Any, Optional
from core.app.app_config.entities import VariableEntityType
from core.file import File, FileExtraConfig
from factories import file_factory
if TYPE_CHECKING:
from core.app.app_config.entities import AppConfig, VariableEntity
from enums import CreatedByRole
from core.app.app_config.entities import AppConfig, VariableEntity, VariableEntityType
class BaseAppGenerator:
def _prepare_user_inputs(
self,
*,
user_inputs: Optional[Mapping[str, Any]],
app_config: "AppConfig",
user_id: str,
role: "CreatedByRole",
) -> Mapping[str, Any]:
def _get_cleaned_inputs(self, user_inputs: Optional[Mapping[str, Any]], app_config: AppConfig) -> Mapping[str, Any]:
user_inputs = user_inputs or {}
# Filter input variables from form configuration, handle required fields, default values, and option values
variables = app_config.variables
user_inputs = {var.variable: self._validate_input(inputs=user_inputs, var=var) for var in variables}
user_inputs = {k: self._sanitize_value(v) for k, v in user_inputs.items()}
# Convert files in inputs to File
entity_dictionary = {item.variable: item for item in app_config.variables}
# Convert single file to File
files_inputs = {
k: file_factory.build_from_mapping(
mapping=v,
tenant_id=app_config.tenant_id,
user_id=user_id,
role=role,
config=FileExtraConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_extensions=entity_dictionary[k].allowed_file_extensions,
allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,
),
)
for k, v in user_inputs.items()
if isinstance(v, dict) and entity_dictionary[k].type == VariableEntityType.FILE
}
# Convert list of files to File
file_list_inputs = {
k: file_factory.build_from_mappings(
mappings=v,
tenant_id=app_config.tenant_id,
user_id=user_id,
role=role,
config=FileExtraConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_extensions=entity_dictionary[k].allowed_file_extensions,
allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,
),
)
for k, v in user_inputs.items()
if isinstance(v, list)
# Ensure skip List<File>
and all(isinstance(item, dict) for item in v)
and entity_dictionary[k].type == VariableEntityType.FILE_LIST
}
# Merge all inputs
user_inputs = {**user_inputs, **files_inputs, **file_list_inputs}
filtered_inputs = {var.variable: self._validate_input(inputs=user_inputs, var=var) for var in variables}
filtered_inputs = {k: self._sanitize_value(v) for k, v in filtered_inputs.items()}
return filtered_inputs
# Check if all files are converted to File
if any(filter(lambda v: isinstance(v, dict), user_inputs.values())):
raise ValueError("Invalid input type")
if any(
filter(lambda v: isinstance(v, dict), filter(lambda item: isinstance(item, list), user_inputs.values()))
):
raise ValueError("Invalid input type")
return user_inputs
def _validate_input(self, *, inputs: Mapping[str, Any], var: "VariableEntity"):
def _validate_input(self, *, inputs: Mapping[str, Any], var: VariableEntity):
user_input_value = inputs.get(var.variable)
if not user_input_value:
if var.required:
raise ValueError(f"{var.variable} is required in input form")
else:
return None
if var.type in {
VariableEntityType.TEXT_INPUT,
VariableEntityType.SELECT,
VariableEntityType.PARAGRAPH,
} and not isinstance(user_input_value, str):
if var.required and not user_input_value:
raise ValueError(f"{var.variable} is required in input form")
if not var.required and not user_input_value:
# TODO: should we return None here if the default value is None?
return var.default or ""
if (
var.type
in {
VariableEntityType.TEXT_INPUT,
VariableEntityType.SELECT,
VariableEntityType.PARAGRAPH,
}
and user_input_value
and not isinstance(user_input_value, str)
):
raise ValueError(f"(type '{var.type}') {var.variable} in input form must be a string")
if var.type == VariableEntityType.NUMBER and isinstance(user_input_value, str):
# may raise ValueError if user_input_value is not a valid number
@@ -98,24 +41,12 @@ class BaseAppGenerator:
except ValueError:
raise ValueError(f"{var.variable} in input form must be a valid number")
if var.type == VariableEntityType.SELECT:
options = var.options
options = var.options or []
if user_input_value not in options:
raise ValueError(f"{var.variable} in input form must be one of the following: {options}")
elif var.type in {VariableEntityType.TEXT_INPUT, VariableEntityType.PARAGRAPH}:
if var.max_length and len(user_input_value) > var.max_length:
if var.max_length and user_input_value and len(user_input_value) > var.max_length:
raise ValueError(f"{var.variable} in input form must be less than {var.max_length} characters")
elif var.type == VariableEntityType.FILE:
if not isinstance(user_input_value, dict) and not isinstance(user_input_value, File):
raise ValueError(f"{var.variable} in input form must be a file")
elif var.type == VariableEntityType.FILE_LIST:
if not (
isinstance(user_input_value, list)
and (
all(isinstance(item, dict) for item in user_input_value)
or all(isinstance(item, File) for item in user_input_value)
)
):
raise ValueError(f"{var.variable} in input form must be a list of files")
return user_input_value
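The hunk above swaps between `_get_cleaned_inputs` and `_prepare_user_inputs`; both run the same per-variable validation loop (required check, string type check, select options, max_length). A minimal standalone sketch of that validation pattern, using a simplified `Var` stand-in rather than Dify's `VariableEntity`:

# Standalone sketch of the per-variable input validation pattern shown above.
# "Var" is a simplified stand-in for Dify's VariableEntity, not the real class.
from dataclasses import dataclass, field
from typing import Any, Optional


@dataclass
class Var:
    variable: str
    type: str  # "text-input" | "paragraph" | "select" | "number"
    required: bool = False
    max_length: Optional[int] = None
    options: list[str] = field(default_factory=list)


def validate_input(inputs: dict[str, Any], var: Var) -> Any:
    value = inputs.get(var.variable)
    if not value:
        if var.required:
            raise ValueError(f"{var.variable} is required in input form")
        return None
    if var.type in {"text-input", "paragraph", "select"} and not isinstance(value, str):
        raise ValueError(f"{var.variable} must be a string")
    if var.type == "select" and value not in var.options:
        raise ValueError(f"{var.variable} must be one of {var.options}")
    if var.type in {"text-input", "paragraph"} and var.max_length and len(value) > var.max_length:
        raise ValueError(f"{var.variable} must be less than {var.max_length} characters")
    return value


variables = [Var("name", "text-input", required=True, max_length=10),
             Var("lang", "select", options=["en", "ja"])]
user_inputs = {"name": "Alice", "lang": "en"}
cleaned = {v.variable: validate_input(user_inputs, v) for v in variables}
print(cleaned)  # {'name': 'Alice', 'lang': 'en'}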

View File

@@ -27,7 +27,7 @@ from core.prompt.simple_prompt_transform import ModelMode, SimplePromptTransform
from models.model import App, AppMode, Message, MessageAnnotation
if TYPE_CHECKING:
from core.file.models import File
from core.file.file_obj import FileVar
class AppRunner:
@@ -37,7 +37,7 @@ class AppRunner:
model_config: ModelConfigWithCredentialsEntity,
prompt_template_entity: PromptTemplateEntity,
inputs: dict[str, str],
files: list["File"],
files: list["FileVar"],
query: Optional[str] = None,
) -> int:
"""
@@ -137,7 +137,7 @@ class AppRunner:
model_config: ModelConfigWithCredentialsEntity,
prompt_template_entity: PromptTemplateEntity,
inputs: dict[str, str],
files: list["File"],
files: list["FileVar"],
query: Optional[str] = None,
context: Optional[str] = None,
memory: Optional[TokenBufferMemory] = None,

View File

@@ -17,11 +17,10 @@ from core.app.apps.chat.generate_response_converter import ChatAppGenerateRespon
from core.app.apps.message_based_app_generator import MessageBasedAppGenerator
from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager
from core.app.entities.app_invoke_entities import ChatAppGenerateEntity, InvokeFrom
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
from enums import CreatedByRole
from extensions.ext_database import db
from factories import file_factory
from models.account import Account
from models.model import App, EndUser
@@ -100,19 +99,12 @@ class ChatAppGenerator(MessageBasedAppGenerator):
# always enable retriever resource in debugger mode
override_model_config_dict["retriever_resource"] = {"enabled": True}
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args["files"] if args.get("files") else []
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
else:
file_objs = []
@@ -125,7 +117,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
)
# get tracing instance
trace_manager = TraceQueueManager(app_id=app_model.id)
trace_manager = TraceQueueManager(app_model.id)
# init application generate entity
application_generate_entity = ChatAppGenerateEntity(
@@ -133,17 +125,15 @@ class ChatAppGenerator(MessageBasedAppGenerator):
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
inputs=conversation.inputs if conversation else self._get_cleaned_inputs(inputs, app_config),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id"),
user_id=user.id,
stream=stream,
invoke_from=invoke_from,
extras=extras,
trace_manager=trace_manager,
stream=stream,
)
# init generate records

View File

@@ -17,12 +17,12 @@ from core.app.apps.completion.generate_response_converter import CompletionAppGe
from core.app.apps.message_based_app_generator import MessageBasedAppGenerator
from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager
from core.app.entities.app_invoke_entities import CompletionAppGenerateEntity, InvokeFrom
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
from enums import CreatedByRole
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser, Message
from models.account import Account
from models.model import App, EndUser, Message
from services.errors.app import MoreLikeThisDisabledError
from services.errors.message import MessageNotExistsError
@@ -88,19 +88,12 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
tenant_id=app_model.tenant_id, config=args.get("model_config")
)
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args["files"] if args.get("files") else []
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
else:
file_objs = []
@@ -110,7 +103,6 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
)
# get tracing instance
user_id = user.id if isinstance(user, Account) else user.session_id
trace_manager = TraceQueueManager(app_model.id)
# init application generate entity
@@ -118,7 +110,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
inputs=self._get_cleaned_inputs(inputs, app_config),
query=query,
files=file_objs,
user_id=user.id,
@@ -259,16 +251,10 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
override_model_config_dict["model"] = model_dict
# parse files
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict)
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=message.files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
file_objs = message_file_parser.validate_and_transform_files_arg(message.files, file_extra_config, user)
else:
file_objs = []

View File

@@ -26,7 +26,7 @@ from core.app.entities.task_entities import (
from core.app.task_pipeline.easy_ui_based_generate_task_pipeline import EasyUIBasedGenerateTaskPipeline
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from extensions.ext_database import db
from models import Account
from models.account import Account
from models.model import App, AppMode, AppModelConfig, Conversation, EndUser, Message, MessageFile
from services.errors.app_model_config import AppModelConfigBrokenError
from services.errors.conversation import ConversationCompletedError, ConversationNotExistsError
@@ -238,7 +238,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
type=file.type.value,
transfer_method=file.transfer_method.value,
belongs_to="user",
url=file.remote_url,
url=file.url,
upload_file_id=file.related_id,
created_by_role=("account" if account_id else "end_user"),
created_by=account_id or end_user_id,

View File

@@ -3,7 +3,7 @@ import logging
import os
import threading
import uuid
from collections.abc import Generator, Mapping, Sequence
from collections.abc import Generator
from typing import Any, Literal, Optional, Union, overload
from flask import Flask, current_app
@@ -20,12 +20,13 @@ from core.app.apps.workflow.generate_response_converter import WorkflowAppGenera
from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline
from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity
from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
from enums import CreatedByRole
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser, Workflow
from models.account import Account
from models.model import App, EndUser
from models.workflow import Workflow
logger = logging.getLogger(__name__)
@@ -62,45 +63,48 @@ class WorkflowAppGenerator(BaseAppGenerator):
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: Mapping[str, Any],
args: dict,
invoke_from: InvokeFrom,
stream: bool = True,
call_depth: int = 0,
workflow_thread_pool_id: Optional[str] = None,
):
files: Sequence[Mapping[str, Any]] = args.get("files") or []
"""
Generate App response.
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
:param app_model: App
:param workflow: Workflow
:param user: account or end user
:param args: request args
:param invoke_from: invoke from source
:param stream: is stream
:param call_depth: call depth
:param workflow_thread_pool_id: workflow thread pool id
"""
inputs = args["inputs"]
# parse files
files = args["files"] if args.get("files") else []
message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
system_files = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
if file_extra_config:
file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
else:
file_objs = []
# convert to app config
app_config = WorkflowAppConfigManager.get_app_config(
app_model=app_model,
workflow=workflow,
)
app_config = WorkflowAppConfigManager.get_app_config(app_model=app_model, workflow=workflow)
# get tracing instance
trace_manager = TraceQueueManager(
app_id=app_model.id,
user_id=user.id if isinstance(user, Account) else user.session_id,
)
user_id = user.id if isinstance(user, Account) else user.session_id
trace_manager = TraceQueueManager(app_model.id, user_id)
inputs: Mapping[str, Any] = args["inputs"]
# init application generate entity
application_generate_entity = WorkflowAppGenerateEntity(
task_id=str(uuid.uuid4()),
app_config=app_config,
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
files=system_files,
inputs=self._get_cleaned_inputs(inputs, app_config),
files=file_objs,
user_id=user.id,
stream=stream,
invoke_from=invoke_from,

View File

@@ -1,19 +1,20 @@
import logging
import os
from typing import Optional, cast
from configs import dify_config
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.apps.workflow.app_config_manager import WorkflowAppConfig
from core.app.apps.workflow_app_runner import WorkflowBasedAppRunner
from core.app.apps.workflow_logging_callback import WorkflowLoggingCallback
from core.app.entities.app_invoke_entities import (
InvokeFrom,
WorkflowAppGenerateEntity,
)
from core.workflow.callbacks import WorkflowCallback, WorkflowLoggingCallback
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.workflow_entry import WorkflowEntry
from enums import UserFrom
from extensions.ext_database import db
from models.model import App, EndUser
from models.workflow import WorkflowType
@@ -70,7 +71,7 @@ class WorkflowAppRunner(WorkflowBasedAppRunner):
db.session.close()
workflow_callbacks: list[WorkflowCallback] = []
if dify_config.DEBUG:
if bool(os.environ.get("DEBUG", "False").lower() == "true"):
workflow_callbacks.append(WorkflowLoggingCallback())
# if only single iteration run is requested

View File

@@ -1,3 +1,4 @@
import json
import logging
import time
from collections.abc import Generator
@@ -327,7 +328,9 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa
start_at=graph_runtime_state.start_at,
total_tokens=graph_runtime_state.total_tokens,
total_steps=graph_runtime_state.node_run_steps,
outputs=event.outputs,
outputs=json.dumps(event.outputs)
if isinstance(event, QueueWorkflowSucceededEvent) and event.outputs
else None,
conversation_id=None,
trace_manager=trace_manager,
)

View File

@@ -20,6 +20,7 @@ from core.app.entities.queue_entities import (
QueueWorkflowStartedEvent,
QueueWorkflowSucceededEvent,
)
from core.workflow.entities.node_entities import NodeType
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.graph_engine.entities.event import (
GraphEngineEvent,
@@ -44,7 +45,6 @@ from core.workflow.nodes.base_node import BaseNode
from core.workflow.nodes.iteration.entities import IterationNodeData
from core.workflow.nodes.node_mapping import node_classes
from core.workflow.workflow_entry import WorkflowEntry
from enums import NodeType
from extensions.ext_database import db
from models.model import App
from models.workflow import Workflow

View File

@@ -1,6 +1,7 @@
from typing import Optional
from core.model_runtime.utils.encoders import jsonable_encoder
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
from core.workflow.graph_engine.entities.event import (
GraphEngineEvent,
GraphRunFailedEvent,
@@ -19,8 +20,6 @@ from core.workflow.graph_engine.entities.event import (
ParallelBranchRunSucceededEvent,
)
from .base_workflow_callback import WorkflowCallback
_TEXT_COLOR_MAPPING = {
"blue": "36;1",
"yellow": "33;1",

View File

@@ -1,4 +1,4 @@
from collections.abc import Mapping, Sequence
from collections.abc import Mapping
from enum import Enum
from typing import Any, Optional
@@ -6,7 +6,7 @@ from pydantic import BaseModel, ConfigDict
from core.app.app_config.entities import AppConfig, EasyUIBasedAppConfig, WorkflowUIBasedAppConfig
from core.entities.provider_configuration import ProviderModelBundle
from core.file.models import File
from core.file.file_obj import FileVar
from core.model_runtime.entities.model_entities import AIModelEntity
from core.ops.ops_trace_manager import TraceQueueManager
@@ -22,7 +22,7 @@ class InvokeFrom(Enum):
DEBUGGER = "debugger"
@classmethod
def value_of(cls, value: str):
def value_of(cls, value: str) -> "InvokeFrom":
"""
Get value of given mode.
@@ -81,7 +81,7 @@ class AppGenerateEntity(BaseModel):
app_config: AppConfig
inputs: Mapping[str, Any]
files: Sequence[File]
files: list[FileVar] = []
user_id: str
# extras

View File

@@ -6,9 +6,8 @@ from pydantic import BaseModel, field_validator
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.workflow.entities.base_node_data_entities import BaseNodeData
from core.workflow.entities.node_entities import NodeRunMetadataKey
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from enums import NodeType
class QueueEvent(str, Enum):

View File

@@ -1,4 +1,3 @@
from collections.abc import Mapping
from enum import Enum
from typing import Any, Optional
@@ -120,7 +119,6 @@ class MessageEndStreamResponse(StreamResponse):
event: StreamEvent = StreamEvent.MESSAGE_END
id: str
metadata: dict = {}
files: Optional[list[Mapping[str, Any]]] = None
class MessageFileStreamResponse(StreamResponse):

View File

@@ -1,12 +1,7 @@
from .segment_group import SegmentGroup
from .segments import (
ArrayAnySegment,
ArrayFileSegment,
ArrayNumberSegment,
ArrayObjectSegment,
ArraySegment,
ArrayStringSegment,
FileSegment,
FloatSegment,
IntegerSegment,
NoneSegment,
@@ -20,7 +15,6 @@ from .variables import (
ArrayNumberVariable,
ArrayObjectVariable,
ArrayStringVariable,
FileVariable,
FloatVariable,
IntegerVariable,
NoneVariable,
@@ -52,10 +46,4 @@ __all__ = [
"ArrayNumberVariable",
"ArrayObjectVariable",
"ArraySegment",
"ArrayFileSegment",
"ArrayNumberSegment",
"ArrayObjectSegment",
"ArrayStringSegment",
"FileSegment",
"FileVariable",
]

View File

@@ -2,32 +2,29 @@ from collections.abc import Mapping
from typing import Any
from configs import dify_config
from core.file import File
from core.variables import (
from .exc import VariableError
from .segments import (
ArrayAnySegment,
ArrayFileSegment,
ArrayNumberSegment,
ArrayNumberVariable,
ArrayObjectSegment,
ArrayObjectVariable,
ArrayStringSegment,
ArrayStringVariable,
FileSegment,
FloatSegment,
FloatVariable,
IntegerSegment,
IntegerVariable,
NoneSegment,
ObjectSegment,
Segment,
StringSegment,
)
from .types import SegmentType
from .variables import (
ArrayNumberVariable,
ArrayObjectVariable,
ArrayStringVariable,
FloatVariable,
IntegerVariable,
ObjectVariable,
SecretVariable,
Segment,
SegmentType,
StringSegment,
StringVariable,
Variable,
)
from core.variables.exc import VariableError
def build_variable_from_mapping(mapping: Mapping[str, Any], /) -> Variable:
@@ -74,22 +71,6 @@ def build_segment(value: Any, /) -> Segment:
return FloatSegment(value=value)
if isinstance(value, dict):
return ObjectSegment(value=value)
if isinstance(value, File):
return FileSegment(value=value)
if isinstance(value, list):
items = [build_segment(item) for item in value]
types = {item.value_type for item in items}
if len(types) != 1:
return ArrayAnySegment(value=value)
match types.pop():
case SegmentType.STRING:
return ArrayStringSegment(value=value)
case SegmentType.NUMBER:
return ArrayNumberSegment(value=value)
case SegmentType.OBJECT:
return ArrayObjectSegment(value=value)
case SegmentType.FILE:
return ArrayFileSegment(value=value)
case _:
raise ValueError(f"not supported value {value}")
return ArrayAnySegment(value=value)
raise ValueError(f"not supported value {value}")

View File

@@ -0,0 +1,18 @@
import re
from core.workflow.entities.variable_pool import VariablePool
from . import SegmentGroup, factory
VARIABLE_PATTERN = re.compile(r"\{\{#([a-zA-Z0-9_]{1,50}(?:\.[a-zA-Z_][a-zA-Z0-9_]{0,29}){1,10})#\}\}")
def convert_template(*, template: str, variable_pool: VariablePool):
parts = re.split(VARIABLE_PATTERN, template)
segments = []
for part in filter(lambda x: x, parts):
if "." in part and (value := variable_pool.get(part.split("."))):
segments.append(value)
else:
segments.append(factory.build_segment(part))
return SegmentGroup(value=segments)
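The new `convert_template` helper splits a template on `{{#node_id.variable#}}` markers and resolves each selector against the variable pool. A self-contained sketch of the same splitting technique, with a plain dict standing in for `VariablePool` and string segments in place of `SegmentGroup`:

# Standalone sketch of the template splitting shown above.
# A plain dict replaces Dify's VariablePool; segments are just strings here.
import re

VARIABLE_PATTERN = re.compile(r"\{\{#([a-zA-Z0-9_]{1,50}(?:\.[a-zA-Z_][a-zA-Z0-9_]{0,29}){1,10})#\}\}")

def convert_template(template: str, pool: dict[str, str]) -> list[str]:
    parts = re.split(VARIABLE_PATTERN, template)  # literals and captured selectors alternate
    segments = []
    for part in filter(None, parts):
        if "." in part and part in pool:
            segments.append(pool[part])  # variable reference resolved from the pool
        else:
            segments.append(part)        # plain text kept as-is
    return segments

pool = {"start.name": "Dify"}
print(convert_template("Hello {{#start.name#}}!", pool))  # ['Hello ', 'Dify', '!']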

View File

@@ -5,8 +5,6 @@ from typing import Any
from pydantic import BaseModel, ConfigDict, field_validator
from core.file import File
from .types import SegmentType
@@ -41,9 +39,6 @@ class Segment(BaseModel):
@property
def size(self) -> int:
"""
Return the size of the value in bytes.
"""
return sys.getsizeof(self.value)
def to_object(self) -> Any:
@@ -104,27 +99,13 @@ class ArraySegment(Segment):
def markdown(self) -> str:
items = []
for item in self.value:
items.append(str(item))
if hasattr(item, "to_markdown"):
items.append(item.to_markdown())
else:
items.append(str(item))
return "\n".join(items)
class FileSegment(Segment):
value_type: SegmentType = SegmentType.FILE
value: File
@property
def markdown(self) -> str:
return self.value.markdown
@property
def log(self) -> str:
return str(self.value)
@property
def text(self) -> str:
return str(self.value)
class ArrayAnySegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_ANY
value: Sequence[Any]
@@ -143,15 +124,3 @@ class ArrayNumberSegment(ArraySegment):
class ArrayObjectSegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_OBJECT
value: Sequence[Mapping[str, Any]]
class ArrayFileSegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_FILE
value: Sequence[File]
@property
def markdown(self) -> str:
items = []
for item in self.value:
items.append(item.markdown)
return "\n".join(items)

View File

@@ -11,7 +11,5 @@ class SegmentType(str, Enum):
ARRAY_NUMBER = "array[number]"
ARRAY_OBJECT = "array[object]"
OBJECT = "object"
FILE = "file"
ARRAY_FILE = "array[file]"
GROUP = "group"

View File

@@ -7,7 +7,6 @@ from .segments import (
ArrayNumberSegment,
ArrayObjectSegment,
ArrayStringSegment,
FileSegment,
FloatSegment,
IntegerSegment,
NoneSegment,
@@ -74,7 +73,3 @@ class SecretVariable(StringVariable):
class NoneVariable(NoneSegment, Variable):
value_type: SegmentType = SegmentType.NONE
value: None = None
class FileVariable(FileSegment, Variable):
pass

View File

@@ -1,6 +1,5 @@
import json
import time
from collections.abc import Mapping
from datetime import datetime, timezone
from typing import Any, Optional, Union, cast
@@ -28,15 +27,15 @@ from core.app.entities.task_entities import (
WorkflowStartStreamResponse,
WorkflowTaskState,
)
from core.file import FILE_MODEL_IDENTITY, File
from core.file.file_obj import FileVar
from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.entities.trace_entity import TraceTaskName
from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.tools.tool_manager import ToolManager
from core.workflow.entities.node_entities import NodeType
from core.workflow.enums import SystemVariableKey
from core.workflow.nodes.tool.entities import ToolNodeData
from core.workflow.workflow_entry import WorkflowEntry
from enums import NodeType, WorkflowRunTriggeredFrom
from extensions.ext_database import db
from models.account import Account
from models.model import EndUser
@@ -48,6 +47,7 @@ from models.workflow import (
WorkflowNodeExecutionTriggeredFrom,
WorkflowRun,
WorkflowRunStatus,
WorkflowRunTriggeredFrom,
)
@@ -113,7 +113,7 @@ class WorkflowCycleManage:
start_at: float,
total_tokens: int,
total_steps: int,
outputs: Mapping[str, Any] | None = None,
outputs: Optional[str] = None,
conversation_id: Optional[str] = None,
trace_manager: Optional[TraceQueueManager] = None,
) -> WorkflowRun:
@@ -129,10 +129,8 @@ class WorkflowCycleManage:
"""
workflow_run = self._refetch_workflow_run(workflow_run.id)
outputs = WorkflowEntry.handle_special_values(outputs)
workflow_run.status = WorkflowRunStatus.SUCCEEDED.value
workflow_run.outputs = json.dumps(outputs) if outputs else None
workflow_run.outputs = outputs
workflow_run.elapsed_time = time.perf_counter() - start_at
workflow_run.total_tokens = total_tokens
workflow_run.total_steps = total_steps
@@ -262,11 +260,10 @@ class WorkflowCycleManage:
inputs = WorkflowEntry.handle_special_values(event.inputs)
outputs = WorkflowEntry.handle_special_values(event.outputs)
process_data = WorkflowEntry.handle_special_values(event.process_data)
workflow_node_execution.status = WorkflowNodeExecutionStatus.SUCCEEDED.value
workflow_node_execution.inputs = json.dumps(inputs) if inputs else None
workflow_node_execution.process_data = json.dumps(process_data) if process_data else None
workflow_node_execution.process_data = json.dumps(event.process_data) if event.process_data else None
workflow_node_execution.outputs = json.dumps(outputs) if outputs else None
workflow_node_execution.execution_metadata = (
json.dumps(jsonable_encoder(event.execution_metadata)) if event.execution_metadata else None
@@ -290,13 +287,12 @@ class WorkflowCycleManage:
inputs = WorkflowEntry.handle_special_values(event.inputs)
outputs = WorkflowEntry.handle_special_values(event.outputs)
process_data = WorkflowEntry.handle_special_values(event.process_data)
workflow_node_execution.status = WorkflowNodeExecutionStatus.FAILED.value
workflow_node_execution.error = event.error
workflow_node_execution.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow_node_execution.inputs = json.dumps(inputs) if inputs else None
workflow_node_execution.process_data = json.dumps(process_data) if process_data else None
workflow_node_execution.process_data = json.dumps(event.process_data) if event.process_data else None
workflow_node_execution.outputs = json.dumps(outputs) if outputs else None
workflow_node_execution.elapsed_time = (workflow_node_execution.finished_at - event.start_at).total_seconds()
@@ -613,11 +609,11 @@ class WorkflowCycleManage:
if not outputs_dict:
return []
files = [self._fetch_files_from_variable_value(output_value) for output_value in outputs_dict.values()]
# Remove None
files = [file for file in files if file]
# Flatten list
files = [file for sublist in files for file in sublist]
files = []
for output_var, output_value in outputs_dict.items():
file_vars = self._fetch_files_from_variable_value(output_value)
if file_vars:
files.extend(file_vars)
return files
@@ -643,7 +639,7 @@ class WorkflowCycleManage:
return files
def _get_file_var_from_value(self, value: Union[dict, list]) -> Mapping[str, str | int | None] | None:
def _get_file_var_from_value(self, value: Union[dict, list]) -> Optional[dict]:
"""
Get file var from value
:param value: variable value
@@ -652,11 +648,14 @@ class WorkflowCycleManage:
if not value:
return None
if isinstance(value, dict) and value.get("model_identity") == FILE_MODEL_IDENTITY:
return value
elif isinstance(value, File):
if isinstance(value, dict):
if "__variant" in value and value["__variant"] == FileVar.__name__:
return value
elif isinstance(value, FileVar):
return value.to_dict()
return None
def _refetch_workflow_run(self, workflow_run_id: str) -> WorkflowRun:
"""
Refetch workflow run

View File

@@ -44,7 +44,6 @@ class DatasetIndexToolCallbackHandler:
DocumentSegment.index_node_id == document.metadata["doc_id"]
)
# if 'dataset_id' in document.metadata:
if "dataset_id" in document.metadata:
query = query.filter(DocumentSegment.dataset_id == document.metadata["dataset_id"])

View File

@@ -0,0 +1,29 @@
import enum
from typing import Any
from pydantic import BaseModel
class PromptMessageFileType(enum.Enum):
IMAGE = "image"
@staticmethod
def value_of(value):
for member in PromptMessageFileType:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class PromptMessageFile(BaseModel):
type: PromptMessageFileType
data: Any = None
class ImagePromptMessageFile(PromptMessageFile):
class DETAIL(enum.Enum):
LOW = "low"
HIGH = "high"
type: PromptMessageFileType = PromptMessageFileType.IMAGE
detail: DETAIL = DETAIL.LOW

View File

@@ -1,19 +0,0 @@
from .constants import FILE_MODEL_IDENTITY
from .enums import ArrayFileAttribute, FileAttribute, FileBelongsTo, FileTransferMethod, FileType
from .models import (
File,
FileExtraConfig,
ImageConfig,
)
__all__ = [
"FileType",
"FileExtraConfig",
"FileTransferMethod",
"FileBelongsTo",
"File",
"ImageConfig",
"FileAttribute",
"ArrayFileAttribute",
"FILE_MODEL_IDENTITY",
]

View File

@@ -1 +0,0 @@
FILE_MODEL_IDENTITY = "__dify__file__"

View File

@@ -1,55 +0,0 @@
from enum import Enum
class FileType(str, Enum):
IMAGE = "image"
DOCUMENT = "document"
AUDIO = "audio"
VIDEO = "video"
CUSTOM = "custom"
@staticmethod
def value_of(value):
for member in FileType:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileTransferMethod(str, Enum):
REMOTE_URL = "remote_url"
LOCAL_FILE = "local_file"
TOOL_FILE = "tool_file"
@staticmethod
def value_of(value):
for member in FileTransferMethod:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileBelongsTo(str, Enum):
USER = "user"
ASSISTANT = "assistant"
@staticmethod
def value_of(value):
for member in FileBelongsTo:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileAttribute(str, Enum):
TYPE = "type"
SIZE = "size"
NAME = "name"
MIME_TYPE = "mime_type"
TRANSFER_METHOD = "transfer_method"
URL = "url"
EXTENSION = "extension"
class ArrayFileAttribute(str, Enum):
LENGTH = "length"

View File

@@ -1,136 +0,0 @@
import base64
from configs import dify_config
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from extensions.ext_database import db
from extensions.ext_storage import storage
from models import UploadFile
from . import helpers
from .enums import FileAttribute
from .models import File, FileTransferMethod, FileType
from .tool_file_parser import ToolFileParser
def get_attr(*, file: "File", attr: "FileAttribute"):
match attr:
case FileAttribute.TYPE:
return file.type.value
case FileAttribute.SIZE:
return file.size
case FileAttribute.NAME:
return file.filename
case FileAttribute.MIME_TYPE:
return file.mime_type
case FileAttribute.TRANSFER_METHOD:
return file.transfer_method.value
case FileAttribute.URL:
return file.remote_url
case FileAttribute.EXTENSION:
return file.extension
case _:
raise ValueError(f"Invalid file attribute: {attr}")
def to_prompt_message_content(file: "File", /):
"""
Convert a File object to an ImagePromptMessageContent object.
This function takes a File object and converts it to an ImagePromptMessageContent
object, which can be used as a prompt for image-based AI models.
Args:
file (File): The File object to convert. Must be of type FileType.IMAGE.
Returns:
ImagePromptMessageContent: An object containing the image data and detail level.
Raises:
ValueError: If the file is not an image or if the file data is missing.
Note:
The detail level of the image prompt is determined by the file's extra_config.
If not specified, it defaults to ImagePromptMessageContent.DETAIL.LOW.
"""
if file.type != FileType.IMAGE:
raise ValueError("Only image file can convert to prompt message content")
url_or_b64_data = _get_url_or_b64_data(file=file)
if url_or_b64_data is None:
raise ValueError("Missing file data")
# decide the detail of image prompt message content
if file._extra_config and file._extra_config.image_config and file._extra_config.image_config.detail:
detail = file._extra_config.image_config.detail
else:
detail = ImagePromptMessageContent.DETAIL.LOW
return ImagePromptMessageContent(data=url_or_b64_data, detail=detail)
def download(*, upload_file_id: str, tenant_id: str):
upload_file = (
db.session.query(UploadFile).filter(UploadFile.id == upload_file_id, UploadFile.tenant_id == tenant_id).first()
)
if not upload_file:
raise ValueError("upload file not found")
return _download(upload_file.key)
def _download(path: str, /):
"""
Download and return the contents of a file as bytes.
This function loads the file from storage and ensures it's in bytes format.
Args:
path (str): The path to the file in storage.
Returns:
bytes: The contents of the file as a bytes object.
Raises:
ValueError: If the loaded file is not a bytes object.
"""
data = storage.load(path, stream=False)
if not isinstance(data, bytes):
raise ValueError(f"file {path} is not a bytes object")
return data
def _get_base64(*, upload_file_id: str, tenant_id: str) -> str | None:
upload_file = (
db.session.query(UploadFile).filter(UploadFile.id == upload_file_id, UploadFile.tenant_id == tenant_id).first()
)
if not upload_file:
return None
data = _download(upload_file.key)
if data is None:
return None
encoded_string = base64.b64encode(data).decode("utf-8")
return f"data:{upload_file.mime_type};base64,{encoded_string}"
def _get_url_or_b64_data(file: "File"):
if file.type == FileType.IMAGE:
if file.transfer_method == FileTransferMethod.REMOTE_URL:
return file.remote_url
elif file.transfer_method == FileTransferMethod.LOCAL_FILE:
if file.related_id is None:
raise ValueError("Missing file related_id")
if dify_config.MULTIMODAL_SEND_IMAGE_FORMAT == "url":
return helpers.get_signed_image_url(upload_file_id=file.related_id)
return _get_base64(upload_file_id=file.related_id, tenant_id=file.tenant_id)
elif file.transfer_method == FileTransferMethod.TOOL_FILE:
# add sign url
if file.related_id is None or file.extension is None:
raise ValueError("Missing file related_id or extension")
return ToolFileParser.get_tool_file_manager().sign_file(
tool_file_id=file.related_id, extension=file.extension
)
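`_get_base64` above turns stored file bytes into a `data:` URL for multimodal prompts. The encoding step on its own, stdlib only, with an illustrative 1x1 GIF as the payload (the bytes and MIME type are made up for the example):

# Standalone sketch of the base64 data-URL encoding used by _get_base64 above.
import base64

def to_data_url(data: bytes, mime_type: str) -> str:
    encoded = base64.b64encode(data).decode("utf-8")
    return f"data:{mime_type};base64,{encoded}"

# 1x1 transparent GIF, purely illustrative
gif_bytes = base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")
print(to_data_url(gif_bytes, "image/gif")[:40], "...")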

api/core/file/file_obj.py (new file, 145 lines)
View File

@@ -0,0 +1,145 @@
import enum
from typing import Any, Optional
from pydantic import BaseModel
from core.file.tool_file_parser import ToolFileParser
from core.file.upload_file_parser import UploadFileParser
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from extensions.ext_database import db
class FileExtraConfig(BaseModel):
"""
File Upload Entity.
"""
image_config: Optional[dict[str, Any]] = None
class FileType(enum.Enum):
IMAGE = "image"
@staticmethod
def value_of(value):
for member in FileType:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileTransferMethod(enum.Enum):
REMOTE_URL = "remote_url"
LOCAL_FILE = "local_file"
TOOL_FILE = "tool_file"
@staticmethod
def value_of(value):
for member in FileTransferMethod:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileBelongsTo(enum.Enum):
USER = "user"
ASSISTANT = "assistant"
@staticmethod
def value_of(value):
for member in FileBelongsTo:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class FileVar(BaseModel):
id: Optional[str] = None # message file id
tenant_id: str
type: FileType
transfer_method: FileTransferMethod
url: Optional[str] = None # remote url
related_id: Optional[str] = None
extra_config: Optional[FileExtraConfig] = None
filename: Optional[str] = None
extension: Optional[str] = None
mime_type: Optional[str] = None
def to_dict(self) -> dict:
return {
"__variant": self.__class__.__name__,
"tenant_id": self.tenant_id,
"type": self.type.value,
"transfer_method": self.transfer_method.value,
"url": self.preview_url,
"remote_url": self.url,
"related_id": self.related_id,
"filename": self.filename,
"extension": self.extension,
"mime_type": self.mime_type,
}
def to_markdown(self) -> str:
"""
Convert file to markdown
:return:
"""
preview_url = self.preview_url
if self.type == FileType.IMAGE:
text = f'![{self.filename or ""}]({preview_url})'
else:
text = f"[{self.filename or preview_url}]({preview_url})"
return text
@property
def data(self) -> Optional[str]:
"""
Get image data, file signed url or base64 data
depending on config MULTIMODAL_SEND_IMAGE_FORMAT
:return:
"""
return self._get_data()
@property
def preview_url(self) -> Optional[str]:
"""
Get signed preview url
:return:
"""
return self._get_data(force_url=True)
@property
def prompt_message_content(self) -> ImagePromptMessageContent:
if self.type == FileType.IMAGE:
image_config = self.extra_config.image_config
return ImagePromptMessageContent(
data=self.data,
detail=ImagePromptMessageContent.DETAIL.HIGH
if image_config.get("detail") == "high"
else ImagePromptMessageContent.DETAIL.LOW,
)
def _get_data(self, force_url: bool = False) -> Optional[str]:
from models.model import UploadFile
if self.type == FileType.IMAGE:
if self.transfer_method == FileTransferMethod.REMOTE_URL:
return self.url
elif self.transfer_method == FileTransferMethod.LOCAL_FILE:
upload_file = (
db.session.query(UploadFile)
.filter(UploadFile.id == self.related_id, UploadFile.tenant_id == self.tenant_id)
.first()
)
return UploadFileParser.get_image_data(upload_file=upload_file, force_url=force_url)
elif self.transfer_method == FileTransferMethod.TOOL_FILE:
extension = self.extension
# add sign url
return ToolFileParser.get_tool_file_manager().sign_file(
tool_file_id=self.related_id, extension=extension
)
return None

View File

@@ -1,61 +0,0 @@
import base64
import hashlib
import hmac
import os
import time
from configs import dify_config
def get_signed_image_url(upload_file_id: str) -> str:
url = f"{dify_config.FILES_URL}/files/{upload_file_id}/image-preview"
timestamp = str(int(time.time()))
nonce = os.urandom(16).hex()
key = dify_config.SECRET_KEY.encode()
msg = f"image-preview|{upload_file_id}|{timestamp}|{nonce}"
sign = hmac.new(key, msg.encode(), hashlib.sha256).digest()
encoded_sign = base64.urlsafe_b64encode(sign).decode()
return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}"
def get_signed_file_url(upload_file_id: str) -> str:
url = f"{dify_config.FILES_URL}/files/{upload_file_id}/file-preview"
timestamp = str(int(time.time()))
nonce = os.urandom(16).hex()
key = dify_config.SECRET_KEY.encode()
msg = f"file-preview|{upload_file_id}|{timestamp}|{nonce}"
sign = hmac.new(key, msg.encode(), hashlib.sha256).digest()
encoded_sign = base64.urlsafe_b64encode(sign).decode()
return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}"
def verify_image_signature(*, upload_file_id: str, timestamp: str, nonce: str, sign: str) -> bool:
data_to_sign = f"image-preview|{upload_file_id}|{timestamp}|{nonce}"
secret_key = dify_config.SECRET_KEY.encode()
recalculated_sign = hmac.new(secret_key, data_to_sign.encode(), hashlib.sha256).digest()
recalculated_encoded_sign = base64.urlsafe_b64encode(recalculated_sign).decode()
# verify signature
if sign != recalculated_encoded_sign:
return False
current_time = int(time.time())
return current_time - int(timestamp) <= dify_config.FILES_ACCESS_TIMEOUT
def verify_file_signature(*, upload_file_id: str, timestamp: str, nonce: str, sign: str) -> bool:
data_to_sign = f"file-preview|{upload_file_id}|{timestamp}|{nonce}"
secret_key = dify_config.SECRET_KEY.encode()
recalculated_sign = hmac.new(secret_key, data_to_sign.encode(), hashlib.sha256).digest()
recalculated_encoded_sign = base64.urlsafe_b64encode(recalculated_sign).decode()
# verify signature
if sign != recalculated_encoded_sign:
return False
current_time = int(time.time())
return current_time - int(timestamp) <= dify_config.FILES_ACCESS_TIMEOUT
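The helpers above sign preview URLs with HMAC-SHA256 over "{purpose}|{file_id}|{timestamp}|{nonce}" and reject signatures older than a configured timeout. A self-contained sketch of that sign/verify pair; SECRET_KEY and ACCESS_TIMEOUT are placeholders for dify_config.SECRET_KEY and dify_config.FILES_ACCESS_TIMEOUT:

# Self-contained sketch of the HMAC-SHA256 URL signing scheme shown above.
import base64
import hashlib
import hmac
import os
import time

SECRET_KEY = b"example-secret"  # assumption: replace with the real secret
ACCESS_TIMEOUT = 300            # seconds

def sign(file_id: str) -> tuple[str, str, str]:
    timestamp = str(int(time.time()))
    nonce = os.urandom(16).hex()
    msg = f"image-preview|{file_id}|{timestamp}|{nonce}"
    digest = hmac.new(SECRET_KEY, msg.encode(), hashlib.sha256).digest()
    return timestamp, nonce, base64.urlsafe_b64encode(digest).decode()

def verify(file_id: str, timestamp: str, nonce: str, signature: str) -> bool:
    msg = f"image-preview|{file_id}|{timestamp}|{nonce}"
    digest = hmac.new(SECRET_KEY, msg.encode(), hashlib.sha256).digest()
    expected = base64.urlsafe_b64encode(digest).decode()
    if not hmac.compare_digest(expected, signature):
        return False
    return int(time.time()) - int(timestamp) <= ACCESS_TIMEOUT

ts, nonce, sig = sign("file-123")
print(verify("file-123", ts, nonce, sig))  # True
print(verify("file-456", ts, nonce, sig))  # False: signature does not match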

View File

@@ -0,0 +1,225 @@
import re
from collections.abc import Mapping, Sequence
from typing import Any, Union
from urllib.parse import parse_qs, urlparse
import requests
from core.file.file_obj import FileBelongsTo, FileExtraConfig, FileTransferMethod, FileType, FileVar
from extensions.ext_database import db
from models.account import Account
from models.model import EndUser, MessageFile, UploadFile
from services.file_service import IMAGE_EXTENSIONS
class MessageFileParser:
def __init__(self, tenant_id: str, app_id: str) -> None:
self.tenant_id = tenant_id
self.app_id = app_id
def validate_and_transform_files_arg(
self, files: Sequence[Mapping[str, Any]], file_extra_config: FileExtraConfig, user: Union[Account, EndUser]
) -> list[FileVar]:
"""
validate and transform files arg
:param files:
:param file_extra_config:
:param user:
:return:
"""
for file in files:
if not isinstance(file, dict):
raise ValueError("Invalid file format, must be dict")
if not file.get("type"):
raise ValueError("Missing file type")
FileType.value_of(file.get("type"))
if not file.get("transfer_method"):
raise ValueError("Missing file transfer method")
FileTransferMethod.value_of(file.get("transfer_method"))
if file.get("transfer_method") == FileTransferMethod.REMOTE_URL.value:
if not file.get("url"):
raise ValueError("Missing file url")
if not file.get("url").startswith("http"):
raise ValueError("Invalid file url")
if file.get("transfer_method") == FileTransferMethod.LOCAL_FILE.value and not file.get("upload_file_id"):
raise ValueError("Missing file upload_file_id")
if file.get("transform_method") == FileTransferMethod.TOOL_FILE.value and not file.get("tool_file_id"):
raise ValueError("Missing file tool_file_id")
# transform files to file objs
type_file_objs = self._to_file_objs(files, file_extra_config)
# validate files
new_files = []
for file_type, file_objs in type_file_objs.items():
if file_type == FileType.IMAGE:
# parse and validate files
image_config = file_extra_config.image_config
# check if image file feature is enabled
if not image_config:
continue
# Validate number of files
if len(files) > image_config["number_limits"]:
raise ValueError(f"Number of image files exceeds the maximum limit {image_config['number_limits']}")
for file_obj in file_objs:
# Validate transfer method
if file_obj.transfer_method.value not in image_config["transfer_methods"]:
raise ValueError(f"Invalid transfer method: {file_obj.transfer_method.value}")
# Validate file type
if file_obj.type != FileType.IMAGE:
raise ValueError(f"Invalid file type: {file_obj.type}")
if file_obj.transfer_method == FileTransferMethod.REMOTE_URL:
# check remote url valid and is image
result, error = self._check_image_remote_url(file_obj.url)
if result is False:
raise ValueError(error)
elif file_obj.transfer_method == FileTransferMethod.LOCAL_FILE:
# get upload file from upload_file_id
upload_file = (
db.session.query(UploadFile)
.filter(
UploadFile.id == file_obj.related_id,
UploadFile.tenant_id == self.tenant_id,
UploadFile.created_by == user.id,
UploadFile.created_by_role == ("account" if isinstance(user, Account) else "end_user"),
UploadFile.extension.in_(IMAGE_EXTENSIONS),
)
.first()
)
# check that the upload file belongs to the tenant and user
if not upload_file:
raise ValueError("Invalid upload file")
new_files.append(file_obj)
# return all file objs
return new_files
def transform_message_files(self, files: list[MessageFile], file_extra_config: FileExtraConfig):
"""
transform message files
:param files:
:param file_extra_config:
:return:
"""
# transform files to file objs
type_file_objs = self._to_file_objs(files, file_extra_config)
# return all file objs
return [file_obj for file_objs in type_file_objs.values() for file_obj in file_objs]
def _to_file_objs(
self, files: list[Union[dict, MessageFile]], file_extra_config: FileExtraConfig
) -> dict[FileType, list[FileVar]]:
"""
transform files to file objs
:param files:
:param file_extra_config:
:return:
"""
type_file_objs: dict[FileType, list[FileVar]] = {
# Currently only support image
FileType.IMAGE: []
}
if not files:
return type_file_objs
# group by file type and convert file args or message files to FileObj
for file in files:
if isinstance(file, MessageFile):
if file.belongs_to == FileBelongsTo.ASSISTANT.value:
continue
file_obj = self._to_file_obj(file, file_extra_config)
if file_obj.type not in type_file_objs:
continue
type_file_objs[file_obj.type].append(file_obj)
return type_file_objs
def _to_file_obj(self, file: Union[dict, MessageFile], file_extra_config: FileExtraConfig):
"""
transform file to file obj
:param file:
:return:
"""
if isinstance(file, dict):
transfer_method = FileTransferMethod.value_of(file.get("transfer_method"))
if transfer_method != FileTransferMethod.TOOL_FILE:
return FileVar(
tenant_id=self.tenant_id,
type=FileType.value_of(file.get("type")),
transfer_method=transfer_method,
url=file.get("url") if transfer_method == FileTransferMethod.REMOTE_URL else None,
related_id=file.get("upload_file_id") if transfer_method == FileTransferMethod.LOCAL_FILE else None,
extra_config=file_extra_config,
)
return FileVar(
tenant_id=self.tenant_id,
type=FileType.value_of(file.get("type")),
transfer_method=transfer_method,
url=None,
related_id=file.get("tool_file_id"),
extra_config=file_extra_config,
)
else:
return FileVar(
id=file.id,
tenant_id=self.tenant_id,
type=FileType.value_of(file.type),
transfer_method=FileTransferMethod.value_of(file.transfer_method),
url=file.url,
related_id=file.upload_file_id or None,
extra_config=file_extra_config,
)
def _check_image_remote_url(self, url):
try:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/91.0.4472.124 Safari/537.36"
}
def is_s3_presigned_url(url):
try:
parsed_url = urlparse(url)
if "amazonaws.com" not in parsed_url.netloc:
return False
query_params = parse_qs(parsed_url.query)
required_params = ["Signature", "Expires"]
for param in required_params:
if param not in query_params:
return False
if not query_params["Expires"][0].isdigit():
return False
signature = query_params["Signature"][0]
if not re.match(r"^[A-Za-z0-9+/]+={0,2}$", signature):
return False
return True
except Exception:
return False
if is_s3_presigned_url(url):
response = requests.get(url, headers=headers, allow_redirects=True)
if response.status_code in {200, 304}:
return True, ""
response = requests.head(url, headers=headers, allow_redirects=True)
if response.status_code in {200, 304}:
return True, ""
else:
return False, "URL does not exist."
except requests.RequestException as e:
return False, f"Error checking URL: {e}"

View File

@@ -1,140 +0,0 @@
from collections.abc import Mapping, Sequence
from typing import Optional
from pydantic import BaseModel, Field, model_validator
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from . import helpers
from .constants import FILE_MODEL_IDENTITY
from .enums import FileTransferMethod, FileType
from .tool_file_parser import ToolFileParser
class ImageConfig(BaseModel):
"""
NOTE: This part of validation is deprecated, but still used in app features "Image Upload".
"""
number_limits: int = 0
transfer_methods: Sequence[FileTransferMethod] = Field(default_factory=list)
detail: ImagePromptMessageContent.DETAIL | None = None
class FileExtraConfig(BaseModel):
"""
File Upload Entity.
"""
image_config: Optional[ImageConfig] = None
allowed_file_types: Sequence[FileType] = Field(default_factory=list)
allowed_extensions: Sequence[str] = Field(default_factory=list)
allowed_upload_methods: Sequence[FileTransferMethod] = Field(default_factory=list)
number_limits: int = 0
class File(BaseModel):
model_identity: str = FILE_MODEL_IDENTITY
id: Optional[str] = None # message file id
tenant_id: str
type: FileType
transfer_method: FileTransferMethod
remote_url: Optional[str] = None # remote url
related_id: Optional[str] = None
filename: Optional[str] = None
extension: Optional[str] = None
mime_type: Optional[str] = None
size: int = 0
_extra_config: FileExtraConfig | None = None
def to_dict(self) -> Mapping[str, str | int | None]:
data = self.model_dump()
return {
**data,
"url": self.generate_url(),
}
@property
def markdown(self) -> str:
url = self.generate_url()
if self.type == FileType.IMAGE:
text = f'![{self.filename or ""}]({url})'
else:
text = f"[{self.filename or url}]({url})"
return text
def generate_url(self) -> Optional[str]:
if self.type == FileType.IMAGE:
if self.transfer_method == FileTransferMethod.REMOTE_URL:
return self.remote_url
elif self.transfer_method == FileTransferMethod.LOCAL_FILE:
if self.related_id is None:
raise ValueError("Missing file related_id")
return helpers.get_signed_image_url(upload_file_id=self.related_id)
elif self.transfer_method == FileTransferMethod.TOOL_FILE:
assert self.related_id is not None
assert self.extension is not None
return ToolFileParser.get_tool_file_manager().sign_file(
tool_file_id=self.related_id, extension=self.extension
)
else:
if self.transfer_method == FileTransferMethod.REMOTE_URL:
return self.remote_url
elif self.transfer_method == FileTransferMethod.LOCAL_FILE:
if self.related_id is None:
raise ValueError("Missing file related_id")
return helpers.get_signed_file_url(upload_file_id=self.related_id)
elif self.transfer_method == FileTransferMethod.TOOL_FILE:
assert self.related_id is not None
assert self.extension is not None
return ToolFileParser.get_tool_file_manager().sign_file(
tool_file_id=self.related_id, extension=self.extension
)
@model_validator(mode="after")
def validate_after(self):
match self.transfer_method:
case FileTransferMethod.REMOTE_URL:
if not self.remote_url:
raise ValueError("Missing file url")
if not isinstance(self.remote_url, str) or not self.remote_url.startswith("http"):
raise ValueError("Invalid file url")
case FileTransferMethod.LOCAL_FILE:
if not self.related_id:
raise ValueError("Missing file related_id")
case FileTransferMethod.TOOL_FILE:
if not self.related_id:
raise ValueError("Missing file related_id")
# Validate the extra config.
if not self._extra_config:
return self
if self._extra_config.allowed_file_types:
if self.type not in self._extra_config.allowed_file_types and self.type != FileType.CUSTOM:
raise ValueError(f"Invalid file type: {self.type}")
if self._extra_config.allowed_extensions and self.extension not in self._extra_config.allowed_extensions:
raise ValueError(f"Invalid file extension: {self.extension}")
if (
self._extra_config.allowed_upload_methods
and self.transfer_method not in self._extra_config.allowed_upload_methods
):
raise ValueError(f"Invalid transfer method: {self.transfer_method}")
match self.type:
case FileType.IMAGE:
# NOTE: This part of validation is deprecated, but still used in app features "Image Upload".
if not self._extra_config.image_config:
return self
# TODO: skip check if transfer_methods is empty, because many test cases are not setting this field
if (
self._extra_config.image_config.transfer_methods
and self.transfer_method not in self._extra_config.image_config.transfer_methods
):
raise ValueError(f"Invalid transfer method: {self.transfer_method}")
return self

View File

@@ -1,9 +1,4 @@
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from core.tools.tool_file_manager import ToolFileManager
tool_file_manager: dict[str, Any] = {"manager": None}
tool_file_manager = {"manager": None}
class ToolFileParser:

View File

@@ -0,0 +1,79 @@
import base64
import hashlib
import hmac
import logging
import os
import time
from typing import Optional
from configs import dify_config
from extensions.ext_storage import storage
IMAGE_EXTENSIONS = ["jpg", "jpeg", "png", "webp", "gif", "svg"]
IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS])
class UploadFileParser:
@classmethod
def get_image_data(cls, upload_file, force_url: bool = False) -> Optional[str]:
if not upload_file:
return None
if upload_file.extension not in IMAGE_EXTENSIONS:
return None
if dify_config.MULTIMODAL_SEND_IMAGE_FORMAT == "url" or force_url:
return cls.get_signed_temp_image_url(upload_file.id)
else:
# get image file base64
try:
data = storage.load(upload_file.key)
except FileNotFoundError:
logging.error(f"File not found: {upload_file.key}")
return None
encoded_string = base64.b64encode(data).decode("utf-8")
return f"data:{upload_file.mime_type};base64,{encoded_string}"
@classmethod
def get_signed_temp_image_url(cls, upload_file_id) -> str:
"""
get signed url from upload file
:param upload_file_id: upload file ID
:return:
"""
base_url = dify_config.FILES_URL
image_preview_url = f"{base_url}/files/{upload_file_id}/image-preview"
timestamp = str(int(time.time()))
nonce = os.urandom(16).hex()
data_to_sign = f"image-preview|{upload_file_id}|{timestamp}|{nonce}"
secret_key = dify_config.SECRET_KEY.encode()
sign = hmac.new(secret_key, data_to_sign.encode(), hashlib.sha256).digest()
encoded_sign = base64.urlsafe_b64encode(sign).decode()
return f"{image_preview_url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}"
@classmethod
def verify_image_file_signature(cls, upload_file_id: str, timestamp: str, nonce: str, sign: str) -> bool:
"""
verify signature
:param upload_file_id: file id
:param timestamp: timestamp
:param nonce: nonce
:param sign: signature
:return:
"""
data_to_sign = f"image-preview|{upload_file_id}|{timestamp}|{nonce}"
secret_key = dify_config.SECRET_KEY.encode()
recalculated_sign = hmac.new(secret_key, data_to_sign.encode(), hashlib.sha256).digest()
recalculated_encoded_sign = base64.urlsafe_b64encode(recalculated_sign).decode()
# verify signature
if sign != recalculated_encoded_sign:
return False
current_time = int(time.time())
return current_time - int(timestamp) <= dify_config.FILES_ACCESS_TIMEOUT

View File

@@ -13,11 +13,8 @@ SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "")
SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "")
SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3"))
proxy_mounts = (
{
"http://": httpx.HTTPTransport(SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(SSRF_PROXY_HTTPS_URL),
}
proxies = (
{"http://": SSRF_PROXY_HTTP_URL, "https://": SSRF_PROXY_HTTPS_URL}
if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL
else None
)
@@ -36,14 +33,11 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
while retries <= max_retries:
try:
if SSRF_PROXY_ALL_URL:
with httpx.Client(proxy=SSRF_PROXY_ALL_URL) as client:
response = client.request(method=method, url=url, **kwargs)
elif proxy_mounts:
with httpx.Client(mounts=proxy_mounts) as client:
response = client.request(method=method, url=url, **kwargs)
response = httpx.request(method=method, url=url, proxy=SSRF_PROXY_ALL_URL, **kwargs)
elif proxies:
response = httpx.request(method=method, url=url, proxies=proxies, **kwargs)
else:
with httpx.Client() as client:
response = client.request(method=method, url=url, **kwargs)
response = httpx.request(method=method, url=url, **kwargs)
if response.status_code not in STATUS_FORCELIST:
return response

View File

@@ -1,20 +1,18 @@
from typing import Optional
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.file import file_manager
from core.file.message_file_parser import MessageFileParser
from core.model_manager import ModelInstance
from core.model_runtime.entities import (
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessage,
PromptMessageContent,
PromptMessageRole,
TextPromptMessageContent,
UserPromptMessage,
)
from core.prompt.utils.extract_thread_messages import extract_thread_messages
from extensions.ext_database import db
from factories import file_factory
from models.model import AppMode, Conversation, Message, MessageFile
from models.workflow import WorkflowRun
@@ -63,6 +61,7 @@ class TokenBufferMemory:
thread_messages.pop(0)
messages = list(reversed(thread_messages))
message_file_parser = MessageFileParser(tenant_id=app_record.tenant_id, app_id=app_record.id)
prompt_messages = []
for message in messages:
files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all()
@@ -81,20 +80,17 @@ class TokenBufferMemory:
workflow_run.workflow.features_dict, is_vision=False
)
if file_extra_config and app_record:
file_objs = file_factory.build_from_message_files(
message_files=files, tenant_id=app_record.tenant_id, config=file_extra_config
)
if file_extra_config:
file_objs = message_file_parser.transform_message_files(files, file_extra_config)
else:
file_objs = []
if not file_objs:
prompt_messages.append(UserPromptMessage(content=message.query))
else:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
prompt_message_contents = [TextPromptMessageContent(data=message.query)]
for file_obj in file_objs:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
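A minimal sketch of the multimodal branch above: the user turn is rebuilt as a list of content parts, text first and one part per attached file (the image URL and the direct use of ImagePromptMessageContent are illustrative; the real code derives the part from the file object):

    from core.model_runtime.entities.message_entities import (
        ImagePromptMessageContent,
        TextPromptMessageContent,
        UserPromptMessage,
    )

    contents = [TextPromptMessageContent(data="Describe this image.")]
    contents.append(ImagePromptMessageContent(data="https://example.com/cat.png"))
    prompt_message = UserPromptMessage(content=contents)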

View File

@@ -1,36 +0,0 @@
from .llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from .message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessage,
PromptMessageContent,
PromptMessageContentType,
PromptMessageRole,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
ToolPromptMessage,
UserPromptMessage,
)
from .model_entities import ModelPropertyKey
__all__ = [
"ImagePromptMessageContent",
"PromptMessage",
"PromptMessageRole",
"LLMUsage",
"ModelPropertyKey",
"AssistantPromptMessage",
"PromptMessage",
"PromptMessageContent",
"PromptMessageRole",
"SystemPromptMessage",
"TextPromptMessageContent",
"UserPromptMessage",
"PromptMessageTool",
"ToolPromptMessage",
"PromptMessageContentType",
"LLMResult",
"LLMResultChunk",
"LLMResultChunkDelta",
]
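For reference, the practical effect of this aggregate __init__ is that the package root re-exports the same symbols as the submodule; a minimal sketch, runnable from the api package root:

    # Both imports resolve to the same class while the re-exporting __init__ exists.
    from core.model_runtime.entities.message_entities import UserPromptMessage as direct
    from core.model_runtime.entities import UserPromptMessage as reexported

    assert direct is reexported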

View File

@@ -79,7 +79,7 @@ class ImagePromptMessageContent(PromptMessageContent):
Model class for image prompt message content.
"""
class DETAIL(str, Enum):
class DETAIL(Enum):
LOW = "low"
HIGH = "high"
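The str mixin changes how the member behaves when compared with or serialized as a plain string; a standalone sketch (the class name is illustrative):

    import json
    from enum import Enum

    class Detail(str, Enum):
        LOW = "low"
        HIGH = "high"

    # With the str mixin the member is itself a string, so equality checks and
    # JSON encoding against raw API values just work.
    assert Detail.LOW == "low"
    assert json.dumps({"detail": Detail.HIGH}) == '{"detail": "high"}'
    # A bare Enum member fails both: the comparison is False and json.dumps raises TypeError.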

View File

@@ -1,4 +1,5 @@
import logging
import os
import re
import time
from abc import abstractmethod
@@ -7,7 +8,6 @@ from typing import Optional, Union
from pydantic import ConfigDict
from configs import dify_config
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.callbacks.logging_callback import LoggingCallback
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
@@ -77,7 +77,7 @@ class LargeLanguageModel(AIModel):
callbacks = callbacks or []
if dify_config.DEBUG:
if bool(os.environ.get("DEBUG", "False").lower() == "true"):
callbacks.append(LoggingCallback())
# trigger before invoke callbacks
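The change swaps an ad-hoc environment lookup for the central settings flag; a minimal sketch of the ad-hoc style being replaced (parsing is repeated on every call, and only the literal string true counts as enabled):

    import os

    # ad-hoc style: re-read and re-parse the raw environment variable each time
    debug_enabled = os.environ.get("DEBUG", "False").lower() == "true"

    if debug_enabled:
        print("attaching LoggingCallback")  # stand-in for callbacks.append(LoggingCallback())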

View File

@@ -50,34 +50,62 @@ provider_credential_schema:
label:
en_US: US East (N. Virginia)
zh_Hans: 美国东部 (弗吉尼亚北部)
- value: us-east-2
label:
en_US: US East (Ohio)
zh_Hans: 美国东部 (俄亥俄州)
- value: us-west-2
label:
en_US: US West (Oregon)
zh_Hans: 美国西部 (俄勒冈州)
- value: ap-south-1
label:
en_US: Asia Pacific (Mumbai)
zh_Hans: 亚太地区(孟买)
- value: ap-southeast-1
label:
en_US: Asia Pacific (Singapore)
zh_Hans: 亚太地区 (新加坡)
- value: ap-northeast-1
label:
en_US: Asia Pacific (Tokyo)
zh_Hans: 亚太地区 (东京)
- value: eu-central-1
label:
en_US: Europe (Frankfurt)
zh_Hans: 欧洲 (法兰克福)
- value: eu-west-2
label:
en_US: Eu west London (London)
zh_Hans: 欧洲西部 (伦敦)
- value: us-gov-west-1
label:
en_US: AWS GovCloud (US-West)
zh_Hans: AWS GovCloud (US-West)
- value: ap-southeast-2
label:
en_US: Asia Pacific (Sydney)
zh_Hans: 亚太地区 (悉尼)
- value: ap-northeast-1
label:
en_US: Asia Pacific (Tokyo)
zh_Hans: 亚太地区 (东京)
- value: ap-northeast-2
label:
en_US: Asia Pacific (Seoul)
zh_Hans: 亚太地区(首尔)
- value: ca-central-1
label:
en_US: Canada (Central)
zh_Hans: 加拿大(中部)
- value: eu-central-1
label:
en_US: Europe (Frankfurt)
zh_Hans: 欧洲 (法兰克福)
- value: eu-west-1
label:
en_US: Europe (Ireland)
zh_Hans: 欧洲(爱尔兰)
- value: eu-west-2
label:
en_US: Europe (London)
zh_Hans: 欧洲西部 (伦敦)
- value: eu-west-3
label:
en_US: Europe (Paris)
zh_Hans: 欧洲(巴黎)
- value: sa-east-1
label:
en_US: South America (São Paulo)
zh_Hans: 南美洲(圣保罗)
- value: us-gov-west-1
label:
en_US: AWS GovCloud (US-West)
zh_Hans: AWS GovCloud (US-West)
- variable: model_for_validation
required: false
label:

View File

@@ -0,0 +1,15 @@
- gemini-1.5-pro
- gemini-1.5-pro-latest
- gemini-1.5-pro-001
- gemini-1.5-pro-002
- gemini-1.5-pro-exp-0801
- gemini-1.5-pro-exp-0827
- gemini-1.5-flash
- gemini-1.5-flash-latest
- gemini-1.5-flash-001
- gemini-1.5-flash-002
- gemini-1.5-flash-exp-0827
- gemini-1.5-flash-8b-exp-0827
- gemini-1.5-flash-8b-exp-0924
- gemini-pro
- gemini-pro-vision

View File

@@ -2,6 +2,8 @@ from typing import IO, Optional
from openai import OpenAI
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
from core.model_runtime.model_providers.openai._common import _CommonOpenAI
@@ -58,3 +60,18 @@ class OpenAISpeech2TextModel(_CommonOpenAI, Speech2TextModel):
response = client.audio.transcriptions.create(model=model, file=file)
return response.text
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
"""
used to define customizable model schema
"""
entity = AIModelEntity(
model=model,
label=I18nObject(en_US=model),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.SPEECH2TEXT,
model_properties={},
parameter_rules=[],
)
return entity

View File

@@ -3,6 +3,8 @@ from urllib.parse import urljoin
import requests
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
@@ -59,3 +61,18 @@ class OAICompatSpeech2TextModel(_CommonOaiApiCompat, Speech2TextModel):
self._invoke(model, credentials, audio_file)
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
"""
used to define customizable model schema
"""
entity = AIModelEntity(
model=model,
label=I18nObject(en_US=model),
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.SPEECH2TEXT,
model_properties={},
parameter_rules=[],
)
return entity

View File

@@ -14,6 +14,10 @@
- google/gemini-pro
- cohere/command-r-plus
- cohere/command-r
- meta-llama/llama-3.2-1b-instruct
- meta-llama/llama-3.2-3b-instruct
- meta-llama/llama-3.2-11b-vision-instruct
- meta-llama/llama-3.2-90b-vision-instruct
- meta-llama/llama-3.1-405b-instruct
- meta-llama/llama-3.1-70b-instruct
- meta-llama/llama-3.1-8b-instruct
@@ -22,6 +26,7 @@
- mistralai/mixtral-8x22b-instruct
- mistralai/mixtral-8x7b-instruct
- mistralai/mistral-7b-instruct
- qwen/qwen-2.5-72b-instruct
- qwen/qwen-2-72b-instruct
- deepseek/deepseek-chat
- deepseek/deepseek-coder

View File

@@ -0,0 +1,45 @@
model: meta-llama/llama-3.2-11b-vision-instruct
label:
zh_Hans: llama-3.2-11b-vision-instruct
en_US: llama-3.2-11b-vision-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
- name: max_tokens
use_template: max_tokens
- name: context_length_exceeded_behavior
default: None
label:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
help:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
type: string
options:
- None
- truncate
- error
- name: response_format
use_template: response_format
pricing:
input: '0.055'
output: '0.055'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,45 @@
model: meta-llama/llama-3.2-1b-instruct
label:
zh_Hans: llama-3.2-1b-instruct
en_US: llama-3.2-1b-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
- name: max_tokens
use_template: max_tokens
- name: context_length_exceeded_behavior
default: None
label:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
help:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
type: string
options:
- None
- truncate
- error
- name: response_format
use_template: response_format
pricing:
input: '0.01'
output: '0.02'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,45 @@
model: meta-llama/llama-3.2-3b-instruct
label:
zh_Hans: llama-3.2-3b-instruct
en_US: llama-3.2-3b-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
- name: max_tokens
use_template: max_tokens
- name: context_length_exceeded_behavior
default: None
label:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
help:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
type: string
options:
- None
- truncate
- error
- name: response_format
use_template: response_format
pricing:
input: '0.03'
output: '0.05'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,45 @@
model: meta-llama/llama-3.2-90b-vision-instruct
label:
zh_Hans: llama-3.2-90b-vision-instruct
en_US: llama-3.2-90b-vision-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
- name: max_tokens
use_template: max_tokens
- name: context_length_exceeded_behavior
default: None
label:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
help:
zh_Hans: 上下文长度超出行为
en_US: Context Length Exceeded Behavior
type: string
options:
- None
- truncate
- error
- name: response_format
use_template: response_format
pricing:
input: '0.35'
output: '0.4'
unit: '0.000001'
currency: USD

View File

@@ -0,0 +1,30 @@
model: qwen/qwen-2.5-72b-instruct
label:
en_US: qwen-2.5-72b-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: max_tokens
use_template: max_tokens
type: int
default: 512
min: 1
max: 8192
help:
zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
- name: top_p
use_template: top_p
- name: frequency_penalty
use_template: frequency_penalty
pricing:
input: "0.35"
output: "0.4"
unit: "0.000001"
currency: USD

View File

@@ -0,0 +1,4 @@
- rerank-2
- rerank-lite-2
- rerank-1
- rerank-lite-1

Some files were not shown because too many files have changed in this diff.