Mirror of https://github.com/langgenius/dify.git (synced 2026-01-07 06:48:28 +00:00)

Compare commits: 28 commits, feat/no-ro ... chore/remo
| Author | SHA1 | Date |
|---|---|---|
|  | e8668782d6 |  |
|  | 4a89403566 |  |
|  | e0c05b2123 |  |
|  | 85b99580ea |  |
|  | 15fbedfcad |  |
|  | 1e6d0de48b |  |
|  | cad751c00c |  |
|  | a47276ac24 |  |
|  | 20403c69b2 |  |
|  | ffc04f2a9b |  |
|  | d1580791e4 |  |
|  | c74eb4fcf3 |  |
|  | a798534337 |  |
|  | 470883858e |  |
|  | 4f4911686d |  |
|  | 6d479dcdbb |  |
|  | 24348c40a6 |  |
|  | a39b50adbb |  |
|  | 81832c14ee |  |
|  | b86022c64a |  |
|  | 45e816a9f6 |  |
|  | 667b1c37a3 |  |
|  | b75d533f9b |  |
|  | aece55d82f |  |
|  | c432b398f4 |  |
|  | 9cb2645793 |  |
|  | 6ac61bd585 |  |
|  | b02165ffe6 |  |
@@ -6,11 +6,10 @@ cd web && pnpm install
pipx install uv
echo "alias start-api=\"cd $WORKSPACE_ROOT/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug\"" >> ~/.bashrc
echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage\"" >> ~/.bashrc
echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor\"" >> ~/.bashrc
echo "alias start-web=\"cd $WORKSPACE_ROOT/web && pnpm dev\"" >> ~/.bashrc
echo "alias start-web-prod=\"cd $WORKSPACE_ROOT/web && pnpm build && pnpm start\"" >> ~/.bashrc
echo "alias start-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d\"" >> ~/.bashrc
echo "alias stop-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env down\"" >> ~/.bashrc

source /home/vscode/.bashrc
agent_runs/invisible-worm-177/penetration_test_report.md (new file, 30 lines)
# Security Penetration Test Report

**Generated:** 2025-11-16 14:02:56 UTC

Executive Summary:

We conducted a thorough white-box security assessment of the API located in /workspace/api, focusing on authentication, authorization, business-logic vulnerabilities, and IDOR in key endpoints such as /installed-apps.

Methodology:

- Full recursive file listing and static code analysis to identify HTTP routes and sensitive endpoint implementations.
- Focused static analysis of endpoints handling sensitive actions, authentication, and role-based authorization.
- Specialized agents created for authentication and business-logic vulnerability testing.
- Dynamic testing attempted for IDOR and authorization bypass, limited by the local API server being unavailable.
- All findings documented with recommended next steps.

Findings:

- Multiple /installed-apps endpoints show solid authentication and multi-layered authorization checks enforcing tenant and role ownership.
- No exploitable access-control bypass or privilege-escalation vulnerabilities were confirmed.
- Dynamic vulnerability testing for IDOR was hampered by connection refusals, preventing full validation.
- A high-priority note was created recommending environment verification and retesting of dynamic attacks once the API server is accessible.

Recommendations:

- Verify and restore access to the local API server to enable full dynamic testing.
- Retry dynamic testing for IDOR and authorization bypass to confirm security.
- Continue layered security reviews focusing on evolving business logic and role enforcement.
- Consider adding automated integration tests validating authorization policies (see the sketch after this report).

Conclusion:

The static analysis phase confirmed robust authentication and authorization controls in the key sensitive endpoints; however, dynamic testing limitations prevented final validation. Once dynamic testing is possible, verify that no IDOR or broken function-level authorization issues remain. This assessment provides a strong foundation for secure API usage and further iterative validation.

Severity: Medium (testing environment constraints limited dynamic verification)
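The last recommendation can be sketched concretely. Below is a minimal pytest example of an authorization-policy integration test; the app factory import, endpoint path, tenant identifiers, and token are illustrative assumptions, not details taken from this assessment.

```python
# Hypothetical authorization integration test; route and credentials are placeholders.
import pytest


@pytest.fixture
def client():
    from app_factory import create_app  # assumed Flask app factory

    app = create_app()
    app.config["TESTING"] = True
    return app.test_client()


def test_installed_app_hidden_from_other_tenant(client):
    # A token belonging to tenant B must not be able to read tenant A's
    # installed app; cross-tenant access should be rejected, not served.
    other_tenant_token = "token-for-tenant-b"  # placeholder credential
    resp = client.get(
        "/console/api/installed-apps/11111111-1111-1111-1111-111111111111",
        headers={"Authorization": f"Bearer {other_tenant_token}"},
    )
    assert resp.status_code in (401, 403, 404)
```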
@@ -161,7 +161,7 @@ WEB_API_CORS_ALLOW_ORIGINS=http://localhost:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
# Set COOKIE_DOMAIN when the console frontend and API are on different subdomains.
# Provide the registrable domain (e.g. example.com); leading dots are optional.
COOKIE_DOMAIN=
COOKIE_DOMAIN=localhost:5001

# Vector database configuration
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
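For context on the COOKIE_DOMAIN comment above, a minimal sketch of how a registrable domain value is typically applied to session cookies in a Flask app; the config key shown is Flask's standard setting and is an assumption here, not necessarily the key Dify reads.

```python
# Illustrative sketch only; SESSION_COOKIE_DOMAIN is Flask's built-in setting
# and stands in for whatever key the API actually maps COOKIE_DOMAIN to.
from flask import Flask

app = Flask(__name__)

# With a registrable domain such as example.com, the session cookie is shared
# by console.example.com and api.example.com. A host:port value such as
# "localhost:5001" is not a valid Domain attribute for a cookie.
app.config["SESSION_COOKIE_DOMAIN"] = "example.com"
```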
@@ -73,8 +73,7 @@ COPY --from=packages ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

# Download nltk data
RUN mkdir -p /usr/local/share/nltk_data && NLTK_DATA=/usr/local/share/nltk_data python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger'); nltk.download('stopwords')" \
    && chmod -R 755 /usr/local/share/nltk_data
RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')"

ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache

@@ -87,15 +86,7 @@ COPY . /app/api/
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Create non-root user and set permissions
RUN groupadd -r -g 1001 dify && \
    useradd -r -u 1001 -g 1001 -s /bin/bash dify && \
    mkdir -p /home/dify && \
    chown -R 1001:1001 /app /home/dify ${TIKTOKEN_CACHE_DIR} /entrypoint.sh

ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}
ENV NLTK_DATA=/usr/local/share/nltk_data
USER 1001

ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
api/bin/env (new file, 11 lines)
#!/bin/sh
# add binaries to PATH if they aren't added yet
# affix colons on either side of $PATH to simplify matching
case ":${PATH}:" in
    *:"$HOME/.local/bin":*)
        ;;
    *)
        # Prepending path in case a system-installed binary needs to be overridden
        export PATH="$HOME/.local/bin:$PATH"
        ;;
esac
api/bin/env.fish (new file, 4 lines)
if not contains "$HOME/.local/bin" $PATH
    # Prepending path in case a system-installed binary needs to be overridden
    set -x PATH "$HOME/.local/bin" $PATH
end
api/bin/uv (new executable file, binary not shown)
api/bin/uvx (new executable file, binary not shown)
api/constants/pipeline_templates.json (new file, 7343 lines): diff suppressed because one or more lines are too long.
@@ -15,11 +15,12 @@ from controllers.console.wraps import (
    setup_required,
)
from core.ops.ops_trace_manager import OpsTraceManager
from core.workflow.enums import NodeType
from extensions.ext_database import db
from fields.app_fields import app_detail_fields, app_detail_fields_with_site, app_pagination_fields
from libs.login import current_account_with_tenant, login_required
from libs.validators import validate_description_length
from models import App
from models import App, Workflow
from services.app_dsl_service import AppDslService, ImportMode
from services.app_service import AppService
from services.enterprise.enterprise_service import EnterpriseService
@@ -106,6 +107,35 @@ class AppListApi(Resource):
                if str(app.id) in res:
                    app.access_mode = res[str(app.id)].access_mode

        workflow_capable_app_ids = [
            str(app.id) for app in app_pagination.items if app.mode in {"workflow", "advanced-chat"}
        ]
        draft_trigger_app_ids: set[str] = set()
        if workflow_capable_app_ids:
            draft_workflows = (
                db.session.execute(
                    select(Workflow).where(
                        Workflow.version == Workflow.VERSION_DRAFT,
                        Workflow.app_id.in_(workflow_capable_app_ids),
                    )
                )
                .scalars()
                .all()
            )
            trigger_node_types = {
                NodeType.TRIGGER_WEBHOOK,
                NodeType.TRIGGER_SCHEDULE,
                NodeType.TRIGGER_PLUGIN,
            }
            for workflow in draft_workflows:
                for _, node_data in workflow.walk_nodes():
                    if node_data.get("type") in trigger_node_types:
                        draft_trigger_app_ids.add(str(workflow.app_id))
                        break

        for app in app_pagination.items:
            app.has_draft_trigger = str(app.id) in draft_trigger_app_ids

        return marshal(app_pagination, app_pagination_fields), 200

    @api.doc("create_app")
@@ -216,7 +216,6 @@ def setup_required(view: Callable[P, R]):
            raise NotInitValidateError()
        elif dify_config.EDITION == "SELF_HOSTED" and not db.session.query(DifySetup).first():
            raise NotSetupError()

        return view(*args, **kwargs)

    return decorated
@@ -138,6 +138,10 @@ class StreamableHTTPTransport:
    ) -> bool:
        """Handle an SSE event, returning True if the response is complete."""
        if sse.event == "message":
            # A ping event sent by the server is recognized as a message event with empty data by httpx-sse's SSEDecoder
            if not sse.data.strip():
                return False

            try:
                message = JSONRPCMessage.model_validate_json(sse.data)
                logger.debug("SSE message: %s", message)
@@ -52,7 +52,7 @@ class OpenAIModeration(Moderation):
        text = "\n".join(str(inputs.values()))
        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, provider="openai", model_type=ModelType.MODERATION, model="text-moderation-stable"
            tenant_id=self.tenant_id, provider="openai", model_type=ModelType.MODERATION, model="omni-moderation-latest"
        )

        openai_moderation = model_instance.invoke_moderation(text=text)
@@ -302,7 +302,8 @@ class OracleVector(BaseVector):
            nltk.data.find("tokenizers/punkt")
            nltk.data.find("corpora/stopwords")
        except LookupError:
            raise LookupError("Unable to find the required NLTK data package: punkt and stopwords")
            nltk.download("punkt")
            nltk.download("stopwords")
        e_str = re.sub(r"[^\w ]", "", query)
        all_tokens = nltk.word_tokenize(e_str)
        stop_words = stopwords.words("english")
@@ -152,13 +152,15 @@ class WordExtractor(BaseExtractor):
            # Initialize a row, all of which are empty by default
            row_cells = [""] * total_cols
            col_index = 0
            for cell in row.cells:
            while col_index < len(row.cells):
                # make sure the col_index is not out of range
                while col_index < total_cols and row_cells[col_index] != "":
                while col_index < len(row.cells) and row_cells[col_index] != "":
                    col_index += 1
                # if col_index is out of range, exit the loop
                if col_index >= total_cols:
                if col_index >= len(row.cells):
                    break
                # get the correct cell
                cell = row.cells[col_index]
                cell_content = self._parse_cell(cell, image_map).strip()
                cell_colspan = cell.grid_span or 1
                for i in range(cell_colspan):
@@ -54,6 +54,9 @@ class TenantIsolatedTaskQueue:
            serialized_data = wrapper.serialize()
            serialized_tasks.append(serialized_data)

        if not serialized_tasks:
            return

        redis_client.lpush(self._queue, *serialized_tasks)

    def pull_tasks(self, count: int = 1) -> Sequence[Any]:
@@ -202,6 +202,35 @@ class SegmentType(StrEnum):
            raise ValueError(f"element_type is only supported by array type, got {self}")
        return _ARRAY_ELEMENT_TYPES_MAPPING.get(self)

    @staticmethod
    def get_zero_value(t: "SegmentType"):
        # Lazy import to avoid circular dependency
        from factories import variable_factory

        match t:
            case (
                SegmentType.ARRAY_OBJECT
                | SegmentType.ARRAY_ANY
                | SegmentType.ARRAY_STRING
                | SegmentType.ARRAY_NUMBER
                | SegmentType.ARRAY_BOOLEAN
            ):
                return variable_factory.build_segment_with_type(t, [])
            case SegmentType.OBJECT:
                return variable_factory.build_segment({})
            case SegmentType.STRING:
                return variable_factory.build_segment("")
            case SegmentType.INTEGER:
                return variable_factory.build_segment(0)
            case SegmentType.FLOAT:
                return variable_factory.build_segment(0.0)
            case SegmentType.NUMBER:
                return variable_factory.build_segment(0)
            case SegmentType.BOOLEAN:
                return variable_factory.build_segment(False)
            case _:
                raise ValueError(f"unsupported variable type: {t}")


_ARRAY_ELEMENT_TYPES_MAPPING: Mapping[SegmentType, SegmentType] = {
    # ARRAY_ANY does not have corresponding element type.
@@ -2,7 +2,6 @@ from collections.abc import Callable, Mapping, Sequence
from typing import TYPE_CHECKING, Any, TypeAlias

from core.variables import SegmentType, Variable
from core.variables.segments import BooleanSegment
from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID
from core.workflow.conversation_variable_updater import ConversationVariableUpdater
from core.workflow.entities import GraphInitParams
@@ -12,7 +11,6 @@ from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig
from core.workflow.nodes.base.node import Node
from core.workflow.nodes.variable_assigner.common import helpers as common_helpers
from core.workflow.nodes.variable_assigner.common.exc import VariableOperatorNodeError
from factories import variable_factory

from ..common.impl import conversation_variable_updater_factory
from .node_data import VariableAssignerData, WriteMode
@@ -116,7 +114,7 @@ class VariableAssignerNode(Node):
                updated_variable = original_variable.model_copy(update={"value": updated_value})

            case WriteMode.CLEAR:
                income_value = get_zero_value(original_variable.value_type)
                income_value = SegmentType.get_zero_value(original_variable.value_type)
                updated_variable = original_variable.model_copy(update={"value": income_value.to_object()})

            # Overwrite the variable.
@@ -143,24 +141,3 @@ class VariableAssignerNode(Node):
            process_data=common_helpers.set_updated_variables({}, updated_variables),
            outputs={},
        )


def get_zero_value(t: SegmentType):
    # TODO(QuantumGhost): this should be a method of `SegmentType`.
    match t:
        case SegmentType.ARRAY_OBJECT | SegmentType.ARRAY_STRING | SegmentType.ARRAY_NUMBER | SegmentType.ARRAY_BOOLEAN:
            return variable_factory.build_segment_with_type(t, [])
        case SegmentType.OBJECT:
            return variable_factory.build_segment({})
        case SegmentType.STRING:
            return variable_factory.build_segment("")
        case SegmentType.INTEGER:
            return variable_factory.build_segment(0)
        case SegmentType.FLOAT:
            return variable_factory.build_segment(0.0)
        case SegmentType.NUMBER:
            return variable_factory.build_segment(0)
        case SegmentType.BOOLEAN:
            return BooleanSegment(value=False)
        case _:
            raise VariableOperatorNodeError(f"unsupported variable type: {t}")
@@ -1,14 +0,0 @@
from core.variables import SegmentType

# Note: This mapping is duplicated with `get_zero_value`. Consider refactoring to avoid redundancy.
EMPTY_VALUE_MAPPING = {
    SegmentType.STRING: "",
    SegmentType.NUMBER: 0,
    SegmentType.BOOLEAN: False,
    SegmentType.OBJECT: {},
    SegmentType.ARRAY_ANY: [],
    SegmentType.ARRAY_STRING: [],
    SegmentType.ARRAY_NUMBER: [],
    SegmentType.ARRAY_OBJECT: [],
    SegmentType.ARRAY_BOOLEAN: [],
}
@@ -16,7 +16,6 @@ from core.workflow.nodes.variable_assigner.common.exc import VariableOperatorNod
from core.workflow.nodes.variable_assigner.common.impl import conversation_variable_updater_factory

from . import helpers
from .constants import EMPTY_VALUE_MAPPING
from .entities import VariableAssignerNodeData, VariableOperationItem
from .enums import InputType, Operation
from .exc import (
@@ -249,7 +248,7 @@ class VariableAssignerNode(Node):
            case Operation.OVER_WRITE:
                return value
            case Operation.CLEAR:
                return EMPTY_VALUE_MAPPING[variable.value_type]
                return SegmentType.get_zero_value(variable.value_type).to_object()
            case Operation.APPEND:
                return variable.value + [value]
            case Operation.EXTEND:
@@ -3,7 +3,7 @@ import io
import json
from collections.abc import Generator

from google.cloud import storage as google_cloud_storage
from google.cloud import storage as google_cloud_storage  # type: ignore

from configs import dify_config
from extensions.storage.base_storage import BaseStorage
@@ -116,6 +116,7 @@ app_partial_fields = {
    "access_mode": fields.String,
    "create_user_name": fields.String,
    "author_name": fields.String,
    "has_draft_trigger": fields.Boolean,
}
@@ -21,6 +21,7 @@ from configs import dify_config
from core.rag.index_processor.constant.built_in_field import BuiltInField, MetadataDataSource
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_storage import storage
from models.base import TypeBase
from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule

from .account import Account
@@ -906,17 +907,21 @@ class ChildChunk(Base):
        return db.session.query(DocumentSegment).where(DocumentSegment.id == self.segment_id).first()


class AppDatasetJoin(Base):
class AppDatasetJoin(TypeBase):
    __tablename__ = "app_dataset_joins"
    __table_args__ = (
        sa.PrimaryKeyConstraint("id", name="app_dataset_join_pkey"),
        sa.Index("app_dataset_join_app_dataset_idx", "dataset_id", "app_id"),
    )

    id = mapped_column(StringUUID, primary_key=True, nullable=False, server_default=sa.text("uuid_generate_v4()"))
    app_id = mapped_column(StringUUID, nullable=False)
    dataset_id = mapped_column(StringUUID, nullable=False)
    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=sa.func.current_timestamp())
    id: Mapped[str] = mapped_column(
        StringUUID, primary_key=True, nullable=False, server_default=sa.text("uuid_generate_v4()"), init=False
    )
    app_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime, nullable=False, server_default=sa.func.current_timestamp(), init=False
    )

    @property
    def app(self):
@@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.9.2"
version = "1.10.0"
requires-python = ">=3.11,<3.13"

dependencies = [
@@ -37,7 +37,7 @@ dependencies = [
    "numpy~=1.26.4",
    "openpyxl~=3.1.5",
    "opik~=1.8.72",
    "litellm==1.77.1", # Pinned to avoid madoka dependency issue
    "litellm==1.77.1", # Pinned to avoid madoka dependency issue
    "opentelemetry-api==1.27.0",
    "opentelemetry-distro==0.48b0",
    "opentelemetry-exporter-otlp==1.27.0",
@@ -79,7 +79,6 @@ dependencies = [
    "tiktoken~=0.9.0",
    "transformers~=4.56.1",
    "unstructured[docx,epub,md,ppt,pptx]~=0.16.1",
    "weave~=0.51.0",
    "yarl~=1.18.3",
    "webvtt-py~=0.5.1",
    "sseclient-py~=1.8.0",
@@ -90,6 +89,7 @@ dependencies = [
    "croniter>=6.0.0",
    "weaviate-client==4.17.0",
    "apscheduler>=3.11.0",
    "weave>=0.52.16",
]
# Before adding a new dependency, consider placing it in
# alphabetical order (a-z) and in a suitable group.
@@ -1,5 +1,5 @@
import json
from typing import Any
from typing import Any, TypedDict

from core.app.app_config.entities import (
    DatasetEntity,
@@ -28,6 +28,12 @@ from models.model import App, AppMode, AppModelConfig
from models.workflow import Workflow, WorkflowType


class _NodeType(TypedDict):
    id: str
    position: None
    data: dict[str, Any]


class WorkflowConverter:
    """
    App Convert to Workflow Mode
@@ -217,7 +223,7 @@ class WorkflowConverter:

        return app_config

    def _convert_to_start_node(self, variables: list[VariableEntity]):
    def _convert_to_start_node(self, variables: list[VariableEntity]) -> _NodeType:
        """
        Convert to Start Node
        :param variables: list of variables
@@ -235,7 +241,7 @@ class WorkflowConverter:

    def _convert_to_http_request_node(
        self, app_model: App, variables: list[VariableEntity], external_data_variables: list[ExternalDataVariableEntity]
    ) -> tuple[list[dict], dict[str, str]]:
    ) -> tuple[list[_NodeType], dict[str, str]]:
        """
        Convert API Based Extension to HTTP Request Node
        :param app_model: App instance
@@ -285,7 +291,7 @@ class WorkflowConverter:
            request_body_json = json.dumps(request_body)
            request_body_json = request_body_json.replace(r"\{\{", "{{").replace(r"\}\}", "}}")

            http_request_node = {
            http_request_node: _NodeType = {
                "id": f"http_request_{index}",
                "position": None,
                "data": {
@@ -303,7 +309,7 @@ class WorkflowConverter:
            nodes.append(http_request_node)

            # append code node for response body parsing
            code_node: dict[str, Any] = {
            code_node: _NodeType = {
                "id": f"code_{index}",
                "position": None,
                "data": {
@@ -326,7 +332,7 @@ class WorkflowConverter:

    def _convert_to_knowledge_retrieval_node(
        self, new_app_mode: AppMode, dataset_config: DatasetEntity, model_config: ModelConfigEntity
    ) -> dict | None:
    ) -> _NodeType | None:
        """
        Convert datasets to Knowledge Retrieval Node
        :param new_app_mode: new app mode
@@ -384,7 +390,7 @@ class WorkflowConverter:
        prompt_template: PromptTemplateEntity,
        file_upload: FileUploadConfig | None = None,
        external_data_variable_node_mapping: dict[str, str] | None = None,
    ):
    ) -> _NodeType:
        """
        Convert to LLM Node
        :param original_app_mode: original app mode
@@ -561,7 +567,7 @@ class WorkflowConverter:

        return template

    def _convert_to_end_node(self):
    def _convert_to_end_node(self) -> _NodeType:
        """
        Convert to End Node
        :return:
@@ -577,7 +583,7 @@ class WorkflowConverter:
            },
        }

    def _convert_to_answer_node(self):
    def _convert_to_answer_node(self) -> _NodeType:
        """
        Convert to Answer Node
        :return:
@@ -598,7 +604,7 @@ class WorkflowConverter:
        """
        return {"id": f"{source}-{target}", "source": source, "target": target}

    def _append_node(self, graph: dict, node: dict):
    def _append_node(self, graph: dict[str, Any], node: _NodeType):
        """
        Append Node to Graph
@@ -0,0 +1,49 @@
"""Primarily used for testing merged cell scenarios"""

from docx import Document

from core.rag.extractor.word_extractor import WordExtractor


def _generate_table_with_merged_cells():
    doc = Document()

    """
    The table looks like this:
    +-----+-----+-----+
    | 1-1 & 1-2 | 1-3 |
    +-----+-----+-----+
    | 2-1 | 2-2 | 2-3 |
    | & |-----+-----+
    | 3-1 | 3-2 | 3-3 |
    +-----+-----+-----+
    """
    table = doc.add_table(rows=3, cols=3)
    table.style = "Table Grid"

    for i in range(3):
        for j in range(3):
            cell = table.cell(i, j)
            cell.text = f"{i + 1}-{j + 1}"

    # Merge cells
    cell_0_0 = table.cell(0, 0)
    cell_0_1 = table.cell(0, 1)
    merged_cell_1 = cell_0_0.merge(cell_0_1)
    merged_cell_1.text = "1-1 & 1-2"

    cell_1_0 = table.cell(1, 0)
    cell_2_0 = table.cell(2, 0)
    merged_cell_2 = cell_1_0.merge(cell_2_0)
    merged_cell_2.text = "2-1 & 3-1"

    ground_truth = [["1-1 & 1-2", "", "1-3"], ["2-1 & 3-1", "2-2", "2-3"], ["2-1 & 3-1", "3-2", "3-3"]]

    return doc.tables[0], ground_truth


def test_parse_row():
    table, gt = _generate_table_with_merged_cells()
    extractor = object.__new__(WordExtractor)
    for idx, row in enumerate(table.rows):
        assert extractor._parse_row(row, {}, 3) == gt[idx]
@@ -179,7 +179,7 @@ class TestTenantIsolatedTaskQueue:
        """Test pushing empty task list."""
        sample_queue.push_tasks([])

        mock_redis.lpush.assert_called_once_with("tenant_self_test-key_task_queue:tenant-123")
        mock_redis.lpush.assert_not_called()

    @patch("core.rag.pipeline.queue.redis_client")
    def test_pull_tasks_default_count(self, mock_redis, sample_queue):
@@ -1,3 +1,5 @@
import pytest

from core.variables.types import ArrayValidation, SegmentType


@@ -83,3 +85,81 @@ class TestSegmentTypeIsValidArrayValidation:
        value = [1, 2, 3]
        # validation is None, skip
        assert SegmentType.ARRAY_STRING.is_valid(value, array_validation=ArrayValidation.NONE)


class TestSegmentTypeGetZeroValue:
    """
    Test class for SegmentType.get_zero_value static method.

    Provides comprehensive coverage of all supported SegmentType values to ensure
    correct zero value generation for each type.
    """

    def test_array_types_return_empty_list(self):
        """Test that all array types return empty list segments."""
        array_types = [
            SegmentType.ARRAY_ANY,
            SegmentType.ARRAY_STRING,
            SegmentType.ARRAY_NUMBER,
            SegmentType.ARRAY_OBJECT,
            SegmentType.ARRAY_BOOLEAN,
        ]

        for seg_type in array_types:
            result = SegmentType.get_zero_value(seg_type)
            assert result.value == []
            assert result.value_type == seg_type

    def test_object_returns_empty_dict(self):
        """Test that OBJECT type returns empty dictionary segment."""
        result = SegmentType.get_zero_value(SegmentType.OBJECT)
        assert result.value == {}
        assert result.value_type == SegmentType.OBJECT

    def test_string_returns_empty_string(self):
        """Test that STRING type returns empty string segment."""
        result = SegmentType.get_zero_value(SegmentType.STRING)
        assert result.value == ""
        assert result.value_type == SegmentType.STRING

    def test_integer_returns_zero(self):
        """Test that INTEGER type returns zero segment."""
        result = SegmentType.get_zero_value(SegmentType.INTEGER)
        assert result.value == 0
        assert result.value_type == SegmentType.INTEGER

    def test_float_returns_zero_point_zero(self):
        """Test that FLOAT type returns 0.0 segment."""
        result = SegmentType.get_zero_value(SegmentType.FLOAT)
        assert result.value == 0.0
        assert result.value_type == SegmentType.FLOAT

    def test_number_returns_zero(self):
        """Test that NUMBER type returns zero segment."""
        result = SegmentType.get_zero_value(SegmentType.NUMBER)
        assert result.value == 0
        # NUMBER type with integer value returns INTEGER segment type
        # (NUMBER is a union type that can be INTEGER or FLOAT)
        assert result.value_type == SegmentType.INTEGER
        # Verify that exposed_type returns NUMBER for frontend compatibility
        assert result.value_type.exposed_type() == SegmentType.NUMBER

    def test_boolean_returns_false(self):
        """Test that BOOLEAN type returns False segment."""
        result = SegmentType.get_zero_value(SegmentType.BOOLEAN)
        assert result.value is False
        assert result.value_type == SegmentType.BOOLEAN

    def test_unsupported_types_raise_value_error(self):
        """Test that unsupported types raise ValueError."""
        unsupported_types = [
            SegmentType.SECRET,
            SegmentType.FILE,
            SegmentType.NONE,
            SegmentType.GROUP,
            SegmentType.ARRAY_FILE,
        ]

        for seg_type in unsupported_types:
            with pytest.raises(ValueError, match="unsupported variable type"):
                SegmentType.get_zero_value(seg_type)
@@ -0,0 +1,46 @@
"""
Utilities for detecting if database service is available for workflow tests.
"""

import psycopg2
import pytest

from configs import dify_config


def is_database_available() -> bool:
    """
    Check if the database service is available by attempting to connect to it.

    Returns:
        True if database is available, False otherwise.
    """
    try:
        # Try to establish a database connection using a context manager
        with psycopg2.connect(
            host=dify_config.DB_HOST,
            port=dify_config.DB_PORT,
            database=dify_config.DB_DATABASE,
            user=dify_config.DB_USERNAME,
            password=dify_config.DB_PASSWORD,
            connect_timeout=2,  # 2 second timeout
        ) as conn:
            pass  # Connection established and will be closed automatically
        return True
    except (psycopg2.OperationalError, psycopg2.Error):
        return False


def skip_if_database_unavailable():
    """
    Pytest skip decorator that skips tests when database service is unavailable.

    Usage:
        @skip_if_database_unavailable()
        def test_my_workflow():
            ...
    """
    return pytest.mark.skipif(
        not is_database_available(),
        reason="Database service is not available (connection refused or authentication failed)",
    )
@@ -6,9 +6,11 @@ This module tests the iteration node's ability to:
2. Preserve nested array structure when flatten_output=False
"""

from .test_database_utils import skip_if_database_unavailable
from .test_table_runner import TableTestRunner, WorkflowTestCase


@skip_if_database_unavailable()
def test_iteration_with_flatten_output_enabled():
    """
    Test iteration node with flatten_output=True (default behavior).
@@ -37,6 +39,7 @@ def test_iteration_with_flatten_output_enabled():
    )


@skip_if_database_unavailable()
def test_iteration_with_flatten_output_disabled():
    """
    Test iteration node with flatten_output=False.
@@ -65,6 +68,7 @@ def test_iteration_with_flatten_output_disabled():
    )


@skip_if_database_unavailable()
def test_iteration_flatten_output_comparison():
    """
    Run both flatten_output configurations in parallel to verify the difference.
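As a quick illustration of what flatten_output toggles in these tests, here is a minimal sketch with made-up per-iteration outputs (the sample data is not taken from the test cases):

```python
# Toy data only: three iterations, each producing a list of items.
from itertools import chain

per_iteration_outputs = [["a", "b"], ["c"], ["d", "e"]]

# flatten_output=True: every iteration's list is merged into one flat list.
flattened = list(chain.from_iterable(per_iteration_outputs))
assert flattened == ["a", "b", "c", "d", "e"]

# flatten_output=False: the nested, one-list-per-iteration structure is kept.
assert per_iteration_outputs == [["a", "b"], ["c"], ["d", "e"]]
```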
@@ -199,6 +199,7 @@ def test__convert_to_knowledge_retrieval_node_for_chatbot():
    node = WorkflowConverter()._convert_to_knowledge_retrieval_node(
        new_app_mode=new_app_mode, dataset_config=dataset_config, model_config=model_config
    )
    assert node is not None

    assert node["data"]["type"] == "knowledge-retrieval"
    assert node["data"]["query_variable_selector"] == ["sys", "query"]
@@ -231,6 +232,7 @@ def test__convert_to_knowledge_retrieval_node_for_workflow_app():
    node = WorkflowConverter()._convert_to_knowledge_retrieval_node(
        new_app_mode=new_app_mode, dataset_config=dataset_config, model_config=model_config
    )
    assert node is not None

    assert node["data"]["type"] == "knowledge-retrieval"
    assert node["data"]["query_variable_selector"] == ["start", dataset_config.retrieve_config.query_variable]
api/uv.lock (generated, 5648 lines): diff suppressed because it is too large.
dify-workflow-engine/README.md (new file, 16 lines)
# Dify Workflow Engine

A standalone SDK for executing Dify workflows.

## Installation

```bash
pip install -r requirements.txt
```

## Usage

```python
from core.workflow.workflow_entry import WorkflowEntry
# ...
```
dify-workflow-engine/configs.py (new file, 7 lines)
class DifyConfig:
    WORKFLOW_CALL_MAX_DEPTH = 5
    DEBUG = True
    WORKFLOW_MAX_EXECUTION_STEPS = 100
    WORKFLOW_MAX_EXECUTION_TIME = 600

dify_config = DifyConfig()
dify-workflow-engine/core/agent/__init__.py (new file, 2 lines)
class Agent:
    pass

dify-workflow-engine/core/agent/entities/__init__.py (new file, 10 lines)
from pydantic import BaseModel

class AgentEntity(BaseModel):
    pass

class AgentNodeData(BaseModel):
    agent_strategy_name: str

class AgentToolEntity(BaseModel):
    pass

dify-workflow-engine/core/agent/plugin_entities.py (new file, 7 lines)
from pydantic import BaseModel

class AgentPluginEntity(BaseModel):
    pass

class AgentStrategyParameter(BaseModel):
    pass

dify-workflow-engine/core/app/__init__.py (new empty file)

dify-workflow-engine/core/app/apps/__init__.py (new empty file)

dify-workflow-engine/core/app/apps/exc.py (new file, 2 lines)
class GenerateTaskStoppedError(Exception):
    pass

dify-workflow-engine/core/app/entities/__init__.py (new empty file)
@@ -0,0 +1,6 @@
from enum import Enum

class InvokeFrom(Enum):
    DEBUGGER = "debugger"
    SERVICE_API = "service_api"
    WEB_APP = "web_app"

dify-workflow-engine/core/callback_handler/__init__.py (new file, 2 lines)
class CallbackHandler:
    pass

@@ -0,0 +1,2 @@
class DifyWorkflowCallbackHandler:
    pass

dify-workflow-engine/core/datasource/__init__.py (new file, 2 lines)
class Connection:
    pass

@@ -0,0 +1,2 @@
class DatasourceManager:
    pass

@@ -0,0 +1,4 @@
from pydantic import BaseModel

class DatasourceEntity(BaseModel):
    pass

@@ -0,0 +1,22 @@
from pydantic import BaseModel

class DatasourceEntity(BaseModel):
    pass

class DatasourceType(BaseModel):
    pass

class DatasourceMessage(BaseModel):
    pass

class DatasourceParameter(BaseModel):
    pass

class DatasourceProviderType(BaseModel):
    pass

class GetOnlineDocumentPageContentRequest(BaseModel):
    pass

class OnlineDriveDownloadFileRequest(BaseModel):
    pass

@@ -0,0 +1,2 @@
class OnlineDocumentDatasourcePlugin:
    pass

@@ -0,0 +1,2 @@
class OnlineDriveDatasourcePlugin:
    pass

@@ -0,0 +1,2 @@
class DatasourceFileMessageTransformer:
    pass
dify-workflow-engine/core/file/__init__.py (new file, 6 lines)
from .models import File, FileAttribute, FileTransferMethod, FileType

class FileManager:
    pass

file_manager = FileManager()

dify-workflow-engine/core/file/enums.py (new file, 13 lines)
from enum import StrEnum

class FileType(StrEnum):
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"
    DOCUMENT = "document"
    CUSTOM = "custom"

class FileTransferMethod(StrEnum):
    REMOTE_URL = "remote_url"
    LOCAL_FILE = "local_file"
    TOOL_FILE = "tool_file"

dify-workflow-engine/core/file/models.py (new file, 14 lines)
from pydantic import BaseModel

class File(BaseModel):
    def to_dict(self):
        return {}

class FileAttribute(BaseModel):
    pass

class FileTransferMethod(BaseModel):
    pass

class FileType(BaseModel):
    pass
dify-workflow-engine/core/helper/__init__.py (new file, 2 lines)
class SSKey:
    pass

@@ -0,0 +1,5 @@
class CodeExecutor:
    pass

class CodeLanguage:
    pass

@@ -0,0 +1,14 @@
class CodeExecutor:
    pass

class CodeLanguage:
    PYTHON3 = "python3"
    JAVASCRIPT = "javascript"
    JSON = "json"
    STRING = "string"
    NUMBER = "number"
    OBJECT = "object"
    ARRAY = "array"

class CodeExecutionError(Exception):
    pass

@@ -0,0 +1,2 @@
class CodeNodeProvider:
    pass

@@ -0,0 +1,2 @@
class JavascriptCodeProvider:
    pass

@@ -0,0 +1,2 @@
class Python3CodeProvider:
    pass

dify-workflow-engine/core/helper/ssrf_proxy.py (new file, 2 lines)
class SSRFProxy:
    pass
dify-workflow-engine/core/memory/__init__.py (new file, 2 lines)
class TokenBufferMemory:
    pass

dify-workflow-engine/core/memory/token_buffer_memory.py (new file, 2 lines)
class TokenBufferMemory:
    pass

dify-workflow-engine/core/model_manager/__init__.py (new file, 5 lines)
class ModelManager:
    pass

class ModelInstance:
    pass

dify-workflow-engine/core/model_runtime/__init__.py (new empty file)

@@ -0,0 +1,2 @@
class ModelPropertyKey:
    pass
@@ -0,0 +1,12 @@
from pydantic import BaseModel

class LLMResult(BaseModel):
    pass

class LLMUsage(BaseModel):
    @classmethod
    def empty_usage(cls):
        return cls()

class LLMUsageMetadata(BaseModel):
    pass

@@ -0,0 +1,7 @@
from pydantic import BaseModel

class ModelType(BaseModel):
    pass

class AIModelEntity(BaseModel):
    pass

@@ -0,0 +1 @@
from .encoders import jsonable_encoder

@@ -0,0 +1,2 @@
def jsonable_encoder(obj):
    return obj
dify-workflow-engine/core/plugin/__init__.py (new file, 2 lines)
class PluginManager:
    pass

dify-workflow-engine/core/plugin/entities/request.py (new file, 2 lines)
class InvokeCredentials:
    pass

dify-workflow-engine/core/plugin/impl/exc.py (new file, 5 lines)
class PluginDaemonClientSideError(Exception):
    pass

class PluginInvokeError(Exception):
    pass

dify-workflow-engine/core/plugin/impl/plugin.py (new file, 2 lines)
class PluginInstaller:
    pass

dify-workflow-engine/core/prompt/__init__.py (new file, 2 lines)
class PromptTemplate:
    pass
dify-workflow-engine/core/prompt/entities/__init__.py (new file, 4 lines)
from pydantic import BaseModel

class PromptEntity(BaseModel):
    pass

@@ -0,0 +1,7 @@
from pydantic import BaseModel

class AdvancedPromptEntity(BaseModel):
    pass

class MemoryConfig(BaseModel):
    pass

dify-workflow-engine/core/provider_manager/__init__.py (new file, 2 lines)
class ProviderManager:
    pass

dify-workflow-engine/core/rag/__init__.py (new file, 1 line)
# Mock core.rag

dify-workflow-engine/core/rag/entities/__init__.py (new file, 4 lines)
from pydantic import BaseModel

class RetrievalResource(BaseModel):
    pass
@@ -0,0 +1,7 @@
from pydantic import BaseModel

class CitationMetadata(BaseModel):
    pass

class RetrievalSourceMetadata(BaseModel):
    pass

@@ -0,0 +1,4 @@
from .entities import index_processor_entities

class IndexProcessorBase:
    pass

@@ -0,0 +1,2 @@
class IndexProcessorBase:
    pass

@@ -0,0 +1,2 @@
class IndexProcessorFactory:
    pass

dify-workflow-engine/core/rag/retrieval/__init__.py (new file, 1 line)
from .retrieval_service import RetrievalService

@@ -0,0 +1,4 @@
from pydantic import BaseModel

class DatasetRetrieval(BaseModel):
    pass

@@ -0,0 +1,7 @@
from pydantic import BaseModel
from typing import ClassVar

class RetrievalMethod(BaseModel):
    SEMANTIC_SEARCH: ClassVar[str] = "SEMANTIC_SEARCH"
    KEYWORD_SEARCH: ClassVar[str] = "KEYWORD_SEARCH"
    HYBRID_SEARCH: ClassVar[str] = "HYBRID_SEARCH"

@@ -0,0 +1,2 @@
class RetrievalService:
    pass
dify-workflow-engine/core/tools/__base/tool.py (new file, 2 lines)
class Tool:
    pass

dify-workflow-engine/core/tools/__init__.py (new file, 2 lines)
class ToolManager:
    pass

dify-workflow-engine/core/tools/entities/__init__.py (new file, 4 lines)
from pydantic import BaseModel

class ToolEntity(BaseModel):
    pass

dify-workflow-engine/core/tools/entities/tool_entities.py (new file, 19 lines)
from pydantic import BaseModel

class ToolEntity(BaseModel):
    pass

class ToolIdentity(BaseModel):
    pass

class ToolInvokeMessage(BaseModel):
    pass

class ToolParameter(BaseModel):
    pass

class ToolProviderType(BaseModel):
    pass

class ToolSelector(BaseModel):
    pass
dify-workflow-engine/core/tools/errors.py (new file, 14 lines)
class ToolProviderCredentialValidationError(Exception):
    pass

class ToolNotFoundError(Exception):
    pass

class ToolParameterValidationError(Exception):
    pass

class ToolInvokeError(Exception):
    pass

class ToolEngineInvokeError(Exception):
    pass

dify-workflow-engine/core/tools/tool_engine.py (new file, 2 lines)
class ToolEngine:
    pass

dify-workflow-engine/core/tools/tool_manager.py (new file, 2 lines)
class ToolManager:
    pass

dify-workflow-engine/core/tools/utils/__init__.py (new file, 2 lines)
class ToolUtils:
    pass

@@ -0,0 +1,5 @@
class ToolMessageTransformer:
    pass

class ToolFileMessageTransformer:
    pass

@@ -0,0 +1,2 @@
class WorkflowAsTool:
    pass
Some files were not shown because too many files have changed in this diff.