Mirror of https://github.com/langgenius/dify.git, synced 2026-01-08 15:24:14 +00:00
Compare commits
2 Commits
feat/suppo... 0.12.0
| Author | SHA1 | Date |
|---|---|---|
| | 625aaceb00 | |
| | 98d85e6b74 | |
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
     CURRENT_VERSION: str = Field(
         description="Dify version",
-        default="0.11.2",
+        default="0.12.0",
     )

     COMMIT_SHA: str = Field(

@@ -3,6 +3,7 @@ import time
 from collections.abc import Mapping, Sequence
 from datetime import UTC, datetime
 from typing import Any, Optional, Union, cast
+from uuid import uuid4

 from sqlalchemy.orm import Session

@@ -80,38 +81,38 @@ class WorkflowCycleManage:
             inputs[f"sys.{key.value}"] = value

         inputs = WorkflowEntry.handle_special_values(inputs)

         triggered_from = (
             WorkflowRunTriggeredFrom.DEBUGGING
             if self._application_generate_entity.invoke_from == InvokeFrom.DEBUGGER
             else WorkflowRunTriggeredFrom.APP_RUN
         )

-        # init workflow run
-        workflow_run = WorkflowRun()
-        workflow_run_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID]
-        if workflow_run_id:
-            workflow_run.id = workflow_run_id
-        workflow_run.tenant_id = self._workflow.tenant_id
-        workflow_run.app_id = self._workflow.app_id
-        workflow_run.sequence_number = new_sequence_number
-        workflow_run.workflow_id = self._workflow.id
-        workflow_run.type = self._workflow.type
-        workflow_run.triggered_from = triggered_from.value
-        workflow_run.version = self._workflow.version
-        workflow_run.graph = self._workflow.graph
-        workflow_run.inputs = json.dumps(inputs)
-        workflow_run.status = WorkflowRunStatus.RUNNING.value
-        workflow_run.created_by_role = (
-            CreatedByRole.ACCOUNT.value if isinstance(self._user, Account) else CreatedByRole.END_USER.value
-        )
-        workflow_run.created_by = self._user.id
-        # handle special values
-        inputs = WorkflowEntry.handle_special_values(inputs)
-
-        db.session.add(workflow_run)
-        db.session.commit()
-        db.session.refresh(workflow_run)
-        db.session.close()
+        # init workflow run
+        with Session(db.engine, expire_on_commit=False) as session:
+            workflow_run = WorkflowRun()
+            system_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID]
+            workflow_run.id = system_id or str(uuid4())
+            workflow_run.tenant_id = self._workflow.tenant_id
+            workflow_run.app_id = self._workflow.app_id
+            workflow_run.sequence_number = new_sequence_number
+            workflow_run.workflow_id = self._workflow.id
+            workflow_run.type = self._workflow.type
+            workflow_run.triggered_from = triggered_from.value
+            workflow_run.version = self._workflow.version
+            workflow_run.graph = self._workflow.graph
+            workflow_run.inputs = json.dumps(inputs)
+            workflow_run.status = WorkflowRunStatus.RUNNING
+            workflow_run.created_by_role = (
+                CreatedByRole.ACCOUNT if isinstance(self._user, Account) else CreatedByRole.END_USER
+            )
+            workflow_run.created_by = self._user.id
+            workflow_run.created_at = datetime.now(UTC).replace(tzinfo=None)
+
+            session.add(workflow_run)
+            session.commit()

         return workflow_run

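The new block creates the run inside a short-lived SQLAlchemy session opened with expire_on_commit=False, which is what lets the old db.session.refresh()/db.session.close() pair disappear: the committed WorkflowRun keeps its loaded attribute values and can be returned after the session exits. A minimal sketch of that pattern, using a stand-in model rather than Dify's actual WorkflowRun:

from uuid import uuid4

from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Run(Base):  # stand-in for WorkflowRun
    __tablename__ = "runs"
    id = Column(String, primary_key=True)
    status = Column(String)


engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

# expire_on_commit=False keeps attribute values populated after commit,
# so the instance stays readable once the session is closed.
with Session(engine, expire_on_commit=False) as session:
    run = Run(id=str(uuid4()), status="running")
    session.add(run)
    session.commit()

print(run.id, run.status)  # no refresh() needed outside the session
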
@@ -720,8 +720,10 @@ class IndexingRunner:
             tokens = 0
             if embedding_model_instance:
-                page_content_list = [document.page_content for document in chunk_documents]
-                tokens += sum(embedding_model_instance.get_text_embedding_num_tokens(page_content_list))
+                tokens += sum(
+                    embedding_model_instance.get_text_embedding_num_tokens([document.page_content])
+                    for document in chunk_documents
+                )

             # load index
             index_processor.load(dataset, chunk_documents, with_keywords=False)

@@ -183,7 +183,7 @@ class ModelInstance:
             input_type=input_type,
         )

-    def get_text_embedding_num_tokens(self, texts: list[str]) -> list[int]:
+    def get_text_embedding_num_tokens(self, texts: list[str]) -> int:
         """
         Get number of tokens for text embedding

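With the return type back to a single int, callers pass one text per call (or sum a series of calls) instead of batching a list and indexing into the result, which is the shape of the IndexingRunner, DatasetDocumentStore, and SegmentService hunks in this compare. A hedged sketch of the calling pattern, with embedding_model standing in for a bound ModelInstance:

def count_embedding_tokens(embedding_model, documents) -> int:
    # Sum token counts one document at a time; 0 when no embedding model is configured.
    if not embedding_model:
        return 0
    return sum(
        embedding_model.get_text_embedding_num_tokens(texts=[doc.page_content])
        for doc in documents
    )
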
@@ -78,13 +78,8 @@ class DatasetDocumentStore:
             model_type=ModelType.TEXT_EMBEDDING,
             model=self._dataset.embedding_model,
         )
-        if embedding_model:
-            page_content_list = [doc.page_content for doc in docs]
-            tokens_list = embedding_model.get_text_embedding_num_tokens(page_content_list)
-        else:
-            tokens_list = [0] * len(docs)

-        for doc, tokens in zip(docs, tokens_list):
+        for doc in docs:
             if not isinstance(doc, Document):
                 raise ValueError("doc must be a Document")

@@ -96,6 +91,12 @@ class DatasetDocumentStore:
                     f"doc_id {doc.metadata['doc_id']} already exists. Set allow_update to True to overwrite."
                 )

+            # calc embedding use tokens
+            if embedding_model:
+                tokens = embedding_model.get_text_embedding_num_tokens(texts=[doc.page_content])
+            else:
+                tokens = 0
+
             if not segment_document:
                 max_position += 1

@@ -65,9 +65,8 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter)
             chunks = [text]

         final_chunks = []
-        chunks_lengths = self._length_function(chunks)
-        for chunk, chunk_length in zip(chunks, chunks_lengths):
-            if chunk_length > self._chunk_size:
+        for chunk in chunks:
+            if self._length_function(chunk) > self._chunk_size:
                 final_chunks.extend(self.recursive_split_text(chunk))
             else:
                 final_chunks.append(chunk)

@@ -94,8 +93,8 @@ class FixedRecursiveCharacterTextSplitter(EnhanceRecursiveCharacterTextSplitter)
         # Now go merging things, recursively splitting longer texts.
         _good_splits = []
         _good_splits_lengths = []  # cache the lengths of the splits
-        s_lens = self._length_function(splits)
-        for s, s_len in zip(splits, s_lens):
+        for s in splits:
+            s_len = self._length_function(s)
             if s_len < self._chunk_size:
                 _good_splits.append(s)
                 _good_splits_lengths.append(s_len)

@@ -45,7 +45,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
         self,
         chunk_size: int = 4000,
         chunk_overlap: int = 200,
-        length_function: Callable[[str], [int]] = len,
+        length_function: Callable[[str], int] = len,
         keep_separator: bool = False,
         add_start_index: bool = False,
     ) -> None:

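The corrected annotation matches how the splitters above now call self._length_function on one string at a time. Any str -> int callable fits; a small illustration (whitespace_token_count is a made-up measure, not Dify's tokenizer):

from typing import Callable

def whitespace_token_count(text: str) -> int:
    # Illustrative str -> int measure; a real counter would ask the embedding model's tokenizer.
    return len(text.split())

length_function: Callable[[str], int] = whitespace_token_count
assert length_function("two words") == 2
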
@@ -224,8 +224,8 @@ class CharacterTextSplitter(TextSplitter):
         splits = _split_text_with_regex(text, self._separator, self._keep_separator)
         _separator = "" if self._keep_separator else self._separator
         _good_splits_lengths = []  # cache the lengths of the splits
-        if splits:
-            _good_splits_lengths.extend(self._length_function(splits))
+        for split in splits:
+            _good_splits_lengths.append(self._length_function(split))
         return self._merge_splits(splits, _separator, _good_splits_lengths)

@@ -478,8 +478,9 @@ class RecursiveCharacterTextSplitter(TextSplitter):
         _good_splits = []
         _good_splits_lengths = []  # cache the lengths of the splits
         _separator = "" if self._keep_separator else separator
-        s_lens = self._length_function(splits)
-        for s, s_len in zip(splits, s_lens):
+
+        for s in splits:
+            s_len = self._length_function(s)
             if s_len < self._chunk_size:
                 _good_splits.append(s)
                 _good_splits_lengths.append(s_len)

@@ -1,7 +1,7 @@
 import json
 from collections.abc import Mapping, Sequence
 from datetime import UTC, datetime
-from enum import Enum
+from enum import Enum, StrEnum
 from typing import Any, Optional, Union

 import sqlalchemy as sa

@@ -314,7 +314,7 @@ class Workflow(db.Model):
     )


-class WorkflowRunStatus(Enum):
+class WorkflowRunStatus(StrEnum):
     """
     Workflow Run Status Enum
     """

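Switching the base class to StrEnum (Python 3.11+) makes each member a real str, which is why the WorkflowCycleManage hunk above can assign WorkflowRunStatus.RUNNING to a string column without the .value suffix. A quick check of the stdlib behaviour:

from enum import Enum, StrEnum

class OldStatus(Enum):
    RUNNING = "running"

class NewStatus(StrEnum):
    RUNNING = "running"

assert not isinstance(OldStatus.RUNNING, str)  # plain Enum members need .value
assert isinstance(NewStatus.RUNNING, str)      # StrEnum members are strings already
assert NewStatus.RUNNING == "running"
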
@@ -393,13 +393,13 @@ class WorkflowRun(db.Model):
     version = db.Column(db.String(255), nullable=False)
     graph = db.Column(db.Text)
     inputs = db.Column(db.Text)
-    status = db.Column(db.String(255), nullable=False)
-    outputs: Mapped[str] = db.Column(db.Text)
+    status = db.Column(db.String(255), nullable=False)  # running, succeeded, failed, stopped
+    outputs: Mapped[str] = mapped_column(sa.Text, default="{}")
     error = db.Column(db.Text)
     elapsed_time = db.Column(db.Float, nullable=False, server_default=db.text("0"))
     total_tokens = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
     total_steps = db.Column(db.Integer, server_default=db.text("0"))
-    created_by_role = db.Column(db.String(255), nullable=False)
+    created_by_role = db.Column(db.String(255), nullable=False)  # account, end_user
     created_by = db.Column(StringUUID, nullable=False)
     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
     finished_at = db.Column(db.DateTime)

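The outputs column also moves to SQLAlchemy 2.0-style mapped_column with a Python-side default, so a freshly inserted run gets "{}" instead of NULL when no outputs are supplied. A self-contained sketch of that default behaviour (a throwaway model, not Dify's):

import sqlalchemy as sa
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Run(Base):
    __tablename__ = "runs"
    id: Mapped[int] = mapped_column(primary_key=True)
    # default= is applied client-side at INSERT time when no value is given
    outputs: Mapped[str] = mapped_column(sa.Text, default="{}")


engine = sa.create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Run())
    session.commit()
    assert session.scalar(sa.select(Run.outputs)) == "{}"
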
@@ -1390,7 +1390,7 @@ class SegmentService:
                 model=dataset.embedding_model,
             )
             # calc embedding use tokens
-            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]
+            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
         lock_name = "add_segment_lock_document_id_{}".format(document.id)
         with redis_client.lock(lock_name, timeout=600):
             max_position = (

@@ -1467,11 +1467,9 @@ class SegmentService:
             if dataset.indexing_technique == "high_quality" and embedding_model:
                 # calc embedding use tokens
                 if document.doc_form == "qa_model":
-                    tokens = embedding_model.get_text_embedding_num_tokens(
-                        texts=[content + segment_item["answer"]]
-                    )[0]
+                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment_item["answer"]])
                 else:
-                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]
+                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
             segment_document = DocumentSegment(
                 tenant_id=current_user.current_tenant_id,
                 dataset_id=document.dataset_id,

@@ -1579,9 +1577,9 @@ class SegmentService:

                 # calc embedding use tokens
                 if document.doc_form == "qa_model":
-                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer])[0]
+                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer])
                 else:
-                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])[0]
+                    tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
             segment.content = content
             segment.index_node_hash = segment_hash
             segment.word_count = len(content)

@@ -58,16 +58,12 @@ def batch_create_segment_to_index_task(
             model=dataset.embedding_model,
         )
         word_count_change = 0
-        if embedding_model:
-            tokens_list = embedding_model.get_text_embedding_num_tokens(
-                texts=[segment["content"] for segment in content]
-            )
-        else:
-            tokens_list = [0] * len(content)
-        for segment, tokens in zip(content, tokens_list):
+        for segment in content:
             content = segment["content"]
             doc_id = str(uuid.uuid4())
             segment_hash = helper.generate_text_hash(content)
+            # calc embedding use tokens
+            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) if embedding_model else 0
             max_position = (
                 db.session.query(func.max(DocumentSegment.position))
                 .filter(DocumentSegment.document_id == dataset_document.id)

@@ -2,7 +2,7 @@ version: '3'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.11.2
+    image: langgenius/dify-api:0.12.0
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.

@@ -227,7 +227,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.11.2
+    image: langgenius/dify-api:0.12.0
     restart: always
     environment:
       CONSOLE_WEB_URL: ''

@@ -397,7 +397,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.11.2
+    image: langgenius/dify-web:0.12.0
     restart: always
     environment:
       # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is

@@ -291,7 +291,7 @@ x-shared-env: &shared-api-worker-env
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.11.2
+    image: langgenius/dify-api:0.12.0
     restart: always
     environment:
       # Use the shared environment variables.

@@ -311,7 +311,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.11.2
+    image: langgenius/dify-api:0.12.0
     restart: always
     environment:
       # Use the shared environment variables.

@@ -330,7 +330,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.11.2
+    image: langgenius/dify-web:0.12.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.11.2",
+  "version": "0.12.0",
   "private": true,
   "engines": {
     "node": ">=18.17.0"