Mirror of https://github.com/langgenius/dify.git (synced 2026-01-05 14:05:59 +00:00)

Compare commits: 0.13.0 ... feat/suppo (21 commits)
| SHA1 |
|---|
| ac0d99281e |
| bbdadec1bc |
| fa9709faa8 |
| eca466bdaa |
| d56abec195 |
| 961e25f608 |
| 138bf698b0 |
| e5bb4cca12 |
| 5e2cb0e3a8 |
| 16a65cb367 |
| 1bae9b8ff7 |
| d7c1f43b49 |
| f933af9f57 |
| 91e1ff5e30 |
| 5908e10549 |
| 464e6354c5 |
| d470e55f8c |
| 98a1b01b0c |
| e240424be5 |
| 1cb5a12abb |
| ff2a4a6fcd |
@@ -62,6 +62,7 @@ from .datasets import (
     external,
     hit_testing,
     website,
+    fta_test,
 )
 
 # Import explore controllers

api/controllers/console/datasets/fta_test.py (new file, 145 lines)
@@ -0,0 +1,145 @@
import json

import requests
from flask import Response
from flask_restful import Resource, reqparse
from sqlalchemy import text

from controllers.console import api
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.fta import ComponentFailure, ComponentFailureStats


class FATTestApi(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("log_process_data", nullable=False, required=True, type=str, location="args")
        args = parser.parse_args()
        print(args["log_process_data"])
        # Extract the JSON string from the text field
        json_str = args["log_process_data"].strip("```json\\n").strip("```").strip().replace("\\n", "")
        log_data = json.loads(json_str)
        db.session.query(ComponentFailure).delete()
        for data in log_data:
            if not isinstance(data, dict):
                raise TypeError("Data must be a dictionary.")

            required_keys = {"Date", "Component", "FailureMode", "Cause", "RepairAction", "Technician"}
            if not required_keys.issubset(data.keys()):
                raise ValueError(f"Data dictionary must contain the following keys: {required_keys}")

            try:
                # Insert the failure record
                component_failure = ComponentFailure(
                    Date=data["Date"],
                    Component=data["Component"],
                    FailureMode=data["FailureMode"],
                    Cause=data["Cause"],
                    RepairAction=data["RepairAction"],
                    Technician=data["Technician"],
                )
                db.session.add(component_failure)
                db.session.commit()
            except Exception as e:
                print(e)
        # Clear existing stats
        db.session.query(ComponentFailureStats).delete()

        # Insert calculated statistics
        try:
            db.session.execute(
                text("""
                INSERT INTO component_failure_stats ("Component", "FailureMode", "Cause", "PossibleAction", "Probability", "MTBF")
                SELECT
                    cf."Component",
                    cf."FailureMode",
                    cf."Cause",
                    cf."RepairAction" AS "PossibleAction",
                    COUNT(*) * 1.0 / (SELECT COUNT(*) FROM component_failure WHERE "Component" = cf."Component") AS "Probability",
                    COALESCE(AVG(EXTRACT(EPOCH FROM (next_failure_date::timestamp - cf."Date"::timestamp)) / 86400.0), 0) AS "MTBF"
                FROM (
                    SELECT
                        "Component",
                        "FailureMode",
                        "Cause",
                        "RepairAction",
                        "Date",
                        LEAD("Date") OVER (PARTITION BY "Component", "FailureMode", "Cause" ORDER BY "Date") AS next_failure_date
                    FROM
                        component_failure
                ) cf
                GROUP BY
                    cf."Component", cf."FailureMode", cf."Cause", cf."RepairAction";
                """)
            )
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            print(f"Error during stats calculation: {e}")
        # output format
        # [
        #     (17, 'Hydraulic system', 'Leak', 'Hose rupture', 'Replaced hydraulic hose', 0.3333333333333333, None),
        #     (18, 'Hydraulic system', 'Leak', 'Seal Wear', 'Replaced the faulty seal', 0.3333333333333333, None),
        #     (19, 'Hydraulic system', 'Pressure drop', 'Fluid leak', 'Replaced hydraulic fluid and seals', 0.3333333333333333, None)
        # ]

        component_failure_stats = db.session.query(ComponentFailureStats).all()
        # Convert stats to list of tuples format
        stats_list = []
        for stat in component_failure_stats:
            stats_list.append(
                (
                    stat.StatID,
                    stat.Component,
                    stat.FailureMode,
                    stat.Cause,
                    stat.PossibleAction,
                    stat.Probability,
                    stat.MTBF,
                )
            )
        return {"data": stats_list}, 200


# generate-fault-tree
class GenerateFaultTreeApi(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("llm_text", nullable=False, required=True, type=str, location="args")
        args = parser.parse_args()
        entities = args["llm_text"].replace("```", "").replace("\\n", "\n")
        print(entities)
        request_data = {"fault_tree_text": entities}
        url = "https://fta.cognitech-dev.live/generate-fault-tree"
        headers = {"accept": "application/json", "Content-Type": "application/json"}

        response = requests.post(url, json=request_data, headers=headers)
        print(response.json())
        return {"data": response.json()}, 200


class ExtractSVGApi(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("svg_text", nullable=False, required=True, type=str, location="args")
        args = parser.parse_args()
        # svg_text = ''.join(args["svg_text"].splitlines())
        svg_text = args["svg_text"].replace("\n", "")
        svg_text = svg_text.replace("&quot;", '"')
        print(svg_text)
        svg_text_json = json.loads(svg_text)
        svg_content = svg_text_json.get("data").get("svg_content")[0]
        svg_content = svg_content.replace("\n", "").replace("&quot;", '"')
        file_key = "fta_svg/" + "fat.svg"
        if storage.exists(file_key):
            storage.delete(file_key)
        storage.save(file_key, svg_content.encode("utf-8"))
        generator = storage.load(file_key, stream=True)

        return Response(generator, mimetype="image/svg+xml")


api.add_resource(FATTestApi, "/fta/db-handler")
api.add_resource(GenerateFaultTreeApi, "/fta/generate-fault-tree")
api.add_resource(ExtractSVGApi, "/fta/extract-svg")
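To make the statistics SQL above concrete, here is a small pure-Python sketch of the same computation (invented sample records, `RepairAction` grouping omitted, dates reduced to day numbers): for each (Component, FailureMode, Cause) group, `Probability` is the group's share of all failures recorded for that component, and `MTBF` averages the gaps between consecutive failures in the group, falling back to 0 when a group has no follow-up failure, mirroring the query's `COALESCE(AVG(...), 0)`.

```python
from collections import defaultdict

# Invented sample: (day, component, failure_mode, cause)
rows = [
    (1, "Hydraulic system", "Leak", "Hose rupture"),
    (5, "Hydraulic system", "Leak", "Hose rupture"),
    (9, "Hydraulic system", "Pressure drop", "Fluid leak"),
]

by_group = defaultdict(list)      # (component, mode, cause) -> failure days
per_component = defaultdict(int)  # component -> total failure count
for day, comp, mode, cause in rows:
    by_group[(comp, mode, cause)].append(day)
    per_component[comp] += 1

for (comp, mode, cause), days in by_group.items():
    days.sort()
    gaps = [b - a for a, b in zip(days, days[1:])]  # the LEAD(...) OVER (...) step
    mtbf = sum(gaps) / len(gaps) if gaps else 0.0   # COALESCE(AVG(...), 0)
    probability = len(days) / per_component[comp]
    print(comp, mode, cause, round(probability, 3), mtbf)
# Hydraulic system Leak Hose rupture 0.667 4.0
# Hydraulic system Pressure drop Fluid leak 0.333 0.0
```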
@@ -1,4 +1,6 @@
 import base64
+import tempfile
+from pathlib import Path
 
 from configs import dify_config
 from core.file import file_repository

@@ -18,6 +20,38 @@ from .models import File, FileTransferMethod, FileType
 from .tool_file_parser import ToolFileParser
 
 
+def download_to_target_path(f: File, temp_dir: str, /):
+    if f.transfer_method == FileTransferMethod.TOOL_FILE:
+        tool_file = file_repository.get_tool_file(session=db.session(), file=f)
+        suffix = Path(tool_file.file_key).suffix
+        target_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
+        _download_file_to_target_path(tool_file.file_key, target_path)
+        return target_path
+    elif f.transfer_method == FileTransferMethod.LOCAL_FILE:
+        upload_file = file_repository.get_upload_file(session=db.session(), file=f)
+        suffix = Path(upload_file.key).suffix
+        target_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
+        _download_file_to_target_path(upload_file.key, target_path)
+        return target_path
+    else:
+        raise ValueError(f"Unsupported transfer method: {f.transfer_method}")
+
+
+def _download_file_to_target_path(path: str, target_path: str, /):
+    """
+    Download a file from storage to a local target path.
+
+    Args:
+        path (str): The path to the file in storage.
+        target_path (str): The local path to download the file to.
+    """
+    storage.download(path, target_path)
+
+
 def get_attr(*, file: File, attr: FileAttribute):
     match attr:
         case FileAttribute.TYPE:
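A minimal sketch of how a caller might use the new helper (the call site and the `read_file_bytes` name are hypothetical; `file` is a `core.file.File` instance obtained elsewhere):

```python
import tempfile

from core.file.file_manager import download_to_target_path

def read_file_bytes(file):  # hypothetical helper; `file` is a core.file.File
    with tempfile.TemporaryDirectory() as temp_dir:
        # download_to_target_path picks a random name inside temp_dir and keeps
        # the original extension, so downstream extractors can sniff the type.
        local_path = download_to_target_path(file, temp_dir)
        with open(local_path, "rb") as f:
            return f.read()
```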
@@ -0,0 +1,52 @@
model: amazon.nova-lite-v1:0
label:
  en_US: Nova Lite V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00006'
  output: '0.00024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: amazon.nova-micro-v1:0
label:
  en_US: Nova Micro V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.000035'
  output: '0.00014'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: amazon.nova-pro-v1:0
label:
  en_US: Nova Pro V1
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.0008'
  output: '0.0032'
  unit: '0.001'
  currency: USD
@@ -70,6 +70,8 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         {"prefix": "cohere.command-r", "support_system_prompts": True, "support_tool_use": True},
         {"prefix": "amazon.titan", "support_system_prompts": False, "support_tool_use": False},
         {"prefix": "ai21.jamba-1-5", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "amazon.nova", "support_system_prompts": True, "support_tool_use": False},
+        {"prefix": "us.amazon.nova", "support_system_prompts": True, "support_tool_use": False},
     ]
 
     @staticmethod

@@ -194,6 +196,13 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
         if model_info["support_tool_use"] and tools:
             parameters["toolConfig"] = self._convert_converse_tool_config(tools=tools)
         try:
+            # for issue #10976
+            conversations_list = parameters["messages"]
+            # if two consecutive messages have the same role, combine them into one message
+            for i in range(len(conversations_list) - 2, -1, -1):
+                if conversations_list[i]["role"] == conversations_list[i + 1]["role"]:
+                    conversations_list[i]["content"].extend(conversations_list.pop(i + 1)["content"])
+
             if stream:
                 response = bedrock_client.converse_stream(**parameters)
                 return self._handle_converse_stream_response(
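As a standalone illustration of that merge loop (the sample messages are invented), walking the list backwards lets `pop(i + 1)` remove entries without disturbing the indices still to be visited:

```python
messages = [
    {"role": "user", "content": [{"text": "hello"}]},
    {"role": "user", "content": [{"text": "are you there?"}]},  # same role twice in a row
    {"role": "assistant", "content": [{"text": "yes"}]},
]

for i in range(len(messages) - 2, -1, -1):
    if messages[i]["role"] == messages[i + 1]["role"]:
        messages[i]["content"].extend(messages.pop(i + 1)["content"])

print(messages)
# [{'role': 'user', 'content': [{'text': 'hello'}, {'text': 'are you there?'}]},
#  {'role': 'assistant', 'content': [{'text': 'yes'}]}]
```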
@@ -0,0 +1,52 @@
model: us.amazon.nova-lite-v1:0
label:
  en_US: Nova Lite V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.00006'
  output: '0.00024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: us.amazon.nova-micro-v1:0
label:
  en_US: Nova Micro V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.000035'
  output: '0.00014'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,52 @@
model: us.amazon.nova-pro-v1:0
label:
  en_US: Nova Pro V1 (US.Cross Region Inference)
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 300000
parameter_rules:
  - name: max_new_tokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 5000
  - name: temperature
    use_template: temperature
    required: false
    type: float
    default: 1
    min: 0.0
    max: 1.0
    help:
      zh_Hans: 生成内容的随机性。
      en_US: The amount of randomness injected into the response.
  - name: top_p
    required: false
    type: float
    default: 0.999
    min: 0.000
    max: 1.000
    help:
      zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
      en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
  - name: top_k
    required: false
    type: int
    default: 0
    min: 0
    # tip: the AWS docs contain an error; the max value is 500
    max: 500
    help:
      zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
      en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
  input: '0.0008'
  output: '0.0032'
  unit: '0.001'
  currency: USD
@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 8192
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,7 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
-  context_size: 10240
+  context_size: 1048576
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -8,6 +8,7 @@ features:
   - stream-tool-call
 model_properties:
   mode: chat
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature

@@ -4,6 +4,7 @@ label:
 model_type: llm
 model_properties:
   mode: chat
+  context_size: 2048
 features:
   - vision
 parameter_rules:

@@ -4,6 +4,7 @@ label:
 model_type: llm
 model_properties:
   mode: chat
+  context_size: 8192
 features:
   - vision
   - video
@@ -22,18 +22,6 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
 from core.model_runtime.model_providers.zhipuai._common import _CommonZhipuaiAI
 from core.model_runtime.utils import helper
 
-GLM_JSON_MODE_PROMPT = """You should always follow the instructions and output a valid JSON object.
-The structure of the JSON object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
-if you are not sure about the structure.
-
-And you should always end the block with a "```" to indicate the end of the JSON object.
-
-<instructions>
-{{instructions}}
-</instructions>
-
-```JSON"""  # noqa: E501
-
 
 class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
     def _invoke(

@@ -64,42 +52,8 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         credentials_kwargs = self._to_credential_kwargs(credentials)
 
         # invoke model
-        # stop = stop or []
-        # self._transform_json_prompts(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
         return self._generate(model, credentials_kwargs, prompt_messages, model_parameters, tools, stop, stream, user)
 
-    # def _transform_json_prompts(self, model: str, credentials: dict,
-    #                             prompt_messages: list[PromptMessage], model_parameters: dict,
-    #                             tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None,
-    #                             stream: bool = True, user: str | None = None) \
-    #         -> None:
-    #     """
-    #     Transform json prompts to model prompts
-    #     """
-    #     if "}\n\n" not in stop:
-    #         stop.append("}\n\n")
-
-    #     # check if there is a system message
-    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage):
-    #         # override the system message
-    #         prompt_messages[0] = SystemPromptMessage(
-    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content)
-    #         )
-    #     else:
-    #         # insert the system message
-    #         prompt_messages.insert(0, SystemPromptMessage(
-    #             content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", "Please output a valid JSON object.")
-    #         ))
-    #     # check if the last message is a user message
-    #     if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage):
-    #         # add ```JSON\n to the last message
-    #         prompt_messages[-1].content += "\n```JSON\n"
-    #     else:
-    #         # append a user message
-    #         prompt_messages.append(UserPromptMessage(
-    #             content="```JSON\n"
-    #         ))
-
     def get_num_tokens(
         self,
         model: str,

@@ -170,7 +124,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         :return: full response or stream response chunk generator result
         """
         extra_model_kwargs = {}
-        # request to glm-4v-plus with stop words will always response "finish_reason":"network_error"
+        # request to glm-4v-plus with stop words will always respond "finish_reason":"network_error"
         if stop and model != "glm-4v-plus":
             extra_model_kwargs["stop"] = stop

@@ -186,7 +140,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         # resolve zhipuai model not support system message and user message, assistant message must be in sequence
         new_prompt_messages: list[PromptMessage] = []
         for prompt_message in prompt_messages:
-            copy_prompt_message = prompt_message.copy()
+            copy_prompt_message = prompt_message.model_copy()
             if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}:
                 if isinstance(copy_prompt_message.content, list):
                     # check if model is 'glm-4v'
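The `.copy()` to `.model_copy()` change here (and the `tool.json()` to `tool.model_dump_json()` change further down) tracks the Pydantic v1 to v2 API renames; a minimal sketch of the correspondence:

```python
from pydantic import BaseModel

class Tool(BaseModel):
    name: str

t = Tool(name="search")

# Pydantic v1 spellings (deprecated in v2): t.copy(), t.json()
# Pydantic v2 spellings used by the new code:
clone = t.model_copy()           # copy of the model instance
payload = t.model_dump_json()    # JSON string, e.g. '{"name":"search"}'
```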
@@ -238,59 +192,38 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
             params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters)
         else:
             params = {"model": model, "messages": [], **model_parameters}
-            # glm model
-            if not model.startswith("chatglm"):
-                for prompt_message in new_prompt_messages:
-                    if prompt_message.role == PromptMessageRole.TOOL:
-                        params["messages"].append(
-                            {
-                                "role": "tool",
-                                "content": prompt_message.content,
-                                "tool_call_id": prompt_message.tool_call_id,
-                            }
-                        )
-                    elif isinstance(prompt_message, AssistantPromptMessage):
-                        if prompt_message.tool_calls:
-                            params["messages"].append(
-                                {
-                                    "role": "assistant",
-                                    "content": prompt_message.content,
-                                    "tool_calls": [
-                                        {
-                                            "id": tool_call.id,
-                                            "type": tool_call.type,
-                                            "function": {
-                                                "name": tool_call.function.name,
-                                                "arguments": tool_call.function.arguments,
-                                            },
-                                        }
-                                        for tool_call in prompt_message.tool_calls
-                                    ],
-                                }
-                            )
-                        else:
-                            params["messages"].append({"role": "assistant", "content": prompt_message.content})
-                    else:
-                        params["messages"].append(
-                            {"role": prompt_message.role.value, "content": prompt_message.content}
-                        )
-            else:
-                # chatglm model
-                for prompt_message in new_prompt_messages:
-                    # merge system message to user message
-                    if prompt_message.role in {
-                        PromptMessageRole.SYSTEM,
-                        PromptMessageRole.TOOL,
-                        PromptMessageRole.USER,
-                    }:
-                        if len(params["messages"]) > 0 and params["messages"][-1]["role"] == "user":
-                            params["messages"][-1]["content"] += "\n\n" + prompt_message.content
-                        else:
-                            params["messages"].append({"role": "user", "content": prompt_message.content})
-                    else:
-                        params["messages"].append({"role": "assistant", "content": prompt_message.content})
+            for prompt_message in new_prompt_messages:
+                if prompt_message.role == PromptMessageRole.TOOL:
+                    params["messages"].append(
+                        {
+                            "role": "tool",
+                            "content": prompt_message.content,
+                            "tool_call_id": prompt_message.tool_call_id,
+                        }
+                    )
+                elif isinstance(prompt_message, AssistantPromptMessage):
+                    if prompt_message.tool_calls:
+                        params["messages"].append(
+                            {
+                                "role": "assistant",
+                                "content": prompt_message.content,
+                                "tool_calls": [
+                                    {
+                                        "id": tool_call.id,
+                                        "type": tool_call.type,
+                                        "function": {
+                                            "name": tool_call.function.name,
+                                            "arguments": tool_call.function.arguments,
+                                        },
+                                    }
+                                    for tool_call in prompt_message.tool_calls
+                                ],
+                            }
+                        )
+                    else:
+                        params["messages"].append({"role": "assistant", "content": prompt_message.content})
+                else:
+                    params["messages"].append({"role": prompt_message.role.value, "content": prompt_message.content})
 
         if tools and len(tools) > 0:
             params["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools]

@@ -406,7 +339,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         Handle llm stream response
 
         :param model: model name
-        :param response: response
+        :param responses: response
         :param prompt_messages: prompt messages
         :return: llm response chunk generator result
         """

@@ -505,7 +438,7 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
         if tools and len(tools) > 0:
             text += "\n\nTools:"
             for tool in tools:
-                text += f"\n{tool.json()}"
+                text += f"\n{tool.model_dump_json()}"
 
         # trim off the trailing ' ' that might come from the "Assistant: "
         return text.rstrip()
@@ -5,7 +5,7 @@ BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找
 CHAT_APP_COMPLETION_PROMPT_CONFIG = {
     "completion_prompt_config": {
         "prompt": {
-            "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
+            "text": "{{#pre_prompt#}}\nHere are the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: "  # noqa: E501
         },
         "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
     },
@@ -375,7 +375,6 @@ class TidbOnQdrantVector(BaseVector):
         for result in results:
             if result:
                 document = self._document_from_scored_point(result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value)
-                document.metadata["vector"] = result.vector
                 documents.append(document)
 
         return documents

@@ -394,6 +393,7 @@ class TidbOnQdrantVector(BaseVector):
     ) -> Document:
         return Document(
             page_content=scored_point.payload.get(content_payload_key),
+            vector=scored_point.vector,
             metadata=scored_point.payload.get(metadata_payload_key) or {},
         )
api/core/tools/provider/builtin/file_extractor/_assets/icon.png (new binary file, not shown; 4.3 KiB)
@@ -0,0 +1,8 @@
from typing import Any

from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


class FileExtractorProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict[str, Any]) -> None:
        pass
@@ -0,0 +1,15 @@
identity:
  author: Jyong
  name: file_extractor
  label:
    en_US: File Extractor
    zh_Hans: 文件提取
    pt_BR: File Extractor
  description:
    en_US: Extract text from file
    zh_Hans: 从文件中提取文本
    pt_BR: Extract text from file
  icon: icon.png
  tags:
    - utilities
    - productivity
@@ -0,0 +1,45 @@
import tempfile
from typing import Any, Union

from core.file.enums import FileType
from core.file.file_manager import download_to_target_path
from core.rag.extractor.text_extractor import TextExtractor
from core.rag.splitter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolParameterValidationError
from core.tools.tool.builtin_tool import BuiltinTool


class FileExtractorTool(BuiltinTool):
    def _invoke(
        self,
        user_id: str,
        tool_parameters: dict[str, Any],
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        invoke tools
        """
        # document file for workflow mode
        file = tool_parameters.get("text_file")
        if file and file.type != FileType.DOCUMENT:
            raise ToolParameterValidationError("Not a valid document")

        if file:
            with tempfile.TemporaryDirectory() as temp_dir:
                file_path = download_to_target_path(file, temp_dir)
                extractor = TextExtractor(file_path, autodetect_encoding=True)
                documents = extractor.extract()
                character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
                    chunk_size=tool_parameters.get("max_token", 500),
                    chunk_overlap=0,
                    fixed_separator=tool_parameters.get("separator", "\n\n"),
                    separators=["\n\n", "。", ". ", " ", ""],
                    embedding_model_instance=None,
                )
                chunks = character_splitter.split_documents(documents)

                content = "\n".join([chunk.page_content for chunk in chunks])
                return self.create_text_message(content)

        else:
            raise ToolParameterValidationError("Please provide a file")
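To make the splitting step concrete, here is a rough standalone sketch of fixed-separator chunking with a size cap. This is an illustration only, not Dify's `FixedRecursiveCharacterTextSplitter` (which also falls back to the `separators` list and measures length in tokens rather than characters):

```python
def split_fixed(text: str, separator: str = "\n\n", chunk_size: int = 500) -> list[str]:
    """Split on the fixed separator, then greedily pack pieces into
    chunks no longer than chunk_size characters."""
    pieces = [p for p in text.split(separator) if p]
    chunks: list[str] = []
    current = ""
    for piece in pieces:
        candidate = f"{current}{separator}{piece}" if current else piece
        if len(candidate) <= chunk_size:
            current = candidate
        else:
            if current:
                chunks.append(current)
            current = piece
    if current:
        chunks.append(current)
    return chunks

# split_fixed("a\n\nb\n\nc", chunk_size=3) -> ["a", "b", "c"]
```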
@@ -0,0 +1,49 @@
identity:
  name: text extractor
  author: Jyong
  label:
    en_US: Text extractor
    zh_Hans: Text 文本解析
  description:
    en_US: Extract content from text file and support split to chunks by split characters and token length
    zh_Hans: 支持从文本文件中提取内容并支持通过分割字符和令牌长度分割成块
    pt_BR: Extract content from text file and support split to chunks by split characters and token length
description:
  human:
    en_US: Text extractor is a text extract tool
    zh_Hans: Text extractor 是一个文本提取工具
    pt_BR: Text extractor is a text extract tool
  llm: Text extractor is a tool used to extract text file
parameters:
  - name: text_file
    type: file
    label:
      en_US: Text file
    human_description:
      en_US: The text file to be extracted.
      zh_Hans: 要提取的 text 文档。
    llm_description: you should not input this parameter. just input the image_id.
    form: llm
  - name: separator
    type: string
    required: false
    label:
      en_US: split character
      zh_Hans: 分隔符号
    human_description:
      en_US: Text content split character
      zh_Hans: 用于文档分隔的符号
    llm_description: it is used for split content to chunks
    form: form
  - name: max_token
    type: number
    required: false
    label:
      en_US: Maximum chunk length
      zh_Hans: 最大分段长度
    human_description:
      en_US: Maximum chunk length
      zh_Hans: 最大分段长度
    llm_description: it is used for limit chunk's max length
    form: form
@@ -6,9 +6,9 @@ identity:
     zh_Hans: GitLab 合并请求查询
 description:
   human:
-    en_US: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+    en_US: A tool for query GitLab merge requests, Input should be a exists repository or branch.
     zh_Hans: 一个用于查询 GitLab 代码合并请求的工具,输入的内容应该是一个已存在的仓库名或者分支。
-  llm: A tool for query GitLab merge requests, Input should be a exists reposity or branch.
+  llm: A tool for query GitLab merge requests, Input should be a exists repository or branch.
 parameters:
   - name: repository
     type: string
@@ -1,11 +1,9 @@
 import logging
 from collections.abc import Mapping, Sequence
-from mimetypes import guess_extension
-from os import path
 from typing import Any
 
 from configs import dify_config
-from core.file import File, FileTransferMethod, FileType
+from core.file import File, FileTransferMethod
 from core.tools.tool_file_manager import ToolFileManager
 from core.workflow.entities.node_entities import NodeRunResult
 from core.workflow.entities.variable_entities import VariableSelector

@@ -150,11 +148,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
         content = response.content
 
         if is_file and content_type:
-            # extract filename from url
-            filename = path.basename(url)
-            # extract extension if possible
-            extension = guess_extension(content_type) or ".bin"
-
             tool_file = ToolFileManager.create_file_by_raw(
                 user_id=self.user_id,
                 tenant_id=self.tenant_id,

@@ -165,7 +158,6 @@ class HttpRequestNode(BaseNode[HttpRequestNodeData]):
 
             mapping = {
                 "tool_file_id": tool_file.id,
-                "type": FileType.IMAGE.value,
                 "transfer_method": FileTransferMethod.TOOL_FILE.value,
             }
             file = file_factory.build_from_mapping(
@@ -116,7 +116,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         variable_pool.add([self.node_id, "item"], iterator_list_value[0])
 
         # init graph engine
-        from core.workflow.graph_engine.graph_engine import GraphEngine
+        from core.workflow.graph_engine.graph_engine import GraphEngine, GraphEngineThreadPool
 
         graph_engine = GraphEngine(
             tenant_id=self.tenant_id,

@@ -162,8 +162,7 @@ class IterationNode(BaseNode[IterationNodeData]):
         if self.node_data.is_parallel:
             futures: list[Future] = []
             q = Queue()
-            thread_pool = graph_engine.workflow_thread_pool_mapping[graph_engine.thread_pool_id]
-            thread_pool._max_workers = self.node_data.parallel_nums
+            thread_pool = GraphEngineThreadPool(max_workers=self.node_data.parallel_nums, max_submit_count=100)
             for index, item in enumerate(iterator_list_value):
                 future: Future = thread_pool.submit(
                     self._run_single_iter_parallel,
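The change above replaces mutating the shared engine pool's private `_max_workers` with a dedicated, bounded pool per iteration run (`max_submit_count` is specific to Dify's `GraphEngineThreadPool`). A rough stand-in for the same idea using only the standard library:

```python
from concurrent.futures import ThreadPoolExecutor

def run_parallel(items, worker, parallel_nums=10):
    # One dedicated pool per run: nothing shared to mutate, and the
    # `with` block joins all workers before returning.
    with ThreadPoolExecutor(max_workers=parallel_nums) as pool:
        futures = [pool.submit(worker, i, item) for i, item in enumerate(items)]
        return [f.result() for f in futures]
```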
@@ -815,7 +815,7 @@ class LLMNode(BaseNode[LLMNodeData]):
             "completion_model": {
                 "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                 "prompt": {
-                    "text": "Here is the chat histories between human and assistant, inside "
+                    "text": "Here are the chat histories between human and assistant, inside "
                     "<histories></histories> XML tags.\n\n<histories>\n{{"
                     "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                     "edition_type": "basic",
@@ -98,7 +98,7 @@ Step 3: Structure the extracted parameters to JSON object as specified in <struc
 Step 4: Ensure that the JSON object is properly formatted and valid. The output should not contain any XML tags. Only the JSON object should be outputted.
 
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>

@@ -125,7 +125,7 @@ CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and out
 The structure of the JSON object you can found in the instructions.
 
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>

@@ -8,7 +8,7 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """
 ### Constraint
 DO NOT include anything other than the JSON array in your response.
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>

@@ -66,7 +66,7 @@ User:{{"input_text": ["bad service, slow to bring the food"], "categories": [{{"
 Assistant:{{"keywords": ["bad service", "slow", "food", "tip", "terrible", "waitresses"],"category_id": "f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name": "Experience"}}
 </example>
 ### Memory
-Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
+Here are the chat histories between human and assistant, inside <histories></histories> XML tags.
 <histories>
 {histories}
 </histories>
@@ -0,0 +1,96 @@
"""add_fat_test

Revision ID: 49f175ff56cb
Revises: 43fa78bc3b7d
Create Date: 2024-11-05 03:26:22.578321

"""
from alembic import op
import models as models
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = '49f175ff56cb'
down_revision = '01d6889832f7'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('component_failure',
        sa.Column('FailureID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('Date', sa.Date(), nullable=False),
        sa.Column('Component', sa.String(length=255), nullable=False),
        sa.Column('FailureMode', sa.String(length=255), nullable=False),
        sa.Column('Cause', sa.String(length=255), nullable=False),
        sa.Column('RepairAction', sa.Text(), nullable=True),
        sa.Column('Technician', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('FailureID', name=op.f('component_failure_pkey')),
        sa.UniqueConstraint('Date', 'Component', 'FailureMode', 'Cause', 'Technician', name='unique_failure_entry')
    )
    op.create_table('component_failure_stats',
        sa.Column('StatID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('Component', sa.String(length=255), nullable=False),
        sa.Column('FailureMode', sa.String(length=255), nullable=False),
        sa.Column('Cause', sa.String(length=255), nullable=False),
        sa.Column('PossibleAction', sa.Text(), nullable=True),
        sa.Column('Probability', sa.Float(), nullable=False),
        sa.Column('MTBF', sa.Float(), nullable=False),
        sa.PrimaryKeyConstraint('StatID', name=op.f('component_failure_stats_pkey'))
    )
    op.create_table('incident_data',
        sa.Column('IncidentID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('IncidentDescription', sa.Text(), nullable=False),
        sa.Column('IncidentDate', sa.Date(), nullable=False),
        sa.Column('Consequences', sa.Text(), nullable=True),
        sa.Column('ResponseActions', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('IncidentID', name=op.f('incident_data_pkey'))
    )
    op.create_table('maintenance',
        sa.Column('MaintenanceID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('MaintenanceType', sa.String(length=255), nullable=False),
        sa.Column('MaintenanceDate', sa.Date(), nullable=False),
        sa.Column('ServiceDescription', sa.Text(), nullable=True),
        sa.Column('PartsReplaced', sa.Text(), nullable=True),
        sa.Column('Technician', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('MaintenanceID', name=op.f('maintenance_pkey'))
    )
    op.create_table('operational_data',
        sa.Column('OperationID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('CraneUsage', sa.Integer(), nullable=False),
        sa.Column('LoadWeight', sa.Float(), nullable=False),
        sa.Column('LoadFrequency', sa.Integer(), nullable=False),
        sa.Column('EnvironmentalConditions', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('OperationID', name=op.f('operational_data_pkey'))
    )
    op.create_table('reliability_data',
        sa.Column('ComponentID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('ComponentName', sa.String(length=255), nullable=False),
        sa.Column('MTBF', sa.Float(), nullable=False),
        sa.Column('FailureRate', sa.Float(), nullable=False),
        sa.PrimaryKeyConstraint('ComponentID', name=op.f('reliability_data_pkey'))
    )
    op.create_table('safety_data',
        sa.Column('SafetyID', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('SafetyInspectionDate', sa.Date(), nullable=False),
        sa.Column('SafetyFindings', sa.Text(), nullable=True),
        sa.Column('SafetyIncidentDescription', sa.Text(), nullable=True),
        sa.Column('ComplianceStatus', sa.String(length=50), nullable=False),
        sa.PrimaryKeyConstraint('SafetyID', name=op.f('safety_data_pkey'))
    )

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('safety_data')
    op.drop_table('reliability_data')
    op.drop_table('operational_data')
    op.drop_table('maintenance')
    op.drop_table('incident_data')
    op.drop_table('component_failure_stats')
    op.drop_table('component_failure')
    # ### end Alembic commands ###
api/models/fta.py (new file, 78 lines)
@@ -0,0 +1,78 @@
from extensions.ext_database import db


class ComponentFailure(db.Model):
    __tablename__ = "component_failure"
    __table_args__ = (
        db.UniqueConstraint("Date", "Component", "FailureMode", "Cause", "Technician", name="unique_failure_entry"),
    )

    FailureID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    Date = db.Column(db.Date, nullable=False)
    Component = db.Column(db.String(255), nullable=False)
    FailureMode = db.Column(db.String(255), nullable=False)
    Cause = db.Column(db.String(255), nullable=False)
    RepairAction = db.Column(db.Text, nullable=True)
    Technician = db.Column(db.String(255), nullable=False)


class Maintenance(db.Model):
    __tablename__ = "maintenance"

    MaintenanceID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    MaintenanceType = db.Column(db.String(255), nullable=False)
    MaintenanceDate = db.Column(db.Date, nullable=False)
    ServiceDescription = db.Column(db.Text, nullable=True)
    PartsReplaced = db.Column(db.Text, nullable=True)
    Technician = db.Column(db.String(255), nullable=False)


class OperationalData(db.Model):
    __tablename__ = "operational_data"

    OperationID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    CraneUsage = db.Column(db.Integer, nullable=False)
    LoadWeight = db.Column(db.Float, nullable=False)
    LoadFrequency = db.Column(db.Integer, nullable=False)
    EnvironmentalConditions = db.Column(db.Text, nullable=True)


class IncidentData(db.Model):
    __tablename__ = "incident_data"

    IncidentID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    IncidentDescription = db.Column(db.Text, nullable=False)
    IncidentDate = db.Column(db.Date, nullable=False)
    Consequences = db.Column(db.Text, nullable=True)
    ResponseActions = db.Column(db.Text, nullable=True)


class ReliabilityData(db.Model):
    __tablename__ = "reliability_data"

    ComponentID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    ComponentName = db.Column(db.String(255), nullable=False)
    MTBF = db.Column(db.Float, nullable=False)
    FailureRate = db.Column(db.Float, nullable=False)


class SafetyData(db.Model):
    __tablename__ = "safety_data"

    SafetyID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    SafetyInspectionDate = db.Column(db.Date, nullable=False)
    SafetyFindings = db.Column(db.Text, nullable=True)
    SafetyIncidentDescription = db.Column(db.Text, nullable=True)
    ComplianceStatus = db.Column(db.String(50), nullable=False)


class ComponentFailureStats(db.Model):
    __tablename__ = "component_failure_stats"

    StatID = db.Column(db.Integer, primary_key=True, autoincrement=True)
    Component = db.Column(db.String(255), nullable=False)
    FailureMode = db.Column(db.String(255), nullable=False)
    Cause = db.Column(db.String(255), nullable=False)
    PossibleAction = db.Column(db.Text, nullable=True)
    Probability = db.Column(db.Float, nullable=False)
    MTBF = db.Column(db.Float, nullable=False)
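A short usage sketch against these models (hypothetical values; inside Dify this runs under the Flask app context with the shared `db.session`, as in `fta_test.py` above):

```python
from datetime import date

from extensions.ext_database import db
from models.fta import ComponentFailure, ComponentFailureStats

# Record one failure event; the unique constraint on
# (Date, Component, FailureMode, Cause, Technician) rejects exact duplicates.
db.session.add(ComponentFailure(
    Date=date(2024, 11, 5),
    Component="Hydraulic system",
    FailureMode="Leak",
    Cause="Hose rupture",
    RepairAction="Replaced hydraulic hose",
    Technician="J. Smith",  # invented name
))
db.session.commit()

# Read back the aggregates that FATTestApi fills in.
for stat in db.session.query(ComponentFailureStats).all():
    print(stat.Component, stat.FailureMode, stat.Probability, stat.MTBF)
```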
@@ -29,6 +29,7 @@ import { useAppContext } from '@/context/app-context'
 import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
 import { useFeatures } from '@/app/components/base/features/hooks'
 import type { InputForm } from '@/app/components/base/chat/chat/type'
+import { getLastAnswer } from '@/app/components/base/chat/utils'
 
 type ChatItemProps = {
   modelAndParameter: ModelAndParameter

@@ -101,7 +102,7 @@ const ChatItem: FC<ChatItemProps> = ({
       query: message,
       inputs,
       model_config: configData,
-      parent_message_id: chatListRef.current.at(-1)?.id || null,
+      parent_message_id: getLastAnswer(chatListRef.current)?.id || null,
     }
 
     if ((config.file_upload as any).enabled && files?.length && supportVision)
@@ -318,7 +318,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
   const targetTone = TONE_LIST.find((item: any) => {
     let res = true
     validatedParams.forEach((param) => {
-      res = item.config?.[param] === detail.model_config?.configs?.completion_params?.[param]
+      res = item.config?.[param] === detail?.model_config.model?.completion_params?.[param]
     })
     return res
   })?.name ?? 'custom'
@@ -76,7 +76,7 @@ const Logs: FC<ILogsProps> = ({ appDetail }) => {
     <div className='flex flex-col h-full'>
       <h1 className='text-text-primary system-xl-semibold'>{t('appLog.workflowTitle')}</h1>
       <p className='text-text-tertiary system-sm-regular'>{t('appLog.workflowSubtitle')}</p>
-      <div className='flex flex-col py-4 flex-1'>
+      <div className='flex flex-col py-4 flex-1 max-h-[calc(100%-16px)]'>
         <Filter queryParams={queryParams} setQueryParams={setQueryParams} />
         {/* workflow log */}
         {total === undefined
@@ -84,7 +84,7 @@ const FileImageItem = ({
           className='absolute bottom-0.5 right-0.5 flex items-center justify-center w-6 h-6 rounded-lg bg-components-actionbar-bg shadow-md'
           onClick={(e) => {
             e.stopPropagation()
-            downloadFile(url || '', name)
+            downloadFile(url || base64Url || '', name)
           }}
         >
           <RiDownloadLine className='w-4 h-4 text-text-tertiary' />
@@ -80,7 +80,7 @@ const FileItem = ({
         }
       </div>
       {
-        showDownloadAction && (
+        showDownloadAction && url && (
           <ActionButton
             size='m'
             className='hidden group-hover/file-item:flex absolute -right-1 -top-1'
web/app/components/base/skeleton/index.tsx (new file, 47 lines)
@@ -0,0 +1,47 @@
import type { ComponentProps, FC } from 'react'
import classNames from '@/utils/classnames'

type SkeletonProps = ComponentProps<'div'>

export const SkeletonContanier: FC<SkeletonProps> = (props) => {
  const { className, children, ...rest } = props
  return (
    <div className={classNames('flex flex-col gap-1', className)} {...rest}>
      {children}
    </div>
  )
}

export const SkeletonRow: FC<SkeletonProps> = (props) => {
  const { className, children, ...rest } = props
  return (
    <div className={classNames('flex items-center gap-2', className)} {...rest}>
      {children}
    </div>
  )
}

export const SkeletonRectangle: FC<SkeletonProps> = (props) => {
  const { className, children, ...rest } = props
  return (
    <div className={classNames('h-2 rounded-sm opacity-20 bg-text-tertiary my-1', className)} {...rest}>
      {children}
    </div>
  )
}

export const SkeletonPoint: FC = () =>
  <div className='text-text-quaternary text-xs font-medium'>·</div>

/** Usage
 * <SkeletonContanier>
 *   <SkeletonRow>
 *     <SkeletonRectangle className="w-96" />
 *     <SkeletonPoint />
 *     <SkeletonRectangle className="w-96" />
 *   </SkeletonRow>
 *   <SkeletonRow>
 *     <SkeletonRectangle className="w-96" />
 *   </SkeletonRow>
 *   <SkeletonRow>
 */
@@ -30,7 +30,9 @@ const nodeDefault: NodeDefault<AssignerNodeType> = {
       errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.assignedVariable') })
 
     if (!errorMessages && value.operation !== WriteMode.clear) {
-      if (value.operation === WriteMode.set) {
+      if (value.operation === WriteMode.set || value.operation === WriteMode.increment
+        || value.operation === WriteMode.decrement || value.operation === WriteMode.multiply
+        || value.operation === WriteMode.divide) {
         if (!value.value && typeof value.value !== 'number')
           errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.variable') })
       }
@@ -173,7 +173,8 @@ const InputVarList: FC<Props> = ({
             value={varInput?.type === VarKindType.constant ? (varInput?.value || '') : (varInput?.value || [])}
             onChange={handleNotMixedTypeChange(variable)}
             onOpen={handleOpen(index)}
-            defaultVarKindType={VarKindType.variable}
+            defaultVarKindType={isNumber ? VarKindType.constant : VarKindType.variable}
             isSupportConstantValue={isSupportConstantValue}
             filterVar={isNumber ? filterVar : undefined}
             availableVars={isSelect ? availableVars : undefined}
             schema={schema}
@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Übersetzen',
     Programming: 'Programmieren',
     HR: 'Personalwesen',
+    Agent: 'Agent',
+    Workflow: 'Arbeitsablauf',
   },
 }

@@ -30,11 +30,13 @@ const translation = {
     nameRequired: 'App name is required',
   },
   category: {
+    Agent: 'Agent',
     Assistant: 'Assistant',
     Writing: 'Writing',
     Translate: 'Translate',
     Programming: 'Programming',
     HR: 'HR',
+    Workflow: 'Workflow',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traducción',
     Programming: 'Programación',
     HR: 'Recursos Humanos',
+    Agent: 'Agente',
+    Workflow: 'Flujo de trabajo',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'ترجمه',
     Programming: 'برنامه‌نویسی',
     HR: 'منابع انسانی',
+    Agent: 'عامل',
+    Workflow: 'گردش',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traduire',
     Programming: 'Programmation',
     HR: 'RH',
+    Agent: 'Agent',
+    Workflow: 'Flux de travail',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'अनुवाद',
     Programming: 'प्रोग्रामिंग',
     HR: 'मानव संसाधन',
+    Workflow: 'कार्यप्रवाह',
+    Agent: 'आढ़तिया',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'Traduzione',
     Programming: 'Programmazione',
     HR: 'Risorse Umane',
+    Workflow: 'Flusso di lavoro',
+    Agent: 'Agente',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: '翻訳',
     Programming: 'プログラミング',
     HR: '人事',
+    Workflow: 'ワークフロー',
+    Agent: 'エージェント',
   },
 }

@@ -36,6 +36,8 @@ const translation = {
     Translate: 'Tłumaczenie',
     Programming: 'Programowanie',
     HR: 'HR',
+    Agent: 'Agent',
+    Workflow: 'Przepływ pracy',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traduzir',
     Programming: 'Programação',
     HR: 'RH',
+    Workflow: 'Fluxo de trabalho',
+    Agent: 'Agente',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Traducere',
     Programming: 'Programare',
     HR: 'Resurse Umane',
+    Agent: 'Agent',
+    Workflow: 'Flux de lucru',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Перевод',
     Programming: 'Программирование',
     HR: 'HR',
+    Agent: 'Агент',
+    Workflow: 'Рабочий процесс',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Prevajanje',
     Programming: 'Programiranje',
     HR: 'Kadri',
+    Workflow: 'Potek dela',
+    Agent: 'Agent',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'แปล',
     Programming: 'โปรแกรม',
     HR: 'ชั่วโมง',
+    Workflow: 'เวิร์กโฟลว์',
+    Agent: 'ตัวแทน',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Çeviri',
     Programming: 'Programlama',
     HR: 'İK',
+    Agent: 'Aracı',
+    Workflow: 'İş Akışı',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Переклад',
     Programming: 'Програмування',
     HR: 'HR',
+    Workflow: 'Робочий процес',
+    Agent: 'Агент',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: 'Dịch thuật',
     Programming: 'Lập trình',
     HR: 'Nhân sự',
+    Agent: 'Người đại lý',
+    Workflow: 'Quy trình làm việc',
   },
 }

@@ -30,11 +30,13 @@ const translation = {
     nameRequired: '应用程序名称不能为空',
   },
   category: {
+    Agent: 'Agent',
     Assistant: '助手',
     Writing: '写作',
     Translate: '翻译',
     Programming: '编程',
     HR: '人力资源',
+    Workflow: '工作流',
   },
 }

@@ -35,6 +35,8 @@ const translation = {
     Translate: '翻譯',
     Programming: '程式設計',
     HR: '人力資源',
+    Agent: '代理',
+    Workflow: '工作流',
   },
 }