Mirror of https://github.com/langgenius/dify.git (synced 2026-01-20 14:04:17 +00:00)

Compare commits: 209 commits

Author and date columns were not captured in this mirror; the compared commit SHAs are:

2c2efe2e1e ec29bcf013 c9f18aae0f df03c89a48 eb8584613b 9000f4ad05
0b1259fc4a eb0e51d44d f633d1ee92 f1e4d5ed6c b5498a373a b73607da80
106604682a cd7ac20d80 be3ebea45b 5a6219c726 4124e804a0 8d1a34bbb9
abead647e2 07ed728605 d796fcc0e7 7b7ac7a495 c8145ce581 da7f8ad936
41df910771 01704a4c1b 8ab9eb9857 e1439c4a5b e7f1d0deea 0998b01321
743071d9bb f54905e685 0afad94378 e350511102 fd443941a2 b146aaaeb7
dd4b03e812 bf69b97639 6d59b8d85b e3dc9f3c31 42a42a7962 c05e03fc09
7016ccef10 6f636093b6 3e698074e7 dc9194ca00 6efa882ca3 fcd5fcca83
95212af935 da2113bde9 fc3f14c0ee 296e2ef90f dcb8939c7f 31a6aabfe5
2e9997110a e1304dc0c3 5aa82629dd dcdec98c8e 3d76f09c3a b3f4e90862
4902ddaf87 a83318cf4b 12faecdf89 c92bc84316 48c2168dff 16c722d1d8
5cffcd6336 0bf816f2e8 6104b91d3f 33c8cb7b3b 4f286c9073 fd1e40d22e
e2b8f40275 78409dfec1 98345c0f65 95c6bd1c8a 8c77f2dc03 11e95d2a61
abaefe22bc bda06df668 4a505c19df d417ccaf49 b4aa1900e2 713902dc47
627a9e2ce1 2ae7a70be9 e58703877b 9c4be5d098 d1801b1f2e 4b5ec242e7
d5b48a0aa3 6372cb7b41 931d3390f0 ff388fe3e6 ef1c1a12d2 24b1a625b3
6cf258a809 c66fda7c71 ac850e559f 7df36fe9f5 161724fb17 cf05e9cf78
44f911a0a8 32527b26d5 e008faf729 46d235bca0 a91b780936 aa4c6874f1
6d7a54915e f30b1c2358 42968cb945 87034e26ae becd03a4aa a1aa325ce3
34cba83ac4 fb11264f42 bad31dfff1 dd3844d1d3 bc22076ad8 754e646b0c
9feafb6dbd c7fcfc863d d565802ea1 ea1d459423 377d11d13b d65da600e5
82189e1bc5 7f8bfb6e4a 7f70cadacb 98f2e2c729 395fdc4960 094b049c94
5d77730c78 a743d5dc71 c23135c9e8 0722beeb0b 8b89447549 8047d08b3b
30792a1e1a 91db2207b3 59a86dabee d87d66ab88 032d849f17 c451f54925
6a857e01f6 2da780e4dc 71edaba9df 37134c5987 0c2a459c30 e0fc7f69dd
bbf1639c63 cb12b4436f 0277a37fca ac3577bc56 0811a23cd4 05c6d57f29
58c62f0a34 2174225259 6a0f1dad7f 40cadab8a6 6157f57872 c4bb07184d
af5d6ca27d 78f2ec8f32 a113356695 43753c8e9a fc8c765215 86a1859d02
360986f38d 1be0d26c1f c167a1f4f4 5eb0ca9b9d 6e26ed2bb7 058d9c3525
b247fbb2ef bc6f122364 815d77856d 05eaef84bb 770c461a8f 16b6ffd915
9701b573e0 83cd14104d e2988acc2f cea4669b76 17b4d4c7b2 e95f0fcceb
6d5d6f0f24 7ce8faf176 f31e3313b0 f6ac98a37d f8e7e301cd 35bafb3235
ae5d2ecf48 1907d2a90a 4448a54cc1 bfc0d606dc 3306228840 d7e00ae691
0e2e2db3fa bf90d34c2f 383af7bf76 ac910ed200 7709d9df20 e61415223b
a30945312a bf682302ee 72191f5b13 e324e59930 727caccfc9

@@ -1,13 +1,13 @@ (likely the dev container post-create script; the file header was not captured in this mirror)
 #!/bin/bash

-npm add -g pnpm@9.12.2
+npm add -g pnpm@10.8.0
 cd web && pnpm install
 pipx install poetry

 echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
 echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
 echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc
-echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
-echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc
+echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d"' >> ~/.bashrc
+echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env down"' >> ~/.bashrc

 source /home/vscode/.bashrc

.github/DISCUSSION_TEMPLATE/general.yml (vendored, 2 changes)
@@ -9,7 +9,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true
(The old and new label lines render identically; the change is at a character level this mirror does not preserve. The same applies to the template diffs below.)

.github/DISCUSSION_TEMPLATE/help.yml (vendored, 2 changes)
@@ -9,7 +9,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true

.github/DISCUSSION_TEMPLATE/suggestion.yml (vendored, 2 changes)
@@ -9,7 +9,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true

.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)
@@ -14,7 +14,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true

.github/ISSUE_TEMPLATE/document_issue.yml (vendored, 2 changes)
@@ -12,7 +12,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true

.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 2 changes)
@@ -12,7 +12,7 @@ body:
      required: true
    - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
      required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
      required: true
    - label: "Please do not modify this template :) and fill in all the required fields."
      required: true

.github/ISSUE_TEMPLATE/tracker.yml (vendored, 2 changes)
@@ -1,5 +1,5 @@
 name: "👾 Tracker"
-description: For inner usages, please donot use this template.
+description: For inner usages, please do not use this template.
 title: "[Tracker] "
 labels:
   - tracker

.github/ISSUE_TEMPLATE/translation_issue.yml (vendored, 4 changes)
@@ -1,5 +1,5 @@
 name: "🌐 Localization/Translation issue"
-description: Report incorrect translations. [please use English :)]
+description: Report incorrect translations. [please use English :)]
 labels:
   - translation
 body:
@@ -12,7 +12,7 @@ body:
       required: true
     - label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
       required: true
-    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
+    - label: "[FOR CHINESE USERS] 请务必使用英文提交 Issue,否则会被关闭。谢谢!:)"
       required: true
     - label: "Please do not modify this template :) and fill in all the required fields."
       required: true

.github/workflows/api-tests.yml (vendored, 9 changes)
@@ -53,9 +53,14 @@ jobs:
       - name: Run dify config tests
         run: poetry run -P api python dev/pytest/pytest_config_tests.py

+      - name: Cache MyPy
+        uses: actions/cache@v4
+        with:
+          path: api/.mypy_cache
+          key: mypy-${{ matrix.python-version }}-${{ runner.os }}-${{ hashFiles('api/poetry.lock') }}
+
       - name: Run mypy
-        run: |
-          poetry run -C api python -m mypy --install-types --non-interactive .
+        run: dev/run-mypy

       - name: Set up dotenvs
         run: |

.github/workflows/build-push.yml (vendored, 4 changes; the mirror does not mark which trigger lines were removed and which were added)
@@ -6,8 +6,8 @@ on:
      - "main"
      - "deploy/dev"
      - "deploy/enterprise"
  release:
    types: [published]
  tags:
    - "*"

 concurrency:
   group: build-push-${{ github.head_ref || github.run_id }}

.github/workflows/style.yml (vendored, 3 changes)
@@ -82,7 +82,7 @@ jobs:
         uses: actions/setup-node@v4
         if: steps.changed-files.outputs.any_changed == 'true'
         with:
-          node-version: 20
+          node-version: 22
           cache: pnpm
           cache-dependency-path: ./web/package.json
@@ -153,6 +153,7 @@ jobs:
       env:
         BASH_SEVERITY: warning
         DEFAULT_BRANCH: main
+        FILTER_REGEX_INCLUDE: pnpm-lock.yaml
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         IGNORE_GENERATED_FILES: true
         IGNORE_GITIGNORED_FILES: true

.github/workflows/tool-test-sdks.yaml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
     strategy:
       matrix:
-        node-version: [16, 18, 20]
+        node-version: [16, 18, 20, 22]

     defaults:
       run:
@@ -33,7 +33,7 @@ jobs:
       - name: Set up Node.js
         if: env.FILES_CHANGED == 'true'
-        uses: actions/setup-node@v2
+        uses: actions/setup-node@v4
         with:
           node-version: 'lts/*'

.github/workflows/vdb-tests.yml (vendored, 1 change; one service line was removed, but the mirror does not mark which)
@@ -76,7 +76,6 @@ jobs:
          milvus-standalone
          pgvecto-rs
          pgvector
          opengauss
          chroma
          elasticsearch

.github/workflows/web-tests.yml (vendored, 39 changes)
@@ -31,25 +31,26 @@ jobs:
         uses: tj-actions/changed-files@v45
         with:
           files: web/**
-      # to run pnpm, should install package canvas, but it always install failed on amd64 under ubuntu-latest
-      # - name: Install pnpm
-      #   uses: pnpm/action-setup@v4
-      #   with:
-      #     version: 10
-      #     run_install: false
-
-      # - name: Setup Node.js
-      #   uses: actions/setup-node@v4
-      #   if: steps.changed-files.outputs.any_changed == 'true'
-      #   with:
-      #     node-version: 20
-      #     cache: pnpm
-      #     cache-dependency-path: ./web/package.json
-
-      # - name: Install dependencies
-      #   if: steps.changed-files.outputs.any_changed == 'true'
-      #   run: pnpm install --frozen-lockfile
-
-      # - name: Run tests
-      #   if: steps.changed-files.outputs.any_changed == 'true'
-      #   run: pnpm test
+      - name: Install pnpm
+        if: steps.changed-files.outputs.any_changed == 'true'
+        uses: pnpm/action-setup@v4
+        with:
+          version: 10
+          run_install: false
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          node-version: 22
+          cache: pnpm
+          cache-dependency-path: ./web/package.json
+
+      - name: Install dependencies
+        if: steps.changed-files.outputs.any_changed == 'true'
+        run: pnpm install --frozen-lockfile
+
+      - name: Run tests
+        if: steps.changed-files.outputs.any_changed == 'true'
+        run: pnpm test

.gitignore (vendored, 1 change)
@@ -103,6 +103,7 @@ celerybeat.pid

 # Environments
 .env
+.env-local
 .venv
 env/
 venv/

Typo fixes in the English contributing guide (likely CONTRIBUTING.md; the file header was not captured in this mirror):
@@ -18,7 +18,7 @@ Need to update an existing model runtime, tool, or squash some bugs? Head over t
 Join the fun, contribute, and let's build something awesome together! 💡✨

-Don't forget to link an existing issue or open an new issue in the PR's description.
+Don't forget to link an existing issue or open a new issue in the PR's description.

 ### Bug reports

@@ -68,7 +68,7 @@ How we prioritize:
 4. Please add tests for your changes accordingly
 5. Ensure your code passes the existing tests
 6. Please link the issue in the PR description, `fixes #<issue_number>`
-7. Get merrged!
+7. Get merged!

 ### Setup the project

 #### Frontend

@@ -90,4 +90,4 @@ We recommend reviewing this document carefully before proceeding with the setup,
 Feel free to reach out if you encounter any issues during the setup process.

 ## Getting Help

-If you ever get stuck or got a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.
+If you ever get stuck or get a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.

CONTRIBUTING_ES.md (new file, 93 lines added; Spanish original, rendered here in English)
@@ -0,0 +1,93 @@
# CONTRIBUTING

So you're looking to contribute to Dify - that's fantastic, we can't wait to see what you do. As a startup with limited staff and funding, we have big ambitions to design the most intuitive workflow for building and managing LLM applications. Any help from the community truly counts.

We need to be nimble and ship fast given where we are, but we also want to make sure contributors like you get as smooth an experience as possible when contributing. We put together this contribution guide with that purpose, aiming to get you familiar with the codebase and how we work with contributors, so you can quickly jump to the fun part.

This guide, like Dify itself, is a constant work in progress. We greatly appreciate your understanding if it sometimes lags behind the actual project, and we welcome any feedback so we can improve.

In terms of licensing, please take a minute to read our short [License and Contributor Agreement](./LICENSE). The community also adheres to the [code of conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).

## Before you jump in

Looking for something to work on? Browse our [good first issues](https://github.com/langgenius/dify/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) and pick one to get started.

Got a cool new model or tool to add? Open a PR in our [plugin repo](https://github.com/langgenius/dify-plugins) and show us what you've built.

Need to update an existing model or tool, or fix some bugs? Head over to our [official plugin repo](https://github.com/langgenius/dify-official-plugins) and work your magic.

Join the fun, contribute, and let's build something awesome together! 💡✨

Don't forget to link an existing issue or open a new issue in the PR's description.

### Bug reports

> [!IMPORTANT]
> Please make sure to include the following information when submitting a bug report:

- A clear and descriptive title
- A detailed description of the bug, including any error messages
- Steps to reproduce the bug
- Expected behavior
- **Logs**, if available; for backend issues this is really important, you can find them in the docker-compose logs
- Screenshots or videos, if applicable

How we prioritize:

| Issue Type | Priority |
| --- | --- |
| Bugs in core functions (cloud service, unable to log in, applications not working, security flaws) | Critical |
| Non-critical bugs, performance improvements | Medium Priority |
| Minor fixes (typos, confusing but working UI) | Low Priority |

### Feature requests

> [!NOTE]
> Please make sure to include the following information when submitting a feature request:

- A clear and descriptive title
- A detailed description of the feature
- A use case for the feature
- Any other context or screenshots about the feature request

How we prioritize:

| Feature Type | Priority |
| --- | --- |
| High-priority features labeled by a team member | High Priority |
| Popular feature requests from our [community feedback board](https://github.com/langgenius/dify/discussions/categories/feedbacks) | Medium Priority |
| Non-core features and minor enhancements | Low Priority |
| Valuable but not immediate | Future-Feature |

## Submitting your PR

### Pull Request process

1. Fork the repository
2. Before drafting a PR, please create an issue to discuss the changes you want to make
3. Create a new branch for your changes
4. Please add tests for your changes accordingly
5. Make sure your code passes the existing tests
6. Please link the issue in the PR description, `fixes #<issue_number>`
7. Get your code merged!

### Setting up the project

#### Frontend

To set up the frontend service, please refer to our [comprehensive guide](https://github.com/langgenius/dify/blob/main/web/README.md) in the `web/README.md` file. This document provides detailed instructions to help you set up the frontend environment properly.

#### Backend

To set up the backend service, please refer to our [detailed instructions](https://github.com/langgenius/dify/blob/main/api/README.md) in the `api/README.md` file. This document contains a step-by-step guide to help you get the backend up and running smoothly.

#### Other things to note

We recommend reviewing this document carefully before proceeding with the setup, as it contains essential information about:
- Prerequisites and dependencies
- Installation steps
- Configuration details
- Common troubleshooting tips

Feel free to reach out if you run into any issues during the setup process.

## Getting Help

If you ever get stuck or have a burning question while contributing, simply send your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.

CONTRIBUTING_FR.md (new file, 93 lines added)
@@ -0,0 +1,93 @@
French translation of the contributing guide, structurally identical to CONTRIBUTING_ES.md above (rendered in English there): intro and license note, before-you-jump-in pointers to good first issues and the plugin repos, bug-report and feature-request checklists with the same priority tables, the seven-step PR process, project setup via `web/README.md` and `api/README.md`, and getting help through GitHub issues or [Discord](https://discord.gg/8Tpq4AcN9c).

CONTRIBUTING_KR.md (new file, 93 lines added)
@@ -0,0 +1,93 @@
Korean translation of the same contributing guide, with the same sections and tables as CONTRIBUTING_ES.md above; the Korean version explicitly mentions model runtimes in its plugin-repo pointers.

CONTRIBUTING_PT.md (new file, 93 lines added)
@@ -0,0 +1,93 @@
Portuguese translation of the same contributing guide, with the same sections, priority tables, PR process, and setup/help pointers as CONTRIBUTING_ES.md above.

CONTRIBUTING_TR.md (new file, 93 lines added)
@@ -0,0 +1,93 @@
Turkish translation of the same contributing guide, with the same sections, priority tables, PR process, and setup/help pointers as CONTRIBUTING_ES.md above; like the Korean version it mentions model runtimes in its plugin-repo pointers.

LICENSE (2 changes; the two removed lines are not marked in this mirror)
@@ -10,8 +10,6 @@ a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you ma
 b. LOGO and copyright information: In the process of using Dify's frontend, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend.
 - Frontend Definition: For the purposes of this license, the "frontend" of Dify includes all components located in the `web/` directory when running Dify from the raw source code, or the "web" image when running Dify with Docker.

 Please contact business@dify.ai by email to inquire about licensing matters.

 2. As a contributor, you should agree that:

 a. The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary.

Community section of the Chinese README (likely README_CN.md; file header not captured; Chinese text rendered in English, 2 lines removed but not marked in this mirror):
@@ -254,8 +254,6 @@ docker compose up -d
 - [Discord](https://discord.gg/FngNHpbcY7). 👉: Share your applications and chat with the community.
 - [X(Twitter)](https://twitter.com/dify_ai). 👉: Share your applications and chat with the community.
 - [Business License](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry). 👉: Business inquiries about licensing Dify.AI for commercial use.
 - [WeChat]() 👉: Scan the QR code below and add us on WeChat with the note "Dify"; we will invite you to join the Dify community.
 <img src="./images/wechat.png" alt="wechat" width="100"/>

 ## Security Issues

Environment template hunks (likely api/.env.example; file header not captured in this mirror):
@@ -26,9 +26,6 @@ ACCESS_TOKEN_EXPIRE_MINUTES=60
 # Refresh token expiration time in days
 REFRESH_TOKEN_EXPIRE_DAYS=30

-# celery configuration
-CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
-
 # redis configuration
 REDIS_HOST=localhost
 REDIS_PORT=6379

@@ -50,6 +47,9 @@ REDIS_USE_CLUSTERS=false
 REDIS_CLUSTERS=
 REDIS_CLUSTERS_PASSWORD=

+# celery configuration
+CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
+
 # PostgreSQL database configuration
 DB_USERNAME=postgres
 DB_PASSWORD=difyai123456

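The relocated broker URL now leans on POSIX-style variable interpolation (${REDIS_PORT}). Whether that reference expands depends on the loader; python-dotenv, for example, interpolates by default. A minimal sketch under that assumption (the two-line .env content here is illustrative, not the full file):

    from io import StringIO

    from dotenv import dotenv_values

    # Illustrative two-line .env; python-dotenv expands ${VAR} references by default.
    env_text = "REDIS_PORT=6379\nCELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1\n"

    values = dotenv_values(stream=StringIO(env_text))
    print(values["CELERY_BROKER_URL"])  # redis://:difyai123456@localhost:6379/1
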
@@ -137,7 +137,7 @@ WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
 CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*

 # Vector database configuration
-# support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase, opengauss
+# support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase, opengauss, tablestore
 VECTOR_STORE=weaviate

 # Weaviate configuration

@@ -189,6 +189,7 @@ TENCENT_VECTOR_DB_USERNAME=dify
 TENCENT_VECTOR_DB_DATABASE=dify
 TENCENT_VECTOR_DB_SHARD=1
 TENCENT_VECTOR_DB_REPLICAS=2
+TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false

 # ElasticSearch configuration
 ELASTICSEARCH_HOST=127.0.0.1

@@ -212,6 +213,12 @@ PGVECTOR_DATABASE=postgres
 PGVECTOR_MIN_CONNECTION=1
 PGVECTOR_MAX_CONNECTION=5

+# TableStore Vector configuration
+TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com
+TABLESTORE_INSTANCE_NAME=instance-name
+TABLESTORE_ACCESS_KEY_ID=xxx
+TABLESTORE_ACCESS_KEY_SECRET=xxx
+
 # Tidb Vector configuration
 TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com
 TIDB_VECTOR_PORT=4000

@@ -297,6 +304,7 @@ OCEANBASE_VECTOR_USER=root@test
 OCEANBASE_VECTOR_PASSWORD=difyai123456
 OCEANBASE_VECTOR_DATABASE=test
 OCEANBASE_MEMORY_LIMIT=6G
+OCEANBASE_ENABLE_HYBRID_SEARCH=false

 # openGauss configuration
 OPENGAUSS_HOST=127.0.0.1

Ruff lint configuration (likely api/pyproject.toml; file header not captured in this mirror):
@@ -37,6 +37,12 @@ select = [
     "UP", # pyupgrade rules
     "W191", # tab-indentation
     "W605", # invalid-escape-sequence
+    # security related linting rules
+    # RCE protection (sort of)
+    "S102", # exec-builtin, disallow use of `exec`
+    "S307", # suspicious-eval-usage, disallow use of `eval` and `ast.literal_eval`
+    "S301", # suspicious-pickle-usage, disallow use of `pickle` and its wrappers.
+    "S302", # suspicious-marshal-usage, disallow use of `marshal` module
 ]

 ignore = [

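For reference, a hypothetical module showing the kind of call each newly enabled rule reports; the annotated lines are what `ruff check` would flag:

    # Hypothetical examples only; each annotated call trips one of the new rules.
    import marshal
    import pickle

    exec("x = 1")                              # S102: exec-builtin
    total = eval("2 + 2")                      # S307: suspicious-eval-usage
    roundtrip = pickle.loads(pickle.dumps(1))  # S301: suspicious-pickle-usage
    code = marshal.loads(marshal.dumps(3))     # S302: suspicious-marshal-usage
    print(total, roundtrip, code)
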
Command-line tooling hunks (likely api/commands.py; file header not captured in this mirror):
@@ -12,6 +12,7 @@ from configs import dify_config
 from constants.languages import languages
 from core.rag.datasource.vdb.vector_factory import Vector
 from core.rag.datasource.vdb.vector_type import VectorType
+from core.rag.index_processor.constant.built_in_field import BuiltInField
 from core.rag.models.document import Document
 from events.app_event import app_was_created
 from extensions.ext_database import db
@@ -25,6 +26,7 @@ from models.dataset import Document as DatasetDocument
 from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
 from models.provider import Provider, ProviderModel
 from services.account_service import RegisterService, TenantService
+from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpiredLogs
 from services.plugin.data_migration import PluginDataMigration
 from services.plugin.plugin_migration import PluginMigration

@@ -274,6 +276,7 @@ def migrate_knowledge_vector_database():
             VectorType.ORACLE,
             VectorType.ELASTICSEARCH,
             VectorType.OPENGAUSS,
+            VectorType.TABLESTORE,
         }
         lower_collection_vector_types = {
             VectorType.ANALYTICDB,

@@ -559,36 +562,25 @@ and @@ -597,7 +589,22 @@: rework of old_metadata_migration(). The mirror interleaves the old and new code without markers; reconstructed from the visible lines, the old loop did a get-or-create of DatasetMetadata and then always created a DatasetMetadataBinding, while the new loop skips built-in fields via for/else, creates metadata and bindings only when missing, and commits as it goes:

    if document.doc_metadata:
        doc_metadata = document.doc_metadata
        for key, value in doc_metadata.items():
            for field in BuiltInField:
                if field.value == key:
                    break
            else:
                dataset_metadata = (
                    db.session.query(DatasetMetadata)
                    .filter(DatasetMetadata.dataset_id == document.dataset_id, DatasetMetadata.name == key)
                    .first()
                )
                if not dataset_metadata:
                    dataset_metadata = DatasetMetadata(
                        tenant_id=document.tenant_id,
                        dataset_id=document.dataset_id,
                        name=key,
                        type="string",
                        created_by=document.created_by,
                    )
                    db.session.add(dataset_metadata)
                    db.session.flush()
                    dataset_metadata_binding = DatasetMetadataBinding(
                        tenant_id=document.tenant_id,
                        dataset_id=document.dataset_id,
                        metadata_id=dataset_metadata.id,
                        document_id=document.id,
                        created_by=document.created_by,
                    )
                    db.session.add(dataset_metadata_binding)
                    db.session.commit()
                else:
                    dataset_metadata_binding = DatasetMetadataBinding.query.filter(
                        DatasetMetadataBinding.dataset_id == document.dataset_id,
                        DatasetMetadataBinding.document_id == document.id,
                        DatasetMetadataBinding.metadata_id == dataset_metadata.id,
                    ).first()
                    if not dataset_metadata_binding:
                        dataset_metadata_binding = DatasetMetadataBinding(
                            tenant_id=document.tenant_id,
                            dataset_id=document.dataset_id,
                            metadata_id=dataset_metadata.id,
                            document_id=document.id,
                            created_by=document.created_by,
                        )
                        db.session.add(dataset_metadata_binding)
                        db.session.commit()
    page += 1
    click.echo(click.style("Old metadata migration completed.", fg="green"))

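The rework above hinges on Python's for/else: the else suite runs only when the loop completes without hitting break, which is what lets built-in metadata keys skip the custom-metadata path entirely. A self-contained illustration of the idiom (the enum members here are invented, not the real BuiltInField values):

    from enum import Enum


    class BuiltIn(Enum):
        # Invented members; stand-in for the real BuiltInField enum.
        SOURCE = "source"
        AUTHOR = "author"


    def is_custom_key(key: str) -> bool:
        for field in BuiltIn:
            if field.value == key:
                break          # built-in key: bail out of the loop
        else:                  # runs only if the loop never hit `break`
            return True        # not a built-in field, so treat as custom metadata
        return False


    assert is_custom_key("my_tag")
    assert not is_custom_key("source")
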
@@ -787,3 +794,23 @@ install_plugins(input_file: str, output_file: str, workers: int):
     PluginMigration.install_plugins(input_file, output_file, workers)

     click.echo(click.style("Install plugins completed.", fg="green"))


+@click.command("clear-free-plan-tenant-expired-logs", help="Clear free plan tenant expired logs.")
+@click.option("--days", prompt=True, help="The days to clear free plan tenant expired logs.", default=30)
+@click.option("--batch", prompt=True, help="The batch size to clear free plan tenant expired logs.", default=100)
+@click.option(
+    "--tenant_ids",
+    prompt=True,
+    multiple=True,
+    help="The tenant ids to clear free plan tenant expired logs.",
+)
+def clear_free_plan_tenant_expired_logs(days: int, batch: int, tenant_ids: list[str]):
+    """
+    Clear free plan tenant expired logs.
+    """
+    click.echo(click.style("Starting clear free plan tenant expired logs.", fg="white"))
+
+    ClearFreePlanTenantExpiredLogs.process(days, batch, tenant_ids)
+
+    click.echo(click.style("Clear free plan tenant expired logs completed.", fg="green"))

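Click's multiple=True makes --tenant_ids repeatable and delivers it to the function as a tuple, which is how the command accepts several tenants in one run. A stripped-down sketch of the same wiring (prompt=True omitted; the body is a stub rather than the real ClearFreePlanTenantExpiredLogs.process call):

    import click


    @click.command("clear-expired-logs")  # illustrative name, not the real command
    @click.option("--days", default=30, type=int)
    @click.option("--batch", default=100, type=int)
    @click.option("--tenant_ids", multiple=True)  # repeatable flag, collected into a tuple
    def clear_expired_logs(days: int, batch: int, tenant_ids: tuple[str, ...]) -> None:
        click.echo(f"days={days} batch={batch} tenants={list(tenant_ids)}")


    if __name__ == "__main__":
        # e.g. python demo.py --days 30 --batch 100 --tenant_ids t1 --tenant_ids t2
        clear_expired_logs()
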
@@ -848,6 +848,11 @@ class AccountConfig(BaseSettings):
         default=5,
     )

+    EDUCATION_ENABLED: bool = Field(
+        description="whether to enable education identity",
+        default=False,
+    )
+

 class FeatureConfig(
     # place the configs in alphabet order

@@ -1,6 +1,6 @@
 from typing import Optional

-from pydantic import Field, NonNegativeInt, computed_field
+from pydantic import Field, NonNegativeInt
 from pydantic_settings import BaseSettings

@@ -33,6 +33,7 @@ from .vdb.pgvector_config import PGVectorConfig
 from .vdb.pgvectors_config import PGVectoRSConfig
 from .vdb.qdrant_config import QdrantConfig
 from .vdb.relyt_config import RelytConfig
+from .vdb.tablestore_config import TableStoreConfig
 from .vdb.tencent_vector_config import TencentVectorDBConfig
 from .vdb.tidb_on_qdrant_config import TidbOnQdrantConfig
 from .vdb.tidb_vector_config import TiDBVectorConfig
@@ -283,5 +284,6 @@ class MiddlewareConfig(
     OceanBaseVectorConfig,
     BaiduVectorDBConfig,
     OpenGaussConfig,
+    TableStoreConfig,
 ):
     pass

@@ -33,3 +33,9 @@ class OceanBaseVectorConfig(BaseSettings):
         description="Name of the OceanBase Vector database to connect to",
         default=None,
     )

+    OCEANBASE_ENABLE_HYBRID_SEARCH: bool = Field(
+        description="Enable hybrid search features (requires OceanBase >= 4.3.5.1). Set to false for compatibility "
+        "with older versions",
+        default=False,
+    )

@@ -43,3 +43,8 @@ class OpenGaussConfig(BaseSettings):
         description="Max connection of the OpenGauss database",
         default=5,
     )

+    OPENGAUSS_ENABLE_PQ: bool = Field(
+        description="Enable openGauss PQ acceleration feature",
+        default=False,
+    )

api/configs/middleware/vdb/tablestore_config.py (new file, 30 lines)
@@ -0,0 +1,30 @@
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings


class TableStoreConfig(BaseSettings):
    """
    Configuration settings for TableStore.
    """

    TABLESTORE_ENDPOINT: Optional[str] = Field(
        description="Endpoint address of the TableStore server (e.g. 'https://instance-name.cn-hangzhou.ots.aliyuncs.com')",
        default=None,
    )

    TABLESTORE_INSTANCE_NAME: Optional[str] = Field(
        description="Instance name to access TableStore server (e.g. 'instance-name')",
        default=None,
    )

    TABLESTORE_ACCESS_KEY_ID: Optional[str] = Field(
        description="AccessKey id for the instance name",
        default=None,
    )

    TABLESTORE_ACCESS_KEY_SECRET: Optional[str] = Field(
        description="AccessKey secret for the instance name",
        default=None,
    )
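A minimal sketch (all values are placeholders) of how these settings are picked up from environment variables by pydantic-settings once TableStoreConfig is composed into the middleware config:

import os

from configs.middleware.vdb.tablestore_config import TableStoreConfig  # assumed import path

os.environ["TABLESTORE_ENDPOINT"] = "https://instance-name.cn-hangzhou.ots.aliyuncs.com"
os.environ["TABLESTORE_INSTANCE_NAME"] = "instance-name"
os.environ["TABLESTORE_ACCESS_KEY_ID"] = "your-access-key-id"          # placeholder
os.environ["TABLESTORE_ACCESS_KEY_SECRET"] = "your-access-key-secret"  # placeholder

config = TableStoreConfig()
print(config.TABLESTORE_ENDPOINT)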
@@ -48,3 +48,8 @@ class TencentVectorDBConfig(BaseSettings):
        description="Name of the specific Tencent Vector Database to connect to",
        default=None,
    )

    TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: bool = Field(
        description="Enable hybrid search features",
        default=False,
    )
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):

    CURRENT_VERSION: str = Field(
        description="Dify version",
-        default="1.1.1",
+        default="1.2.0",
    )

    COMMIT_SHA: str = Field(
@@ -1,5 +1,3 @@
-from typing import Optional
-
from pydantic import Field

from .apollo import ApolloSettingsSourceInfo
@@ -50,7 +50,15 @@ class AppListApi(Resource):
        parser.add_argument(
            "mode",
            type=str,
-            choices=["chat", "workflow", "agent-chat", "channel", "all"],
+            choices=[
+                "completion",
+                "chat",
+                "advanced-chat",
+                "workflow",
+                "agent-chat",
+                "channel",
+                "all",
+            ],
            default="all",
            location="args",
            required=False,
@@ -130,7 +138,6 @@ class AppApi(Resource):
        parser.add_argument("icon_type", type=str, location="json")
        parser.add_argument("icon", type=str, location="json")
        parser.add_argument("icon_background", type=str, location="json")
-        parser.add_argument("max_active_requests", type=int, location="json")
        parser.add_argument("use_icon_as_answer_icon", type=bool, location="json")
        args = parser.parse_args()
@@ -8,6 +8,7 @@ from werkzeug.exceptions import Forbidden
from controllers.console.app.wraps import get_app_model
from controllers.console.wraps import (
    account_initialization_required,
+    cloud_edition_billing_resource_check,
    setup_required,
)
from extensions.ext_database import db

@@ -23,6 +24,7 @@ class AppImportApi(Resource):
    @login_required
    @account_initialization_required
    @marshal_with(app_import_fields)
+    @cloud_edition_billing_resource_check("apps")
    def post(self):
        # Check user role first
        if not current_user.is_editor:
@@ -99,53 +99,64 @@ class ForgotPasswordResetApi(Resource):
        parser.add_argument("password_confirm", type=valid_password, required=True, nullable=False, location="json")
        args = parser.parse_args()

-        new_password = args["new_password"]
-        password_confirm = args["password_confirm"]
-
-        if str(new_password).strip() != str(password_confirm).strip():
+        # Validate passwords match
+        if args["new_password"] != args["password_confirm"]:
            raise PasswordMismatchError()

-        token = args["token"]
-        reset_data = AccountService.get_reset_password_data(token)
-
-        if reset_data is None:
+        # Validate token and get reset data
+        reset_data = AccountService.get_reset_password_data(args["token"])
+        if not reset_data:
            raise InvalidTokenError()

-        AccountService.revoke_reset_password_token(token)
+        # Revoke token to prevent reuse
+        AccountService.revoke_reset_password_token(args["token"])

+        # Generate secure salt and hash password
        salt = secrets.token_bytes(16)
-        base64_salt = base64.b64encode(salt).decode()
-
-        password_hashed = hash_password(new_password, salt)
-        base64_password_hashed = base64.b64encode(password_hashed).decode()
+        password_hashed = hash_password(args["new_password"], salt)
+        email = reset_data.get("email", "")

        with Session(db.engine) as session:
-            account = session.execute(select(Account).filter_by(email=reset_data.get("email"))).scalar_one_or_none()
-            if account:
-                account.password = base64_password_hashed
-                account.password_salt = base64_salt
-                db.session.commit()
-                tenant = TenantService.get_join_tenants(account)
-                if not tenant and not FeatureService.get_system_features().is_allow_create_workspace:
-                    tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
-                    TenantService.create_tenant_member(tenant, account, role="owner")
-                    account.current_tenant = tenant
-                    tenant_was_created.send(tenant)
-            else:
-                try:
-                    account = AccountService.create_account_and_tenant(
-                        email=reset_data.get("email", ""),
-                        name=reset_data.get("email", ""),
-                        password=password_confirm,
-                        interface_language=languages[0],
-                    )
-                except WorkSpaceNotAllowedCreateError:
-                    pass
-                except AccountRegisterError:
-                    raise AccountInFreezeError()
+            account = session.execute(select(Account).filter_by(email=email)).scalar_one_or_none()
+
+            if account:
+                self._update_existing_account(account, password_hashed, salt, session)
+            else:
+                self._create_new_account(email, args["password_confirm"])

        return {"result": "success"}

+    def _update_existing_account(self, account, password_hashed, salt, session):
+        # Update existing account credentials
+        account.password = base64.b64encode(password_hashed).decode()
+        account.password_salt = base64.b64encode(salt).decode()
+        session.commit()
+
+        # Create workspace if needed
+        if (
+            not TenantService.get_join_tenants(account)
+            and FeatureService.get_system_features().is_allow_create_workspace
+        ):
+            tenant = TenantService.create_tenant(f"{account.name}'s Workspace")
+            TenantService.create_tenant_member(tenant, account, role="owner")
+            account.current_tenant = tenant
+            tenant_was_created.send(tenant)
+
+    def _create_new_account(self, email, password):
+        # Create new account if allowed
+        try:
+            AccountService.create_account_and_tenant(
+                email=email,
+                name=email,
+                password=password,
+                interface_language=languages[0],
+            )
+        except WorkSpaceNotAllowedCreateError:
+            pass
+        except AccountRegisterError:
+            raise AccountInFreezeError()


api.add_resource(ForgotPasswordSendEmailApi, "/forgot-password")
api.add_resource(ForgotPasswordCheckApi, "/forgot-password/validity")
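For context, a sketch of how a password stored under this salt-and-hash scheme can later be verified. hash_password is assumed to be the same helper this controller imports (libs.password in the api package); the names below simply mirror the handler above and are not a separate Dify API:

import base64
import secrets

salt = secrets.token_bytes(16)
password_hashed = hash_password("new-secret", salt)  # assumed helper from libs.password
stored_password = base64.b64encode(password_hashed).decode()
stored_salt = base64.b64encode(salt).decode()

def verify(candidate: str) -> bool:
    # Re-hash the candidate with the stored salt and compare the encodings.
    decoded_salt = base64.b64decode(stored_salt)
    return base64.b64encode(hash_password(candidate, decoded_salt)).decode() == stored_password

assert verify("new-secret")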
@@ -641,12 +641,10 @@ class DatasetRetrievalSettingApi(Resource):
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
-               | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
-               | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (

@@ -664,6 +662,9 @@ class DatasetRetrievalSettingApi(Resource):
                | VectorType.COUCHBASE
                | VectorType.MILVUS
                | VectorType.OPENGAUSS
+               | VectorType.OCEANBASE
+               | VectorType.TABLESTORE
+               | VectorType.TENCENT
            ):
                return {
                    "retrieval_method": [

@@ -687,12 +688,10 @@ class DatasetRetrievalSettingMockApi(Resource):
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
-               | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
-               | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (

@@ -708,6 +707,9 @@ class DatasetRetrievalSettingMockApi(Resource):
                | VectorType.PGVECTOR
                | VectorType.LINDORM
                | VectorType.OPENGAUSS
+               | VectorType.OCEANBASE
+               | VectorType.TABLESTORE
+               | VectorType.TENCENT
            ):
                return {
                    "retrieval_method": [
@@ -14,7 +14,12 @@ class WebsiteCrawlApi(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
-            "provider", type=str, choices=["firecrawl", "jinareader"], required=True, nullable=True, location="json"
+            "provider",
+            type=str,
+            choices=["firecrawl", "watercrawl", "jinareader"],
+            required=True,
+            nullable=True,
+            location="json",
        )
        parser.add_argument("url", type=str, required=True, nullable=True, location="json")
        parser.add_argument("options", type=dict, required=True, nullable=True, location="json")

@@ -34,7 +39,9 @@ class WebsiteCrawlStatusApi(Resource):
    @account_initialization_required
    def get(self, job_id: str):
        parser = reqparse.RequestParser()
-        parser.add_argument("provider", type=str, choices=["firecrawl", "jinareader"], required=True, location="args")
+        parser.add_argument(
+            "provider", type=str, choices=["firecrawl", "watercrawl", "jinareader"], required=True, location="args"
+        )
        args = parser.parse_args()
        # get crawl status
        try:
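A sketch of the JSON body this endpoint accepts now that "watercrawl" is a valid provider; the base URL, console path, and auth header are deployment-specific assumptions:

import requests

resp = requests.post(
    "http://localhost:5001/console/api/website/crawl",  # assumed console API path
    json={
        "provider": "watercrawl",
        "url": "https://example.com",
        "options": {"limit": 10},  # provider-specific crawl options
    },
    headers={"Authorization": "Bearer <console-session-token>"},  # placeholder
)
print(resp.status_code)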
@@ -103,6 +103,18 @@ class AccountInFreezeError(BaseHTTPException):
    )


class EducationVerifyLimitError(BaseHTTPException):
    error_code = "education_verify_limit"
    description = "Rate limit exceeded"
    code = 429


class EducationActivateLimitError(BaseHTTPException):
    error_code = "education_activate_limit"
    description = "Rate limit exceeded"
    code = 429


class CompilanceRateLimitError(BaseHTTPException):
    error_code = "compilance_rate_limit"
    description = "Rate limit exceeded for downloading compliance report."
@@ -15,7 +15,13 @@ from controllers.console.workspace.error import (
    InvalidInvitationCodeError,
    RepeatPasswordNotMatchError,
)
-from controllers.console.wraps import account_initialization_required, enterprise_license_required, setup_required
+from controllers.console.wraps import (
+    account_initialization_required,
+    cloud_edition_billing_enabled,
+    enterprise_license_required,
+    only_edition_cloud,
+    setup_required,
+)
from extensions.ext_database import db
from fields.member_fields import account_fields
from libs.helper import TimestampField, timezone

@@ -292,6 +298,79 @@ class AccountDeleteUpdateFeedbackApi(Resource):
        return {"result": "success"}


class EducationVerifyApi(Resource):
    verify_fields = {
        "token": fields.String,
    }

    @setup_required
    @login_required
    @account_initialization_required
    @only_edition_cloud
    @cloud_edition_billing_enabled
    @marshal_with(verify_fields)
    def get(self):
        account = current_user

        return BillingService.EducationIdentity.verify(account.id, account.email)


class EducationApi(Resource):
    status_fields = {
        "result": fields.Boolean,
    }

    @setup_required
    @login_required
    @account_initialization_required
    @only_edition_cloud
    @cloud_edition_billing_enabled
    def post(self):
        account = current_user

        parser = reqparse.RequestParser()
        parser.add_argument("token", type=str, required=True, location="json")
        parser.add_argument("institution", type=str, required=True, location="json")
        parser.add_argument("role", type=str, required=True, location="json")
        args = parser.parse_args()

        return BillingService.EducationIdentity.activate(account, args["token"], args["institution"], args["role"])

    @setup_required
    @login_required
    @account_initialization_required
    @only_edition_cloud
    @cloud_edition_billing_enabled
    @marshal_with(status_fields)
    def get(self):
        account = current_user

        return BillingService.EducationIdentity.is_active(account.id)


class EducationAutoCompleteApi(Resource):
    data_fields = {
        "data": fields.List(fields.String),
        "curr_page": fields.Integer,
        "has_next": fields.Boolean,
    }

    @setup_required
    @login_required
    @account_initialization_required
    @only_edition_cloud
    @cloud_edition_billing_enabled
    @marshal_with(data_fields)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("keywords", type=str, required=True, location="args")
        parser.add_argument("page", type=int, required=False, location="args", default=0)
        parser.add_argument("limit", type=int, required=False, location="args", default=20)
        args = parser.parse_args()

        return BillingService.EducationIdentity.autocomplete(args["keywords"], args["page"], args["limit"])


# Register API resources
api.add_resource(AccountInitApi, "/account/init")
api.add_resource(AccountProfileApi, "/account/profile")

@@ -305,5 +384,8 @@ api.add_resource(AccountIntegrateApi, "/account/integrates")
api.add_resource(AccountDeleteVerifyApi, "/account/delete/verify")
api.add_resource(AccountDeleteApi, "/account/delete")
api.add_resource(AccountDeleteUpdateFeedbackApi, "/account/delete/feedback")
+api.add_resource(EducationVerifyApi, "/account/education/verify")
+api.add_resource(EducationApi, "/account/education")
+api.add_resource(EducationAutoCompleteApi, "/account/education/autocomplete")
# api.add_resource(AccountEmailApi, '/account/email')
# api.add_resource(AccountEmailVerifyApi, '/account/email-verify')
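A sketch of the resulting education-verification flow on the cloud edition with billing enabled. Only the routes come from the registrations above; the base URL, auth header, and field values are placeholders:

import requests

BASE = "http://localhost:5001/console/api"  # assumed console API base
HEADERS = {"Authorization": "Bearer <console-session-token>"}  # placeholder

# 1. Obtain a verification token for the logged-in account.
token = requests.get(f"{BASE}/account/education/verify", headers=HEADERS).json()["token"]

# 2. Activate the education identity with an institution and role.
requests.post(
    f"{BASE}/account/education",
    json={"token": token, "institution": "Example University", "role": "student"},
    headers=HEADERS,
)

# 3. Check whether the identity is now active.
print(requests.get(f"{BASE}/account/education", headers=HEADERS).json())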
@@ -236,7 +236,7 @@ class PluginFetchManifestApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def get(self):
        tenant_id = current_user.current_tenant_id

@@ -260,7 +260,7 @@ class PluginFetchInstallTasksApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def get(self):
        tenant_id = current_user.current_tenant_id

@@ -281,7 +281,7 @@ class PluginFetchInstallTaskApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def get(self, task_id: str):
        tenant_id = current_user.current_tenant_id

@@ -295,7 +295,7 @@ class PluginDeleteInstallTaskApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self, task_id: str):
        tenant_id = current_user.current_tenant_id

@@ -309,7 +309,7 @@ class PluginDeleteAllInstallTaskItemsApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self):
        tenant_id = current_user.current_tenant_id

@@ -323,7 +323,7 @@ class PluginDeleteInstallTaskItemApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self, task_id: str, identifier: str):
        tenant_id = current_user.current_tenant_id

@@ -337,7 +337,7 @@ class PluginUpgradeFromMarketplaceApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self):
        tenant_id = current_user.current_tenant_id

@@ -360,7 +360,7 @@ class PluginUpgradeFromGithubApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self):
        tenant_id = current_user.current_tenant_id

@@ -391,7 +391,7 @@ class PluginUninstallApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
-    @plugin_permission_required(debug_required=True)
+    @plugin_permission_required(install_required=True)
    def post(self):
        req = reqparse.RequestParser()
        req.add_argument("plugin_installation_id", type=str, required=True, location="json")
@@ -216,6 +216,23 @@ class WebappLogoWorkspaceApi(Resource):
        return {"id": upload_file.id}, 201


class WorkspaceInfoApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    # Change workspace name
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("name", type=str, required=True, location="json")
        args = parser.parse_args()

        tenant = Tenant.query.filter(Tenant.id == current_user.current_tenant_id).one_or_404()
        tenant.name = args["name"]
        db.session.commit()

        return {"result": "success", "tenant": marshal(WorkspaceService.get_tenant_info(tenant), tenant_fields)}


api.add_resource(TenantListApi, "/workspaces")  # GET for getting all tenants
api.add_resource(WorkspaceListApi, "/all-workspaces")  # GET for getting all tenants
api.add_resource(TenantApi, "/workspaces/current", endpoint="workspaces_current")  # GET for getting current tenant info

@@ -223,3 +240,4 @@ api.add_resource(TenantApi, "/info", endpoint="info")  # Deprecated
api.add_resource(SwitchWorkspaceApi, "/workspaces/switch")  # POST for switching tenant
api.add_resource(CustomConfigWorkspaceApi, "/workspaces/custom-config")
api.add_resource(WebappLogoWorkspaceApi, "/workspaces/custom-config/webapp-logo/upload")
+api.add_resource(WorkspaceInfoApi, "/workspaces/info")  # POST for changing workspace info
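A sketch of renaming the current workspace through the new route; the base URL and auth header are placeholders:

import requests

resp = requests.post(
    "http://localhost:5001/console/api/workspaces/info",  # assumed console API base
    json={"name": "New Workspace Name"},
    headers={"Authorization": "Bearer <console-session-token>"},  # placeholder
)
print(resp.json()["tenant"]["name"])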
@@ -54,6 +54,17 @@ def only_edition_self_hosted(view):
    return decorated


def cloud_edition_billing_enabled(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        features = FeatureService.get_features(current_user.current_tenant_id)
        if not features.billing.enabled:
            abort(403, "Billing feature is not enabled.")
        return view(*args, **kwargs)

    return decorated


def cloud_edition_billing_resource_check(resource: str):
    def interceptor(view):
        @wraps(view)
@@ -75,6 +75,7 @@ class FilePreviewApi(Resource):
        if args["as_attachment"]:
            encoded_filename = quote(upload_file.name)
            response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
+            response.headers["Content-Type"] = "application/octet-stream"

        return response
@@ -6,6 +6,7 @@ from controllers.console.wraps import setup_required
from controllers.inner_api import api
from controllers.inner_api.wraps import enterprise_inner_api_only
from events.tenant_event import tenant_was_created
+from extensions.ext_database import db
from models.account import Account
from services.account_service import TenantService

@@ -19,7 +20,7 @@ class EnterpriseWorkspace(Resource):
        parser.add_argument("owner_email", type=str, required=True, location="json")
        args = parser.parse_args()

-        account = Account.query.filter_by(email=args["owner_email"]).first()
+        account = db.session.query(Account).filter_by(email=args["owner_email"]).first()
        if account is None:
            return {"message": "owner account not found."}, 404
@@ -6,5 +6,6 @@ bp = Blueprint("service_api", __name__, url_prefix="/v1")
api = ExternalApi(bp)

from . import index
-from .app import app, audio, completion, conversation, file, message, workflow
+from .app import annotation, app, audio, completion, conversation, file, message, workflow
from .dataset import dataset, document, hit_testing, metadata, segment, upload_file
+from .workspace import models
api/controllers/service_api/app/annotation.py (new file, 107 lines)
@@ -0,0 +1,107 @@
from flask import request
from flask_restful import Resource, marshal, marshal_with, reqparse  # type: ignore
from werkzeug.exceptions import Forbidden

from controllers.service_api import api
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from extensions.ext_redis import redis_client
from fields.annotation_fields import (
    annotation_fields,
)
from libs.login import current_user
from models.model import App, EndUser
from services.annotation_service import AppAnnotationService


class AnnotationReplyActionApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
    def post(self, app_model: App, end_user: EndUser, action):
        parser = reqparse.RequestParser()
        parser.add_argument("score_threshold", required=True, type=float, location="json")
        parser.add_argument("embedding_provider_name", required=True, type=str, location="json")
        parser.add_argument("embedding_model_name", required=True, type=str, location="json")
        args = parser.parse_args()
        if action == "enable":
            result = AppAnnotationService.enable_app_annotation(args, app_model.id)
        elif action == "disable":
            result = AppAnnotationService.disable_app_annotation(app_model.id)
        else:
            raise ValueError("Unsupported annotation reply action")
        return result, 200


class AnnotationReplyActionStatusApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
    def get(self, app_model: App, end_user: EndUser, job_id, action):
        job_id = str(job_id)
        app_annotation_job_key = "{}_app_annotation_job_{}".format(action, str(job_id))
        cache_result = redis_client.get(app_annotation_job_key)
        if cache_result is None:
            raise ValueError("The job does not exist.")

        job_status = cache_result.decode()
        error_msg = ""
        if job_status == "error":
            app_annotation_error_key = "{}_app_annotation_error_{}".format(action, str(job_id))
            error_msg = redis_client.get(app_annotation_error_key).decode()

        return {"job_id": job_id, "job_status": job_status, "error_msg": error_msg}, 200


class AnnotationListApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
    def get(self, app_model: App, end_user: EndUser):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        keyword = request.args.get("keyword", default="", type=str)

        annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id(app_model.id, page, limit, keyword)
        response = {
            "data": marshal(annotation_list, annotation_fields),
            "has_more": len(annotation_list) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200

    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
    @marshal_with(annotation_fields)
    def post(self, app_model: App, end_user: EndUser):
        parser = reqparse.RequestParser()
        parser.add_argument("question", required=True, type=str, location="json")
        parser.add_argument("answer", required=True, type=str, location="json")
        args = parser.parse_args()
        annotation = AppAnnotationService.insert_app_annotation_directly(args, app_model.id)
        return annotation


class AnnotationUpdateDeleteApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
    @marshal_with(annotation_fields)
    def post(self, app_model: App, end_user: EndUser, annotation_id):
        if not current_user.is_editor:
            raise Forbidden()

        annotation_id = str(annotation_id)
        parser = reqparse.RequestParser()
        parser.add_argument("question", required=True, type=str, location="json")
        parser.add_argument("answer", required=True, type=str, location="json")
        args = parser.parse_args()
        annotation = AppAnnotationService.update_app_annotation_directly(args, app_model.id, annotation_id)
        return annotation

    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
    def delete(self, app_model: App, end_user: EndUser, annotation_id):
        if not current_user.is_editor:
            raise Forbidden()

        annotation_id = str(annotation_id)
        AppAnnotationService.delete_app_annotation(app_model.id, annotation_id)
        return {"result": "success"}, 200


api.add_resource(AnnotationReplyActionApi, "/apps/annotation-reply/<string:action>")
api.add_resource(AnnotationReplyActionStatusApi, "/apps/annotation-reply/<string:action>/status/<uuid:job_id>")
api.add_resource(AnnotationListApi, "/apps/annotations")
api.add_resource(AnnotationUpdateDeleteApi, "/apps/annotations/<uuid:annotation_id>")
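A sketch of driving these new Service API annotation endpoints with an app API key. The base URL, key format, provider/model names, and the "user" value are placeholders; the "user" argument placement follows the FetchUserArg declarations above:

import requests

BASE = "http://localhost:5001/v1"
HEADERS = {"Authorization": "Bearer app-<api-key>"}  # placeholder app key

# Create an annotation (user is read from the JSON body, per WhereisUserArg.JSON).
created = requests.post(
    f"{BASE}/apps/annotations",
    json={"question": "What is Dify?", "answer": "An LLM app platform.", "user": "end-user-1"},
    headers=HEADERS,
).json()

# List annotations with pagination (user comes from the query string here).
page = requests.get(
    f"{BASE}/apps/annotations", params={"page": 1, "limit": 20, "user": "end-user-1"}, headers=HEADERS
).json()
print(page["total"], len(page["data"]))

# Kick off the async "enable annotation reply" job; poll its status endpoint afterwards.
job = requests.post(
    f"{BASE}/apps/annotation-reply/enable",
    json={
        "score_threshold": 0.9,
        "embedding_provider_name": "openai",               # placeholder provider
        "embedding_model_name": "text-embedding-3-small",  # placeholder model
        "user": "end-user-1",
    },
    headers=HEADERS,
).json()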
@@ -1,3 +1,4 @@
+import json
import logging

from flask_restful import Resource, fields, marshal_with, reqparse  # type: ignore

@@ -10,7 +11,7 @@ from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import message_file_fields
-from fields.message_fields import agent_thought_fields, feedback_fields, retriever_resource_fields
+from fields.message_fields import agent_thought_fields, feedback_fields
from fields.raws import FilesContainedField
from libs.helper import TimestampField, uuid_value
from models.model import App, AppMode, EndUser

@@ -19,6 +20,14 @@ from services.message_service import MessageService


class MessageListApi(Resource):
+    def get_retriever_resources(self):
+        try:
+            if self.message_metadata:
+                return json.loads(self.message_metadata).get("retriever_resources", [])
+            return []
+        except (json.JSONDecodeError, TypeError):
+            return []
+
    message_fields = {
        "id": fields.String,
        "conversation_id": fields.String,

@@ -28,7 +37,7 @@ class MessageListApi(Resource):
        "answer": fields.String(attribute="re_sign_file_url_answer"),
        "message_files": fields.List(fields.Nested(message_file_fields)),
        "feedback": fields.Nested(feedback_fields, attribute="user_feedback", allow_null=True),
-        "retriever_resources": fields.List(fields.Nested(retriever_resource_fields)),
+        "retriever_resources": get_retriever_resources,
        "created_at": TimestampField,
        "agent_thoughts": fields.List(fields.Nested(agent_thought_fields)),
        "status": fields.String,
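A standalone sketch of the parsing behavior the helper above relies on: message_metadata is a JSON string that may or may not carry a "retriever_resources" key, and malformed metadata degrades to an empty list rather than raising:

import json

def get_retriever_resources(message_metadata):
    try:
        if message_metadata:
            return json.loads(message_metadata).get("retriever_resources", [])
        return []
    except (json.JSONDecodeError, TypeError):
        return []

print(get_retriever_resources('{"retriever_resources": [{"position": 1}]}'))  # [{'position': 1}]
print(get_retriever_resources("not json"))  # []
print(get_retriever_resources(None))        # []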
@@ -27,6 +27,7 @@ from core.model_runtime.errors.invoke import InvokeError
from extensions.ext_database import db
from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
from libs import helper
+from libs.helper import TimestampField
from models.model import App, AppMode, EndUser
from models.workflow import WorkflowRun, WorkflowRunStatus
from services.app_generate_service import AppGenerateService

@@ -44,8 +45,8 @@ workflow_run_fields = {
    "error": fields.String,
    "total_steps": fields.Integer,
    "total_tokens": fields.Integer,
-    "created_at": fields.DateTime,
-    "finished_at": fields.DateTime,
+    "created_at": TimestampField,
+    "finished_at": TimestampField,
    "elapsed_time": fields.Float,
}

@@ -53,7 +54,7 @@ workflow_run_fields = {
class WorkflowRunDetailApi(Resource):
    @validate_app_token
    @marshal_with(workflow_run_fields)
-    def get(self, app_model: App, workflow_id: str):
+    def get(self, app_model: App, workflow_run_id: str):
        """
        Get a workflow task running detail
        """

@@ -61,7 +62,7 @@ class WorkflowRunDetailApi(Resource):
        if app_mode != AppMode.WORKFLOW:
            raise NotWorkflowAppError()

-        workflow_run = db.session.query(WorkflowRun).filter(WorkflowRun.id == workflow_id).first()
+        workflow_run = db.session.query(WorkflowRun).filter(WorkflowRun.id == workflow_run_id).first()
        return workflow_run


@@ -162,6 +163,6 @@ class WorkflowAppLogApi(Resource):


api.add_resource(WorkflowRunApi, "/workflows/run")
-api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_id>")
+api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_run_id>")
api.add_resource(WorkflowTaskStopApi, "/workflows/tasks/<string:task_id>/stop")
api.add_resource(WorkflowAppLogApi, "/workflows/logs")
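A sketch of fetching a workflow run after this change; base URL, API key, and run id are placeholders:

import requests

run_id = "11111111-2222-3333-4444-555555555555"  # placeholder workflow run id
resp = requests.get(
    f"http://localhost:5001/v1/workflows/run/{run_id}",
    headers={"Authorization": "Bearer app-<api-key>"},  # placeholder app key
)
# created_at / finished_at now serialize as Unix timestamps (TimestampField)
# rather than formatted datetimes.
print(resp.json().get("created_at"))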
@@ -1,6 +1,6 @@
from flask import request
from flask_restful import marshal, reqparse  # type: ignore
-from werkzeug.exceptions import NotFound
+from werkzeug.exceptions import Forbidden, NotFound

import services.dataset_service
from controllers.service_api import api

@@ -12,7 +12,7 @@ from core.provider_manager import ProviderManager
from fields.dataset_fields import dataset_detail_fields
from libs.login import current_user
from models.dataset import Dataset, DatasetPermissionEnum
-from services.dataset_service import DatasetService
+from services.dataset_service import DatasetPermissionService, DatasetService


def _validate_name(name):

@@ -21,6 +21,12 @@ def _validate_name(name):
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description


class DatasetListApi(DatasetApiResource):
    """Resource for datasets."""

@@ -137,11 +143,151 @@ class DatasetListApi(DatasetApiResource):
class DatasetApi(DatasetApiResource):
    """Resource for dataset."""

    def get(self, _, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))
        data = marshal(dataset, dataset_detail_fields)
        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            if item_model in model_names:
                data["embedding_available"] = True
            else:
                data["embedding_available"] = False
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        return data, 200

    def patch(self, _, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="type is required. Name must be between 1 to 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid parent user list.")

        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )

        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )

        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if data.get("indexing_technique") == "high_quality":
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    def delete(self, _, dataset_id):
        """
        Deletes a dataset given its ID.

        Args:
            _: ignore
            dataset_id (UUID): The ID of the dataset to be deleted.

        Returns:

@@ -157,6 +303,7 @@ class DatasetApi(DatasetApiResource):
        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
+                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
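A sketch of the richer PATCH body the dataset Service API now accepts; the base URL, key format, and ids are placeholders:

import requests

dataset_id = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"  # placeholder dataset id
resp = requests.patch(
    f"http://localhost:5001/v1/datasets/{dataset_id}",
    json={
        "name": "Renamed dataset",
        "indexing_technique": "high_quality",
        "permission": "partial_members",
        "partial_member_list": ["<member-account-id>"],  # placeholder account ids
    },
    headers={"Authorization": "Bearer dataset-<api-key>"},  # placeholder dataset key
)
print(resp.json().get("partial_member_list"))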
@@ -341,7 +341,7 @@ class DocumentListApi(DatasetApiResource):
            search = f"%{search}%"
            query = query.filter(Document.name.like(search))

-        query = query.order_by(desc(Document.created_at))
+        query = query.order_by(desc(Document.created_at), desc(Document.position))

        paginated_documents = query.paginate(page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
@@ -66,7 +66,7 @@ class DatasetMetadataServiceApi(DatasetApiResource):
        metadata = MetadataService.update_metadata_name(dataset_id_str, metadata_id_str, args.get("name"))
        return marshal(metadata, dataset_metadata_fields), 200

-    def delete(self, dataset_id, metadata_id):
+    def delete(self, tenant_id, dataset_id, metadata_id):
        dataset_id_str = str(dataset_id)
        metadata_id_str = str(metadata_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
@@ -1,3 +1,4 @@
+from flask import request
from flask_login import current_user  # type: ignore
from flask_restful import marshal, reqparse  # type: ignore
from werkzeug.exceptions import NotFound

@@ -13,10 +14,20 @@ from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from extensions.ext_database import db
-from fields.segment_fields import segment_fields
-from models.dataset import Dataset, DocumentSegment
+from fields.segment_fields import child_chunk_fields, segment_fields
+from models.dataset import Dataset
from services.dataset_service import DatasetService, DocumentService, SegmentService
from services.entities.knowledge_entities.knowledge_entities import SegmentUpdateArgs
+from services.errors.chunk import (
+    ChildChunkDeleteIndexError,
+    ChildChunkIndexingError,
+)
+from services.errors.chunk import (
+    ChildChunkDeleteIndexError as ChildChunkDeleteIndexServiceError,
+)
+from services.errors.chunk import (
+    ChildChunkIndexingError as ChildChunkIndexingServiceError,
+)


class SegmentApi(DatasetApiResource):

@@ -70,10 +81,12 @@ class SegmentApi(DatasetApiResource):
            return {"error": "Segments is required"}, 400

    def get(self, tenant_id, dataset_id, document_id):
-        """Create single segment."""
+        """Get segments."""
        # check dataset
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
+        page = request.args.get("page", default=1, type=int)
+        limit = request.args.get("limit", default=20, type=int)
        dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

@@ -107,19 +120,23 @@ class SegmentApi(DatasetApiResource):
        status_list = args["status"]
        keyword = args["keyword"]

-        query = DocumentSegment.query.filter(
-            DocumentSegment.document_id == str(document_id), DocumentSegment.tenant_id == current_user.current_tenant_id
+        segments, total = SegmentService.get_segments(
+            document_id=document_id,
+            tenant_id=current_user.current_tenant_id,
+            status_list=args["status"],
+            keyword=args["keyword"],
        )

-        if status_list:
-            query = query.filter(DocumentSegment.status.in_(status_list))
-
-        if keyword:
-            query = query.where(DocumentSegment.content.ilike(f"%{keyword}%"))
-
-        total = query.count()
-        segments = query.order_by(DocumentSegment.position).all()
-        return {"data": marshal(segments, segment_fields), "doc_form": document.doc_form, "total": total}, 200
+        response = {
+            "data": marshal(segments, segment_fields),
+            "doc_form": document.doc_form,
+            "total": total,
+            "has_more": len(segments) == limit,
+            "limit": limit,
+            "page": page,
+        }
+
+        return response, 200


class DatasetSegmentApi(DatasetApiResource):

@@ -138,9 +155,8 @@ class DatasetSegmentApi(DatasetApiResource):
        if not document:
            raise NotFound("Document not found.")
        # check segment
-        segment = DocumentSegment.query.filter(
-            DocumentSegment.id == str(segment_id), DocumentSegment.tenant_id == current_user.current_tenant_id
-        ).first()
+        segment_id = str(segment_id)
+        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")
        SegmentService.delete_segment(segment, document, dataset)

@@ -179,9 +195,7 @@ class DatasetSegmentApi(DatasetApiResource):
            raise ProviderNotInitializeError(ex.description)
        # check segment
        segment_id = str(segment_id)
-        segment = DocumentSegment.query.filter(
-            DocumentSegment.id == str(segment_id), DocumentSegment.tenant_id == current_user.current_tenant_id
-        ).first()
+        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")

@@ -190,12 +204,200 @@ class DatasetSegmentApi(DatasetApiResource):
        parser.add_argument("segment", type=dict, required=False, nullable=True, location="json")
        args = parser.parse_args()

        SegmentService.segment_create_args_validate(args["segment"], document)
-        segment = SegmentService.update_segment(SegmentUpdateArgs(**args["segment"]), segment, document, dataset)
-        return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200
+        updated_segment = SegmentService.update_segment(
+            SegmentUpdateArgs(**args["segment"]), segment, document, dataset
+        )
+        return {"data": marshal(updated_segment, segment_fields), "doc_form": document.doc_form}, 200


class ChildChunkApi(DatasetApiResource):
    """Resource for child chunks."""

    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_knowledge_limit_check("add_segment", "dataset")
    def post(self, tenant_id, dataset_id, document_id, segment_id):
        """Create child chunk."""
        # check dataset
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

        # check document
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound("Document not found.")

        # check segment
        segment_id = str(segment_id)
        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")

        # check embedding model setting
        if dataset.indexing_technique == "high_quality":
            try:
                model_manager = ModelManager()
                model_manager.get_model_instance(
                    tenant_id=current_user.current_tenant_id,
                    provider=dataset.embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=dataset.embedding_model,
                )
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                )
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)

        # validate args
        parser = reqparse.RequestParser()
        parser.add_argument("content", type=str, required=True, nullable=False, location="json")
        args = parser.parse_args()

        try:
            child_chunk = SegmentService.create_child_chunk(args.get("content"), segment, document, dataset)
        except ChildChunkIndexingServiceError as e:
            raise ChildChunkIndexingError(str(e))

        return {"data": marshal(child_chunk, child_chunk_fields)}, 200

    def get(self, tenant_id, dataset_id, document_id, segment_id):
        """Get child chunks."""
        # check dataset
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

        # check document
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound("Document not found.")

        # check segment
        segment_id = str(segment_id)
        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")

        parser = reqparse.RequestParser()
        parser.add_argument("limit", type=int, default=20, location="args")
        parser.add_argument("keyword", type=str, default=None, location="args")
        parser.add_argument("page", type=int, default=1, location="args")
        args = parser.parse_args()

        page = args["page"]
        limit = min(args["limit"], 100)
        keyword = args["keyword"]

        child_chunks = SegmentService.get_child_chunks(segment_id, document_id, dataset_id, page, limit, keyword)

        return {
            "data": marshal(child_chunks.items, child_chunk_fields),
            "total": child_chunks.total,
            "total_pages": child_chunks.pages,
            "page": page,
            "limit": limit,
        }, 200


class DatasetChildChunkApi(DatasetApiResource):
    """Resource for updating child chunks."""

    @cloud_edition_billing_knowledge_limit_check("add_segment", "dataset")
    def delete(self, tenant_id, dataset_id, document_id, segment_id, child_chunk_id):
        """Delete child chunk."""
        # check dataset
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

        # check document
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound("Document not found.")

        # check segment
        segment_id = str(segment_id)
        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")

        # check child chunk
        child_chunk_id = str(child_chunk_id)
        child_chunk = SegmentService.get_child_chunk_by_id(
            child_chunk_id=child_chunk_id, tenant_id=current_user.current_tenant_id
        )
        if not child_chunk:
            raise NotFound("Child chunk not found.")

        try:
            SegmentService.delete_child_chunk(child_chunk, dataset)
        except ChildChunkDeleteIndexServiceError as e:
            raise ChildChunkDeleteIndexError(str(e))

        return {"result": "success"}, 200

    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_knowledge_limit_check("add_segment", "dataset")
    def patch(self, tenant_id, dataset_id, document_id, segment_id, child_chunk_id):
        """Update child chunk."""
        # check dataset
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

        # get document
        document = DocumentService.get_document(dataset_id, document_id)
        if not document:
            raise NotFound("Document not found.")

        # get segment
        segment = SegmentService.get_segment_by_id(segment_id=segment_id, tenant_id=current_user.current_tenant_id)
        if not segment:
            raise NotFound("Segment not found.")

        # get child chunk
        child_chunk = SegmentService.get_child_chunk_by_id(
            child_chunk_id=child_chunk_id, tenant_id=current_user.current_tenant_id
        )
        if not child_chunk:
            raise NotFound("Child chunk not found.")

        # validate args
        parser = reqparse.RequestParser()
        parser.add_argument("content", type=str, required=True, nullable=False, location="json")
        args = parser.parse_args()

        try:
            child_chunk = SegmentService.update_child_chunk(
                args.get("content"), child_chunk, segment, document, dataset
            )
        except ChildChunkIndexingServiceError as e:
            raise ChildChunkIndexingError(str(e))

        return {"data": marshal(child_chunk, child_chunk_fields)}, 200


api.add_resource(SegmentApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments")
api.add_resource(
    DatasetSegmentApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/<uuid:segment_id>"
)
api.add_resource(
    ChildChunkApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/<uuid:segment_id>/child_chunks"
)
api.add_resource(
    DatasetChildChunkApi,
    "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/<uuid:segment_id>/child_chunks/<uuid:child_chunk_id>",
)
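A sketch of the new child-chunk endpoints in use; the base URL, key, and all ids are placeholders:

import requests

BASE = "http://localhost:5001/v1"
HEADERS = {"Authorization": "Bearer dataset-<api-key>"}  # placeholder dataset key
path = (
    f"{BASE}/datasets/<dataset-uuid>/documents/<document-uuid>"
    f"/segments/<segment-uuid>/child_chunks"
)

# Create a child chunk under a segment.
requests.post(path, json={"content": "A smaller indexed chunk."}, headers=HEADERS)

# Page through child chunks, optionally filtering by keyword (limit is capped at 100 server-side).
listing = requests.get(path, params={"page": 1, "limit": 20, "keyword": "chunk"}, headers=HEADERS).json()
print(listing["total"], listing["total_pages"])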
api/controllers/service_api/workspace/models.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from flask_login import current_user  # type: ignore
from flask_restful import Resource  # type: ignore

from controllers.service_api import api
from controllers.service_api.wraps import validate_dataset_token
from core.model_runtime.utils.encoders import jsonable_encoder
from services.model_provider_service import ModelProviderService


class ModelProviderAvailableModelApi(Resource):
    @validate_dataset_token
    def get(self, _, model_type):
        tenant_id = current_user.current_tenant_id

        model_provider_service = ModelProviderService()
        models = model_provider_service.get_models_by_model_type(tenant_id=tenant_id, model_type=model_type)

        return jsonable_encoder({"data": models})


api.add_resource(ModelProviderAvailableModelApi, "/workspaces/current/models/model-types/<string:model_type>")
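A sketch of listing available models of a given type through this new route; a dataset API key is assumed given the validate_dataset_token decorator, and the base URL and model type are placeholders:

import requests

resp = requests.get(
    "http://localhost:5001/v1/workspaces/current/models/model-types/text-embedding",
    headers={"Authorization": "Bearer dataset-<api-key>"},  # placeholder dataset key
)
print(resp.json()["data"])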
@@ -59,6 +59,27 @@ def validate_app_token(view: Optional[Callable] = None, *, fetch_user_arg: Optio
        if tenant.status == TenantStatus.ARCHIVE:
            raise Forbidden("The workspace's status is archived.")

+        tenant_account_join = (
+            db.session.query(Tenant, TenantAccountJoin)
+            .filter(Tenant.id == api_token.tenant_id)
+            .filter(TenantAccountJoin.tenant_id == Tenant.id)
+            .filter(TenantAccountJoin.role.in_(["owner"]))
+            .filter(Tenant.status == TenantStatus.NORMAL)
+            .one_or_none()
+        )  # TODO: only owner information is required, so only one is returned.
+        if tenant_account_join:
+            tenant, ta = tenant_account_join
+            account = db.session.query(Account).filter(Account.id == ta.account_id).first()
+            # Login admin
+            if account:
+                account.current_tenant = tenant
+                current_app.login_manager._update_request_context_with_user(account)  # type: ignore
+                user_logged_in.send(current_app._get_current_object(), user=_get_user())  # type: ignore
+            else:
+                raise Unauthorized("Tenant owner account does not exist.")
+        else:
+            raise Unauthorized("Tenant does not exist.")

        kwargs["app_model"] = app_model

        if fetch_user_arg:
@@ -19,6 +19,8 @@ class PassportResource(Resource):
    def get(self):
        system_features = FeatureService.get_system_features()
        app_code = request.headers.get("X-App-Code")
+        user_id = request.args.get("user_id")

        if app_code is None:
            raise Unauthorized("X-App-Code header is missing.")

@@ -36,16 +38,33 @@ class PassportResource(Resource):
        if not app_model or app_model.status != "normal" or not app_model.enable_site:
            raise NotFound()

-        end_user = EndUser(
-            tenant_id=app_model.tenant_id,
-            app_id=app_model.id,
-            type="browser",
-            is_anonymous=True,
-            session_id=generate_session_id(),
-        )
-
-        db.session.add(end_user)
-        db.session.commit()
+        if user_id:
+            end_user = (
+                db.session.query(EndUser).filter(EndUser.app_id == app_model.id, EndUser.session_id == user_id).first()
+            )
+
+            if end_user:
+                pass
+            else:
+                end_user = EndUser(
+                    tenant_id=app_model.tenant_id,
+                    app_id=app_model.id,
+                    type="browser",
+                    is_anonymous=True,
+                    session_id=user_id,
+                )
+                db.session.add(end_user)
+                db.session.commit()
+        else:
+            end_user = EndUser(
+                tenant_id=app_model.tenant_id,
+                app_id=app_model.id,
+                type="browser",
+                is_anonymous=True,
+                session_id=generate_session_id(),
+            )
+            db.session.add(end_user)
+            db.session.commit()

        payload = {
            "iss": site.app_id,
@@ -332,7 +332,7 @@ class BaseAgentRunner(AppRunner):
            agent_thought = updated_agent_thought

        if thought:
-            agent_thought.thought = thought
+            agent_thought.thought += thought

        if tool_name:
            agent_thought.tool = tool_name
@@ -12,39 +12,45 @@ class CotAgentOutputParser:
     def handle_react_stream_output(
         cls, llm_response: Generator[LLMResultChunk, None, None], usage_dict: dict
     ) -> Generator[Union[str, AgentScratchpadUnit.Action], None, None]:
-        def parse_action(json_str):
-            try:
-                action = json.loads(json_str, strict=False)
-                action_name = None
-                action_input = None
+        def parse_action(action) -> Union[str, AgentScratchpadUnit.Action]:
+            action_name = None
+            action_input = None
+            if isinstance(action, str):
+                try:
+                    action = json.loads(action, strict=False)
+                except json.JSONDecodeError:
+                    return action or ""
 
-                # cohere always returns a list
-                if isinstance(action, list) and len(action) == 1:
-                    action = action[0]
+            # cohere always returns a list
+            if isinstance(action, list) and len(action) == 1:
+                action = action[0]
 
-                for key, value in action.items():
-                    if "input" in key.lower():
-                        action_input = value
-                    else:
-                        action_name = value
-
-                if action_name is not None and action_input is not None:
-                    return AgentScratchpadUnit.Action(
-                        action_name=action_name,
-                        action_input=action_input,
-                    )
-                else:
-                    return json_str or ""
-            except:
-                return json_str or ""
+            for key, value in action.items():
+                if "input" in key.lower():
+                    action_input = value
+                else:
+                    action_name = value
 
-        def extra_json_from_code_block(code_block) -> Generator[Union[str, AgentScratchpadUnit.Action], None, None]:
-            code_blocks = re.findall(r"```(.*?)```", code_block, re.DOTALL)
-            if not code_blocks:
-                return
-            for block in code_blocks:
-                json_text = re.sub(r"^[a-zA-Z]+\n", "", block.strip(), flags=re.MULTILINE)
-                yield parse_action(json_text)
+            if action_name is not None and action_input is not None:
+                return AgentScratchpadUnit.Action(
+                    action_name=action_name,
+                    action_input=action_input,
+                )
+            else:
+                return json.dumps(action)
+
+        def extra_json_from_code_block(code_block) -> list[Union[list, dict]]:
+            blocks = re.findall(r"```[json]*\s*([\[{].*[]}])\s*```", code_block, re.DOTALL | re.IGNORECASE)
+            if not blocks:
+                return []
+            try:
+                json_blocks = []
+                for block in blocks:
+                    json_text = re.sub(r"^[a-zA-Z]+\n", "", block.strip(), flags=re.MULTILINE)
+                    json_blocks.append(json.loads(json_text, strict=False))
+                return json_blocks
+            except:
+                return []
 
         code_block_cache = ""
         code_block_delimiter_count = 0

@@ -78,7 +84,7 @@ class CotAgentOutputParser:
             delta = response_content[index : index + steps]
             yield_delta = False
 
-            if delta == "`":
+            if not in_json and delta == "`":
                 last_character = delta
                 code_block_cache += delta
                 code_block_delimiter_count += 1

@@ -159,8 +165,14 @@ class CotAgentOutputParser:
             if code_block_delimiter_count == 3:
                 if in_code_block:
                     last_character = delta
-                    yield from extra_json_from_code_block(code_block_cache)
-                    code_block_cache = ""
+                    action_json_list = extra_json_from_code_block(code_block_cache)
+                    if action_json_list:
+                        for action_json in action_json_list:
+                            yield parse_action(action_json)
+                        code_block_cache = ""
+                    else:
+                        index += steps
+                        continue
 
                 in_code_block = not in_code_block
                 code_block_delimiter_count = 0
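For reference, a condensed, standalone sketch of what the reworked `parse_action` does with a ReAct-style tool call; a plain tuple stands in for `AgentScratchpadUnit.Action`, and the payload is illustrative:

```python
import json
from typing import Union


def parse_action(action: Union[str, list, dict]) -> Union[str, tuple]:
    # Simplified stand-in for CotAgentOutputParser.parse_action.
    if isinstance(action, str):
        try:
            action = json.loads(action, strict=False)
        except json.JSONDecodeError:
            return action or ""
    if isinstance(action, list) and len(action) == 1:  # cohere wraps in a list
        action = action[0]
    action_name = action_input = None
    for key, value in action.items():
        if "input" in key.lower():
            action_input = value
        else:
            action_name = value
    if action_name is not None and action_input is not None:
        return (action_name, action_input)
    return json.dumps(action)


llm_output = '{"action": "weather", "action_input": {"city": "Berlin"}}'
assert parse_action(llm_output) == ("weather", {"city": "Berlin"})
```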
@@ -70,11 +70,20 @@ class AgentStrategyIdentity(ToolIdentity):
     pass
 
 
+class AgentFeature(enum.StrEnum):
+    """
+    Agent Feature, used to describe the features of the agent strategy.
+    """
+
+    HISTORY_MESSAGES = "history-messages"
+
+
 class AgentStrategyEntity(BaseModel):
     identity: AgentStrategyIdentity
     parameters: list[AgentStrategyParameter] = Field(default_factory=list)
     description: I18nObject = Field(..., description="The description of the agent strategy")
     output_schema: Optional[dict] = None
+    features: Optional[list[AgentFeature]] = None
 
     # pydantic configs
     model_config = ConfigDict(protected_namespaces=())
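A hedged sketch of how the new optional `features` field behaves on a pydantic model; `StrategyStub` is a hypothetical stand-in for `AgentStrategyEntity`, trimmed to the relevant field:

```python
import enum
from typing import Optional

from pydantic import BaseModel


class AgentFeature(enum.StrEnum):
    HISTORY_MESSAGES = "history-messages"


class StrategyStub(BaseModel):
    # Hypothetical, trimmed stand-in for AgentStrategyEntity.
    name: str
    features: Optional[list[AgentFeature]] = None


s = StrategyStub(name="react", features=["history-messages"])
assert s.features == [AgentFeature.HISTORY_MESSAGES]  # values coerce to enum members
assert StrategyStub(name="plain").features is None    # field stays optional
```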
@@ -16,7 +16,6 @@ class ModelConfigConverter:
         """
         Convert app model config dict to entity.
         :param app_config: app config
-        :param skip_check: skip check
         :raises ProviderTokenNotInitError: provider token not init error
         :return: app orchestration config entity
         """
@@ -88,7 +88,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         """
         if not args.get("query"):
             raise ValueError("query is required")

@@ -181,10 +181,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 
         :param app_model: App
         :param workflow: Workflow
         :param node_id: the node id
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is streamed
         """
         if not node_id:
             raise ValueError("node_id is required")

@@ -238,10 +238,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 
         :param app_model: App
         :param workflow: Workflow
         :param node_id: the node id
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         """
         if not node_id:
             raise ValueError("node_id is required")

@@ -80,7 +80,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         """
         if not streaming:
             raise ValueError("Agent Chat App does not support blocking mode")

@@ -157,6 +157,7 @@ class AppRunner:
         :param files: files
         :param query: query
         :param memory: memory
+        :param image_detail_config: the image quality config
         :return:
         """
         # get prompt without memory and context

@@ -76,7 +76,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         """
         if not args.get("query"):
             raise ValueError("query is required")

@@ -74,7 +74,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         """
         query = args["query"]
         if not isinstance(query, str):
@@ -148,6 +148,13 @@ class MessageBasedAppGenerator(BaseAppGenerator):
         # get conversation introduction
         introduction = self._get_conversation_introduction(application_generate_entity)
 
+        # get conversation name
+        if isinstance(application_generate_entity, AdvancedChatAppGenerateEntity):
+            query = application_generate_entity.query or "New conversation"
+        else:
+            query = next(iter(application_generate_entity.inputs.values()), "New conversation")
+        conversation_name = (query[:20] + "…") if len(query) > 20 else query
+
         if not conversation:
             conversation = Conversation(
                 app_id=app_config.app_id,

@@ -156,7 +163,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
                 model_id=model_id,
                 override_model_configs=json.dumps(override_model_configs) if override_model_configs else None,
                 mode=app_config.app_mode.value,
-                name="New conversation",
+                name=conversation_name,
                 inputs=application_generate_entity.inputs,
                 introduction=introduction,
                 system_instruction="",
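The naming rule introduced above is easy to verify in isolation:

```python
def provisional_name(query: str) -> str:
    # Mirrors the hunk above: cap the auto-generated title at 20 characters.
    return (query[:20] + "…") if len(query) > 20 else query


assert provisional_name("Hi") == "Hi"
assert provisional_name("Summarize the quarterly report for me") == "Summarize the quarte…"
```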
@@ -158,7 +158,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
         :param user: account or end user
         :param application_generate_entity: application generate entity
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is stream
         :param workflow_thread_pool_id: workflow thread pool id
         """
         # init queue manager

@@ -208,10 +208,10 @@ class WorkflowAppGenerator(BaseAppGenerator):
 
         :param app_model: App
         :param workflow: Workflow
         :param node_id: the node id
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is streamed
         """
         if not node_id:
             raise ValueError("node_id is required")

@@ -264,10 +264,10 @@ class WorkflowAppGenerator(BaseAppGenerator):
 
         :param app_model: App
         :param workflow: Workflow
         :param node_id: the node id
         :param user: account or end user
         :param args: request args
         :param invoke_from: invoke from source
-        :param stream: is stream
+        :param streaming: is streamed
         """
         if not node_id:
             raise ValueError("node_id is required")

@@ -44,9 +44,6 @@ class WorkflowAppRunner(WorkflowBasedAppRunner):
     def run(self) -> None:
         """
         Run application
-        :param application_generate_entity: application generate entity
-        :param queue_manager: application queue manager
-        :return:
         """
         app_config = self.application_generate_entity.app_config
         app_config = cast(WorkflowAppConfig, app_config)
@@ -27,6 +27,9 @@ class RateLimit:
 
     def __init__(self, client_id: str, max_active_requests: int):
         self.max_active_requests = max_active_requests
+        # must be called after max_active_requests is set
+        if self.disabled():
+            return
         if hasattr(self, "initialized"):
             return
         self.initialized = True

@@ -37,6 +40,8 @@ class RateLimit:
         self.flush_cache(use_local_value=True)
 
     def flush_cache(self, use_local_value=False):
+        if self.disabled():
+            return
         self.last_recalculate_time = time.time()
         # flush max active requests
         if use_local_value or not redis_client.exists(self.max_active_requests_key):

@@ -59,18 +64,18 @@ class RateLimit:
             redis_client.hdel(self.active_requests_key, *timeout_requests)
 
     def enter(self, request_id: Optional[str] = None) -> str:
+        if self.disabled():
+            return RateLimit._UNLIMITED_REQUEST_ID
         if time.time() - self.last_recalculate_time > RateLimit._ACTIVE_REQUESTS_COUNT_FLUSH_INTERVAL:
             self.flush_cache()
-        if self.max_active_requests <= 0:
-            return RateLimit._UNLIMITED_REQUEST_ID
         if not request_id:
             request_id = RateLimit.gen_request_key()
 
         active_requests_count = redis_client.hlen(self.active_requests_key)
         if active_requests_count >= self.max_active_requests:
             raise AppInvokeQuotaExceededError(
-                "Too many requests. Please try again later. The current maximum "
-                "concurrent requests allowed is {}.".format(self.max_active_requests)
+                f"Too many requests. Please try again later. The current maximum concurrent requests allowed "
+                f"for {self.client_id} is {self.max_active_requests}."
             )
         redis_client.hset(self.active_requests_key, request_id, str(time.time()))
         return request_id

@@ -80,6 +85,9 @@ class RateLimit:
             return
         redis_client.hdel(self.active_requests_key, request_id)
 
+    def disabled(self):
+        return self.max_active_requests <= 0
+
     @staticmethod
     def gen_request_key() -> str:
         return str(uuid.uuid4())
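A sketch of the enter/exit protocol the patched `RateLimit` implements, with the Redis hash replaced by an in-memory dict so the example runs standalone (names are illustrative, not the project's):

```python
import time
import uuid


class LocalRateLimit:
    """In-memory sketch of the RateLimit protocol above."""

    def __init__(self, max_active_requests: int):
        self.max_active_requests = max_active_requests
        self.active: dict[str, float] = {}  # request_id -> start time

    def disabled(self) -> bool:
        # max_active_requests <= 0 means "no limit", matching the hunk above
        return self.max_active_requests <= 0

    def enter(self) -> str:
        if self.disabled():
            return "unlimited"
        if len(self.active) >= self.max_active_requests:
            raise RuntimeError(f"Too many requests: max {self.max_active_requests} concurrent")
        request_id = str(uuid.uuid4())
        self.active[request_id] = time.time()
        return request_id

    def exit(self, request_id: str) -> None:
        self.active.pop(request_id, None)


limiter = LocalRateLimit(max_active_requests=1)
rid = limiter.enter()
try:
    pass  # do the rate-limited work here
finally:
    limiter.exit(rid)  # always release the slot, even on failure
```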
@@ -48,7 +48,7 @@ class MessageCycleManage:
     def _generate_conversation_name(self, *, conversation_id: str, query: str) -> Optional[Thread]:
         """
         Generate conversation name.
-        :param conversation: conversation
+        :param conversation_id: conversation id
         :param query: query
         :return: thread
         """
@@ -44,6 +44,7 @@ from core.app.entities.task_entities import (
     WorkflowFinishStreamResponse,
     WorkflowStartStreamResponse,
 )
+from core.app.task_pipeline.exc import WorkflowRunNotFoundError
 from core.file import FILE_MODEL_IDENTITY, File
 from core.model_runtime.utils.encoders import jsonable_encoder
 from core.ops.entities.trace_entity import TraceTaskName

@@ -66,8 +67,6 @@ from models.workflow import (
     WorkflowRunStatus,
 )
 
-from .exc import WorkflowRunNotFoundError
-
 
 class WorkflowCycleManage:
     def __init__(

@@ -154,7 +153,7 @@ class WorkflowCycleManage:
     ) -> WorkflowRun:
         """
         Workflow run success
-        :param workflow_run: workflow run
+        :param workflow_run_id: workflow run id
         :param start_at: start time
         :param total_tokens: total tokens
         :param total_steps: total steps

@@ -166,7 +165,7 @@ class WorkflowCycleManage:
 
         outputs = WorkflowEntry.handle_special_values(outputs)
 
-        workflow_run.status = WorkflowRunStatus.SUCCEEDED.value
+        workflow_run.status = WorkflowRunStatus.SUCCEEDED
         workflow_run.outputs = json.dumps(outputs or {})
         workflow_run.elapsed_time = time.perf_counter() - start_at
         workflow_run.total_tokens = total_tokens

@@ -201,7 +200,7 @@ class WorkflowCycleManage:
         workflow_run = self._get_workflow_run(session=session, workflow_run_id=workflow_run_id)
         outputs = WorkflowEntry.handle_special_values(dict(outputs) if outputs else None)
 
-        workflow_run.status = WorkflowRunStatus.PARTIAL_SUCCESSED.value
+        workflow_run.status = WorkflowRunStatus.PARTIAL_SUCCEEDED.value
         workflow_run.outputs = json.dumps(outputs or {})
         workflow_run.elapsed_time = time.perf_counter() - start_at
         workflow_run.total_tokens = total_tokens

@@ -237,7 +236,7 @@ class WorkflowCycleManage:
     ) -> WorkflowRun:
         """
         Workflow run failed
-        :param workflow_run: workflow run
+        :param workflow_run_id: workflow run id
         :param start_at: start time
         :param total_tokens: total tokens
         :param total_steps: total steps
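Dropping `.value` works because a `str`-based enum member already compares and formats as its plain value. A quick demonstration with a hypothetical, trimmed-down enum (the real member values may differ):

```python
from enum import StrEnum


class WorkflowRunStatus(StrEnum):
    # Hypothetical subset of the real enum, for illustration only.
    SUCCEEDED = "succeeded"
    PARTIAL_SUCCEEDED = "partial-succeeded"


status = WorkflowRunStatus.SUCCEEDED
assert status == "succeeded"       # StrEnum members compare equal to their value
assert f"{status}" == "succeeded"  # and format as the plain string
```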
@@ -146,6 +146,7 @@ class BasicProviderConfig(BaseModel):
         BOOLEAN = CommonParameterType.BOOLEAN.value
         APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
         MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
+        TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
 
         @classmethod
         def value_of(cls, value: str) -> "ProviderConfig.Type":

@@ -49,6 +49,7 @@ class FileAttribute(StrEnum):
     TRANSFER_METHOD = "transfer_method"
     URL = "url"
     EXTENSION = "extension"
+    RELATED_ID = "related_id"
 
 
 class ArrayFileAttribute(StrEnum):

@@ -34,6 +34,8 @@ def get_attr(*, file: File, attr: FileAttribute):
             return file.remote_url
         case FileAttribute.EXTENSION:
             return file.extension
+        case FileAttribute.RELATED_ID:
+            return file.related_id
 
 
 def to_prompt_message_content(
@@ -4,12 +4,10 @@ import time
 from typing import Optional
 
 from configs import dify_config
+from constants import IMAGE_EXTENSIONS
 from core.helper.url_signer import UrlSigner
 from extensions.ext_storage import storage
 
-IMAGE_EXTENSIONS = ["jpg", "jpeg", "png", "webp", "gif", "svg"]
-IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS])
-
 
 class UploadFileParser:
     @classmethod

@@ -38,7 +36,7 @@ class UploadFileParser:
         """
         get signed url from upload file
 
-        :param upload_file: UploadFile object
+        :param upload_file_id: the id of UploadFile object
         :return:
         """
         base_url = dify_config.FILES_URL
@@ -60,6 +60,7 @@ class CodeExecutor:
         """
         Execute code
         :param language: code language
+        :param preload: the preload script
         :param code: code
         :return:
         """

@@ -53,7 +53,7 @@ def pin_position_map(original_position_map: dict[str, int], pin_list: list[str])
     """
     Pin the items in the pin list to the beginning of the position map.
     Overall logic: exclude > include > pin
-    :param position_map: the position map to be sorted and filtered
+    :param original_position_map: the position map to be sorted and filtered
     :param pin_list: the list of pins to be put at the beginning
     :return: the sorted position map
     """
@@ -56,8 +56,12 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
             response = client.request(method=method, url=url, **kwargs)
     elif dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL:
         proxy_mounts = {
-            "http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
-            "https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
+            "http://": httpx.HTTPTransport(
+                proxy=dify_config.SSRF_PROXY_HTTP_URL, verify=HTTP_REQUEST_NODE_SSL_VERIFY
+            ),
+            "https://": httpx.HTTPTransport(
+                proxy=dify_config.SSRF_PROXY_HTTPS_URL, verify=HTTP_REQUEST_NODE_SSL_VERIFY
+            ),
         }
         with httpx.Client(mounts=proxy_mounts, verify=HTTP_REQUEST_NODE_SSL_VERIFY) as client:
             response = client.request(method=method, url=url, **kwargs)
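The fix above passes the SSL-verify flag to each mounted transport, not only to the client: transports supplied via `mounts=` are used as constructed and do not inherit the client-level `verify`. A minimal sketch with placeholder proxy URLs:

```python
import httpx

SSL_VERIFY = False  # stand-in for HTTP_REQUEST_NODE_SSL_VERIFY

proxy_mounts = {
    # Each scheme gets its own transport; `verify` must be set here as well,
    # otherwise proxied connections fall back to the transport's default.
    "http://": httpx.HTTPTransport(proxy="http://proxy.example:3128", verify=SSL_VERIFY),
    "https://": httpx.HTTPTransport(proxy="http://proxy.example:3128", verify=SSL_VERIFY),
}

with httpx.Client(mounts=proxy_mounts, verify=SSL_VERIFY) as client:
    response = client.get("https://example.com")
```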
@@ -38,12 +38,7 @@ class ToolParameterCache:
             return None
 
     def set(self, parameters: dict) -> None:
-        """
-        Cache model provider credentials.
-
-        :param credentials: provider credentials
-        :return:
-        """
+        """Cache model provider credentials."""
         redis_client.setex(self.cache_key, 86400, json.dumps(parameters))
 
     def delete(self) -> None:
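For context, the cache write this method performs is a single `SETEX`, which stores the value and its 24-hour TTL atomically. A redis-py sketch (connection details are illustrative):

```python
import json

import redis

r = redis.Redis()  # assumes a reachable local Redis

cache_key = "tool_parameters:example"  # hypothetical key
parameters = {"temperature": 0.7}

# SETEX stores the value and the expiry together: 86400 seconds = 24 hours.
r.setex(cache_key, 86400, json.dumps(parameters))

cached = r.get(cache_key)
if cached is not None:
    parameters = json.loads(cached)
```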
@@ -187,7 +187,7 @@ class IndexingRunner:
                     },
                 )
                 if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
-                    child_chunks = document_segment.child_chunks
+                    child_chunks = document_segment.get_child_chunks()
                     if child_chunks:
                         child_documents = []
                         for child_chunk in child_chunks:
@@ -1,6 +1,6 @@
 # Written by YORKI MINAKO🤡, Edited by Xiaoyi
 CONVERSATION_TITLE_PROMPT = """You need to decompose the user's input into "subject" and "intention" in order to accurately figure out what the user's input language actually is.
-Notice: the language type user use could be diverse, which can be English, Chinese, Español, Arabic, Japanese, French, and etc.
+Notice: the language type user use could be diverse, which can be English, Chinese, Italian, Español, Arabic, Japanese, French, and etc.
 MAKE SURE your output is the SAME language as the user's input!
 Your output is restricted only to: (Input language) Intention + Subject(short as possible)
 Your output MUST be a valid JSON.
@@ -10,7 +10,7 @@
 - 支持 5 种模型类型的能力调用
 
   - `LLM` - LLM 文本补全、对话,预计算 tokens 能力
-  - `Text Embedidng Model` - 文本 Embedding ,预计算 tokens 能力
+  - `Text Embedding Model` - 文本 Embedding ,预计算 tokens 能力
   - `Rerank Model` - 分段 Rerank 能力
   - `Speech-to-text Model` - 语音转文本能力
   - `Text-to-speech Model` - 文本转语音能力
@@ -38,7 +38,6 @@ class TTSModel(AIModel):
        :param credentials: model credentials
        :param voice: model timbre
        :param content_text: text content to be translated
-        :param streaming: output is streaming
        :param user: unique user id
        :return: translated audio file
        """
@@ -1,170 +0,0 @@
-from collections.abc import Mapping
-from typing import Optional
-
-import openai
-from httpx import Timeout
-from openai import OpenAI
-from openai.types import ModerationCreateResponse
-
-from core.model_runtime.entities.model_entities import ModelPropertyKey
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeBadRequestError,
-    InvokeConnectionError,
-    InvokeError,
-    InvokeRateLimitError,
-    InvokeServerUnavailableError,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
-
-
-class OpenAIModerationModel(ModerationModel):
-    """
-    Model class for OpenAI text moderation model.
-    """
-
-    def _invoke(self, model: str, credentials: dict, text: str, user: Optional[str] = None) -> bool:
-        """
-        Invoke moderation model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param text: text to moderate
-        :param user: unique user id
-        :return: false if text is safe, true otherwise
-        """
-        # transform credentials to kwargs for model instance
-        credentials_kwargs = self._to_credential_kwargs(credentials)
-
-        # init model client
-        client = OpenAI(**credentials_kwargs)
-
-        # chars per chunk
-        length = self._get_max_characters_per_chunk(model, credentials)
-        text_chunks = [text[i : i + length] for i in range(0, len(text), length)]
-
-        max_text_chunks = self._get_max_chunks(model, credentials)
-        chunks = [text_chunks[i : i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]
-
-        for text_chunk in chunks:
-            moderation_result = self._moderation_invoke(model=model, client=client, texts=text_chunk)
-
-            for result in moderation_result.results:
-                if result.flagged is True:
-                    return True
-
-        return False
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        """
-        Validate model credentials
-
-        :param model: model name
-        :param credentials: model credentials
-        :return:
-        """
-        try:
-            # transform credentials to kwargs for model instance
-            credentials_kwargs = self._to_credential_kwargs(credentials)
-            client = OpenAI(**credentials_kwargs)
-
-            # call moderation model
-            self._moderation_invoke(
-                model=model,
-                client=client,
-                texts=["ping"],
-            )
-        except Exception as ex:
-            raise CredentialsValidateFailedError(str(ex))
-
-    def _moderation_invoke(self, model: str, client: OpenAI, texts: list[str]) -> ModerationCreateResponse:
-        """
-        Invoke moderation model
-
-        :param model: model name
-        :param client: model client
-        :param texts: texts to moderate
-        :return: false if text is safe, true otherwise
-        """
-        # call moderation model
-        moderation_result = client.moderations.create(model=model, input=texts)
-
-        return moderation_result
-
-    def _get_max_characters_per_chunk(self, model: str, credentials: dict) -> int:
-        """
-        Get max characters per chunk
-
-        :param model: model name
-        :param credentials: model credentials
-        :return: max characters per chunk
-        """
-        model_schema = self.get_model_schema(model, credentials)
-
-        if model_schema and ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK in model_schema.model_properties:
-            max_characters_per_chunk: int = model_schema.model_properties[ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK]
-            return max_characters_per_chunk
-
-        return 2000
-
-    def _get_max_chunks(self, model: str, credentials: dict) -> int:
-        """
-        Get max chunks for given embedding model
-
-        :param model: model name
-        :param credentials: model credentials
-        :return: max chunks
-        """
-        model_schema = self.get_model_schema(model, credentials)
-
-        if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties:
-            max_chunks: int = model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
-            return max_chunks
-
-        return 1
-
-    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
-        """
-        Transform credentials to kwargs for model instance
-
-        :param credentials:
-        :return:
-        """
-        credentials_kwargs = {
-            "api_key": credentials["openai_api_key"],
-            "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0),
-            "max_retries": 1,
-        }
-
-        if credentials.get("openai_api_base"):
-            openai_api_base = credentials["openai_api_base"].rstrip("/")
-            credentials_kwargs["base_url"] = openai_api_base + "/v1"
-
-        if "openai_organization" in credentials:
-            credentials_kwargs["organization"] = credentials["openai_organization"]
-
-        return credentials_kwargs
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
-            InvokeServerUnavailableError: [openai.InternalServerError],
-            InvokeRateLimitError: [openai.RateLimitError],
-            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
-            InvokeBadRequestError: [
-                openai.BadRequestError,
-                openai.NotFoundError,
-                openai.UnprocessableEntityError,
-                openai.APIError,
-            ],
-        }
@@ -1,22 +0,0 @@
-- claude-3-haiku@20240307
-- claude-3-opus@20240229
-- claude-3-sonnet@20240229
-- claude-3-5-sonnet-v2@20241022
-- claude-3-5-sonnet@20240620
-- gemini-1.0-pro-vision-001
-- gemini-1.0-pro-002
-- gemini-1.5-flash-001
-- gemini-1.5-flash-002
-- gemini-1.5-pro-001
-- gemini-1.5-pro-002
-- gemini-2.0-flash-001
-- gemini-2.0-flash-exp
-- gemini-2.0-flash-lite-preview-02-05
-- gemini-2.0-flash-thinking-exp-01-21
-- gemini-2.0-flash-thinking-exp-1219
-- gemini-2.0-pro-exp-02-05
-- gemini-exp-1114
-- gemini-exp-1121
-- gemini-exp-1206
-- gemini-flash-experimental
-- gemini-pro-experimental
@@ -1,41 +0,0 @@
-model: gemini-2.0-flash-001
-label:
-  en_US: Gemini 2.0 Flash 001
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 1048576
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,41 +0,0 @@
-model: gemini-2.0-flash-lite-preview-02-05
-label:
-  en_US: Gemini 2.0 Flash Lite Preview 0205
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 1048576
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,39 +0,0 @@
-model: gemini-2.0-flash-thinking-exp-01-21
-label:
-  en_US: Gemini 2.0 Flash Thinking Exp 0121
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,39 +0,0 @@
-model: gemini-2.0-flash-thinking-exp-1219
-label:
-  en_US: Gemini 2.0 Flash Thinking Exp 1219
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,37 +0,0 @@
-model: gemini-2.0-pro-exp-02-05
-label:
-  en_US: Gemini 2.0 Pro Exp 0205
-model_type: llm
-features:
-  - agent-thought
-  - document
-model_properties:
-  mode: chat
-  context_size: 2000000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      en_US: Top k
-    type: int
-    help:
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: presence_penalty
-    use_template: presence_penalty
-  - name: frequency_penalty
-    use_template: frequency_penalty
-  - name: max_output_tokens
-    use_template: max_tokens
-    required: true
-    default: 8192
-    min: 1
-    max: 8192
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,41 +0,0 @@
-model: gemini-exp-1114
-label:
-  en_US: Gemini exp 1114
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,41 +0,0 @@
-model: gemini-exp-1121
-label:
-  en_US: Gemini exp 1121
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 32767
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,41 +0,0 @@
-model: gemini-exp-1206
-label:
-  en_US: Gemini exp 1206
-model_type: llm
-features:
-  - agent-thought
-  - vision
-  - tool-call
-  - stream-tool-call
-  - document
-  - video
-  - audio
-model_properties:
-  mode: chat
-  context_size: 2097152
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    help:
-      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-      en_US: Only sample from the top K options for each subsequent token.
-    required: false
-  - name: max_output_tokens
-    use_template: max_tokens
-    default: 8192
-    min: 1
-    max: 8192
-  - name: json_schema
-    use_template: json_schema
-pricing:
-  input: '0.00'
-  output: '0.00'
-  unit: '0.000001'
-  currency: USD
@@ -1,66 +0,0 @@
-model: glm-4-air-0111
-label:
-  en_US: glm-4-air-0111
-model_type: llm
-features:
-  - multi-tool-call
-  - agent-thought
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 131072
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.95
-    min: 0.0
-    max: 1.0
-    help:
-      zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
-      en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
-  - name: top_p
-    use_template: top_p
-    default: 0.7
-    help:
-      zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
-      en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
-  - name: do_sample
-    label:
-      zh_Hans: 采样策略
-      en_US: Sampling strategy
-    type: boolean
-    help:
-      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。
-      en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
-    default: true
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 4095
-  - name: web_search
-    type: boolean
-    label:
-      zh_Hans: 联网搜索
-      en_US: Web Search
-    default: false
-    help:
-      zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
-      en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
-  - name: response_format
-    label:
-      zh_Hans: 回复格式
-      en_US: Response Format
-    type: string
-    help:
-      zh_Hans: 指定模型必须输出的格式
-      en_US: specifying the format that the model must output
-    required: false
-    options:
-      - text
-      - json_object
-pricing:
-  input: '0.0005'
-  output: '0.0005'
-  unit: '0.001'
-  currency: RMB
@@ -213,9 +213,24 @@ class LangFuseDataTrace(BaseTraceInstance):
 
         if process_data and process_data.get("model_mode") == "chat":
             total_token = metadata.get("total_tokens", 0)
+            prompt_tokens = 0
+            completion_tokens = 0
+            try:
+                if outputs.get("usage"):
+                    prompt_tokens = outputs.get("usage", {}).get("prompt_tokens", 0)
+                    completion_tokens = outputs.get("usage", {}).get("completion_tokens", 0)
+                else:
+                    prompt_tokens = process_data.get("usage", {}).get("prompt_tokens", 0)
+                    completion_tokens = process_data.get("usage", {}).get("completion_tokens", 0)
+            except Exception:
+                logger.error("Failed to extract usage", exc_info=True)
+
             # add generation
             generation_usage = GenerationUsage(
+                input=prompt_tokens,
+                output=completion_tokens,
                 total=total_token,
                 unit=UnitEnum.TOKENS,
             )
 
             node_generation_data = LangfuseGeneration(
@@ -199,6 +199,7 @@ class LangSmithDataTrace(BaseTraceInstance):
             )
 
         process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
+
         if process_data and process_data.get("model_mode") == "chat":
             run_type = LangSmithRunType.llm
             metadata.update(

@@ -212,9 +213,23 @@ class LangSmithDataTrace(BaseTraceInstance):
         else:
             run_type = LangSmithRunType.tool
 
+        prompt_tokens = 0
+        completion_tokens = 0
+        try:
+            if outputs.get("usage"):
+                prompt_tokens = outputs.get("usage", {}).get("prompt_tokens", 0)
+                completion_tokens = outputs.get("usage", {}).get("completion_tokens", 0)
+            else:
+                prompt_tokens = process_data.get("usage", {}).get("prompt_tokens", 0)
+                completion_tokens = process_data.get("usage", {}).get("completion_tokens", 0)
+        except Exception:
+            logger.error("Failed to extract usage", exc_info=True)
+
         node_dotted_order = generate_dotted_order(node_execution_id, created_at, workflow_dotted_order)
         langsmith_run = LangSmithRunModel(
             total_tokens=node_total_tokens,
+            input_tokens=prompt_tokens,
+            output_tokens=completion_tokens,
             name=node_type,
             inputs=inputs,
             run_type=run_type,
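Both tracer hunks apply the same fallback: prefer token usage reported in the node's `outputs`, fall back to `process_data`, and never let extraction errors break tracing. Condensed into a standalone helper (names are illustrative):

```python
import logging

logger = logging.getLogger(__name__)


def extract_usage(outputs: dict, process_data: dict) -> tuple[int, int]:
    """Prefer usage from outputs; fall back to process_data; never raise."""
    prompt_tokens = 0
    completion_tokens = 0
    try:
        source = outputs if outputs.get("usage") else process_data
        usage = source.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)
    except Exception:
        # tracing must not break generation, so swallow and log
        logger.error("Failed to extract usage", exc_info=True)
    return prompt_tokens, completion_tokens


assert extract_usage({"usage": {"prompt_tokens": 3, "completion_tokens": 5}}, {}) == (3, 5)
assert extract_usage({}, {"usage": {"prompt_tokens": 2}}) == (2, 0)
```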
Some files were not shown because too many files have changed in this diff.