Mirror of https://github.com/langgenius/dify.git, synced 2026-01-19 21:44:07 +00:00
Compare commits
113 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a1c6cecf10 |  |
|  | c5ccf382df |  |
|  | 8358d0abfa |  |
|  | bad3b14438 |  |
|  | f42ef494f8 |  |
|  | bb7f454ecd |  |
|  | 7f48fadd41 |  |
|  | af2138e8b8 |  |
|  | 091beffae7 |  |
|  | 408fb502a1 |  |
|  | 7660539689 |  |
|  | 5a6061ff61 |  |
|  | 970950e3a8 |  |
|  | 431b2fd4a8 |  |
|  | 88545184be |  |
|  | 2c23caacd4 |  |
|  | 9edea9bc49 |  |
|  | d43279a1cc |  |
|  | 10848d74a0 |  |
|  | f9df23a091 |  |
|  | 17a1c05728 |  |
|  | 66782ef19c |  |
|  | fb7f509e5c |  |
|  | 1a5acf43aa |  |
|  | 4ef6392de5 |  |
|  | effdc824d9 |  |
|  | 24fa452307 |  |
|  | 9e00e3894e |  |
|  | 023783372e |  |
|  | 1d06eba61a |  |
|  | 93e99fb343 |  |
|  | b9ebce7ab7 |  |
|  | 33b3eaf324 |  |
|  | b6cca59517 |  |
|  | 93ae18ea12 |  |
|  | 99f7e4f277 |  |
|  | 659c3e7a81 |  |
|  | 7a16c88092 |  |
|  | 0bb253efe0 |  |
|  | d93365d429 |  |
|  | 8b44dba988 |  |
|  | d96bcfa4ee |  |
|  | 380b4b3ddc |  |
|  | e2bf18053c |  |
|  | 4350bb9a00 |  |
|  | fe688b505a |  |
|  | 056898bf21 |  |
|  | 0e8afa3aa2 |  |
|  | 933bd06460 |  |
|  | b939039201 |  |
|  | 6da5e54180 |  |
|  | 1c5f63de7e |  |
|  | f3219ff107 |  |
|  | 219011b62a |  |
|  | 90150a6ca9 |  |
|  | 7722a7c5cd |  |
|  | 4ba38465ac |  |
|  | 9a5ae9f51f |  |
|  | a7c40a07d8 |  |
|  | 2d0d3365ed |  |
|  | 54a6571462 |  |
|  | c43c3098a0 |  |
|  | eddd038959 |  |
|  | 7a2291f450 |  |
|  | 17a8118154 |  |
|  | 4db01403ae |  |
|  | d8425f3f4c |  |
|  | 38754734a2 |  |
|  | b42cd38cc9 |  |
|  | c6f715861a |  |
|  | b46511dd7b |  |
|  | e8e8f9e97d |  |
|  | 18d1f6a6c6 |  |
|  | 1b6e3ef964 |  |
|  | 4779fcf6f1 |  |
|  | e8239ae631 |  |
|  | 94eb2a623e |  |
|  | 96809108ca |  |
|  | 8fc2663693 |  |
|  | 37c3b8979c |  |
|  | f68b05d5ec |  |
|  | 3b3c604eb5 |  |
|  | a43ef7a926 |  |
|  | c6ba67a770 |  |
|  | ac2a1bc954 |  |
|  | a4481a3f29 |  |
|  | 15f932573a |  |
|  | f8eefa31fe |  |
|  | 0587ff0fba |  |
|  | ce492d13f1 |  |
|  | 74d954610f |  |
|  | 0abee44453 |  |
|  | 157cb2e048 |  |
|  | a4713c01d5 |  |
|  | 8847bb1e45 |  |
|  | 5fcd5c2499 |  |
|  | d680fca996 |  |
|  | 92fb4ab4c1 |  |
|  | 815f794eef |  |
|  | 3117619ef3 |  |
|  | f5b2271c8c |  |
|  | a8155cba7e |  |
|  | 0eca93ebd1 |  |
|  | d8a716d857 |  |
|  | c2e7fe107a |  |
|  | 805da40b15 |  |
|  | 4cfee55ec6 |  |
|  | bcbdbed352 |  |
|  | 2e1cd3db28 |  |
|  | 19eaf27126 |  |
|  | 4926a0fcb1 |  |
|  | 58db0fac36 |  |
|  | 367ef145d6 |  |
.github/ISSUE_TEMPLATE/🐛-bug-report.md (vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
---
name: "\U0001F41B Bug report"
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

<!--
Please provide a clear and concise description of what the bug is. Include
screenshots if needed. Please test using the latest version of the relevant
Dify packages to make sure your issue has not already been fixed.
-->

Dify version: Cloud | Self Host

## Steps To Reproduce
<!--
Your bug will get fixed much faster if we can run your code and it doesn't
have dependencies other than Dify. Issues without reproduction steps or
code examples may be immediately closed as not actionable.
-->

1.
2.


## The current behavior


## The expected behavior
.github/ISSUE_TEMPLATE/🚀-feature-request.md (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
---
name: "\U0001F680 Feature request"
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/ISSUE_TEMPLATE/🤔-questions-and-help.md (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
---
name: "\U0001F914 Questions and Help"
about: Ask a usage or consultation question
title: ''
labels: ''
assignees: ''

---

.github/workflows/build-api-image.sh (vendored, deleted, 61 lines)

@@ -1,61 +0,0 @@
#!/usr/bin/env bash

set -eo pipefail

SHA=$(git rev-parse HEAD)
REPO_NAME=langgenius/dify
API_REPO_NAME="${REPO_NAME}-api"

if [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
  REFSPEC=$(echo "${GITHUB_HEAD_REF}" | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  PR_NUM=$(echo "${GITHUB_REF}" | sed 's:refs/pull/::' | sed 's:/merge::')
  LATEST_TAG="pr-${PR_NUM}"
  CACHE_FROM_TAG="latest"
elif [[ "${GITHUB_EVENT_NAME}" == "release" ]]; then
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/tags/::' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="latest"
else
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/heads/::' | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="${REFSPEC}"
fi

if [[ "${REFSPEC}" == "main" ]]; then
  LATEST_TAG="latest"
  CACHE_FROM_TAG="latest"
fi

echo "Pulling cache image ${API_REPO_NAME}:${CACHE_FROM_TAG}"
if docker pull "${API_REPO_NAME}:${CACHE_FROM_TAG}"; then
  API_CACHE_FROM_SCRIPT="--cache-from ${API_REPO_NAME}:${CACHE_FROM_TAG}"
else
  echo "WARNING: Failed to pull ${API_REPO_NAME}:${CACHE_FROM_TAG}, disable build image cache."
  API_CACHE_FROM_SCRIPT=""
fi


cat<<EOF
Rolling with tags:
- ${API_REPO_NAME}:${SHA}
- ${API_REPO_NAME}:${REFSPEC}
- ${API_REPO_NAME}:${LATEST_TAG}
EOF

#
# Build image
#
cd api
docker build \
  ${API_CACHE_FROM_SCRIPT} \
  --build-arg COMMIT_SHA=${SHA} \
  -t "${API_REPO_NAME}:${SHA}" \
  -t "${API_REPO_NAME}:${REFSPEC}" \
  -t "${API_REPO_NAME}:${LATEST_TAG}" \
  --label "sha=${SHA}" \
  --label "built_at=$(date)" \
  --label "build_actor=${GITHUB_ACTOR}" \
  .

# push
docker push --all-tags "${API_REPO_NAME}"
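The deleted script's tag derivation is worth keeping in mind when reading the metadata-action configuration that replaces it in the workflow below. As a reference, here is a hypothetical Python transcription of that derivation (a sketch only, not code from the repository):

```python
import re


def _sanitize(ref: str) -> str:
    # sed 's/[^a-zA-Z0-9]/-/g' | head -c 40
    return re.sub(r"[^a-zA-Z0-9]", "-", ref)[:40]


def compute_tags(event_name: str, github_ref: str, head_ref: str = "") -> dict:
    """Mirror the tag logic of the removed build-*-image.sh scripts."""
    if event_name == "pull_request":
        refspec = _sanitize(head_ref)  # sanitized PR head branch
        pr_num = github_ref.removeprefix("refs/pull/").removesuffix("/merge")
        latest, cache_from = f"pr-{pr_num}", "latest"
    elif event_name == "release":
        refspec = github_ref.removeprefix("refs/tags/")[:40]  # the release tag
        latest, cache_from = refspec, "latest"
    else:  # plain branch push
        refspec = _sanitize(github_ref.removeprefix("refs/heads/"))
        latest = cache_from = refspec

    if refspec == "main":  # main always rolls the 'latest' tag
        latest = cache_from = "latest"
    return {"refspec": refspec, "latest_tag": latest, "cache_from_tag": cache_from}


# compute_tags("release", "refs/tags/0.3.1")
#   -> {'refspec': '0.3.1', 'latest_tag': '0.3.1', 'cache_from_tag': 'latest'}
```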
.github/workflows/build-api-image.yml (vendored, 43 lines changed)

@@ -5,18 +5,19 @@ on:
    branches:
      - 'main'
      - 'deploy/dev'
  pull_request:
    types: [synchronize, opened, reopened, ready_for_review]
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: github.event.pull_request.draft == false
    steps:
      - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )"
        uses: actions/checkout@v2
        with:
          persist-credentials: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
@@ -24,13 +25,29 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        shell: bash
        env:
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
        run: |
          /bin/bash .github/workflows/build-api-image.sh
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: langgenius/dify-api
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
            type=semver,pattern={{major}}.{{minor}}.{{patch}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: "{{defaultContext}}:api"
          platforms: linux/amd64,linux/arm64
          build-args: |
            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Deploy to server
        if: github.ref == 'refs/heads/deploy/dev'
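As a rough mental model of what the metadata-action rules above emit (an illustrative sketch, not the action's implementation), the following shows the tag set you would expect for a given build context:

```python
# Sketch of the tags the configuration above would yield for langgenius/dify-api.
def expected_tags(image: str, branch: str, sha: str, release: str = "") -> list:
    tags = []
    if branch == "main":              # type=raw,value=latest,enable={{is_default_branch}}
        tags.append(f"{image}:latest")
    tags.append(f"{image}:{branch}")  # type=ref,event=branch
    tags.append(f"{image}:{sha}")     # type=sha,format=long with empty prefix
    if release:                       # the three semver patterns, on version tags
        major, minor, patch = release.split(".")
        tags += [f"{image}:{major}.{minor}.{patch}",
                 f"{image}:{major}.{minor}",
                 f"{image}:{major}"]
    return tags


# expected_tags("langgenius/dify-api", "main", "<full-sha>") would give
# ['langgenius/dify-api:latest', 'langgenius/dify-api:main', 'langgenius/dify-api:<full-sha>']
```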
.github/workflows/build-web-image.sh (vendored, deleted, 60 lines)

@@ -1,60 +0,0 @@
#!/usr/bin/env bash

set -eo pipefail

SHA=$(git rev-parse HEAD)
REPO_NAME=langgenius/dify
WEB_REPO_NAME="${REPO_NAME}-web"

if [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
  REFSPEC=$(echo "${GITHUB_HEAD_REF}" | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  PR_NUM=$(echo "${GITHUB_REF}" | sed 's:refs/pull/::' | sed 's:/merge::')
  LATEST_TAG="pr-${PR_NUM}"
  CACHE_FROM_TAG="latest"
elif [[ "${GITHUB_EVENT_NAME}" == "release" ]]; then
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/tags/::' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="latest"
else
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/heads/::' | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="${REFSPEC}"
fi

if [[ "${REFSPEC}" == "main" ]]; then
  LATEST_TAG="latest"
  CACHE_FROM_TAG="latest"
fi

echo "Pulling cache image ${WEB_REPO_NAME}:${CACHE_FROM_TAG}"
if docker pull "${WEB_REPO_NAME}:${CACHE_FROM_TAG}"; then
  WEB_CACHE_FROM_SCRIPT="--cache-from ${WEB_REPO_NAME}:${CACHE_FROM_TAG}"
else
  echo "WARNING: Failed to pull ${WEB_REPO_NAME}:${CACHE_FROM_TAG}, disable build image cache."
  WEB_CACHE_FROM_SCRIPT=""
fi


cat<<EOF
Rolling with tags:
- ${WEB_REPO_NAME}:${SHA}
- ${WEB_REPO_NAME}:${REFSPEC}
- ${WEB_REPO_NAME}:${LATEST_TAG}
EOF

#
# Build image
#
cd web
docker build \
  ${WEB_CACHE_FROM_SCRIPT} \
  --build-arg COMMIT_SHA=${SHA} \
  -t "${WEB_REPO_NAME}:${SHA}" \
  -t "${WEB_REPO_NAME}:${REFSPEC}" \
  -t "${WEB_REPO_NAME}:${LATEST_TAG}" \
  --label "sha=${SHA}" \
  --label "built_at=$(date)" \
  --label "build_actor=${GITHUB_ACTOR}" \
  .

docker push --all-tags "${WEB_REPO_NAME}"
.github/workflows/build-web-image.yml (vendored, 43 lines changed)

@@ -5,18 +5,19 @@ on:
    branches:
      - 'main'
      - 'deploy/dev'
  pull_request:
    types: [synchronize, opened, reopened, ready_for_review]
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: github.event.pull_request.draft == false
    steps:
      - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )"
        uses: actions/checkout@v2
        with:
          persist-credentials: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
@@ -24,13 +25,29 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        shell: bash
        env:
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
        run: |
          /bin/bash .github/workflows/build-web-image.sh
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: langgenius/dify-web
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
            type=semver,pattern={{major}}.{{minor}}.{{patch}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: "{{defaultContext}}:web"
          platforms: linux/amd64,linux/arm64
          build-args: |
            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Deploy to server
        if: github.ref == 'refs/heads/deploy/dev'
.github/workflows/flake8.yml (vendored, deleted, 19 lines)

@@ -1,19 +0,0 @@
name: PEP8 Check
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  pep8:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install flake8
        run: pip install flake8
      - name: Run flake8
        run: flake8 --ignore=E501 .
.gitignore (vendored, 3 lines changed)

@@ -130,7 +130,6 @@ dmypy.json
.idea/'

.DS_Store
.vscode

# Intellij IDEA Files
.idea/
@@ -139,7 +138,7 @@ dmypy.json
api/.env
api/storage/*

docker/volumes/app/storage/privkeys/*
docker/volumes/app/storage/*
docker/volumes/db/data/*
docker/volumes/redis/data/*
docker/volumes/weaviate/*
@@ -22,14 +22,14 @@ To set up a working development environment, just fork the project git repository

### Fork the repository

you need to fork the [repository](https://github.com/langgenius/langgenius-gateway).
you need to fork the [repository](https://github.com/langgenius/dify).

### Clone the repo

Clone your GitHub forked repository:

```
git clone git@github.com:<github_username>/langgenius-gateway.git
git clone git@github.com:<github_username>/dify.git
```

### Install backend
CONTRIBUTING_JA.md (new file, 55 lines)

@@ -0,0 +1,55 @@
# Contributing

Thank you for your interest in [Dify](https://dify.ai) and for wanting to contribute! Before you get started, please read the
[Code of Conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md) and check the
[existing issues](https://github.com/langgenius/langgenius-gateway/issues).
This document explains how to set up a development environment to build and test [Dify](https://dify.ai).

### Install dependencies

To build [Dify](https://dify.ai), you need to install and configure the following dependencies on your machine:

- [Git](http://git-scm.com/)
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.10.x

## Local development

To set up a working development environment, fork the project's git repository, install the backend and frontend dependencies with the appropriate package manager, and create the docker-compose stack to run.

### Fork the repository

You need to fork the [repository](https://github.com/langgenius/dify).

### Clone the repo

Clone your GitHub forked repository:

```
git clone git@github.com:<github_username>/dify.git
```

### Install the backend

See the [Backend README](api/README.md) for how to install the backend application.

### Install the frontend

See the [Frontend README](web/README.md) for how to install the frontend application.

### Visit dify in your browser

You can now view [Dify](https://dify.ai) in your local environment at [http://localhost:3000](http://localhost:3000).

## Create a pull request

After making your changes, open a pull request (PR). Once you submit a pull request, others from the Dify team/community will review it with you.

Run into problems such as merge conflicts, or not sure how to open a pull request? Check out [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) on how to resolve merge conflicts and other issues. Once your PR has been merged, you will be proudly listed as a contributor on the [contributor chart](https://github.com/langgenius/langgenius-gateway/graphs/contributors).

## Community channels

Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/AhzKf7dNgk); we are here to help!
README.md (14 lines changed)

@@ -1,10 +1,14 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a>
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>

[Website](http://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai)
[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)

Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>

**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers out-of-the-box, ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugins and datasets integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement.

@@ -21,7 +25,7 @@ Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs

## Use Cloud Services

Visit [Dify.ai](http://dify.ai)
Visit [Dify.ai](https://dify.ai)

## Install the Community Edition

@@ -41,7 +45,7 @@ cd docker
docker-compose up -d
```

After running, you can access the Dify console in your browser at [http://localhost](http://localhost) and start the initialization operation.
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.

### Configuration

@@ -86,7 +90,7 @@ A: English and Chinese are currently supported, and you can contribute language
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:

- Submit an Issue or PR on our GitHub Repo
- Join the discussion in our [Discord](https://discord.gg/AhzKf7dNgk) Community
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) Community
- Send an email to hello@dify.ai

We're eager to assist you and together create more fun and useful AI applications!
README_CN.md (14 lines changed)

@@ -1,11 +1,15 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a>
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>

[Official Website](http://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai)
[Official Website](https://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)

Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>

**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. Dify offers visual orchestration for various application types; applications are ready to use out of the box and can also be served via "Backend-as-a-Service" APIs.

@@ -23,7 +27,7 @@ Dify is compatible with Langchain, meaning we will gradually support multiple LLMs; currently

## Use Cloud Services

Visit [Dify.ai](http://cloud.dify.ai)
Visit [Dify.ai](https://cloud.dify.ai)

## Install the Community Edition

@@ -43,7 +47,7 @@ cd docker
docker-compose up -d
```

After running, you can access [http://localhost](http://localhost) in your browser to enter the Dify console and begin initialization.
After running, you can access [http://localhost/install](http://localhost/install) in your browser to enter the Dify console and begin the initialization and installation process.

### Configuration

@@ -87,7 +91,7 @@ A: English and Chinese are currently supported, and you can contribute language packs to us.
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:

- Submit an Issue or PR on our [GitHub Repo](https://github.com/langgenius/dify)
- Join the discussion in our [Discord Community](https://discord.gg/AhzKf7dNgk)
- Join the discussion in our [Discord Community](https://discord.gg/FngNHpbcY7)
- Send an email to hello@dify.ai

## Contributing
README_JA.md (new file, 120 lines)

@@ -0,0 +1,120 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>

[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)

Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>


**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with a single API for plugin and dataset integration, and streamline your operations with a single interface for prompt engineering, visual analytics, and continuous improvement.

Applications created with Dify include:

Ready-to-use websites supporting both form mode and chat conversation mode
A single API covering plugin capabilities, context enhancement, and more, saving you backend coding effort
Visual data analysis, log review, and annotation for applications

Dify is compatible with LangChain and will gradually support multiple LLMs:

- GPT 3 (text-davinci-003)
- GPT 3.5 Turbo (ChatGPT)
- GPT-4

## Use Cloud Services

Visit [Dify.ai](https://dify.ai)

## Install the Community Edition

### System requirements

Before installing Dify, make sure your machine meets the following minimum system requirements:

- CPU >= 1 Core
- RAM >= 4GB

### Quick start

The easiest way to start the Dify server is to run the [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:

```bash
cd docker
docker-compose up -d
```

After running, you can access [http://localhost/install](http://localhost/install) in your browser and begin the initialization installation process.

### Configuration

If you need to customize the configuration, please refer to the comments in the [docker-compose.yml](docker/docker-compose.yaml) file and set the environment variables manually. After making changes, run 'docker-compose up -d' again.

## Roadmap

Features under development:

- **Datasets**: supporting more datasets, such as syncing content from Notion or web pages
We will support more datasets, including text, web pages, and even Notion content. Users will be able to build AI applications on top of their own data sources.
- **Plugins**: introducing ChatGPT-standard plugins for applications, or using Dify-made plugins
We will release plugins compliant with the ChatGPT standard, as well as Dify's own plugins, to enable more capabilities in applications.
- **Open-source models**: e.g. adopting Llama as a model provider, or for further fine-tuning
We will work with excellent open-source models such as Llama, providing them as model options on our platform or using them for further fine-tuning.


## Q&A

**Q: What can I do with Dify?**

A: Dify is a simple yet powerful LLM development and operations tool. You can use it to build commercial-grade applications and personal assistants. If you want to develop your own applications, LangDifyGenius saves you the backend work of integrating with OpenAI, offers visual operation capabilities, and lets you continuously improve and train your GPT models.

**Q: How do I "train" my own model with Dify?**

A: A valuable application consists of prompt engineering, context enhancement, and fine-tuning. With a hybrid programming approach combining prompts and programming languages (like a template engine), you can easily accomplish long-text embedding or capturing subtitles from a user-input YouTube video, all of which will be submitted as context for the LLM to process. We also place great emphasis on application operability: data generated while users use your application can be used for analysis, annotation, and continuous training. Without the right tools, these steps can be time-consuming.

**Q: What do I need to prepare to build my own application?**

A: We assume you already have an OpenAI API Key; if not, please register for one. If you already have content that can serve as training context, that's great!

**Q: Which interface languages are available?**

A: English and Chinese are currently supported, and you can contribute language packs.

## Star History

[](https://star-history.com/#langgenius/dify&Date)

## Contact Us

For questions, suggestions, or partnership inquiries, feel free to reach out through the following channels:

- Submit an Issue or PR on our GitHub Repo
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) community
- Send an email to hello@dify.ai

We're eager to assist you and together create more fun and useful AI applications!

## Contributing

To ensure proper review, all code contributions, including those from contributors with direct commit access, must be submitted as pull requests and approved by the core development team before being merged.

We welcome all pull requests! If you'd like to help, check out the [Contribution Guide](CONTRIBUTING.md).

## Security

To protect your privacy, please avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will provide you with a more detailed answer.

## Citation

This software uses the following open-source software:

- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234.

For details, please refer to each software's official website or license text.

## License

This repository is available under the [Dify Open Source License](LICENSE).
@@ -14,7 +14,7 @@ CONSOLE_URL=http://127.0.0.1:5001
API_URL=http://127.0.0.1:5001

# Web APP base URL
APP_URL=http://127.0.0.1:5001
APP_URL=http://127.0.0.1:3000

# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1

@@ -33,3 +33,4 @@
flask run --host 0.0.0.0 --port=5001 --debug
```
7. Setup your application by visiting http://localhost:5001/console/api/setup or other apis...
8. If you need to debug local async processing, you can run `celery -A app.celery worker`, celery can do dataset importing and other async tasks.
@@ -1,5 +1,4 @@
import datetime
import json
import random
import string

@@ -9,7 +8,7 @@ from libs.password import password_pattern, valid_password, hash_password
from libs.helper import email as email_validate
from extensions.ext_database import db
from models.account import InvitationCode
from models.model import Account, AppModelConfig, ApiToken, Site, App, RecommendedApp
from models.model import Account
import secrets
import base64

@@ -131,30 +130,7 @@ def generate_upper_string():
    return result


@click.command('gen-recommended-apps', help='Number of records to generate')
def generate_recommended_apps():
    print('Generating recommended app data...')
    apps = App.query.all()
    for app in apps:
        recommended_app = RecommendedApp(
            app_id=app.id,
            description={
                'en': 'Description for ' + app.name,
                'zh': '描述 ' + app.name
            },
            copyright='Copyright ' + str(random.randint(1990, 2020)),
            privacy_policy='https://privacypolicy.example.com',
            category=random.choice(['Games', 'News', 'Music', 'Sports']),
            position=random.randint(1, 100),
            install_count=random.randint(100, 100000)
        )
        db.session.add(recommended_app)
    db.session.commit()
    print('Done!')


def register_commands(app):
    app.cli.add_command(reset_password)
    app.cli.add_command(reset_email)
    app.cli.add_command(generate_invitation_codes)
    app.cli.add_command(generate_recommended_apps)
@@ -21,9 +21,11 @@ DEFAULTS = {
    'REDIS_HOST': 'localhost',
    'REDIS_PORT': '6379',
    'REDIS_DB': '0',
    'REDIS_USE_SSL': 'False',
    'SESSION_REDIS_HOST': 'localhost',
    'SESSION_REDIS_PORT': '6379',
    'SESSION_REDIS_DB': '2',
    'SESSION_REDIS_USE_SSL': 'False',
    'OAUTH_REDIRECT_PATH': '/console/api/oauth/authorize',
    'OAUTH_REDIRECT_INDEX_PATH': '/',
    'CONSOLE_URL': 'https://cloud.dify.ai',
@@ -44,6 +46,8 @@ DEFAULTS = {
    'CELERY_BACKEND': 'database',
    'PDF_PREVIEW': 'True',
    'LOG_LEVEL': 'INFO',
    'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
    'DEFAULT_LLM_PROVIDER': 'openai'
}

@@ -74,7 +78,7 @@ class Config:
        self.CONSOLE_URL = get_env('CONSOLE_URL')
        self.API_URL = get_env('API_URL')
        self.APP_URL = get_env('APP_URL')
        self.CURRENT_VERSION = "0.2.0"
        self.CURRENT_VERSION = "0.3.1"
        self.COMMIT_SHA = get_env('COMMIT_SHA')
        self.EDITION = "SELF_HOSTED"
        self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -105,14 +109,18 @@ class Config:
        # redis settings
        self.REDIS_HOST = get_env('REDIS_HOST')
        self.REDIS_PORT = get_env('REDIS_PORT')
        self.REDIS_USERNAME = get_env('REDIS_USERNAME')
        self.REDIS_PASSWORD = get_env('REDIS_PASSWORD')
        self.REDIS_DB = get_env('REDIS_DB')
        self.REDIS_USE_SSL = get_bool_env('REDIS_USE_SSL')

        # session redis settings
        self.SESSION_REDIS_HOST = get_env('SESSION_REDIS_HOST')
        self.SESSION_REDIS_PORT = get_env('SESSION_REDIS_PORT')
        self.SESSION_REDIS_USERNAME = get_env('SESSION_REDIS_USERNAME')
        self.SESSION_REDIS_PASSWORD = get_env('SESSION_REDIS_PASSWORD')
        self.SESSION_REDIS_DB = get_env('SESSION_REDIS_DB')
        self.SESSION_REDIS_USE_SSL = get_bool_env('SESSION_REDIS_USE_SSL')

        # storage settings
        self.STORAGE_TYPE = get_env('STORAGE_TYPE')
@@ -165,10 +173,18 @@ class Config:
        self.CELERY_BACKEND = get_env('CELERY_BACKEND')
        self.CELERY_RESULT_BACKEND = 'db+{}'.format(self.SQLALCHEMY_DATABASE_URI) \
            if self.CELERY_BACKEND == 'database' else self.CELERY_BROKER_URL
        self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://')

        # hosted provider credentials
        self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')

        # By default it is False
        # You could disable it for compatibility with certain OpenAPI providers
        self.DISABLE_PROVIDER_CONFIG_VALIDATION = get_bool_env('DISABLE_PROVIDER_CONFIG_VALIDATION')

        # For temp use only
        # set default LLM provider, default is 'openai', support `azure_openai`
        self.DEFAULT_LLM_PROVIDER = get_env('DEFAULT_LLM_PROVIDER')

class CloudEditionConfig(Config):
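The get_env and get_bool_env helpers themselves fall outside these hunks; presumably they fall back to the DEFAULTS table, along the lines of this sketch (an assumption, not the actual source):

```python
import os

DEFAULTS = {  # excerpt of the table above
    'SESSION_REDIS_HOST': 'localhost',
    'SESSION_REDIS_USE_SSL': 'False',
}


def get_env(key):
    # Assumed behaviour: environment variable first, then the DEFAULTS table.
    return os.environ.get(key, DEFAULTS.get(key))


def get_bool_env(key):
    # Defaults are stored as strings ('False'), so compare explicitly.
    return str(get_env(key)).lower() == 'true'
```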
@@ -5,8 +5,11 @@ from libs.external_api import ExternalApi
bp = Blueprint('console', __name__, url_prefix='/console/api')
api = ExternalApi(bp)

# Import other controllers
from . import setup, version, apikey, admin

# Import app controllers
from .app import app, site, explore, completion, model_config, statistic, conversation, message
from .app import app, site, completion, model_config, statistic, conversation, message

# Import auth controllers
from .auth import login, oauth
@@ -14,7 +17,8 @@ from .auth import login, oauth
# Import datasets controllers
from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing

# Import other controllers
from . import setup, version, apikey

# Import workspace controllers
from .workspace import workspace, members, providers, account

# Import explore controllers
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message
api/controllers/console/admin.py (new file, 132 lines)

@@ -0,0 +1,132 @@
import os
from functools import wraps

from flask import request
from flask_restful import Resource, reqparse
from werkzeug.exceptions import NotFound, Unauthorized

from controllers.console import api
from controllers.console.wraps import only_edition_cloud
from extensions.ext_database import db
from models.model import RecommendedApp, App, InstalledApp


def admin_required(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        if not os.getenv('ADMIN_API_KEY'):
            raise Unauthorized('API key is invalid.')

        auth_header = request.headers.get('Authorization')
        if auth_header is None:
            raise Unauthorized('Authorization header is missing.')

        if ' ' not in auth_header:
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

        auth_scheme, auth_token = auth_header.split(None, 1)
        auth_scheme = auth_scheme.lower()

        if auth_scheme != 'bearer':
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

        if os.getenv('ADMIN_API_KEY') != auth_token:
            raise Unauthorized('API key is invalid.')

        return view(*args, **kwargs)

    return decorated


class InsertExploreAppListApi(Resource):
    @only_edition_cloud
    @admin_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, nullable=False, location='json')
        parser.add_argument('desc', type=str, location='json')
        parser.add_argument('copyright', type=str, location='json')
        parser.add_argument('privacy_policy', type=str, location='json')
        parser.add_argument('language', type=str, required=True, nullable=False, choices=['en-US', 'zh-Hans'],
                            location='json')
        parser.add_argument('category', type=str, required=True, nullable=False, location='json')
        parser.add_argument('position', type=int, required=True, nullable=False, location='json')
        args = parser.parse_args()

        app = App.query.filter(App.id == args['app_id']).first()
        if not app:
            raise NotFound('App not found')

        site = app.site
        if not site:
            desc = args['desc'] if args['desc'] else ''
            copy_right = args['copyright'] if args['copyright'] else ''
            privacy_policy = args['privacy_policy'] if args['privacy_policy'] else ''
        else:
            desc = site.description if (site.description if not args['desc'] else args['desc']) else ''
            copy_right = site.copyright if (site.copyright if not args['copyright'] else args['copyright']) else ''
            privacy_policy = site.privacy_policy \
                if (site.privacy_policy if not args['privacy_policy'] else args['privacy_policy']) else ''

        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()

        if not recommended_app:
            recommended_app = RecommendedApp(
                app_id=app.id,
                description=desc,
                copyright=copy_right,
                privacy_policy=privacy_policy,
                language=args['language'],
                category=args['category'],
                position=args['position']
            )

            db.session.add(recommended_app)

            app.is_public = True
            db.session.commit()

            return {'result': 'success'}, 201
        else:
            recommended_app.description = desc
            recommended_app.copyright = copy_right
            recommended_app.privacy_policy = privacy_policy
            recommended_app.language = args['language']
            recommended_app.category = args['category']
            recommended_app.position = args['position']

            app.is_public = True

            db.session.commit()

            return {'result': 'success'}, 200


class InsertExploreAppApi(Resource):
    @only_edition_cloud
    @admin_required
    def delete(self, app_id):
        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == str(app_id)).first()
        if not recommended_app:
            return {'result': 'success'}, 204

        app = App.query.filter(App.id == recommended_app.app_id).first()
        if app:
            app.is_public = False

        installed_apps = InstalledApp.query.filter(
            InstalledApp.app_id == recommended_app.app_id,
            InstalledApp.tenant_id != InstalledApp.app_owner_tenant_id
        ).all()

        for installed_app in installed_apps:
            db.session.delete(installed_app)

        db.session.delete(recommended_app)
        db.session.commit()

        return {'result': 'success'}, 204


api.add_resource(InsertExploreAppListApi, '/admin/insert-explore-apps')
api.add_resource(InsertExploreAppApi, '/admin/insert-explore-apps/<uuid:app_id>')
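A hedged usage sketch for these admin routes, assuming a console API at http://127.0.0.1:5001, a cloud-edition deployment (only_edition_cloud gates both routes), and ADMIN_API_KEY exported in the server's environment; the UUID and category are placeholders:

```python
import os

import requests  # third-party HTTP client, assumed available

BASE = "http://127.0.0.1:5001/console/api"
HEADERS = {"Authorization": f"Bearer {os.environ['ADMIN_API_KEY']}"}
APP_ID = "00000000-0000-0000-0000-000000000000"  # placeholder app UUID

# Insert (201) or update (200) an explore-list entry for an existing app.
resp = requests.post(
    f"{BASE}/admin/insert-explore-apps",
    headers=HEADERS,
    json={"app_id": APP_ID, "language": "en-US", "category": "Writing", "position": 1},
)
print(resp.status_code)

# Remove the entry again; returns 204 whether or not it existed.
requests.delete(f"{BASE}/admin/insert-explore-apps/{APP_ID}", headers=HEADERS)
```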
@@ -17,6 +17,6 @@ def _get_app(app_id, mode=None):
        raise NotFound("App not found")

    if mode and app.mode != mode:
        raise AppUnavailableError()
        raise NotFound("The {} app not found".format(mode))

    return app
@@ -45,7 +45,7 @@ message_detail_fields = {
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Integer,
    'provider_response_latency': fields.Float,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,
@@ -9,31 +9,33 @@ class AppNotFoundError(BaseHTTPException):

class ProviderNotInitializeError(BaseHTTPException):
    error_code = 'provider_not_initialize'
    description = "Provider Token not initialize."
    description = "No valid model provider credentials found. " \
                  "Please go to Settings -> Model Provider to complete your provider credentials."
    code = 400


class ProviderQuotaExceededError(BaseHTTPException):
    error_code = 'provider_quota_exceeded'
    description = "Provider quota exceeded."
    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
                  "Please go to Settings -> Model Provider to complete your own provider credentials."
    code = 400


class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
    error_code = 'model_currently_not_support'
    description = "GPT-4 currently not support."
    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
    code = 400


class ConversationCompletedError(BaseHTTPException):
    error_code = 'conversation_completed'
    description = "Conversation was completed."
    description = "The conversation has ended. Please start a new conversation."
    code = 400


class AppUnavailableError(BaseHTTPException):
    error_code = 'app_unavailable'
    description = "App unavailable."
    description = "App unavailable, please check your app configurations."
    code = 400

@@ -45,5 +47,5 @@ class CompletionRequestError(BaseHTTPException):

class AppMoreLikeThisDisabledError(BaseHTTPException):
    error_code = 'app_more_like_this_disabled'
    description = "More like this disabled."
    description = "The 'More like this' feature is disabled. Please refresh your page."
    code = 403
@@ -1,209 +0,0 @@
# -*- coding:utf-8 -*-
from datetime import datetime

from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, abort, inputs
from sqlalchemy import and_

from controllers.console import api
from extensions.ext_database import db
from models.model import Tenant, App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

installed_app_fields = {
    'id': fields.String,
    'app': fields.Nested(app_fields, attribute='app'),
    'app_owner_tenant_id': fields.String,
    'is_pinned': fields.Boolean,
    'last_used_at': fields.DateTime,
    'editable': fields.Boolean
}

installed_app_list_fields = {
    'installed_apps': fields.List(fields.Nested(installed_app_fields))
}

recommended_app_fields = {
    'app': fields.Nested(app_fields, attribute='app'),
    'app_id': fields.String,
    'description': fields.String(attribute='description'),
    'copyright': fields.String,
    'privacy_policy': fields.String,
    'category': fields.String,
    'position': fields.Integer,
    'is_listed': fields.Boolean,
    'install_count': fields.Integer,
    'installed': fields.Boolean,
    'editable': fields.Boolean
}

recommended_app_list_fields = {
    'recommended_apps': fields.List(fields.Nested(recommended_app_fields)),
    'categories': fields.List(fields.String)
}


class InstalledAppsListResource(Resource):
    @login_required
    @marshal_with(installed_app_list_fields)
    def get(self):
        current_tenant_id = Tenant.query.first().id
        installed_apps = db.session.query(InstalledApp).filter(
            InstalledApp.tenant_id == current_tenant_id
        ).all()

        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        installed_apps = [
            {
                **installed_app,
                "editable": current_user.role in ["owner", "admin"],
            }
            for installed_app in installed_apps
        ]
        installed_apps.sort(key=lambda app: (-app.is_pinned, app.last_used_at))

        return {'installed_apps': installed_apps}

    @login_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, help='Invalid app_id')
        args = parser.parse_args()

        current_tenant_id = Tenant.query.first().id
        app = App.query.get(args['app_id'])
        if app is None:
            abort(404, message='App not found')
        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
        if recommended_app is None:
            abort(404, message='App not found')
        if not app.is_public:
            abort(403, message="You can't install a non-public app")

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.app_id == args['app_id'],
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            # todo: position
            recommended_app.install_count += 1

            new_installed_app = InstalledApp(
                app_id=args['app_id'],
                tenant_id=current_tenant_id,
                is_pinned=False,
                last_used_at=datetime.utcnow()
            )
            db.session.add(new_installed_app)
            db.session.commit()

        return {'message': 'App installed successfully'}


class InstalledAppResource(Resource):

    @login_required
    def delete(self, installed_app_id):

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.id == str(installed_app_id),
            InstalledApp.tenant_id == current_user.current_tenant_id
        )).first()

        if installed_app is None:
            abort(404, message='App not found')

        if installed_app.app_owner_tenant_id == current_user.current_tenant_id:
            abort(400, message="You can't uninstall an app owned by the current tenant")

        db.session.delete(installed_app)
        db.session.commit()

        return {'result': 'success', 'message': 'App uninstalled successfully'}

    @login_required
    def patch(self, installed_app_id):
        parser = reqparse.RequestParser()
        parser.add_argument('is_pinned', type=inputs.boolean)
        args = parser.parse_args()

        current_tenant_id = Tenant.query.first().id
        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.id == str(installed_app_id),
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            abort(404, message='Installed app not found')

        commit_args = False
        if 'is_pinned' in args:
            installed_app.is_pinned = args['is_pinned']
            commit_args = True

        if commit_args:
            db.session.commit()

        return {'result': 'success', 'message': 'App info updated successfully'}


class RecommendedAppsResource(Resource):
    @login_required
    @marshal_with(recommended_app_list_fields)
    def get(self):
        recommended_apps = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True
        ).all()

        categories = set()
        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        recommended_apps_result = []
        for recommended_app in recommended_apps:
            installed = db.session.query(InstalledApp).filter(
                and_(
                    InstalledApp.app_id == recommended_app.app_id,
                    InstalledApp.tenant_id == current_user.current_tenant_id
                )
            ).first() is not None

            language_prefix = current_user.interface_language.split('-')[0]
            desc = None
            if recommended_app.description:
                if language_prefix in recommended_app.description:
                    desc = recommended_app.description[language_prefix]
                elif 'en' in recommended_app.description:
                    desc = recommended_app.description['en']

            recommended_app_result = {
                'id': recommended_app.id,
                'app': recommended_app.app,
                'app_id': recommended_app.app_id,
                'description': desc,
                'copyright': recommended_app.copyright,
                'privacy_policy': recommended_app.privacy_policy,
                'category': recommended_app.category,
                'position': recommended_app.position,
                'is_listed': recommended_app.is_listed,
                'install_count': recommended_app.install_count,
                'installed': installed,
                'editable': current_user.role in ['owner', 'admin'],
            }
            recommended_apps_result.append(recommended_app_result)

            categories.add(recommended_app.category)  # add category to categories

        return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}


api.add_resource(InstalledAppsListResource, '/installed-apps')
api.add_resource(InstalledAppResource, '/installed-apps/<uuid:installed_app_id>')
api.add_resource(RecommendedAppsResource, '/explore/apps')
@@ -26,46 +26,46 @@ from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
from services.message_service import MessageService

account_fields = {
    'id': fields.String,
    'name': fields.String,
    'email': fields.String
}

class ChatMessageApi(Resource):
account_fields = {
    'id': fields.String,
    'name': fields.String,
    'email': fields.String
}
feedback_fields = {
    'rating': fields.String,
    'content': fields.String,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account': fields.Nested(account_fields, allow_null=True),
}

feedback_fields = {
    'rating': fields.String,
    'content': fields.String,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account': fields.Nested(account_fields, allow_null=True),
}
annotation_fields = {
    'content': fields.String,
    'account': fields.Nested(account_fields, allow_null=True),
    'created_at': TimestampField
}

annotation_fields = {
    'content': fields.String,
    'account': fields.Nested(account_fields, allow_null=True),
    'created_at': TimestampField
}
message_detail_fields = {
    'id': fields.String,
    'conversation_id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'message': fields.Raw,
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Float,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,
    'feedbacks': fields.List(fields.Nested(feedback_fields)),
    'annotation': fields.Nested(annotation_fields, allow_null=True),
    'created_at': TimestampField
}

message_detail_fields = {
    'id': fields.String,
    'conversation_id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'message': fields.Raw,
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Integer,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,
    'feedbacks': fields.List(fields.Nested(feedback_fields)),
    'annotation': fields.Nested(annotation_fields, allow_null=True),
    'created_at': TimestampField
}

class ChatMessageListApi(Resource):
message_infinite_scroll_pagination_fields = {
    'limit': fields.Integer,
    'has_more': fields.Boolean,
@@ -253,7 +253,8 @@ class MessageMoreLikeThisApi(Resource):
        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'], location='args')
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'],
                            location='args')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'
@@ -301,7 +302,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
                yield "data: " + json.dumps(
                    api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
@@ -353,9 +355,33 @@ class MessageSuggestedQuestionApi(Resource):
        return {'data': questions}


class MessageApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(message_detail_fields)
    def get(self, app_id, message_id):
        app_id = str(app_id)
        message_id = str(message_id)

        # get app info
        app_model = _get_app(app_id, 'chat')

        message = db.session.query(Message).filter(
            Message.id == message_id,
            Message.app_id == app_model.id
        ).first()

        if not message:
            raise NotFound("Message Not Exists.")

        return message


api.add_resource(MessageMoreLikeThisApi, '/apps/<uuid:app_id>/completion-messages/<uuid:message_id>/more-like-this')
api.add_resource(MessageSuggestedQuestionApi, '/apps/<uuid:app_id>/chat-messages/<uuid:message_id>/suggested-questions')
api.add_resource(ChatMessageApi, '/apps/<uuid:app_id>/chat-messages', endpoint='chat_messages')
api.add_resource(ChatMessageListApi, '/apps/<uuid:app_id>/chat-messages', endpoint='console_chat_messages')
api.add_resource(MessageFeedbackApi, '/apps/<uuid:app_id>/feedbacks')
api.add_resource(MessageAnnotationApi, '/apps/<uuid:app_id>/annotations')
api.add_resource(MessageAnnotationCountApi, '/apps/<uuid:app_id>/annotations/count')
api.add_resource(MessageApi, '/apps/<uuid:app_id>/messages/<uuid:message_id>', endpoint='console_message')
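A hedged sketch of calling the new console message route, assuming an already-authenticated console session (the endpoint sits behind setup_required, login_required, and account_initialization_required) and placeholder UUIDs:

```python
import requests  # third-party HTTP client, assumed available

BASE = "http://127.0.0.1:5001/console/api"
APP_ID = "00000000-0000-0000-0000-000000000000"      # placeholder chat-app UUID
MESSAGE_ID = "11111111-1111-1111-1111-111111111111"  # placeholder message UUID

session = requests.Session()
# ...assume the session has authenticated against the console login first...

resp = session.get(f"{BASE}/apps/{APP_ID}/messages/{MESSAGE_ID}")
if resp.ok:
    detail = resp.json()
    # provider_response_latency is now marshalled as a float (see the field change above)
    print(detail["provider_response_latency"])
```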
@@ -10,13 +10,14 @@ from werkzeug.exceptions import NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, DocumentIndexingError, \
    InvalidMetadataError, ArchivedDocumentImmutableError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.llm.error import ProviderTokenNotInitError
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from extensions.ext_database import db
@@ -222,6 +223,10 @@ class DatasetDocumentListApi(Resource):
            document = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return document

@@ -259,6 +264,10 @@ class DatasetInitApi(Resource):
            )
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
@@ -3,7 +3,7 @@ from libs.exception import BaseHTTPException
|
||||
|
||||
class NoFileUploadedError(BaseHTTPException):
|
||||
error_code = 'no_file_uploaded'
|
||||
description = "No file uploaded."
|
||||
description = "Please upload your file."
|
||||
code = 400
|
||||
|
||||
|
||||
@@ -27,25 +27,25 @@ class UnsupportedFileTypeError(BaseHTTPException):

class HighQualityDatasetOnlyError(BaseHTTPException):
    error_code = 'high_quality_dataset_only'
-    description = "High quality dataset only."
+    description = "Current operation only supports 'high-quality' datasets."
    code = 400


class DatasetNotInitializedError(BaseHTTPException):
    error_code = 'dataset_not_initialized'
-    description = "Dataset not initialized."
+    description = "The dataset is still being initialized or indexing. Please wait a moment."
    code = 400


class ArchivedDocumentImmutableError(BaseHTTPException):
    error_code = 'archived_document_immutable'
-    description = "Cannot process an archived document."
+    description = "The archived document is not editable."
    code = 403


class DatasetNameDuplicateError(BaseHTTPException):
    error_code = 'dataset_name_duplicate'
-    description = "Dataset name already exists."
+    description = "The dataset name already exists. Please modify your dataset name."
    code = 409

@@ -57,17 +57,17 @@ class InvalidActionError(BaseHTTPException):

class DocumentAlreadyFinishedError(BaseHTTPException):
    error_code = 'document_already_finished'
-    description = "Document already finished."
+    description = "The document has been processed. Please refresh the page or go to the document details."
    code = 400


class DocumentIndexingError(BaseHTTPException):
    error_code = 'document_indexing'
-    description = "Document indexing."
+    description = "The document is being processed and cannot be edited."
    code = 400


class InvalidMetadataError(BaseHTTPException):
    error_code = 'invalid_metadata'
-    description = "Invalid metadata."
+    description = "The metadata content is incorrect. Please check and verify."
    code = 400
@@ -6,9 +6,12 @@ from werkzeug.exceptions import InternalServerError, NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import HighQualityDatasetOnlyError, DatasetNotInitializedError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import TimestampField
from services.dataset_service import DatasetService
from services.hit_testing_service import HitTestingService
@@ -92,6 +95,12 @@ class HitTestingApi(Resource):
            return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
        except services.errors.index.IndexNotInitializedError:
            raise DatasetNotInitializedError()
+        except ProviderTokenNotInitError:
+            raise ProviderNotInitializeError()
+        except QuotaExceededError:
+            raise ProviderQuotaExceededError()
+        except ModelCurrentlyNotSupportError:
+            raise ProviderModelCurrentlyNotSupportError()
        except Exception as e:
            logging.exception("Hit testing failed.")
            raise InternalServerError(str(e))
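The pattern in these hunks is consistent: low-level errors raised in the service layer (core.llm.error and friends) are caught at the controller boundary and re-raised as BaseHTTPException subclasses, which flask-restful serializes into JSON error responses. A compressed, purely illustrative sketch of that translation idea (toy class names, not the real classes from this commit):

# Illustrative sketch of the controller-boundary error translation used in
# these hunks; the real classes live in core.llm.error and controllers.*.error.
class QuotaExceededError(Exception):          # low-level service error
    pass

class ProviderQuotaExceededError(Exception):  # HTTP-facing error
    code = 400

def controller_action(service_call):
    try:
        return service_call()
    except QuotaExceededError:
        # re-raise as the HTTP error; the framework turns it into a JSON body
        raise ProviderQuotaExceededError()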
@@ -3,13 +3,14 @@ from libs.exception import BaseHTTPException

class AlreadySetupError(BaseHTTPException):
    error_code = 'already_setup'
-    description = "Application already setup."
+    description = "Dify has been successfully installed. Please refresh the page or return to the dashboard homepage."
    code = 403


class NotSetupError(BaseHTTPException):
    error_code = 'not_setup'
-    description = "Application not setup."
+    description = "Dify has not been initialized and installed yet. " \
+                  "Please proceed with the initialization and installation process first."
    code = 401

180
api/controllers/console/explore/completion.py
Normal file
@@ -0,0 +1,180 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union

from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound

import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
    ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.explore.error import NotCompletionAppError, NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
    LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from services.completion_service import CompletionService


# define completion api for user
class CompletionApi(InstalledAppResource):

    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception as e:
            logging.exception("internal server error.")
            raise InternalServerError()


class CompletionStopApi(InstalledAppResource):
    def post(self, installed_app, task_id):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200


class ChatApi(InstalledAppResource):
    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, required=True, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        parser.add_argument('conversation_id', type=uuid_value, location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception as e:
            logging.exception("internal server error.")
            raise InternalServerError()


class ChatStopApi(InstalledAppResource):
    def post(self, installed_app, task_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200


def compact_response(response: Union[dict | Generator]) -> Response:
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except services.errors.conversation.ConversationNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
            except services.errors.conversation.ConversationCompletedError:
                yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
            except services.errors.app_model_config.AppModelConfigBrokenError:
                logging.exception("App model config broken.")
                yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


api.add_resource(CompletionApi, '/installed-apps/<uuid:installed_app_id>/completion-messages', endpoint='installed_app_completion')
api.add_resource(CompletionStopApi, '/installed-apps/<uuid:installed_app_id>/completion-messages/<string:task_id>/stop', endpoint='installed_app_stop_completion')
api.add_resource(ChatApi, '/installed-apps/<uuid:installed_app_id>/chat-messages', endpoint='installed_app_chat_completion')
api.add_resource(ChatStopApi, '/installed-apps/<uuid:installed_app_id>/chat-messages/<string:task_id>/stop', endpoint='installed_app_stop_chat_completion')
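In streaming mode, compact_response emits "data: {...}\n\n" frames over a text/event-stream response. A minimal sketch of how a client might consume it; the base URL and the <installed_app_id> placeholder are assumptions, and cookie-based console auth is assumed to be in place already:

# Hypothetical client-side sketch (not part of the commit): reads the
# SSE frames emitted by compact_response's generator.
import json
import requests

resp = requests.post(
    'https://example.invalid/console/api/installed-apps/<installed_app_id>/chat-messages',  # assumed URL
    json={'inputs': {}, 'query': 'Hello', 'response_mode': 'streaming'},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if line and line.startswith('data: '):
        event = json.loads(line[len('data: '):])
        print(event)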
127
api/controllers/console/explore/conversation.py
Normal file
@@ -0,0 +1,127 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import fields, reqparse, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound

from controllers.console import api
from controllers.console.explore.error import NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from libs.helper import TimestampField, uuid_value
from services.conversation_service import ConversationService
from services.errors.conversation import LastConversationNotExistsError, ConversationNotExistsError
from services.web_conversation_service import WebConversationService

conversation_fields = {
    'id': fields.String,
    'name': fields.String,
    'inputs': fields.Raw,
    'status': fields.String,
    'introduction': fields.String,
    'created_at': TimestampField
}

conversation_infinite_scroll_pagination_fields = {
    'limit': fields.Integer,
    'has_more': fields.Boolean,
    'data': fields.List(fields.Nested(conversation_fields))
}


class ConversationListApi(InstalledAppResource):

    @marshal_with(conversation_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('last_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        parser.add_argument('pinned', type=str, choices=['true', 'false', None], location='args')
        args = parser.parse_args()

        pinned = None
        if 'pinned' in args and args['pinned'] is not None:
            pinned = True if args['pinned'] == 'true' else False

        try:
            return WebConversationService.pagination_by_last_id(
                app_model=app_model,
                user=current_user,
                last_id=args['last_id'],
                limit=args['limit'],
                pinned=pinned
            )
        except LastConversationNotExistsError:
            raise NotFound("Last Conversation Not Exists.")


class ConversationApi(InstalledAppResource):
    def delete(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)
        ConversationService.delete(app_model, conversation_id, current_user)
        WebConversationService.unpin(app_model, conversation_id, current_user)

        return {"result": "success"}, 204


class ConversationRenameApi(InstalledAppResource):

    @marshal_with(conversation_fields)
    def post(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)

        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, location='json')
        args = parser.parse_args()

        try:
            return ConversationService.rename(app_model, conversation_id, current_user, args['name'])
        except ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")


class ConversationPinApi(InstalledAppResource):

    def patch(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)

        try:
            WebConversationService.pin(app_model, conversation_id, current_user)
        except ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")

        return {"result": "success"}


class ConversationUnPinApi(InstalledAppResource):
    def patch(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)
        WebConversationService.unpin(app_model, conversation_id, current_user)

        return {"result": "success"}


api.add_resource(ConversationRenameApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/name', endpoint='installed_app_conversation_rename')
api.add_resource(ConversationListApi, '/installed-apps/<uuid:installed_app_id>/conversations', endpoint='installed_app_conversations')
api.add_resource(ConversationApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>', endpoint='installed_app_conversation')
api.add_resource(ConversationPinApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/pin', endpoint='installed_app_conversation_pin')
api.add_resource(ConversationUnPinApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/unpin', endpoint='installed_app_conversation_unpin')
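The list endpoint uses keyset ("infinite scroll") pagination: the client passes the id of the last conversation it already has as last_id, and the service returns the next limit rows plus a has_more flag. WebConversationService's internals are not part of this diff; the sketch below is only an illustration of that contract, with invented names:

# Illustrative keyset-pagination sketch; not the real WebConversationService.
def pagination_by_last_id(fetch_all, last_id, limit):
    rows = fetch_all()                       # conversations, already ordered
    start = 0
    if last_id is not None:
        ids = [r['id'] for r in rows]
        start = ids.index(last_id) + 1       # resume right after the cursor row
    page = rows[start:start + limit]
    return {'limit': limit, 'has_more': start + limit < len(rows), 'data': page}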
20
api/controllers/console/explore/error.py
Normal file
@@ -0,0 +1,20 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException


class NotCompletionAppError(BaseHTTPException):
    error_code = 'not_completion_app'
    description = "Not Completion App"
    code = 400


class NotChatAppError(BaseHTTPException):
    error_code = 'not_chat_app'
    description = "Not Chat App"
    code = 400


class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException):
    error_code = 'app_suggested_questions_after_answer_disabled'
    description = "Function Suggested questions after answer disabled."
    code = 403
143
api/controllers/console/explore/installed_app.py
Normal file
@@ -0,0 +1,143 @@
# -*- coding:utf-8 -*-
from datetime import datetime

from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, inputs
from sqlalchemy import and_
from werkzeug.exceptions import NotFound, Forbidden, BadRequest

from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import TimestampField
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

installed_app_fields = {
    'id': fields.String,
    'app': fields.Nested(app_fields),
    'app_owner_tenant_id': fields.String,
    'is_pinned': fields.Boolean,
    'last_used_at': TimestampField,
    'editable': fields.Boolean,
    'uninstallable': fields.Boolean,
}

installed_app_list_fields = {
    'installed_apps': fields.List(fields.Nested(installed_app_fields))
}


class InstalledAppsListApi(Resource):
    @login_required
    @account_initialization_required
    @marshal_with(installed_app_list_fields)
    def get(self):
        current_tenant_id = current_user.current_tenant_id
        installed_apps = db.session.query(InstalledApp).filter(
            InstalledApp.tenant_id == current_tenant_id
        ).all()

        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        installed_apps = [
            {
                'id': installed_app.id,
                'app': installed_app.app,
                'app_owner_tenant_id': installed_app.app_owner_tenant_id,
                'is_pinned': installed_app.is_pinned,
                'last_used_at': installed_app.last_used_at,
                "editable": current_user.role in ["owner", "admin"],
                "uninstallable": current_tenant_id == installed_app.app_owner_tenant_id
            }
            for installed_app in installed_apps
        ]
        installed_apps.sort(key=lambda app: (-app['is_pinned'], app['last_used_at']
                                             if app['last_used_at'] is not None else datetime.min))

        return {'installed_apps': installed_apps}

    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, help='Invalid app_id')
        args = parser.parse_args()

        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
        if recommended_app is None:
            raise NotFound('App not found')

        current_tenant_id = current_user.current_tenant_id
        app = db.session.query(App).filter(
            App.id == args['app_id']
        ).first()

        if app is None:
            raise NotFound('App not found')

        if not app.is_public:
            raise Forbidden('You can\'t install a non-public app')

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.app_id == args['app_id'],
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            # todo: position
            recommended_app.install_count += 1

            new_installed_app = InstalledApp(
                app_id=args['app_id'],
                tenant_id=current_tenant_id,
                app_owner_tenant_id=app.tenant_id,
                is_pinned=False,
                last_used_at=datetime.utcnow()
            )
            db.session.add(new_installed_app)
            db.session.commit()

        return {'message': 'App installed successfully'}


class InstalledAppApi(InstalledAppResource):
    """
    update and delete an installed app
    use InstalledAppResource to apply default decorators and get installed_app
    """
    def delete(self, installed_app):
        if installed_app.app_owner_tenant_id == current_user.current_tenant_id:
            raise BadRequest('You can\'t uninstall an app owned by the current tenant')

        db.session.delete(installed_app)
        db.session.commit()

        return {'result': 'success', 'message': 'App uninstalled successfully'}

    def patch(self, installed_app):
        parser = reqparse.RequestParser()
        parser.add_argument('is_pinned', type=inputs.boolean)
        args = parser.parse_args()

        commit_args = False
        if 'is_pinned' in args:
            installed_app.is_pinned = args['is_pinned']
            commit_args = True

        if commit_args:
            db.session.commit()

        return {'result': 'success', 'message': 'App info updated successfully'}


api.add_resource(InstalledAppsListApi, '/installed-apps')
api.add_resource(InstalledAppApi, '/installed-apps/<uuid:installed_app_id>')
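A note on the sort key in InstalledAppsListApi.get: (-app['is_pinned'], last_used_at or datetime.min) orders pinned apps first (True negates to -1, which sorts before 0), and within each group sorts by last_used_at ascending, with never-used apps first. A tiny self-contained illustration, using invented sample data:

# Invented sample data illustrating the sort key used above.
from datetime import datetime

apps = [
    {'name': 'b', 'is_pinned': False, 'last_used_at': datetime(2023, 5, 1)},
    {'name': 'a', 'is_pinned': True,  'last_used_at': None},
    {'name': 'c', 'is_pinned': True,  'last_used_at': datetime(2023, 5, 2)},
]
apps.sort(key=lambda app: (-app['is_pinned'],
                           app['last_used_at'] if app['last_used_at'] is not None else datetime.min))
print([a['name'] for a in apps])  # ['a', 'c', 'b'] — pinned first, then last_used_at ascending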
196
api/controllers/console/explore/message.py
Normal file
@@ -0,0 +1,196 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union

from flask import stream_with_context, Response
from flask_login import current_user
from flask_restful import reqparse, fields, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound, InternalServerError

import services
from controllers.console import api
from controllers.console.app.error import AppMoreLikeThisDisabledError, ProviderNotInitializeError, \
    ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.explore.error import NotCompletionAppError, NotChatAppError, \
    AppSuggestedQuestionsAfterAnswerDisabledError  # NotChatAppError import added: MessageListApi below raises it
from controllers.console.explore.wraps import InstalledAppResource
from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
    ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value, TimestampField
from services.completion_service import CompletionService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService


class MessageListApi(InstalledAppResource):
    feedback_fields = {
        'rating': fields.String
    }

    message_fields = {
        'id': fields.String,
        'conversation_id': fields.String,
        'inputs': fields.Raw,
        'query': fields.String,
        'answer': fields.String,
        'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
        'created_at': TimestampField
    }

    message_infinite_scroll_pagination_fields = {
        'limit': fields.Integer,
        'has_more': fields.Boolean,
        'data': fields.List(fields.Nested(message_fields))
    }

    @marshal_with(message_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app

        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
        parser.add_argument('first_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        args = parser.parse_args()

        try:
            return MessageService.pagination_by_first_id(app_model, current_user,
                                                         args['conversation_id'], args['first_id'], args['limit'])
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.message.FirstMessageNotExistsError:
            raise NotFound("First Message Not Exists.")


class MessageFeedbackApi(InstalledAppResource):
    def post(self, installed_app, message_id):
        app_model = installed_app.app

        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
        args = parser.parse_args()

        try:
            MessageService.create_feedback(app_model, message_id, current_user, args['rating'])
        except services.errors.message.MessageNotExistsError:
            raise NotFound("Message Not Exists.")

        return {'result': 'success'}


class MessageMoreLikeThisApi(InstalledAppResource):
    def get(self, installed_app, message_id):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'], location='args')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.generate_more_like_this(app_model, current_user, message_id, streaming)
            return compact_response(response)
        except MessageNotExistsError:
            raise NotFound("Message Not Exists.")
        except MoreLikeThisDisabledError:
            raise AppMoreLikeThisDisabledError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


def compact_response(response: Union[dict | Generator]) -> Response:
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except MessageNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
            except MoreLikeThisDisabledError:
                yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


class MessageSuggestedQuestionApi(InstalledAppResource):
    def get(self, installed_app, message_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotCompletionAppError()

        message_id = str(message_id)

        try:
            questions = MessageService.get_suggested_questions_after_answer(
                app_model=app_model,
                user=current_user,
                message_id=message_id
            )
        except MessageNotExistsError:
            raise NotFound("Message not found")
        except ConversationNotExistsError:
            raise NotFound("Conversation not found")
        except SuggestedQuestionsAfterAnswerDisabledError:
            raise AppSuggestedQuestionsAfterAnswerDisabledError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()

        return {'data': questions}


api.add_resource(MessageListApi, '/installed-apps/<uuid:installed_app_id>/messages', endpoint='installed_app_messages')
api.add_resource(MessageFeedbackApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/feedbacks', endpoint='installed_app_message_feedback')
api.add_resource(MessageMoreLikeThisApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/more-like-this', endpoint='installed_app_more_like_this')
api.add_resource(MessageSuggestedQuestionApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/suggested-questions', endpoint='installed_app_suggested_question')
43
api/controllers/console/explore/parameter.py
Normal file
@@ -0,0 +1,43 @@
# -*- coding:utf-8 -*-
from flask_restful import marshal_with, fields

from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource


class AppParameterApi(InstalledAppResource):
    """Resource for app variables."""
    variable_fields = {
        'key': fields.String,
        'name': fields.String,
        'description': fields.String,
        'type': fields.String,
        'default': fields.String,
        'max_length': fields.Integer,
        'options': fields.List(fields.String)
    }

    parameters_fields = {
        'opening_statement': fields.String,
        'suggested_questions': fields.Raw,
        'suggested_questions_after_answer': fields.Raw,
        'more_like_this': fields.Raw,
        'user_input_form': fields.Raw,
    }

    @marshal_with(parameters_fields)
    def get(self, installed_app):
        """Retrieve app parameters."""
        app_model = installed_app.app
        app_model_config = app_model.app_model_config

        return {
            'opening_statement': app_model_config.opening_statement,
            'suggested_questions': app_model_config.suggested_questions_list,
            'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
            'more_like_this': app_model_config.more_like_this_dict,
            'user_input_form': app_model_config.user_input_form_list
        }


api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters', endpoint='installed_app_parameters')
138
api/controllers/console/explore/recommended_app.py
Normal file
@@ -0,0 +1,138 @@
# -*- coding:utf-8 -*-
from flask_login import login_required, current_user
from flask_restful import Resource, fields, marshal_with
from sqlalchemy import and_

from controllers.console import api
from controllers.console.app.error import AppNotFoundError
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

recommended_app_fields = {
    'app': fields.Nested(app_fields, attribute='app'),
    'app_id': fields.String,
    'description': fields.String(attribute='description'),
    'copyright': fields.String,
    'privacy_policy': fields.String,
    'category': fields.String,
    'position': fields.Integer,
    'is_listed': fields.Boolean,
    'install_count': fields.Integer,
    'installed': fields.Boolean,
    'editable': fields.Boolean
}

recommended_app_list_fields = {
    'recommended_apps': fields.List(fields.Nested(recommended_app_fields)),
    'categories': fields.List(fields.String)
}


class RecommendedAppListApi(Resource):
    @login_required
    @account_initialization_required
    @marshal_with(recommended_app_list_fields)
    def get(self):
        language_prefix = current_user.interface_language if current_user.interface_language else 'en-US'

        recommended_apps = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True,
            RecommendedApp.language == language_prefix
        ).all()

        categories = set()
        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        recommended_apps_result = []
        for recommended_app in recommended_apps:
            installed = db.session.query(InstalledApp).filter(
                and_(
                    InstalledApp.app_id == recommended_app.app_id,
                    InstalledApp.tenant_id == current_user.current_tenant_id
                )
            ).first() is not None

            app = recommended_app.app
            if not app or not app.is_public:
                continue

            site = app.site
            if not site:
                continue

            recommended_app_result = {
                'id': recommended_app.id,
                'app': app,
                'app_id': recommended_app.app_id,
                'description': site.description,
                'copyright': site.copyright,
                'privacy_policy': site.privacy_policy,
                'category': recommended_app.category,
                'position': recommended_app.position,
                'is_listed': recommended_app.is_listed,
                'install_count': recommended_app.install_count,
                'installed': installed,
                'editable': current_user.role in ['owner', 'admin'],
            }
            recommended_apps_result.append(recommended_app_result)

            categories.add(recommended_app.category)  # add category to categories

        return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}


class RecommendedAppApi(Resource):
    model_config_fields = {
        'opening_statement': fields.String,
        'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
        'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
        'more_like_this': fields.Raw(attribute='more_like_this_dict'),
        'model': fields.Raw(attribute='model_dict'),
        'user_input_form': fields.Raw(attribute='user_input_form_list'),
        'pre_prompt': fields.String,
        'agent_mode': fields.Raw(attribute='agent_mode_dict'),
    }

    app_simple_detail_fields = {
        'id': fields.String,
        'name': fields.String,
        'icon': fields.String,
        'icon_background': fields.String,
        'mode': fields.String,
        'app_model_config': fields.Nested(model_config_fields),
    }

    @login_required
    @account_initialization_required
    @marshal_with(app_simple_detail_fields)
    def get(self, app_id):
        app_id = str(app_id)

        # is in public recommended list
        recommended_app = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True,
            RecommendedApp.app_id == app_id
        ).first()

        if not recommended_app:
            raise AppNotFoundError

        # get app detail
        app = db.session.query(App).filter(App.id == app_id).first()
        if not app or not app.is_public:
            raise AppNotFoundError

        return app


api.add_resource(RecommendedAppListApi, '/explore/apps')
api.add_resource(RecommendedAppApi, '/explore/apps/<uuid:app_id>')
79
api/controllers/console/explore/saved_message.py
Normal file
@@ -0,0 +1,79 @@
from flask_login import current_user
from flask_restful import reqparse, marshal_with, fields
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound

from controllers.console import api
from controllers.console.explore.error import NotCompletionAppError
from controllers.console.explore.wraps import InstalledAppResource
from libs.helper import uuid_value, TimestampField
from services.errors.message import MessageNotExistsError
from services.saved_message_service import SavedMessageService

feedback_fields = {
    'rating': fields.String
}

message_fields = {
    'id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'answer': fields.String,
    'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
    'created_at': TimestampField
}


class SavedMessageListApi(InstalledAppResource):
    saved_message_infinite_scroll_pagination_fields = {
        'limit': fields.Integer,
        'has_more': fields.Boolean,
        'data': fields.List(fields.Nested(message_fields))
    }

    @marshal_with(saved_message_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('last_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        args = parser.parse_args()

        return SavedMessageService.pagination_by_last_id(app_model, current_user, args['last_id'], args['limit'])

    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('message_id', type=uuid_value, required=True, location='json')
        args = parser.parse_args()

        try:
            SavedMessageService.save(app_model, current_user, args['message_id'])
        except MessageNotExistsError:
            raise NotFound("Message Not Exists.")

        return {'result': 'success'}


class SavedMessageApi(InstalledAppResource):
    def delete(self, installed_app, message_id):
        app_model = installed_app.app

        message_id = str(message_id)

        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        SavedMessageService.delete(app_model, current_user, message_id)

        return {'result': 'success'}


api.add_resource(SavedMessageListApi, '/installed-apps/<uuid:installed_app_id>/saved-messages', endpoint='installed_app_saved_messages')
api.add_resource(SavedMessageApi, '/installed-apps/<uuid:installed_app_id>/saved-messages/<uuid:message_id>', endpoint='installed_app_saved_message')
48
api/controllers/console/explore/wraps.py
Normal file
@@ -0,0 +1,48 @@
from flask_login import login_required, current_user
from flask_restful import Resource
from functools import wraps

from werkzeug.exceptions import NotFound

from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from models.model import InstalledApp


def installed_app_required(view=None):
    def decorator(view):
        @wraps(view)
        def decorated(*args, **kwargs):
            if not kwargs.get('installed_app_id'):
                raise ValueError('missing installed_app_id in path parameters')

            installed_app_id = kwargs.get('installed_app_id')
            installed_app_id = str(installed_app_id)

            del kwargs['installed_app_id']

            installed_app = db.session.query(InstalledApp).filter(
                InstalledApp.id == str(installed_app_id),
                InstalledApp.tenant_id == current_user.current_tenant_id
            ).first()

            if installed_app is None:
                raise NotFound('Installed app not found')

            if not installed_app.app:
                db.session.delete(installed_app)
                db.session.commit()

                raise NotFound('Installed app not found')

            return view(installed_app, *args, **kwargs)
        return decorated

    if view:
        return decorator(view)
    return decorator


class InstalledAppResource(Resource):
    # must be reversed if there are multiple decorators
    method_decorators = [installed_app_required, account_initialization_required, login_required]
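For context on the "must be reversed" comment above: flask-restful applies method_decorators in list order, so the first entry ends up innermost and the last entry runs first. A request therefore passes through login_required, then account_initialization_required, then installed_app_required, which resolves the installed_app_id path parameter into an installed_app object and passes it as the view's first argument. A minimal plain-Python illustration of that wrapping order (no Flask needed):

# Illustrative only: shows why the decorator list reads "reversed" —
# decorators listed first end up innermost (applied last at call time).
def tag(name):
    def deco(fn):
        def wrapper(*a, **kw):
            print('enter', name)
            return fn(*a, **kw)
        return wrapper
    return deco

method_decorators = [tag('installed_app_required'),
                     tag('account_initialization_required'),
                     tag('login_required')]

def view():
    print('view body')

for deco in method_decorators:
    view = deco(view)

view()
# enter login_required
# enter account_initialization_required
# enter installed_app_required
# view body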
@@ -19,6 +19,14 @@ class VersionApi(Resource):
        args = parser.parse_args()
        check_update_url = current_app.config['CHECK_UPDATE_URL']

+        if not check_update_url:
+            return {
+                'version': '0.0.0',
+                'release_date': '',
+                'release_notes': '',
+                'can_auto_update': False
+            }
+
        try:
            response = requests.get(check_update_url, {
                'current_version': args.get('current_version')
@@ -21,11 +21,11 @@ class InvalidInvitationCodeError(BaseHTTPException):

class AccountAlreadyInitedError(BaseHTTPException):
    error_code = 'account_already_inited'
-    description = "Account already inited."
+    description = "The account has been initialized. Please refresh the page."
    code = 400


class AccountNotInitializedError(BaseHTTPException):
    error_code = 'account_not_initialized'
-    description = "Account not initialized."
+    description = "The account has not been initialized yet. Please proceed with the initialization process first."
    code = 400
@@ -82,29 +82,33 @@ class ProviderTokenApi(Resource):

        args = parser.parse_args()

-        if not args['token']:
-            raise ValueError('Token is empty')
-
-        try:
-            ProviderService.validate_provider_configs(
-                tenant=current_user.current_tenant,
-                provider_name=ProviderName(provider),
-                configs=args['token']
-            )
-            token_is_valid = True
-        except ValidateFailedError:
-            token_is_valid = False
+        if args['token']:
+            try:
+                ProviderService.validate_provider_configs(
+                    tenant=current_user.current_tenant,
+                    provider_name=ProviderName(provider),
+                    configs=args['token']
+                )
+                token_is_valid = True
+            except ValidateFailedError as ex:
+                raise ValueError(str(ex))
+
+            base64_encrypted_token = ProviderService.get_encrypted_token(
+                tenant=current_user.current_tenant,
+                provider_name=ProviderName(provider),
+                configs=args['token']
+            )
+        else:
+            base64_encrypted_token = None
+            token_is_valid = False

        tenant = current_user.current_tenant

-        base64_encrypted_token = ProviderService.get_encrypted_token(
-            tenant=current_user.current_tenant,
-            provider_name=ProviderName(provider),
-            configs=args['token']
-        )
-
-        provider_model = Provider.query.filter_by(tenant_id=tenant.id, provider_name=provider,
-                                                  provider_type=ProviderType.CUSTOM.value).first()
+        provider_model = db.session.query(Provider).filter(
+            Provider.tenant_id == tenant.id,
+            Provider.provider_name == provider,
+            Provider.provider_type == ProviderType.CUSTOM.value
+        ).first()

        # Only allow updating token for CUSTOM provider type
        if provider_model:
@@ -117,6 +121,16 @@ class ProviderTokenApi(Resource):
                                      is_valid=token_is_valid)
            db.session.add(provider_model)

+        if provider_model.is_valid:
+            other_providers = db.session.query(Provider).filter(
+                Provider.tenant_id == tenant.id,
+                Provider.provider_name != provider,
+                Provider.provider_type == ProviderType.CUSTOM.value
+            ).all()
+
+            for other_provider in other_providers:
+                other_provider.is_valid = False
+
        db.session.commit()

        if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
@@ -143,7 +157,7 @@ class ProviderTokenValidateApi(Resource):
        args = parser.parse_args()

        # todo: remove this when the provider is supported
-        if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
+        if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value,
                        ProviderName.HUGGINGFACEHUB.value]:
            return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}
@@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException

class AppUnavailableError(BaseHTTPException):
    error_code = 'app_unavailable'
-    description = "App unavailable."
+    description = "App unavailable, please check your app configurations."
    code = 400


class NotCompletionAppError(BaseHTTPException):
    error_code = 'not_completion_app'
-    description = "Not Completion App"
+    description = "Please check if your Completion app mode matches the right API route."
    code = 400


class NotChatAppError(BaseHTTPException):
    error_code = 'not_chat_app'
-    description = "Not Chat App"
+    description = "Please check if your Chat app mode matches the right API route."
    code = 400


class ConversationCompletedError(BaseHTTPException):
    error_code = 'conversation_completed'
-    description = "Conversation Completed."
+    description = "The conversation has ended. Please start a new conversation."
    code = 400


class ProviderNotInitializeError(BaseHTTPException):
    error_code = 'provider_not_initialize'
-    description = "Provider Token not initialize."
+    description = "No valid model provider credentials found. " \
+                  "Please go to Settings -> Model Provider to complete your provider credentials."
    code = 400


class ProviderQuotaExceededError(BaseHTTPException):
    error_code = 'provider_quota_exceeded'
-    description = "Provider quota exceeded."
+    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+                  "Please go to Settings -> Model Provider to complete your own provider credentials."
    code = 400


class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
    error_code = 'model_currently_not_support'
-    description = "GPT-4 currently not support."
+    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
    code = 400
@@ -16,5 +16,5 @@ class DocumentIndexingError(BaseHTTPException):

class DatasetNotInitedError(BaseHTTPException):
    error_code = 'dataset_not_inited'
-    description = "Dataset not inited."
+    description = "The dataset is still being initialized or indexing. Please wait a moment."
    code = 403
@@ -47,7 +47,7 @@ class ConversationListApi(WebApiResource):
        try:
            return WebConversationService.pagination_by_last_id(
                app_model=app_model,
-                end_user=end_user,
+                user=end_user,
                last_id=args['last_id'],
                limit=args['limit'],
                pinned=pinned
@@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException

class AppUnavailableError(BaseHTTPException):
    error_code = 'app_unavailable'
-    description = "App unavailable."
+    description = "App unavailable, please check your app configurations."
    code = 400


class NotCompletionAppError(BaseHTTPException):
    error_code = 'not_completion_app'
-    description = "Not Completion App"
+    description = "Please check if your Completion app mode matches the right API route."
    code = 400


class NotChatAppError(BaseHTTPException):
    error_code = 'not_chat_app'
-    description = "Not Chat App"
+    description = "Please check if your Chat app mode matches the right API route."
    code = 400


class ConversationCompletedError(BaseHTTPException):
    error_code = 'conversation_completed'
-    description = "Conversation Completed."
+    description = "The conversation has ended. Please start a new conversation."
    code = 400


class ProviderNotInitializeError(BaseHTTPException):
    error_code = 'provider_not_initialize'
-    description = "Provider Token not initialize."
+    description = "No valid model provider credentials found. " \
+                  "Please go to Settings -> Model Provider to complete your provider credentials."
    code = 400


class ProviderQuotaExceededError(BaseHTTPException):
    error_code = 'provider_quota_exceeded'
-    description = "Provider quota exceeded."
+    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+                  "Please go to Settings -> Model Provider to complete your own provider credentials."
    code = 400


class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
    error_code = 'model_currently_not_support'
-    description = "GPT-4 currently not support."
+    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
    code = 400
@@ -52,11 +54,11 @@ class CompletionRequestError(BaseHTTPException):

class AppMoreLikeThisDisabledError(BaseHTTPException):
    error_code = 'app_more_like_this_disabled'
-    description = "More like this disabled."
+    description = "The 'More like this' feature is disabled. Please refresh your page."
    code = 403


class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException):
    error_code = 'app_suggested_questions_after_answer_disabled'
-    description = "Function Suggested questions after answer disabled."
+    description = "The 'Suggested Questions After Answer' feature is disabled. Please refresh your page."
    code = 403
@@ -42,13 +42,16 @@ def validate_and_get_site():
    """
    auth_header = request.headers.get('Authorization')
    if auth_header is None:
-        raise Unauthorized()
+        raise Unauthorized('Authorization header is missing.')
+
+    if ' ' not in auth_header:
+        raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

    auth_scheme, auth_token = auth_header.split(None, 1)
    auth_scheme = auth_scheme.lower()

    if auth_scheme != 'bearer':
-        raise Unauthorized()
+        raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

    site = db.session.query(Site).filter(
        Site.code == auth_token,
@@ -34,5 +34,9 @@ class DatasetIndexToolCallbackHandler(IndexToolCallbackHandler):
        db.session.query(DocumentSegment).filter(
            DocumentSegment.dataset_id == self.dataset_id,
            DocumentSegment.index_node_id == index_node_id
-        ).update({DocumentSegment.hit_count: DocumentSegment.hit_count + 1}, synchronize_session=False)
+        ).update(
+            {DocumentSegment.hit_count: DocumentSegment.hit_count + 1},
+            synchronize_session=False
+        )

        db.session.commit()
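The update() call above is worth a note: passing the column expression DocumentSegment.hit_count + 1 makes the increment happen in SQL (SET hit_count = hit_count + 1), avoiding the read-modify-write race of loading the row into Python first. A minimal self-contained sketch of the same pattern; the model and table names below are illustrative, not from this commit:

# Sketch of a SQL-side counter increment with SQLAlchemy (illustrative names).
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Segment(Base):
    __tablename__ = 'segments'
    id = Column(Integer, primary_key=True)
    node_id = Column(String)
    hit_count = Column(Integer, default=0)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Segment(node_id='n1', hit_count=0))
    session.commit()
    # emits: UPDATE segments SET hit_count = hit_count + 1 WHERE node_id = 'n1'
    session.query(Segment).filter(Segment.node_id == 'n1').update(
        {Segment.hit_count: Segment.hit_count + 1},
        synchronize_session=False
    )
    session.commit()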
135
api/core/chain/llm_router_chain.py
Normal file
135
api/core/chain/llm_router_chain.py
Normal file
@@ -0,0 +1,135 @@
"""Base classes for LLM-powered router chains."""
from __future__ import annotations

import json
from typing import Any, Dict, List, Optional, Type, cast, NamedTuple

from langchain.chains.base import Chain
from pydantic import root_validator

from langchain.chains import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException, BaseLanguageModel


class Route(NamedTuple):
    destination: Optional[str]
    next_inputs: Dict[str, Any]


class LLMRouterChain(Chain):
    """A router chain that uses an LLM chain to perform routing."""

    llm_chain: LLMChain
    """LLM chain used to perform routing"""

    @root_validator()
    def validate_prompt(cls, values: dict) -> dict:
        prompt = values["llm_chain"].prompt
        if prompt.output_parser is None:
            raise ValueError(
                "LLMRouterChain requires base llm_chain prompt to have an output"
                " parser that converts LLM text output to a dictionary with keys"
                " 'destination' and 'next_inputs'. Received a prompt with no output"
                " parser."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the LLM chain prompt expects.

        :meta private:
        """
        return self.llm_chain.input_keys

    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
        super()._validate_outputs(outputs)
        if not isinstance(outputs["next_inputs"], dict):
            raise ValueError

    def _call(
            self,
            inputs: Dict[str, Any]
    ) -> Dict[str, Any]:
        output = cast(
            Dict[str, Any],
            self.llm_chain.predict_and_parse(**inputs),
        )
        return output

    @classmethod
    def from_llm(
            cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any
    ) -> LLMRouterChain:
        """Convenience constructor."""
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)

    @property
    def output_keys(self) -> List[str]:
        return ["destination", "next_inputs"]

    def route(self, inputs: Dict[str, Any]) -> Route:
        result = self(inputs)
        return Route(result["destination"], result["next_inputs"])


class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
    """Parser for output of router chain in the multi-prompt chain."""

    default_destination: str = "DEFAULT"
    next_inputs_type: Type = str
    next_inputs_inner_key: str = "input"

    def parse_json_markdown(self, json_string: str) -> dict:
        # Remove the triple backticks if present
        start_index = json_string.find("```json")
        end_index = json_string.find("```", start_index + len("```json"))

        if start_index != -1 and end_index != -1:
            extracted_content = json_string[start_index + len("```json"):end_index].strip()

            # Parse the JSON string into a Python dictionary
            parsed = json.loads(extracted_content)
        else:
            raise Exception("Could not find JSON block in the output.")

        return parsed

    def parse_and_check_json_markdown(self, text: str, expected_keys: List[str]) -> dict:
        try:
            json_obj = self.parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise OutputParserException(f"Got invalid JSON object. Error: {e}")
        for key in expected_keys:
            if key not in json_obj:
                raise OutputParserException(
                    f"Got invalid return object. Expected key `{key}` "
                    f"to be present, but got {json_obj}"
                )
        return json_obj

    def parse(self, text: str) -> Dict[str, Any]:
        try:
            expected_keys = ["destination", "next_inputs"]
            parsed = self.parse_and_check_json_markdown(text, expected_keys)
            if not isinstance(parsed["destination"], str):
                raise ValueError("Expected 'destination' to be a string.")
            if not isinstance(parsed["next_inputs"], self.next_inputs_type):
                raise ValueError(
                    f"Expected 'next_inputs' to be {self.next_inputs_type}."
                )
            parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
            if (
                parsed["destination"].strip().lower()
                == self.default_destination.lower()
            ):
                parsed["destination"] = None
            else:
                parsed["destination"] = parsed["destination"].strip()
            return parsed
        except Exception as e:
            raise OutputParserException(
                f"Parsing text\n{text}\n raised following error:\n{e}"
            )
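For context, this parser expects the router model to reply with a fenced JSON snippet, which parse() turns into a routing dict. A quick sketch with a made-up reply, assuming the import path matches the new file above:

    # Made-up router reply in the format requested by the router prompt.
    from core.chain.llm_router_chain import RouterOutputParser

    sample_reply = (
        "```json\n"
        '{"destination": "dataset-123", "next_inputs": "What is Dify?"}\n'
        "```"
    )

    parser = RouterOutputParser()
    parsed = parser.parse(sample_reply)
    # next_inputs is wrapped under the 'input' key; a "DEFAULT" destination would become None.
    assert parsed == {'destination': 'dataset-123', 'next_inputs': {'input': 'What is Dify?'}}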
@@ -1,18 +1,18 @@
 from typing import Optional, List
 
-from langchain.callbacks import SharedCallbackManager
+from langchain.callbacks import SharedCallbackManager, CallbackManager
 from langchain.chains import SequentialChain
 from langchain.chains.base import Chain
 from langchain.memory.chat_memory import BaseChatMemory
 
 from core.agent.agent_builder import AgentBuilder
 from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
 from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
 from core.callback_handler.main_chain_gather_callback_handler import MainChainGatherCallbackHandler
 from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
 from core.chain.chain_builder import ChainBuilder
-from core.constant import llm_constant
+from core.chain.multi_dataset_router_chain import MultiDatasetRouterChain
 from core.conversation_message_task import ConversationMessageTask
 from core.tool.dataset_tool_builder import DatasetToolBuilder
+from extensions.ext_database import db
+from models.dataset import Dataset
 
 
 class MainChainBuilder:
@@ -31,8 +31,7 @@ class MainChainBuilder:
             tenant_id=tenant_id,
             agent_mode=agent_mode,
             memory=memory,
-            dataset_tool_callback_handler=DatasetToolCallbackHandler(conversation_message_task),
-            agent_loop_gather_callback_handler=chain_callback_handler.agent_loop_gather_callback_handler
+            conversation_message_task=conversation_message_task
         )
         chains += tool_chains
 
@@ -59,15 +58,15 @@ class MainChainBuilder:
 
     @classmethod
     def get_agent_chains(cls, tenant_id: str, agent_mode: dict, memory: Optional[BaseChatMemory],
-                         dataset_tool_callback_handler: DatasetToolCallbackHandler,
-                         agent_loop_gather_callback_handler: AgentLoopGatherCallbackHandler):
+                         conversation_message_task: ConversationMessageTask):
         # agent mode
         chains = []
         if agent_mode and agent_mode.get('enabled'):
             tools = agent_mode.get('tools', [])
 
             pre_fixed_chains = []
-            agent_tools = []
+            # agent_tools = []
+            datasets = []
             for tool in tools:
                 tool_type = list(tool.keys())[0]
                 tool_config = list(tool.values())[0]
@@ -76,34 +75,27 @@ class MainChainBuilder:
                     if chain:
                         pre_fixed_chains.append(chain)
                 elif tool_type == "dataset":
-                    dataset_tool = DatasetToolBuilder.build_dataset_tool(
-                        tenant_id=tenant_id,
-                        dataset_id=tool_config.get("id"),
-                        response_mode='no_synthesizer',  # "compact"
-                        callback_handler=dataset_tool_callback_handler
-                    )
+                    # get dataset from dataset id
+                    dataset = db.session.query(Dataset).filter(
+                        Dataset.tenant_id == tenant_id,
+                        Dataset.id == tool_config.get("id")
+                    ).first()
 
-                    if dataset_tool:
-                        agent_tools.append(dataset_tool)
+                    if dataset:
+                        datasets.append(dataset)
 
             # add pre-fixed chains
             chains += pre_fixed_chains
 
-            if len(agent_tools) == 1:
+            if len(datasets) > 0:
                 # tool to chain
-                tool_chain = ChainBuilder.to_tool_chain(tool=agent_tools[0], output_key='tool_output')
-                chains.append(tool_chain)
-            elif len(agent_tools) > 1:
-                # build agent config
-                agent_chain = AgentBuilder.to_agent_chain(
+                multi_dataset_router_chain = MultiDatasetRouterChain.from_datasets(
                     tenant_id=tenant_id,
-                    tools=agent_tools,
-                    memory=memory,
-                    dataset_tool_callback_handler=dataset_tool_callback_handler,
-                    agent_loop_gather_callback_handler=agent_loop_gather_callback_handler
+                    datasets=datasets,
+                    conversation_message_task=conversation_message_task,
+                    callback_manager=CallbackManager([DifyStdOutCallbackHandler()])
                 )
 
-                chains.append(agent_chain)
+                chains.append(multi_dataset_router_chain)
 
         final_output_key = cls.get_chains_output_key(chains)
140  api/core/chain/multi_dataset_router_chain.py  Normal file
@@ -0,0 +1,140 @@
from typing import Mapping, List, Dict, Any, Optional

from langchain import LLMChain, PromptTemplate, ConversationChain
from langchain.callbacks import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseLanguageModel
from pydantic import Extra

from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
from core.chain.llm_router_chain import LLMRouterChain, RouterOutputParser
from core.conversation_message_task import ConversationMessageTask
from core.llm.llm_builder import LLMBuilder
from core.tool.dataset_tool_builder import DatasetToolBuilder
from core.tool.llama_index_tool import EnhanceLlamaIndexTool
from models.dataset import Dataset

MULTI_PROMPT_ROUTER_TEMPLATE = """
Given a raw text input to a language model select the model prompt best suited for \
the input. You will be given the names of the available prompts and a description of \
what the prompt is best suited for. You may also revise the original input if you \
think that revising it will ultimately lead to a better response from the language \
model.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
    "destination": string \\ name of the prompt to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
```

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \
it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any \
modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
"""


class MultiDatasetRouterChain(Chain):
    """Use a single chain to route an input to one of multiple candidate chains."""

    router_chain: LLMRouterChain
    """Chain for deciding a destination chain and the input to it."""
    dataset_tools: Mapping[str, EnhanceLlamaIndexTool]
    """Map of name to candidate chains that inputs can be routed to."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the router chain prompt expects.

        :meta private:
        """
        return self.router_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_datasets(
            cls,
            tenant_id: str,
            datasets: List[Dataset],
            conversation_message_task: ConversationMessageTask,
            **kwargs: Any,
    ):
        """Convenience constructor for instantiating from destination prompts."""
        llm_callback_manager = CallbackManager([DifyStdOutCallbackHandler()])
        llm = LLMBuilder.to_llm(
            tenant_id=tenant_id,
            model_name='gpt-3.5-turbo',
            temperature=0,
            max_tokens=1024,
            callback_manager=llm_callback_manager
        )

        destinations = ["{}: {}".format(d.id, d.description.replace('\n', ' ') if d.description
                        else ('useful for when you want to answer queries about the ' + d.name))
                        for d in datasets]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        dataset_tools = {}
        for dataset in datasets:
            dataset_tool = DatasetToolBuilder.build_dataset_tool(
                dataset=dataset,
                response_mode='no_synthesizer',  # "compact"
                callback_handler=DatasetToolCallbackHandler(conversation_message_task)
            )
            dataset_tools[dataset.id] = dataset_tool
        return cls(
            router_chain=router_chain,
            dataset_tools=dataset_tools,
            **kwargs,
        )

    def _call(
            self,
            inputs: Dict[str, Any]
    ) -> Dict[str, Any]:
        if len(self.dataset_tools) == 0:
            return {"text": ''}
        elif len(self.dataset_tools) == 1:
            return {"text": next(iter(self.dataset_tools.values())).run(inputs['input'])}

        route = self.router_chain.route(inputs)

        if not route.destination:
            return {"text": ''}
        elif route.destination in self.dataset_tools:
            return {"text": self.dataset_tools[route.destination].run(
                route.next_inputs['input']
            )}
        else:
            raise ValueError(
                f"Received invalid destination chain name '{route.destination}'"
            )
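The _call dispatch above is easiest to see with plain callables standing in for the dataset tools. This is an illustrative sketch, not code from the commit:

    # Self-contained sketch of the routing dispatch implemented by _call above.
    from typing import Callable, Dict, Optional

    def dispatch(destination: Optional[str], query: str,
                 dataset_tools: Dict[str, Callable[[str], str]]) -> str:
        # No datasets configured: nothing to retrieve.
        if not dataset_tools:
            return ''
        # A single dataset needs no routing at all.
        if len(dataset_tools) == 1:
            return next(iter(dataset_tools.values()))(query)
        # "DEFAULT" is parsed to None by RouterOutputParser: skip retrieval.
        if not destination:
            return ''
        if destination in dataset_tools:
            return dataset_tools[destination](query)
        raise ValueError(f"Received invalid destination chain name '{destination}'")

    tools = {'ds-1': lambda q: f'[ds-1] {q}', 'ds-2': lambda q: f'[ds-2] {q}'}
    print(dispatch('ds-2', 'What is Dify?', tools))  # -> [ds-2] What is Dify?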
@@ -1,14 +1,17 @@
-from typing import Optional, List, Union
+import logging
+from typing import Optional, List, Union, Tuple
 
 from langchain.callbacks import CallbackManager
 from langchain.chat_models.base import BaseChatModel
 from langchain.llms import BaseLLM
 from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage
+from requests.exceptions import ChunkedEncodingError
 
 from core.constant import llm_constant
 from core.callback_handler.llm_callback_handler import LLMCallbackHandler
 from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
     DifyStdOutCallbackHandler
-from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
+from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException, PubHandler
 from core.llm.error import LLMBadRequestError
 from core.llm.llm_builder import LLMBuilder
 from core.chain.main_chain_builder import MainChainBuilder
@@ -39,7 +42,8 @@ class Completion:
         memory = cls.get_memory_from_conversation(
             tenant_id=app.tenant_id,
             app_model_config=app_model_config,
-            conversation=conversation
+            conversation=conversation,
+            return_messages=False
         )
 
         inputs = conversation.inputs
@@ -83,6 +87,11 @@ class Completion:
             )
         except ConversationTaskStoppedException:
             return
+        except ChunkedEncodingError as e:
+            # Interrupt by LLM (like OpenAI), handle it.
+            logging.warning(f'ChunkedEncodingError: {e}')
+            conversation_message_task.end()
+            return
 
     @classmethod
     def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
@@ -96,7 +105,7 @@ class Completion:
         )
 
         # get llm prompt
-        prompt = cls.get_main_llm_prompt(
+        prompt, stop_words = cls.get_main_llm_prompt(
             mode=mode,
             llm=final_llm,
             pre_prompt=app_model_config.pre_prompt,
@@ -114,30 +123,47 @@ class Completion:
             mode=mode
         )
 
-        response = final_llm.generate([prompt])
+        response = final_llm.generate([prompt], stop_words)
 
         return response
 
     @classmethod
-    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, chain_output: Optional[str],
+    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict,
+                            chain_output: Optional[str],
                             memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
-            Union[str | List[BaseMessage]]:
+            Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]:
+        # disable template string in query
+        query_params = OutLinePromptTemplate.from_template(template=query).input_variables
+        if query_params:
+            for query_param in query_params:
+                if query_param not in inputs:
+                    inputs[query_param] = '{' + query_param + '}'
+
         pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt
         if mode == 'completion':
             prompt_template = OutLinePromptTemplate.from_template(
-                template=("Use the following pieces of [CONTEXT] to answer the question at the end. "
-                          "If you don't know the answer, "
-                          "just say that you don't know, don't try to make up an answer. \n"
-                          "```\n"
-                          "[CONTEXT]\n"
-                          "{context}\n"
-                          "```\n" if chain_output else "")
+                template=("""Use the following CONTEXT as your learned knowledge:
+[CONTEXT]
+{context}
+[END CONTEXT]
+
+When answer to user:
+- If you don't know, just say that you don't know.
+- If you don't know when you are not sure, ask for clarification.
+Avoid mentioning that you obtained the information from the context.
+And answer according to the language of the user's question.
+""" if chain_output else "")
                 + (pre_prompt + "\n" if pre_prompt else "")
                 + "{query}\n"
             )
 
         if chain_output:
             inputs['context'] = chain_output
+            context_params = OutLinePromptTemplate.from_template(template=chain_output).input_variables
+            if context_params:
+                for context_param in context_params:
+                    if context_param not in inputs:
+                        inputs[context_param] = '{' + context_param + '}'
 
         prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
         prompt_content = prompt_template.format(
@@ -147,64 +173,83 @@ class Completion:
 
             if isinstance(llm, BaseChatModel):
                 # use chat llm as completion model
-                return [HumanMessage(content=prompt_content)]
+                return [HumanMessage(content=prompt_content)], None
             else:
-                return prompt_content
+                return prompt_content, None
         else:
             messages: List[BaseMessage] = []
 
-            system_message = None
-            if pre_prompt:
-                # append pre prompt as system message
-                system_message = PromptBuilder.to_system_message(pre_prompt, inputs)
-
-            if chain_output:
-                # append context as system message, currently only use simple stuff prompt
-                context_message = PromptBuilder.to_system_message(
-                    """Use the following pieces of [CONTEXT] to answer the users question.
-If you don't know the answer, just say that you don't know, don't try to make up an answer.
-```
-[CONTEXT]
-{context}
-```""",
-                    {'context': chain_output}
-                )
-
-                if not system_message:
-                    system_message = context_message
-                else:
-                    system_message.content = context_message.content + "\n\n" + system_message.content
-
-            if system_message:
-                messages.append(system_message)
-
             human_inputs = {
                 "query": query
            }
 
-            # construct main prompt
-            human_message = PromptBuilder.to_human_message(
-                prompt_content="{query}",
-                inputs=human_inputs
-            )
+            human_message_prompt = ""
+
+            if pre_prompt:
+                pre_prompt_inputs = {k: inputs[k] for k in
+                                     OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
+                                     if k in inputs}
+
+                if pre_prompt_inputs:
+                    human_inputs.update(pre_prompt_inputs)
+
+            if chain_output:
+                human_inputs['context'] = chain_output
+                human_message_prompt += """Use the following CONTEXT as your learned knowledge.
+[CONTEXT]
+{context}
+[END CONTEXT]
+
+When answer to user:
+- If you don't know, just say that you don't know.
+- If you don't know when you are not sure, ask for clarification.
+Avoid mentioning that you obtained the information from the context.
+And answer according to the language of the user's question.
+"""
+
+            if pre_prompt:
+                human_message_prompt += pre_prompt
+
+            query_prompt = "\nHuman: {query}\nAI: "
+
             if memory:
                 # append chat histories
-                tmp_messages = messages.copy() + [human_message]
-                curr_message_tokens = memory.llm.get_messages_tokens(tmp_messages)
-                rest_tokens = llm_constant.max_context_token_length[
-                                  memory.llm.model_name] - memory.llm.max_tokens - curr_message_tokens
+                tmp_human_message = PromptBuilder.to_human_message(
+                    prompt_content=human_message_prompt + query_prompt,
+                    inputs=human_inputs
+                )
+
+                curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message])
+                rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \
+                              - memory.llm.max_tokens - curr_message_tokens
                 rest_tokens = max(rest_tokens, 0)
-                history_messages = cls.get_history_messages_from_memory(memory, rest_tokens)
-                messages += history_messages
+                histories = cls.get_history_messages_from_memory(memory, rest_tokens)
+
+                # disable template string in query
+                histories_params = OutLinePromptTemplate.from_template(template=histories).input_variables
+                if histories_params:
+                    for histories_param in histories_params:
+                        if histories_param not in human_inputs:
+                            human_inputs[histories_param] = '{' + histories_param + '}'
+
+                human_message_prompt += "\n\n" + histories
+
+            human_message_prompt += query_prompt
+
+            # construct main prompt
+            human_message = PromptBuilder.to_human_message(
+                prompt_content=human_message_prompt,
+                inputs=human_inputs
+            )
 
             messages.append(human_message)
 
-            return messages
+            return messages, ['\nHuman:']
 
     @classmethod
     def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
-                                 streaming: bool, conversation_message_task: ConversationMessageTask) -> CallbackManager:
+                                 streaming: bool,
+                                 conversation_message_task: ConversationMessageTask) -> CallbackManager:
         llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
         if streaming:
             callback_handlers = [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
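To visualize what the reworked chat-mode prompt looks like once assembled, here is a rough sketch with made-up values; the returned stop word '\nHuman:' then keeps the model from generating the next user turn. The real code fills {context} and {query} through PromptBuilder rather than by direct concatenation:

    # Illustrative only: approximates the prompt layout built above with literal values.
    context = "Dify is an LLMOps platform."
    pre_prompt = "You are a helpful assistant of Dify."
    histories = "Human: hi\nAI: hello"
    query = "What is Dify?"

    human_message_prompt = ""
    if context:
        human_message_prompt += (
            "Use the following CONTEXT as your learned knowledge.\n"
            "[CONTEXT]\n" + context + "\n[END CONTEXT]\n"
        )
    human_message_prompt += pre_prompt
    human_message_prompt += "\n\n" + histories
    human_message_prompt += "\nHuman: " + query + "\nAI: "
    print(human_message_prompt)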
@@ -216,7 +261,7 @@ If you don't know the answer, just say that you don't know, don't try to make up
     @classmethod
     def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
                                          max_token_limit: int) -> \
-            List[BaseMessage]:
+            str:
         """Get memory messages."""
         memory.max_token_limit = max_token_limit
         memory_key = memory.memory_variables[0]
@@ -286,7 +331,7 @@ If you don't know the answer, just say that you don't know, don't try to make up
         )
 
         # get llm prompt
-        original_prompt = cls.get_main_llm_prompt(
+        original_prompt, _ = cls.get_main_llm_prompt(
             mode="completion",
             llm=llm,
             pre_prompt=pre_prompt,
@@ -2,8 +2,6 @@ import decimal
 import json
 from typing import Optional, Union
 
-from gunicorn.config import User
-
 from core.callback_handler.entity.agent_loop import AgentLoop
 from core.callback_handler.entity.dataset_query import DatasetQueryObj
 from core.callback_handler.entity.llm_message import LLMMessage
@@ -58,6 +56,9 @@ class ConversationMessageTask:
         )
 
     def init(self):
+        provider_name = LLMBuilder.get_default_provider(self.app.tenant_id)
+        self.model_dict['provider'] = provider_name
+
         override_model_configs = None
         if self.is_override:
             override_model_configs = {
@@ -79,7 +80,10 @@ class ConversationMessageTask:
         if introduction:
             prompt_template = OutLinePromptTemplate.from_template(template=PromptBuilder.process_template(introduction))
             prompt_inputs = {k: self.inputs[k] for k in prompt_template.input_variables if k in self.inputs}
-            introduction = prompt_template.format(**prompt_inputs)
+            try:
+                introduction = prompt_template.format(**prompt_inputs)
+            except KeyError:
+                pass
 
         if self.app_model_config.pre_prompt:
             pre_prompt = PromptBuilder.process_template(self.app_model_config.pre_prompt)
@@ -170,7 +174,7 @@ class ConversationMessageTask:
         )
 
         if not by_stopped:
-            self._pub_handler.pub_end()
+            self.end()
 
     def update_provider_quota(self):
         llm_provider_service = LLMProviderService(
@@ -267,9 +271,12 @@ class ConversationMessageTask:
         total_price = message_tokens_per_1k * message_unit_price + answer_tokens_per_1k * answer_unit_price
         return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP)
 
+    def end(self):
+        self._pub_handler.pub_end()
+
 
 class PubHandler:
-    def __init__(self, user: Union[Account | User], task_id: str,
+    def __init__(self, user: Union[Account | EndUser], task_id: str,
                  message: Message, conversation: Conversation,
                  chain_pub: bool = False, agent_thought_pub: bool = False):
         self._channel = PubHandler.generate_channel_name(user, task_id)
@@ -282,12 +289,15 @@ class PubHandler:
         self._agent_thought_pub = agent_thought_pub
 
     @classmethod
-    def generate_channel_name(cls, user: Union[Account | User], task_id: str):
+    def generate_channel_name(cls, user: Union[Account | EndUser], task_id: str):
+        if not user:
+            raise ValueError("user is required")
+
         user_str = 'account-' + user.id if isinstance(user, Account) else 'end-user-' + user.id
         return "generate_result:{}-{}".format(user_str, task_id)
 
     @classmethod
-    def generate_stopped_cache_key(cls, user: Union[Account | User], task_id: str):
+    def generate_stopped_cache_key(cls, user: Union[Account | EndUser], task_id: str):
         user_str = 'account-' + user.id if isinstance(user, Account) else 'end-user-' + user.id
         return "generate_result_stopped:{}-{}".format(user_str, task_id)
@@ -366,7 +376,7 @@ class PubHandler:
         redis_client.publish(self._channel, json.dumps(content))
 
     @classmethod
-    def pub_error(cls, user: Union[Account | User], task_id: str, e):
+    def pub_error(cls, user: Union[Account | EndUser], task_id: str, e):
         content = {
             'error': type(e).__name__,
             'description': e.description if getattr(e, 'description', None) is not None else str(e)
@@ -379,7 +389,7 @@ class PubHandler:
         return redis_client.get(self._stopped_cache_key) is not None
 
     @classmethod
-    def stop(cls, user: Union[Account | User], task_id: str):
+    def stop(cls, user: Union[Account | EndUser], task_id: str):
         stopped_cache_key = cls.generate_stopped_cache_key(user, task_id)
         redis_client.setex(stopped_cache_key, 600, 1)
@@ -11,9 +11,10 @@ from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_except
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 def get_embedding(
-        text: str,
-        engine: Optional[str] = None,
-        openai_api_key: Optional[str] = None,
+        text: str,
+        engine: Optional[str] = None,
+        api_key: Optional[str] = None,
+        **kwargs
 ) -> List[float]:
     """Get embedding.
 
@@ -25,11 +26,12 @@ def get_embedding(
 
     """
     text = text.replace("\n", " ")
-    return openai.Embedding.create(input=[text], engine=engine, api_key=openai_api_key)["data"][0]["embedding"]
+    return openai.Embedding.create(input=[text], engine=engine, api_key=api_key, **kwargs)["data"][0]["embedding"]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
-async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key: Optional[str] = None) -> List[float]:
+async def aget_embedding(text: str, engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs) -> List[
+    float]:
     """Asynchronously get embedding.
 
     NOTE: Copied from OpenAI's embedding utils:
@@ -42,16 +44,17 @@ async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key
     # replace newlines, which can negatively affect performance.
     text = text.replace("\n", " ")
 
-    return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=openai_api_key))["data"][0][
+    return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=api_key, **kwargs))["data"][0][
         "embedding"
     ]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 def get_embeddings(
-        list_of_text: List[str],
-        engine: Optional[str] = None,
-        openai_api_key: Optional[str] = None
+        list_of_text: List[str],
+        engine: Optional[str] = None,
+        api_key: Optional[str] = None,
+        **kwargs
 ) -> List[List[float]]:
     """Get embeddings.
 
@@ -67,14 +70,14 @@ def get_embeddings(
     # replace newlines, which can negatively affect performance.
     list_of_text = [text.replace("\n", " ") for text in list_of_text]
 
-    data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=openai_api_key).data
+    data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=api_key, **kwargs).data
     data = sorted(data, key=lambda x: x["index"])  # maintain the same order as input.
     return [d["embedding"] for d in data]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 async def aget_embeddings(
-    list_of_text: List[str], engine: Optional[str] = None, openai_api_key: Optional[str] = None
+    list_of_text: List[str], engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs
 ) -> List[List[float]]:
     """Asynchronously get embeddings.
 
@@ -90,7 +93,7 @@ async def aget_embeddings(
     # replace newlines, which can negatively affect performance.
     list_of_text = [text.replace("\n", " ") for text in list_of_text]
 
-    data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=openai_api_key)).data
+    data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=api_key, **kwargs)).data
     data = sorted(data, key=lambda x: x["index"])  # maintain the same order as input.
     return [d["embedding"] for d in data]
 
@@ -98,19 +101,30 @@ async def aget_embeddings(
 class OpenAIEmbedding(BaseEmbedding):
 
     def __init__(
-            self,
-            mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
-            model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
-            deployment_name: Optional[str] = None,
-            openai_api_key: Optional[str] = None,
-            **kwargs: Any,
+            self,
+            mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
+            model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
+            deployment_name: Optional[str] = None,
+            openai_api_key: Optional[str] = None,
+            **kwargs: Any,
     ) -> None:
         """Init params."""
-        super().__init__(**kwargs)
+        new_kwargs = {}
+
+        if 'embed_batch_size' in kwargs:
+            new_kwargs['embed_batch_size'] = kwargs['embed_batch_size']
+
+        if 'tokenizer' in kwargs:
+            new_kwargs['tokenizer'] = kwargs['tokenizer']
+
+        super().__init__(**new_kwargs)
         self.mode = OpenAIEmbeddingMode(mode)
         self.model = OpenAIEmbeddingModelType(model)
         self.deployment_name = deployment_name
         self.openai_api_key = openai_api_key
+        self.openai_api_type = kwargs.get('openai_api_type')
+        self.openai_api_version = kwargs.get('openai_api_version')
+        self.openai_api_base = kwargs.get('openai_api_base')
 
     @handle_llm_exceptions
     def _get_query_embedding(self, query: str) -> List[float]:
@@ -122,7 +136,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _QUERY_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _QUERY_MODE_MODEL_DICT[key]
-        return get_embedding(query, engine=engine, openai_api_key=self.openai_api_key)
+        return get_embedding(query, engine=engine, api_key=self.openai_api_key,
+                             api_type=self.openai_api_type, api_version=self.openai_api_version,
+                             api_base=self.openai_api_base)
 
     def _get_text_embedding(self, text: str) -> List[float]:
         """Get text embedding."""
@@ -133,7 +149,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        return get_embedding(text, engine=engine, openai_api_key=self.openai_api_key)
+        return get_embedding(text, engine=engine, api_key=self.openai_api_key,
+                             api_type=self.openai_api_type, api_version=self.openai_api_version,
+                             api_base=self.openai_api_base)
 
     async def _aget_text_embedding(self, text: str) -> List[float]:
         """Asynchronously get text embedding."""
@@ -144,7 +162,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        return await aget_embedding(text, engine=engine, openai_api_key=self.openai_api_key)
+        return await aget_embedding(text, engine=engine, api_key=self.openai_api_key,
+                                    api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                    api_base=self.openai_api_base)
 
     def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Get text embeddings.
@@ -153,6 +173,13 @@ class OpenAIEmbedding(BaseEmbedding):
         Can be overridden for batch queries.
 
         """
+        if self.openai_api_type and self.openai_api_type == 'azure':
+            embeddings = []
+            for text in texts:
+                embeddings.append(self._get_text_embedding(text))
+
+            return embeddings
+
         if self.deployment_name is not None:
             engine = self.deployment_name
         else:
@@ -160,11 +187,20 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        embeddings = get_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key)
+        embeddings = get_embeddings(texts, engine=engine, api_key=self.openai_api_key,
+                                    api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                    api_base=self.openai_api_base)
         return embeddings
 
     async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Asynchronously get text embeddings."""
+        if self.openai_api_type and self.openai_api_type == 'azure':
+            embeddings = []
+            for text in texts:
+                embeddings.append(await self._aget_text_embedding(text))
+
+            return embeddings
+
         if self.deployment_name is not None:
             engine = self.deployment_name
         else:
@@ -172,5 +208,7 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        embeddings = await aget_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key)
+        embeddings = await aget_embeddings(texts, engine=engine, api_key=self.openai_api_key,
+                                           api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                           api_base=self.openai_api_base)
         return embeddings
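With api_key plus **kwargs now forwarded straight to openai.Embedding.create, the same helper can reach an Azure deployment. A hedged sketch, with placeholder endpoint and key values:

    # Calling the reworked get_embedding against Azure; all values are placeholders.
    embedding = get_embedding(
        "hello world",
        engine="text-embedding-ada-002",   # deployment name on Azure
        api_key="<azure-api-key>",
        api_type="azure",
        api_version="2023-03-15-preview",
        api_base="https://<resource>.openai.azure.com",
    )
    print(len(embedding))  # 1536 dimensions for text-embedding-ada-002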
@@ -33,8 +33,11 @@ class IndexBuilder:
             max_chunk_overlap=20
         )
 
+        provider = LLMBuilder.get_default_provider(tenant_id)
+
         model_credentials = LLMBuilder.get_model_credentials(
             tenant_id=tenant_id,
+            model_provider=provider,
             model_name='text-embedding-ada-002'
         )
 
@@ -43,3 +46,15 @@ class IndexBuilder:
             prompt_helper=prompt_helper,
             embed_model=OpenAIEmbedding(**model_credentials),
         )
 
+    @classmethod
+    def get_fake_llm_service_context(cls, tenant_id: str) -> ServiceContext:
+        llm = LLMBuilder.to_llm(
+            tenant_id=tenant_id,
+            model_name='fake'
+        )
+
+        return ServiceContext.from_defaults(
+            llm_predictor=LLMPredictor(llm=llm),
+            embed_model=OpenAIEmbedding()
+        )
68  api/core/index/spiltter/fixed_text_splitter.py  Normal file
@@ -0,0 +1,68 @@
"""Functionality for splitting text."""
from __future__ import annotations

from typing import (
    Any,
    List,
    Optional,
)

from langchain.text_splitter import RecursiveCharacterTextSplitter


class FixedRecursiveCharacterTextSplitter(RecursiveCharacterTextSplitter):
    def __init__(self, fixed_separator: str = "\n\n", separators: Optional[List[str]] = None, **kwargs: Any):
        """Create a new TextSplitter."""
        super().__init__(**kwargs)
        self._fixed_separator = fixed_separator
        self._separators = separators or ["\n\n", "\n", " ", ""]

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        if self._fixed_separator:
            chunks = text.split(self._fixed_separator)
        else:
            chunks = list(text)

        final_chunks = []
        for chunk in chunks:
            if self._length_function(chunk) > self._chunk_size:
                final_chunks.extend(self.recursive_split_text(chunk))
            else:
                final_chunks.append(chunk)

        return final_chunks

    def recursive_split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        final_chunks = []
        # Get appropriate separator to use
        separator = self._separators[-1]
        for _s in self._separators:
            if _s == "":
                separator = _s
                break
            if _s in text:
                separator = _s
                break
        # Now that we have the separator, split the text
        if separator:
            splits = text.split(separator)
        else:
            splits = list(text)
        # Now go merging things, recursively splitting longer texts.
        _good_splits = []
        for s in splits:
            if self._length_function(s) < self._chunk_size:
                _good_splits.append(s)
            else:
                if _good_splits:
                    merged_text = self._merge_splits(_good_splits, separator)
                    final_chunks.extend(merged_text)
                    _good_splits = []
                other_info = self.recursive_split_text(s)
                final_chunks.extend(other_info)
        if _good_splits:
            merged_text = self._merge_splits(_good_splits, separator)
            final_chunks.extend(merged_text)
        return final_chunks
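A small usage sketch of the new splitter: the fixed separator makes the first cut, and any chunk still longer than chunk_size falls back to the recursive separators. The chunk_size here is deliberately tiny so the fallback path is exercised:

    from core.index.spiltter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter

    splitter = FixedRecursiveCharacterTextSplitter(
        fixed_separator="\n\n",
        separators=["\n\n", "。", ".", " ", ""],
        chunk_size=20,
        chunk_overlap=0,
    )
    text = "First paragraph.\n\nA much longer second paragraph that will not fit in one chunk."
    for chunk in splitter.split_text(text):
        print(repr(chunk))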
@@ -83,7 +83,7 @@ class VectorIndex:
         if not self._dataset.index_struct_dict:
             return
 
-        service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id)
+        service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id)
 
         index = vector_store.get_index(
             service_context=service_context,
@@ -101,7 +101,7 @@ class VectorIndex:
         if not self._dataset.index_struct_dict:
             return
 
-        service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id)
+        service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id)
 
         index = vector_store.get_index(
             service_context=service_context,
@@ -18,6 +18,7 @@ from core.docstore.dataset_docstore import DatesetDocumentStore
 from core.index.keyword_table_index import KeywordTableIndex
 from core.index.readers.html_parser import HTMLParser
 from core.index.readers.pdf_parser import PDFParser
+from core.index.spiltter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter
 from core.index.vector_index import VectorIndex
 from core.llm.token_calculator import TokenCalculator
 from extensions.ext_database import db
@@ -267,16 +268,14 @@ class IndexingRunner:
                 raise ValueError("Custom segment length should be between 50 and 1000.")
 
             separator = segmentation["separator"]
-            if not separator:
-                separators = ["\n\n", "。", ".", " ", ""]
-            else:
+            if separator:
                 separator = separator.replace('\\n', '\n')
-                separators = [separator, ""]
 
-            character_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
+            character_splitter = FixedRecursiveCharacterTextSplitter.from_tiktoken_encoder(
                 chunk_size=segmentation["max_tokens"],
                 chunk_overlap=0,
-                separators=separators
+                fixed_separator=separator,
+                separators=["\n\n", "。", ".", " ", ""]
             )
         else:
             # Automatic segmentation
@@ -344,7 +343,7 @@ class IndexingRunner:
 
         # parse document to nodes
         nodes = node_parser.get_nodes_from_documents([text_doc])
-
+        nodes = [node for node in nodes if node.text is not None and node.text.strip()]
         all_nodes.extend(nodes)
 
         return all_nodes
@@ -4,9 +4,14 @@ from langchain.callbacks import CallbackManager
 from langchain.llms.fake import FakeListLLM
 
 from core.constant import llm_constant
+from core.llm.error import ProviderTokenNotInitError
+from core.llm.provider.base import BaseProvider
 from core.llm.provider.llm_provider_service import LLMProviderService
+from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI
+from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI
 from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
 from core.llm.streamable_open_ai import StreamableOpenAI
+from models.provider import ProviderType
 
 
 class LLMBuilder:
@@ -31,16 +36,23 @@ class LLMBuilder:
         if model_name == 'fake':
             return FakeListLLM(responses=[])
 
+        provider = cls.get_default_provider(tenant_id)
+
         mode = cls.get_mode_by_model(model_name)
         if mode == 'chat':
-            # llm_cls = StreamableAzureChatOpenAI
-            llm_cls = StreamableChatOpenAI
+            if provider == 'openai':
+                llm_cls = StreamableChatOpenAI
+            else:
+                llm_cls = StreamableAzureChatOpenAI
         elif mode == 'completion':
-            llm_cls = StreamableOpenAI
+            if provider == 'openai':
+                llm_cls = StreamableOpenAI
+            else:
+                llm_cls = StreamableAzureOpenAI
         else:
             raise ValueError(f"model name {model_name} is not supported.")
 
-        model_credentials = cls.get_model_credentials(tenant_id, model_name)
+        model_credentials = cls.get_model_credentials(tenant_id, provider, model_name)
 
         return llm_cls(
             model_name=model_name,
@@ -86,18 +98,31 @@ class LLMBuilder:
             raise ValueError(f"model name {model_name} is not supported.")
 
     @classmethod
-    def get_model_credentials(cls, tenant_id: str, model_name: str) -> dict:
+    def get_model_credentials(cls, tenant_id: str, model_provider: str, model_name: str) -> dict:
         """
         Returns the API credentials for the given tenant_id and model_name, based on the model's provider.
        Raises an exception if the model_name is not found or if the provider is not found.
         """
         if not model_name:
             raise Exception('model name not found')
+        #
+        # if model_name not in llm_constant.models:
+        #     raise Exception('model {} not found'.format(model_name))
 
-        if model_name not in llm_constant.models:
-            raise Exception('model {} not found'.format(model_name))
-
-        model_provider = llm_constant.models[model_name]
+        # model_provider = llm_constant.models[model_name]
 
         provider_service = LLMProviderService(tenant_id=tenant_id, provider_name=model_provider)
         return provider_service.get_credentials(model_name)
+
+    @classmethod
+    def get_default_provider(cls, tenant_id: str) -> str:
+        provider = BaseProvider.get_valid_provider(tenant_id)
+        if not provider:
+            raise ProviderTokenNotInitError()
+
+        if provider.provider_type == ProviderType.SYSTEM.value:
+            provider_name = 'openai'
+        else:
+            provider_name = provider.provider_name
+
+        return provider_name
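The provider dispatch introduced in to_llm reduces to a simple table. A sketch for illustration only (names mirror the diff; this helper is not part of it):

    # Hypothetical flattening of the provider/mode -> LLM class selection above.
    def pick_llm_class(provider: str, mode: str) -> str:
        if mode == 'chat':
            return 'StreamableChatOpenAI' if provider == 'openai' else 'StreamableAzureChatOpenAI'
        elif mode == 'completion':
            return 'StreamableOpenAI' if provider == 'openai' else 'StreamableAzureOpenAI'
        raise ValueError(f"mode {mode} is not supported.")

    assert pick_llm_class('openai', 'chat') == 'StreamableChatOpenAI'
    assert pick_llm_class('azure_openai', 'completion') == 'StreamableAzureOpenAI'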
@@ -1,22 +1,24 @@
 import json
+import logging
 from typing import Optional, Union
 
 import requests
 
 from core.llm.provider.base import BaseProvider
+from core.llm.provider.errors import ValidateFailedError
 from models.provider import ProviderName
 
 
 class AzureProvider(BaseProvider):
-    def get_models(self, model_id: Optional[str] = None) -> list[dict]:
-        credentials = self.get_credentials(model_id)
+    def get_models(self, model_id: Optional[str] = None, credentials: Optional[dict] = None) -> list[dict]:
+        credentials = self.get_credentials(model_id) if not credentials else credentials
         url = "{}/openai/deployments?api-version={}".format(
-            credentials.get('openai_api_base'),
-            credentials.get('openai_api_version')
+            str(credentials.get('openai_api_base')),
+            str(credentials.get('openai_api_version'))
         )
 
         headers = {
-            "api-key": credentials.get('openai_api_key'),
+            "api-key": str(credentials.get('openai_api_key')),
             "content-type": "application/json; charset=utf-8"
         }
 
@@ -29,17 +31,18 @@ class AzureProvider(BaseProvider):
                 'name': '{} ({})'.format(deployment['id'], deployment['model'])
             } for deployment in result['data'] if deployment['status'] == 'succeeded']
         else:
-            # TODO: optimize in future
-            raise Exception('Failed to get deployments from Azure OpenAI. Status code: {}'.format(response.status_code))
+            if response.status_code == 401:
+                raise AzureAuthenticationError()
+            else:
+                raise AzureRequestFailedError('Failed to request Azure OpenAI. Status code: {}'.format(response.status_code))
 
     def get_credentials(self, model_id: Optional[str] = None) -> dict:
         """
         Returns the API credentials for Azure OpenAI as a dictionary.
         """
-        encrypted_config = self.get_provider_api_key(model_id=model_id)
-        config = json.loads(encrypted_config)
+        config = self.get_provider_api_key(model_id=model_id)
         config['openai_api_type'] = 'azure'
-        config['deployment_name'] = model_id
+        config['deployment_name'] = model_id.replace('.', '') if model_id else None
         return config
 
     def get_provider_name(self):
@@ -51,12 +54,11 @@ class AzureProvider(BaseProvider):
         """
         try:
             config = self.get_provider_api_key()
-            config = json.loads(config)
         except:
             config = {
                 'openai_api_type': 'azure',
                 'openai_api_version': '2023-03-15-preview',
-                'openai_api_base': 'https://foo.microsoft.com/bar',
+                'openai_api_base': '',
                 'openai_api_key': ''
             }
 
@@ -65,7 +67,7 @@ class AzureProvider(BaseProvider):
             config = {
                 'openai_api_type': 'azure',
                 'openai_api_version': '2023-03-15-preview',
-                'openai_api_base': 'https://foo.microsoft.com/bar',
+                'openai_api_base': '',
                 'openai_api_key': ''
             }
 
@@ -76,14 +78,47 @@ class AzureProvider(BaseProvider):
 
     def get_token_type(self):
-        # TODO: change to dict when implemented
-        return lambda value: value
+        return dict
 
     def config_validate(self, config: Union[dict | str]):
         """
         Validates the given config.
         """
-        # TODO: implement
-        pass
+        try:
+            if not isinstance(config, dict):
+                raise ValueError('Config must be a object.')
+
+            if 'openai_api_version' not in config:
+                config['openai_api_version'] = '2023-03-15-preview'
+
+            models = self.get_models(credentials=config)
+
+            if not models:
+                raise ValidateFailedError("Please add deployments for 'text-davinci-003', "
+                                          "'gpt-3.5-turbo', 'text-embedding-ada-002'.")
+
+            fixed_model_ids = [
+                'text-davinci-003',
+                'gpt-35-turbo',
+                'text-embedding-ada-002'
+            ]
+
+            current_model_ids = [model['id'] for model in models]
+
+            missing_model_ids = [fixed_model_id for fixed_model_id in fixed_model_ids if
+                                 fixed_model_id not in current_model_ids]
+
+            if missing_model_ids:
+                raise ValidateFailedError("Please add deployments for '{}'.".format(", ".join(missing_model_ids)))
+        except AzureAuthenticationError:
+            raise ValidateFailedError('Validation failed, please check your API Key.')
+        except (requests.ConnectionError, requests.RequestException):
+            raise ValidateFailedError('Validation failed, please check your API Base Endpoint.')
+        except AzureRequestFailedError as ex:
+            raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex)))
+        except Exception as ex:
+            logging.exception('Azure OpenAI Credentials validation failed')
+            raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex)))
 
     def get_encrypted_token(self, config: Union[dict | str]):
         """
@@ -103,3 +138,11 @@ class AzureProvider(BaseProvider):
         config = json.loads(token)
         config['openai_api_key'] = self.decrypt_token(config['openai_api_key'])
         return config
+
+
+class AzureAuthenticationError(Exception):
+    pass
+
+
+class AzureRequestFailedError(Exception):
+    pass
@@ -14,7 +14,7 @@ class BaseProvider(ABC):
     def __init__(self, tenant_id: str):
         self.tenant_id = tenant_id
 
-    def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> str:
+    def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> Union[str | dict]:
         """
         Returns the decrypted API key for the given tenant_id and provider_name.
         If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError.
@@ -43,23 +43,35 @@ class BaseProvider(ABC):
         Returns the Provider instance for the given tenant_id and provider_name.
         If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
         """
-        providers = db.session.query(Provider).filter(
-            Provider.tenant_id == self.tenant_id,
-            Provider.provider_name == self.get_provider_name().value
-        ).order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all()
+        return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, prefer_custom)
+
+    @classmethod
+    def get_valid_provider(cls, tenant_id: str, provider_name: str = None, prefer_custom: bool = False) -> Optional[Provider]:
+        """
+        Returns the Provider instance for the given tenant_id and provider_name.
+        If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
+        """
+        query = db.session.query(Provider).filter(
+            Provider.tenant_id == tenant_id
+        )
+
+        if provider_name:
+            query = query.filter(Provider.provider_name == provider_name)
+
+        providers = query.order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all()
 
         custom_provider = None
         system_provider = None
 
         for provider in providers:
-            if provider.provider_type == ProviderType.CUSTOM.value:
+            if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config:
                 custom_provider = provider
-            elif provider.provider_type == ProviderType.SYSTEM.value:
+            elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid:
                 system_provider = provider
 
-        if custom_provider and custom_provider.is_valid and custom_provider.encrypted_config:
+        if custom_provider:
             return custom_provider
-        elif system_provider and system_provider.is_valid:
+        elif system_provider:
             return system_provider
         else:
             return None
@@ -80,7 +92,7 @@ class BaseProvider(ABC):
         try:
             config = self.get_provider_api_key()
         except:
-            config = 'THIS-IS-A-MOCK-TOKEN'
+            config = ''
 
         if obfuscated:
            return self.obfuscated_token(config)
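The selection rule in get_valid_provider (a valid CUSTOM provider with credentials beats a valid SYSTEM one) can be sketched independently of the ORM. Note this mock keeps the first match while the real loop keeps the last; it is illustration only:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class MockProvider:
        provider_type: str
        is_valid: bool
        encrypted_config: Optional[str] = None

    def pick(providers):
        custom = next((p for p in providers
                       if p.provider_type == 'custom' and p.is_valid and p.encrypted_config), None)
        system = next((p for p in providers
                       if p.provider_type == 'system' and p.is_valid), None)
        return custom or system

    assert pick([MockProvider('system', True), MockProvider('custom', True, 'cfg')]).provider_type == 'custom'
    assert pick([MockProvider('custom', False, 'cfg'), MockProvider('system', True)]).provider_type == 'system'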
@@ -1,12 +1,50 @@
-import requests
 from langchain.schema import BaseMessage, ChatResult, LLMResult
 from langchain.chat_models import AzureChatOpenAI
-from typing import Optional, List
+from typing import Optional, List, Dict, Any
+
+from pydantic import root_validator
 
 from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
 
 
 class StreamableAzureChatOpenAI(AzureChatOpenAI):
+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that api key and python package exists in environment."""
+        try:
+            import openai
+        except ImportError:
+            raise ValueError(
+                "Could not import openai python package. "
+                "Please install it with `pip install openai`."
+            )
+        try:
+            values["client"] = openai.ChatCompletion
+        except AttributeError:
+            raise ValueError(
+                "`openai` has no `ChatCompletion` attribute, this is likely "
+                "due to an old version of the openai package. Try upgrading it "
+                "with `pip install --upgrade openai`."
+            )
+        if values["n"] < 1:
+            raise ValueError("n must be at least 1.")
+        if values["n"] > 1 and values["streaming"]:
+            raise ValueError("n must be 1 when streaming.")
+        return values
+
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        """Get the default parameters for calling OpenAI API."""
+        return {
+            **super()._default_params,
+            "engine": self.deployment_name,
+            "api_type": self.openai_api_type,
+            "api_base": self.openai_api_base,
+            "api_version": self.openai_api_version,
+            "api_key": self.openai_api_key,
+            "organization": self.openai_organization if self.openai_organization else None,
+        }
+
     def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
         """Get the number of tokens in a list of messages.
64  api/core/llm/streamable_azure_open_ai.py  Normal file
@@ -0,0 +1,64 @@
|
||||
import os
|
||||
|
||||
from langchain.llms import AzureOpenAI
|
||||
from langchain.schema import LLMResult
|
||||
from typing import Optional, List, Dict, Mapping, Any
|
||||
|
||||
from pydantic import root_validator
|
||||
|
||||
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
|
||||
|
||||
|
||||
class StreamableAzureOpenAI(AzureOpenAI):
|
||||
openai_api_type: str = "azure"
|
||||
openai_api_version: str = ""
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
try:
|
||||
import openai
|
||||
|
||||
values["client"] = openai.Completion
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please install it with `pip install openai`."
|
||||
)
|
||||
if values["streaming"] and values["n"] > 1:
|
||||
raise ValueError("Cannot stream results when n > 1.")
|
||||
if values["streaming"] and values["best_of"] > 1:
|
||||
raise ValueError("Cannot stream results when best_of > 1.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _invocation_params(self) -> Dict[str, Any]:
|
||||
return {**super()._invocation_params, **{
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
@property
|
||||
def _identifying_params(self) -> Mapping[str, Any]:
|
||||
return {**super()._identifying_params, **{
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
@handle_llm_exceptions
|
||||
def generate(
|
||||
self, prompts: List[str], stop: Optional[List[str]] = None
|
||||
) -> LLMResult:
|
||||
return super().generate(prompts, stop)
|
||||
|
||||
@handle_llm_exceptions_async
|
||||
async def agenerate(
|
||||
self, prompts: List[str], stop: Optional[List[str]] = None
|
||||
) -> LLMResult:
|
||||
return await super().agenerate(prompts, stop)
|
||||
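Note: for orientation, a minimal usage sketch of the new wrapper, assuming the LangChain AzureOpenAI constructor fields shown in the file above; the deployment name, endpoint, key, and API version below are placeholders, not values from this commit.

    from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI

    # Illustrative values only; the subclass keeps the base-class interface
    # and adds Azure routing params plus the error-handling decorators.
    llm = StreamableAzureOpenAI(
        deployment_name="my-deployment",              # hypothetical Azure deployment
        openai_api_base="https://example.openai.azure.com",
        openai_api_key="azure-key-placeholder",
        openai_api_version="2023-03-15-preview",      # assumed API version
        streaming=True,
        n=1,  # the validator rejects n > 1 when streaming
    )

    result = llm.generate(["Say hello."])  # wrapped by @handle_llm_exceptions
    print(result.generations[0][0].text)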
@@ -1,12 +1,52 @@
 import os

 from langchain.schema import BaseMessage, ChatResult, LLMResult
 from langchain.chat_models import ChatOpenAI
-from typing import Optional, List
+from typing import Optional, List, Dict, Any

 from pydantic import root_validator

 from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async


 class StreamableChatOpenAI(ChatOpenAI):

+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that api key and python package exists in environment."""
+        try:
+            import openai
+        except ImportError:
+            raise ValueError(
+                "Could not import openai python package. "
+                "Please install it with `pip install openai`."
+            )
+        try:
+            values["client"] = openai.ChatCompletion
+        except AttributeError:
+            raise ValueError(
+                "`openai` has no `ChatCompletion` attribute, this is likely "
+                "due to an old version of the openai package. Try upgrading it "
+                "with `pip install --upgrade openai`."
+            )
+        if values["n"] < 1:
+            raise ValueError("n must be at least 1.")
+        if values["n"] > 1 and values["streaming"]:
+            raise ValueError("n must be 1 when streaming.")
+        return values
+
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        """Get the default parameters for calling OpenAI API."""
+        return {
+            **super()._default_params,
+            "api_type": 'openai',
+            "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
+            "api_version": None,
+            "api_key": self.openai_api_key,
+            "organization": self.openai_organization if self.openai_organization else None,
+        }
+
     def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
         """Get the number of tokens in a list of messages.
@@ -1,12 +1,54 @@
 import os

 from langchain.schema import LLMResult
-from typing import Optional, List
+from typing import Optional, List, Dict, Any, Mapping
 from langchain import OpenAI
 from pydantic import root_validator

 from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async


 class StreamableOpenAI(OpenAI):

+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that api key and python package exists in environment."""
+        try:
+            import openai
+
+            values["client"] = openai.Completion
+        except ImportError:
+            raise ValueError(
+                "Could not import openai python package. "
+                "Please install it with `pip install openai`."
+            )
+        if values["streaming"] and values["n"] > 1:
+            raise ValueError("Cannot stream results when n > 1.")
+        if values["streaming"] and values["best_of"] > 1:
+            raise ValueError("Cannot stream results when best_of > 1.")
+        return values
+
+    @property
+    def _invocation_params(self) -> Dict[str, Any]:
+        return {**super()._invocation_params, **{
+            "api_type": 'openai',
+            "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
+            "api_version": None,
+            "api_key": self.openai_api_key,
+            "organization": self.openai_organization if self.openai_organization else None,
+        }}
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        return {**super()._identifying_params, **{
+            "api_type": 'openai',
+            "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
+            "api_version": None,
+            "api_key": self.openai_api_key,
+            "organization": self.openai_organization if self.openai_organization else None,
+        }}
+
+
     @handle_llm_exceptions
     def generate(
             self, prompts: List[str], stop: Optional[List[str]] = None
@@ -32,6 +32,6 @@ class PromptBuilder:

     @classmethod
     def process_template(cls, template: str):
-        processed_template = re.sub(r'\{(.+?)\}', r'\1', template)
-        processed_template = re.sub(r'\{\{(.+?)\}\}', r'{\1}', processed_template)
+        processed_template = re.sub(r'\{([a-zA-Z_]\w+?)\}', r'\1', template)
+        processed_template = re.sub(r'\{\{([a-zA-Z_]\w+?)\}\}', r'{\1}', processed_template)
         return processed_template
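Note: the regex change above narrows placeholder matching to identifier-like names, so literal braces in a prompt (for example a JSON example) survive processing. A quick check, runnable as-is; the template string is illustrative:

    import re

    template = 'Answer in JSON like {"lang": "en"}; the user variable is {{name}}.'

    # Old behaviour: any `{...}` is unwrapped, mangling literal JSON braces.
    old = re.sub(r'\{(.+?)\}', r'\1', template)
    old = re.sub(r'\{\{(.+?)\}\}', r'{\1}', old)

    # New behaviour: only identifier-like names are treated as placeholders.
    new = re.sub(r'\{([a-zA-Z_]\w+?)\}', r'\1', template)
    new = re.sub(r'\{\{([a-zA-Z_]\w+?)\}\}', r'{\1}', new)

    print(old)  # Answer in JSON like "lang": "en"; the user variable is {name}.
    print(new)  # Answer in JSON like {"lang": "en"}; the user variable is {name}.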
@@ -10,24 +10,14 @@ from core.index.keyword_table_index import KeywordTableIndex
 from core.index.vector_index import VectorIndex
 from core.prompt.prompts import QUERY_KEYWORD_EXTRACT_TEMPLATE
 from core.tool.llama_index_tool import EnhanceLlamaIndexTool
-from extensions.ext_database import db
 from models.dataset import Dataset


 class DatasetToolBuilder:
     @classmethod
-    def build_dataset_tool(cls, tenant_id: str, dataset_id: str,
+    def build_dataset_tool(cls, dataset: Dataset,
                            response_mode: str = "no_synthesizer",
                            callback_handler: Optional[DatasetToolCallbackHandler] = None):
-        # get dataset from dataset id
-        dataset = db.session.query(Dataset).filter(
-            Dataset.tenant_id == tenant_id,
-            Dataset.id == dataset_id
-        ).first()
-
-        if not dataset:
-            return None
-
         if dataset.indexing_technique == "economy":
             # use keyword table query
             index = KeywordTableIndex(dataset=dataset).query_index
@@ -65,7 +55,7 @@ class DatasetToolBuilder:

         index_tool_config = IndexToolConfig(
             index=index,
-            name=f"dataset-{dataset_id}",
+            name=f"dataset-{dataset.id}",
             description=description,
             index_query_kwargs=query_kwargs,
             tool_kwargs={
@@ -75,7 +65,7 @@ class DatasetToolBuilder:
             # return_direct: Whether to return LLM results directly or process the output data with an Output Parser
         )

-        index_callback_handler = DatasetIndexToolCallbackHandler(dataset_id=dataset_id)
+        index_callback_handler = DatasetIndexToolCallbackHandler(dataset_id=dataset.id)

         return EnhanceLlamaIndexTool.from_tool_config(
             tool_config=index_tool_config,
|
||||
return weaviate.Client(
|
||||
url=endpoint,
|
||||
auth_client_secret=auth_config,
|
||||
timeout_config=(5, 15),
|
||||
timeout_config=(5, 60),
|
||||
startup_period=None
|
||||
)
|
||||
|
||||
|
||||
@@ -15,9 +15,24 @@ def init_app(app: Flask) -> Celery:
         backend=app.config["CELERY_BACKEND"],
         task_ignore_result=True,
     )

+    # Add SSL options to the Celery configuration
+    ssl_options = {
+        "ssl_cert_reqs": None,
+        "ssl_ca_certs": None,
+        "ssl_certfile": None,
+        "ssl_keyfile": None,
+    }
+
     celery_app.conf.update(
         result_backend=app.config["CELERY_RESULT_BACKEND"],
     )

+    if app.config["BROKER_USE_SSL"]:
+        celery_app.conf.update(
+            broker_use_ssl=ssl_options,  # Add the SSL options to the broker configuration
+        )
+
     celery_app.set_default()
     app.extensions["celery"] = celery_app
     return celery_app
@@ -1,18 +1,23 @@
 import redis
+from redis.connection import SSLConnection, Connection

 redis_client = redis.Redis()


 def init_app(app):
+    connection_class = Connection
+    if app.config.get('REDIS_USE_SSL', False):
+        connection_class = SSLConnection
+
     redis_client.connection_pool = redis.ConnectionPool(**{
         'host': app.config.get('REDIS_HOST', 'localhost'),
         'port': app.config.get('REDIS_PORT', 6379),
         'username': app.config.get('REDIS_USERNAME', None),
         'password': app.config.get('REDIS_PASSWORD', None),
         'db': app.config.get('REDIS_DB', 0),
         'encoding': 'utf-8',
         'encoding_errors': 'strict',
         'decode_responses': False
-    })
+    }, connection_class=connection_class)

     app.extensions['redis'] = redis_client
@@ -1,4 +1,5 @@
 import redis
+from redis.connection import SSLConnection, Connection
 from flask import request
 from flask_session import Session, SqlAlchemySessionInterface, RedisSessionInterface
 from flask_session.sessions import total_seconds
@@ -23,16 +24,21 @@ def init_app(app):
     if session_type == 'sqlalchemy':
         app.session_interface = sqlalchemy_session_interface
     elif session_type == 'redis':
+        connection_class = Connection
+        if app.config.get('SESSION_REDIS_USE_SSL', False):
+            connection_class = SSLConnection
+
         sess_redis_client = redis.Redis()
         sess_redis_client.connection_pool = redis.ConnectionPool(**{
             'host': app.config.get('SESSION_REDIS_HOST', 'localhost'),
             'port': app.config.get('SESSION_REDIS_PORT', 6379),
             'username': app.config.get('SESSION_REDIS_USERNAME', None),
             'password': app.config.get('SESSION_REDIS_PASSWORD', None),
             'db': app.config.get('SESSION_REDIS_DB', 2),
             'encoding': 'utf-8',
             'encoding_errors': 'strict',
             'decode_responses': False
-        })
+        }, connection_class=connection_class)

         app.extensions['session_redis'] = sess_redis_client
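Note: with both pools above, TLS is now a plain configuration flag. A hedged config sketch; the hostnames and port are illustrative, only the config keys come from this diff:

    from flask import Flask

    app = Flask(__name__)
    app.config.update(
        REDIS_HOST='redis.internal.example',
        REDIS_PORT=6380,
        REDIS_USE_SSL=True,             # selects redis.connection.SSLConnection
        SESSION_REDIS_HOST='redis.internal.example',
        SESSION_REDIS_PORT=6380,
        SESSION_REDIS_USE_SSL=True,     # same switch for the session pool
    )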
@@ -21,7 +21,7 @@ class TimestampField(fields.Raw):

 def email(email):
     # Define a regex pattern for email addresses
-    pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
+    pattern = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$"
     # Check if the email matches the pattern
     if re.match(pattern, email) is not None:
         return email
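Note: a quick comparison of the two patterns, runnable as-is. The new pattern keeps multi-label domains but rejects one-letter TLDs and trailing dots, both of which the old pattern accepted:

    import re

    OLD = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
    NEW = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$"

    for addr in ("user@example.com", "user@mail.example.com",
                 "user@example.c", "user@example.com."):
        print(addr, bool(re.match(OLD, addr)), bool(re.match(NEW, addr)))
    # user@example.com       True  True
    # user@mail.example.com  True  True
    # user@example.c         True  False   (one-letter TLD now rejected)
    # user@example.com.      True  False   (trailing dot now rejected)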
@@ -18,6 +18,8 @@ depends_on = None

 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
+    op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";')
+
     op.create_table('account_integrates',
         sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
         sa.Column('account_id', postgresql.UUID(), nullable=False),
@@ -790,4 +792,6 @@ def downgrade():

     op.drop_table('accounts')
     op.drop_table('account_integrates')
+
+    op.execute('DROP EXTENSION IF EXISTS "uuid-ossp";')
     # ### end Alembic commands ###
api/migrations/versions/9f4e3427ea84_add_created_by_role.py (Normal file, 46 lines)
@@ -0,0 +1,46 @@
"""add created by role

Revision ID: 9f4e3427ea84
Revises: 64b051264f32
Create Date: 2023-05-17 17:29:01.060435

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '9f4e3427ea84'
down_revision = '64b051264f32'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('pinned_conversations', schema=None) as batch_op:
        batch_op.add_column(sa.Column('created_by_role', sa.String(length=255), server_default=sa.text("'end_user'::character varying"), nullable=False))
        batch_op.drop_index('pinned_conversation_conversation_idx')
        batch_op.create_index('pinned_conversation_conversation_idx', ['app_id', 'conversation_id', 'created_by_role', 'created_by'], unique=False)

    with op.batch_alter_table('saved_messages', schema=None) as batch_op:
        batch_op.add_column(sa.Column('created_by_role', sa.String(length=255), server_default=sa.text("'end_user'::character varying"), nullable=False))
        batch_op.drop_index('saved_message_message_idx')
        batch_op.create_index('saved_message_message_idx', ['app_id', 'message_id', 'created_by_role', 'created_by'], unique=False)

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('saved_messages', schema=None) as batch_op:
        batch_op.drop_index('saved_message_message_idx')
        batch_op.create_index('saved_message_message_idx', ['app_id', 'message_id', 'created_by'], unique=False)
        batch_op.drop_column('created_by_role')

    with op.batch_alter_table('pinned_conversations', schema=None) as batch_op:
        batch_op.drop_index('pinned_conversation_conversation_idx')
        batch_op.create_index('pinned_conversation_conversation_idx', ['app_id', 'conversation_id', 'created_by'], unique=False)
        batch_op.drop_column('created_by_role')

    # ### end Alembic commands ###
@@ -0,0 +1,36 @@
"""add language to recommend apps

Revision ID: a45f4dfde53b
Revises: 9f4e3427ea84
Create Date: 2023-05-25 17:50:32.052335

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = 'a45f4dfde53b'
down_revision = '9f4e3427ea84'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
        batch_op.add_column(sa.Column('language', sa.String(length=255), server_default=sa.text("'en-US'::character varying"), nullable=False))
        batch_op.drop_index('recommended_app_is_listed_idx')
        batch_op.create_index('recommended_app_is_listed_idx', ['is_listed', 'language'], unique=False)

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
        batch_op.drop_index('recommended_app_is_listed_idx')
        batch_op.create_index('recommended_app_is_listed_idx', ['is_listed'], unique=False)
        batch_op.drop_column('language')

    # ### end Alembic commands ###
@@ -1,6 +1,6 @@
 import json

-from flask import current_app
+from flask import current_app, request
 from flask_login import UserMixin
 from sqlalchemy.dialects.postgresql import UUID

@@ -56,7 +56,7 @@ class App(db.Model):

     @property
     def api_base_url(self):
-        return current_app.config['API_URL'] + '/v1'
+        return (current_app.config['API_URL'] if current_app.config['API_URL'] else request.host_url.rstrip('/')) + '/v1'

     @property
     def tenant(self):
@@ -123,7 +123,7 @@ class RecommendedApp(db.Model):
     __table_args__ = (
         db.PrimaryKeyConstraint('id', name='recommended_app_pkey'),
         db.Index('recommended_app_app_id_idx', 'app_id'),
-        db.Index('recommended_app_is_listed_idx', 'is_listed')
+        db.Index('recommended_app_is_listed_idx', 'is_listed', 'language')
     )

     id = db.Column(UUID, primary_key=True, server_default=db.text('uuid_generate_v4()'))
@@ -135,6 +135,7 @@ class RecommendedApp(db.Model):
     position = db.Column(db.Integer, nullable=False, default=0)
     is_listed = db.Column(db.Boolean, nullable=False, default=True)
     install_count = db.Column(db.Integer, nullable=False, default=0)
+    language = db.Column(db.String(255), nullable=False, server_default=db.text("'en-US'::character varying"))
     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
     updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))

@@ -143,17 +144,6 @@ class RecommendedApp(db.Model):
         app = db.session.query(App).filter(App.id == self.app_id).first()
         return app

-    # def set_description(self, lang, desc):
-    #     if self.description is None:
-    #         self.description = {}
-    #     self.description[lang] = desc
-
-    def get_description(self, lang):
-        if self.description and lang in self.description:
-            return self.description[lang]
-        else:
-            return self.description.get('en')
-

 class InstalledApp(db.Model):
     __tablename__ = 'installed_apps'
@@ -505,7 +495,7 @@ class Site(db.Model):

     @property
     def app_base_url(self):
-        return current_app.config['APP_URL']
+        return (current_app.config['APP_URL'] if current_app.config['APP_URL'] else request.host_url.rstrip('/'))


 class ApiToken(db.Model):
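Note: a small, self-contained demonstration of the URL fallback introduced above. With API_URL left empty (as in the new docker-compose defaults), the request's own host is used instead of a fixed localhost, so self-hosted deployments work behind any domain. The route and domain are illustrative:

    from flask import Flask, request

    app = Flask(__name__)
    app.config['API_URL'] = ''  # unset, as in the new docker-compose defaults

    @app.route('/probe')
    def probe():
        # Same fallback expression as App.api_base_url in the diff above.
        return (app.config['API_URL'] if app.config['API_URL'] else request.host_url.rstrip('/')) + '/v1'

    with app.test_client() as c:
        print(c.get('/probe', base_url='https://dify.example.com').get_data(as_text=True))
        # -> https://dify.example.com/v1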
@@ -8,12 +8,13 @@ class SavedMessage(db.Model):
     __tablename__ = 'saved_messages'
     __table_args__ = (
         db.PrimaryKeyConstraint('id', name='saved_message_pkey'),
-        db.Index('saved_message_message_idx', 'app_id', 'message_id', 'created_by'),
+        db.Index('saved_message_message_idx', 'app_id', 'message_id', 'created_by_role', 'created_by'),
     )

     id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
     app_id = db.Column(UUID, nullable=False)
     message_id = db.Column(UUID, nullable=False)
+    created_by_role = db.Column(db.String(255), nullable=False, server_default=db.text("'end_user'::character varying"))
     created_by = db.Column(UUID, nullable=False)
     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))

@@ -26,11 +27,12 @@ class PinnedConversation(db.Model):
     __tablename__ = 'pinned_conversations'
     __table_args__ = (
         db.PrimaryKeyConstraint('id', name='pinned_conversation_pkey'),
-        db.Index('pinned_conversation_conversation_idx', 'app_id', 'conversation_id', 'created_by'),
+        db.Index('pinned_conversation_conversation_idx', 'app_id', 'conversation_id', 'created_by_role', 'created_by'),
     )

     id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
     app_id = db.Column(UUID, nullable=False)
     conversation_id = db.Column(UUID, nullable=False)
+    created_by_role = db.Column(db.String(255), nullable=False, server_default=db.text("'end_user'::character varying"))
     created_by = db.Column(UUID, nullable=False)
     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
@@ -18,6 +18,7 @@ from services.errors.account import NoPermissionError
 from services.errors.dataset import DatasetNameDuplicateError
 from services.errors.document import DocumentIndexingError
 from services.errors.file import FileNotExistsError
+from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task
 from tasks.document_indexing_task import document_indexing_task


@@ -97,7 +98,12 @@ class DatasetService:
     def update_dataset(dataset_id, data, user):
         dataset = DatasetService.get_dataset(dataset_id)
         DatasetService.check_dataset_permission(dataset, user)
-
+        if dataset.indexing_technique != data['indexing_technique']:
+            # if update indexing_technique
+            if data['indexing_technique'] == 'economy':
+                deal_dataset_vector_index_task.delay(dataset_id, 'remove')
+            elif data['indexing_technique'] == 'high_quality':
+                deal_dataset_vector_index_task.delay(dataset_id, 'add')
         filtered_data = {k: v for k, v in data.items() if v is not None or k == 'description'}

         filtered_data['updated_by'] = user.id
@@ -127,7 +127,7 @@ class MessageService:
             message_id=message_id
         )

-        feedback = message.user_feedback
+        feedback = message.user_feedback if isinstance(user, EndUser) else message.admin_feedback

         if not rating and feedback:
             db.session.delete(feedback)
@@ -62,6 +62,8 @@ class ProviderService:

     @staticmethod
     def validate_provider_configs(tenant, provider_name: ProviderName, configs: Union[dict | str]):
+        if current_app.config['DISABLE_PROVIDER_CONFIG_VALIDATION']:
+            return
         llm_provider_service = LLMProviderService(tenant.id, provider_name.value)
         return llm_provider_service.config_validate(configs)
@@ -1,7 +1,8 @@
-from typing import Optional
+from typing import Optional, Union

 from libs.infinite_scroll_pagination import InfiniteScrollPagination
 from extensions.ext_database import db
+from models.account import Account
 from models.model import App, EndUser
 from models.web import SavedMessage
 from services.message_service import MessageService
@@ -9,27 +10,29 @@ from services.message_service import MessageService

 class SavedMessageService:
     @classmethod
-    def pagination_by_last_id(cls, app_model: App, end_user: Optional[EndUser],
+    def pagination_by_last_id(cls, app_model: App, user: Optional[Union[Account | EndUser]],
                               last_id: Optional[str], limit: int) -> InfiniteScrollPagination:
         saved_messages = db.session.query(SavedMessage).filter(
             SavedMessage.app_id == app_model.id,
-            SavedMessage.created_by == end_user.id
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
+            SavedMessage.created_by == user.id
         ).order_by(SavedMessage.created_at.desc()).all()
         message_ids = [sm.message_id for sm in saved_messages]

         return MessageService.pagination_by_last_id(
             app_model=app_model,
-            user=end_user,
+            user=user,
             last_id=last_id,
             limit=limit,
             include_ids=message_ids
         )

     @classmethod
-    def save(cls, app_model: App, user: Optional[EndUser], message_id: str):
+    def save(cls, app_model: App, user: Optional[Union[Account | EndUser]], message_id: str):
         saved_message = db.session.query(SavedMessage).filter(
             SavedMessage.app_id == app_model.id,
             SavedMessage.message_id == message_id,
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
             SavedMessage.created_by == user.id
         ).first()

@@ -45,6 +48,7 @@ class SavedMessageService:
         saved_message = SavedMessage(
             app_id=app_model.id,
             message_id=message.id,
+            created_by_role='account' if isinstance(user, Account) else 'end_user',
             created_by=user.id
         )

@@ -52,10 +56,11 @@ class SavedMessageService:
         db.session.commit()

     @classmethod
-    def delete(cls, app_model: App, user: Optional[EndUser], message_id: str):
+    def delete(cls, app_model: App, user: Optional[Union[Account | EndUser]], message_id: str):
         saved_message = db.session.query(SavedMessage).filter(
             SavedMessage.app_id == app_model.id,
             SavedMessage.message_id == message_id,
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
             SavedMessage.created_by == user.id
         ).first()
||||
@@ -2,6 +2,7 @@ from typing import Optional, Union
|
||||
|
||||
from libs.infinite_scroll_pagination import InfiniteScrollPagination
|
||||
from extensions.ext_database import db
|
||||
from models.account import Account
|
||||
from models.model import App, EndUser
|
||||
from models.web import PinnedConversation
|
||||
from services.conversation_service import ConversationService
|
||||
@@ -9,14 +10,15 @@ from services.conversation_service import ConversationService
|
||||
|
||||
class WebConversationService:
|
||||
@classmethod
|
||||
def pagination_by_last_id(cls, app_model: App, end_user: Optional[EndUser],
|
||||
def pagination_by_last_id(cls, app_model: App, user: Optional[Union[Account | EndUser]],
|
||||
last_id: Optional[str], limit: int, pinned: Optional[bool] = None) -> InfiniteScrollPagination:
|
||||
include_ids = None
|
||||
exclude_ids = None
|
||||
if pinned is not None:
|
||||
pinned_conversations = db.session.query(PinnedConversation).filter(
|
||||
PinnedConversation.app_id == app_model.id,
|
||||
PinnedConversation.created_by == end_user.id
|
||||
PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
|
||||
PinnedConversation.created_by == user.id
|
||||
).order_by(PinnedConversation.created_at.desc()).all()
|
||||
pinned_conversation_ids = [pc.conversation_id for pc in pinned_conversations]
|
||||
if pinned:
|
||||
@@ -26,7 +28,7 @@ class WebConversationService:
|
||||
|
||||
return ConversationService.pagination_by_last_id(
|
||||
app_model=app_model,
|
||||
user=end_user,
|
||||
user=user,
|
||||
last_id=last_id,
|
||||
limit=limit,
|
||||
include_ids=include_ids,
|
||||
@@ -34,10 +36,11 @@ class WebConversationService:
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def pin(cls, app_model: App, conversation_id: str, user: Optional[EndUser]):
|
||||
def pin(cls, app_model: App, conversation_id: str, user: Optional[Union[Account | EndUser]]):
|
||||
pinned_conversation = db.session.query(PinnedConversation).filter(
|
||||
PinnedConversation.app_id == app_model.id,
|
||||
PinnedConversation.conversation_id == conversation_id,
|
||||
PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
|
||||
PinnedConversation.created_by == user.id
|
||||
).first()
|
||||
|
||||
@@ -53,6 +56,7 @@ class WebConversationService:
|
||||
pinned_conversation = PinnedConversation(
|
||||
app_id=app_model.id,
|
||||
conversation_id=conversation.id,
|
||||
created_by_role='account' if isinstance(user, Account) else 'end_user',
|
||||
created_by=user.id
|
||||
)
|
||||
|
||||
@@ -60,10 +64,11 @@ class WebConversationService:
|
||||
db.session.commit()
|
||||
|
||||
@classmethod
|
||||
def unpin(cls, app_model: App, conversation_id: str, user: Optional[EndUser]):
|
||||
def unpin(cls, app_model: App, conversation_id: str, user: Optional[Union[Account | EndUser]]):
|
||||
pinned_conversation = db.session.query(PinnedConversation).filter(
|
||||
PinnedConversation.app_id == app_model.id,
|
||||
PinnedConversation.conversation_id == conversation_id,
|
||||
PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
|
||||
PinnedConversation.created_by == user.id
|
||||
).first()
|
||||
|
||||
|
||||
api/tasks/deal_dataset_vector_index_task.py (Normal file, 75 lines)
@@ -0,0 +1,75 @@
import logging
import time

import click
from celery import shared_task
from llama_index.data_structs.node_v2 import DocumentRelationship, Node

from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from models.dataset import DocumentSegment, Document, Dataset


@shared_task
def deal_dataset_vector_index_task(dataset_id: str, action: str):
    """
    Async deal dataset from index
    :param dataset_id: dataset_id
    :param action: action
    Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
    """
    logging.info(click.style('Start deal dataset vector index: {}'.format(dataset_id), fg='green'))
    start_at = time.perf_counter()

    try:
        dataset = Dataset.query.filter_by(
            id=dataset_id
        ).first()
        if not dataset:
            raise Exception('Dataset not found')
        documents = Document.query.filter_by(dataset_id=dataset_id).all()
        if documents:
            vector_index = VectorIndex(dataset=dataset)
            for document in documents:
                # delete from vector index
                if action == "remove":
                    vector_index.del_doc(document.id)
                elif action == "add":
                    segments = db.session.query(DocumentSegment).filter(
                        DocumentSegment.document_id == document.id,
                        DocumentSegment.enabled == True
                    ).order_by(DocumentSegment.position.asc()).all()

                    nodes = []
                    previous_node = None
                    for segment in segments:
                        relationships = {
                            DocumentRelationship.SOURCE: document.id
                        }

                        if previous_node:
                            relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id

                            previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id

                        node = Node(
                            doc_id=segment.index_node_id,
                            doc_hash=segment.index_node_hash,
                            text=segment.content,
                            extra_info=None,
                            node_info=None,
                            relationships=relationships
                        )

                        previous_node = node
                        nodes.append(node)
                    # save vector index
                    vector_index.add_nodes(
                        nodes=nodes,
                        duplicate_check=True
                    )

        end_at = time.perf_counter()
        logging.info(
            click.style('Deal dataset vector index: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("Deal dataset vector index failed")
@@ -13,7 +13,6 @@ services:
       PGDATA: /var/lib/postgresql/data/pgdata
     volumes:
       - ./volumes/db/data:/var/lib/postgresql/data
-      - ./volumes/db/scripts:/docker-entrypoint-initdb.d/
     ports:
       - "5432:5432"
@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:latest
+    image: langgenius/dify-api:0.3.1
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -11,12 +11,18 @@ services:
       LOG_LEVEL: INFO
       # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
       SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
-      # The base URL of console application, refers to the Console base URL of WEB service.
-      CONSOLE_URL: http://localhost
-      # The URL for Service API endpoints, refers to the base URL of the current API service.
-      API_URL: http://localhost
-      # The URL for Web APP, refers to the Web App base URL of WEB service.
-      APP_URL: http://localhost
+      # The base URL of console application, refers to the Console base URL of WEB service if console domain is
+      # different from api or web app domain.
+      # example: http://cloud.dify.ai
+      CONSOLE_URL: ''
+      # The URL for Service API endpoints, refers to the base URL of the current API service if api domain is
+      # different from console domain.
+      # example: http://api.dify.ai
+      API_URL: ''
+      # The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from
+      # console or api domain.
+      # example: http://udify.app
+      APP_URL: ''
       # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
       MIGRATION_ENABLED: 'true'
       # The configurations of postgres database connection.
@@ -30,29 +36,42 @@ services:
       # It is consistent with the configuration in the 'redis' service below.
       REDIS_HOST: redis
       REDIS_PORT: 6379
+      REDIS_USERNAME: ''
       REDIS_PASSWORD: difyai123456
+      REDIS_USE_SSL: 'false'
+      # use redis db 0 for redis cache
+      REDIS_DB: 0
       # The configurations of session, Supported values are `sqlalchemy`, `redis`
       SESSION_TYPE: redis
       SESSION_REDIS_HOST: redis
       SESSION_REDIS_PORT: 6379
+      SESSION_REDIS_USERNAME: ''
       SESSION_REDIS_PASSWORD: difyai123456
+      SESSION_REDIS_USE_SSL: 'false'
       # use redis db 2 for session store
       SESSION_REDIS_DB: 2
       # The configurations of celery broker.
       # Use redis as the broker, and redis db 1 for celery broker.
       CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
-      # Specifies the allowed origins for cross-origin requests to the Web API
-      WEB_API_CORS_ALLOW_ORIGINS: http://localhost,*
-      # Specifies the allowed origins for cross-origin requests to the console API
-      CONSOLE_CORS_ALLOW_ORIGINS: http://localhost,*
+      # Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
+      WEB_API_CORS_ALLOW_ORIGINS: '*'
+      # Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
+      CONSOLE_CORS_ALLOW_ORIGINS: '*'
       # CSRF Cookie settings
       # Controls whether a cookie is sent with cross-site requests,
       # providing some protection against cross-site request forgery attacks
       #
       # Default: `SameSite=Lax, Secure=false, HttpOnly=true`
       # This default configuration supports same-origin requests using either HTTP or HTTPS,
       # but does not support cross-origin requests. It is suitable for local debugging purposes.
       #
       # If you want to enable cross-origin support,
       # you must use the HTTPS protocol and set the configuration to `SameSite=None, Secure=true, HttpOnly=true`.
       #
       # For **production** purposes, please set `SameSite=Lax, Secure=true, HttpOnly=true`.
       COOKIE_HTTPONLY: 'true'
-      COOKIE_SAMESITE: 'None'
-      COOKIE_SECURE: 'true'
+      COOKIE_SAMESITE: 'Lax'
+      COOKIE_SECURE: 'false'
       # The type of storage to use for storing user files. Supported values are `local` and `s3`, Default: `local`
       STORAGE_TYPE: local
       # The path to the local storage directory, the directory relative the root path of API service codes or absolute path. Default: `storage` or `/home/john/storage`.
@@ -86,12 +105,12 @@ services:
       - weaviate
     volumes:
       # Mount the storage directory to the container, for storing user files.
-      - ./volumes/app/storage:/app/storage
+      - ./volumes/app/storage:/app/api/storage

   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:latest
+    image: langgenius/dify-api:0.3.1
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.
@@ -104,12 +123,6 @@ services:
       # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
       # same as the API service
       SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
-      # The base URL of console application, refers to the Console base URL of WEB service.
-      CONSOLE_URL: http://localhost
-      # The URL for Service API endpoints, refers to the base URL of the current API service.
-      API_URL: http://localhost
-      # The URL for Web APP, refers to the Web App base URL of WEB service.
-      APP_URL: http://localhost
       # The configurations of postgres database connection.
       # It is consistent with the configuration in the 'db' service below.
       DB_USERNAME: postgres
@@ -120,8 +133,10 @@ services:
       # The configurations of redis cache connection.
       REDIS_HOST: redis
       REDIS_PORT: 6379
       REDIS_USERNAME: ''
       REDIS_PASSWORD: difyai123456
       REDIS_DB: 0
       REDIS_USE_SSL: 'false'
       # The configurations of celery broker.
       CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1
       # The type of storage to use for storing user files. Supported values are `local` and `s3`, Default: `local`
@@ -137,18 +152,22 @@ services:
       - weaviate
     volumes:
       # Mount the storage directory to the container, for storing user files.
-      - ./volumes/app/storage:/app/storage
+      - ./volumes/app/storage:/app/api/storage

   # Frontend web application.
   web:
-    image: langgenius/dify-web:latest
+    image: langgenius/dify-web:0.3.1
     restart: always
     environment:
       EDITION: SELF_HOSTED
-      # The base URL of console application, refers to the Console base URL of WEB service.
-      CONSOLE_URL: http://localhost
-      # The URL for Web APP, refers to the Web App base URL of WEB service.
-      APP_URL: http://localhost
+      # The base URL of console application, refers to the Console base URL of WEB service if console domain is
+      # different from api or web app domain.
+      # example: http://cloud.dify.ai
+      CONSOLE_URL: ''
+      # The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from
+      # console or api domain.
+      # example: http://udify.app
+      APP_URL: ''

   # The postgres database.
   db:
@@ -163,7 +182,6 @@ services:
       PGDATA: /var/lib/postgresql/data/pgdata
     volumes:
       - ./volumes/db/data:/var/lib/postgresql/data
-      - ./volumes/db/scripts:/docker-entrypoint-initdb.d/
     ports:
       - "5432:5432"
@@ -1,6 +1,6 @@
 server {
     listen 80;
-    server_name localhost;
+    server_name _;

     location /console/api {
         proxy_pass http://api:5001;
@@ -1 +0,0 @@
-psql -U postgres -d dify -c 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp";'
mock-server/.gitignore (vendored, 117 lines, deleted)
@@ -1,117 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env
.env.test

# parcel-bundler cache (https://parceljs.org/)
.cache

# Next.js build output
.next

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# npm
package-lock.json

# yarn
.pnp.cjs
.pnp.loader.mjs
.yarn/
yarn.lock
.yarnrc.yml

# pmpm
pnpm-lock.yaml
@@ -1 +0,0 @@
-# Mock Server
@@ -1,551 +0,0 @@
const chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'

function randomString (length) {
  let result = ''
  for (let i = length; i > 0; --i) result += chars[Math.floor(Math.random() * chars.length)]
  return result
}

// https://www.notion.so/55773516a0194781ae211792a44a3663?pvs=4
const VirtualData = new Array(10).fill().map((_, index) => {
  const date = new Date(Date.now() - index * 24 * 60 * 60 * 1000)
  return {
    date: `${date.getFullYear()}-${date.getMonth()}-${date.getDate()}`,
    conversation_count: Math.floor(Math.random() * 10) + index,
    terminal_count: Math.floor(Math.random() * 10) + index,
    token_count: Math.floor(Math.random() * 10) + index,
    total_price: Math.floor(Math.random() * 10) + index,
  }
})

const registerAPI = function (app) {
  const apps = [{
    id: '1',
    name: 'chat app',
    mode: 'chat',
    description: 'description01',
    enable_site: true,
    enable_api: true,
    api_rpm: 60,
    api_rph: 3600,
    is_demo: false,
    model_config: {
      provider: 'OPENAI',
      model_id: 'gpt-3.5-turbo',
      configs: {
        prompt_template: '你是我的解梦小助手,请参考 {{book}} 回答我有关梦境的问题。在回答前请称呼我为 {{myName}}。',
        prompt_variables: [
          {
            key: 'book',
            name: '书',
            value: '《梦境解析》',
            type: 'string',
            description: '请具体说下书名'
          },
          {
            key: 'myName',
            name: 'your name',
            value: 'Book',
            type: 'string',
            description: 'please tell me your name'
          }
        ],
        completion_params: {
          max_token: 16,
          temperature: 1, // 0-2
          top_p: 1,
          presence_penalty: 1, // -2-2
          frequency_penalty: 1, // -2-2
        }
      }
    },
    site: {
      access_token: '1000',
      title: 'site 01',
      author: 'John',
      default_language: 'zh-Hans-CN',
      customize_domain: 'http://customize_domain',
      theme: 'theme',
      customize_token_strategy: 'must',
      prompt_public: true
    }
  },
  {
    id: '2',
    name: 'completion app',
    mode: 'completion', // genertation text
    description: 'description 02', // genertation text
    enable_site: false,
    enable_api: false,
    api_rpm: 60,
    api_rph: 3600,
    is_demo: false,
    model_config: {
      provider: 'OPENAI',
      model_id: 'text-davinci-003',
      configs: {
        prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:',
        prompt_variables: [
          {
            key: 'langA',
            name: '原始语音',
            value: '中文',
            type: 'string',
            description: '这是中文格式的原始语音'
          },
          {
            key: 'langB',
            name: '目标语言',
            value: '英语',
            type: 'string',
            description: '这是英语格式的目标语言'
          }
        ],
        completion_params: {
          max_token: 16,
          temperature: 1, // 0-2
          top_p: 1,
          presence_penalty: 1, // -2-2
          frequency_penalty: 1, // -2-2
        }
      }
    },
    site: {
      access_token: '2000',
      title: 'site 02',
      author: 'Mark',
      default_language: 'en-US',
      customize_domain: 'http://customize_domain',
      theme: 'theme',
      customize_token_strategy: 'must',
      prompt_public: false
    }
  },
  ]

  const apikeys = [{
    id: '111121312313132',
    token: 'sk-DEFGHJKMNPQRSTWXYZabcdefhijk1234',
    last_used_at: '1679212138000',
    created_at: '1673316000000'
  }, {
    id: '43441242131223123',
    token: 'sk-EEFGHJKMNPQRSTWXYZabcdefhijk5678',
    last_used_at: '1679212721000',
    created_at: '1679212731000'
  }]

  // create app
  app.post('/apps', async (req, res) => {
    apps.push({
      id: apps.length + 1 + '',
      ...req.body,

    })
    res.send({
      result: 'success'
    })
  })

  // app list
  app.get('/apps', async (req, res) => {
    res.send({
      data: apps
    })
  })

  // app detail
  app.get('/apps/:id', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id) || apps[0]
    res.send(item)
  })

  // update app name
  app.post('/apps/:id/name', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    item.name = req.body.name
    res.send(item || null)
  })

  // update app site-enable status
  app.post('/apps/:id/site-enable', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.enable_site = req.body.enable_site
    res.send(item || null)
  })

  // update app api-enable status
  app.post('/apps/:id/api-enable', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.enable_api = req.body.enable_api
    res.send(item || null)
  })

  // update app rate-limit
  app.post('/apps/:id/rate-limit', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.api_rpm = req.body.api_rpm
    item.api_rph = req.body.api_rph
    res.send(item || null)
  })

  // update app url including code
  app.post('/apps/:id/site/access-token-reset', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.site.access_token = randomString(12)
    res.send(item || null)
  })

  // update app config
  app.post('/apps/:id/site', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.name = req.body.title
    item.description = req.body.description
    item.prompt_public = req.body.prompt_public
    item.default_language = req.body.default_language
    res.send(item || null)
  })

  // get statistics daily-conversations
  app.get('/apps/:id/statistics/daily-conversations', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    if (item) {
      res.send({
        data: VirtualData
      })
    } else {
      res.send({
        data: []
      })
    }
  })

  // get statistics daily-end-users
  app.get('/apps/:id/statistics/daily-end-users', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    if (item) {
      res.send({
        data: VirtualData
      })
    } else {
      res.send({
        data: []
      })
    }
  })

  // get statistics token-costs
  app.get('/apps/:id/statistics/token-costs', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    if (item) {
      res.send({
        data: VirtualData
      })
    } else {
      res.send({
        data: []
      })
    }
  })

  // update app model config
  app.post('/apps/:id/model-config', async (req, res) => {
    const item = apps.find(item => item.id === req.params.id)
    console.log(item)
    item.model_config = req.body
    res.send(item || null)
  })


  // get api keys list
  app.get('/apps/:id/api-keys', async (req, res) => {
    res.send({
      data: apikeys
    })
  })

  // del api key
  app.delete('/apps/:id/api-keys/:api_key_id', async (req, res) => {
    res.send({
      result: 'success'
    })
  })

  // create api key
  app.post('/apps/:id/api-keys', async (req, res) => {
    res.send({
      id: 'e2424241313131',
      token: 'sk-GEFGHJKMNPQRSTWXYZabcdefhijk0124',
      created_at: '1679216688962'
    })
  })

  // get completion-conversations
  app.get('/apps/:id/completion-conversations', async (req, res) => {
    const data = {
      data: [{
        id: 1,
        from_end_user_id: 'user 1',
        summary: 'summary1',
        created_at: '2023-10-11',
        annotated: true,
        message_count: 100,
        user_feedback_stats: {
          like: 4, dislike: 5
        },
        admin_feedback_stats: {
          like: 1, dislike: 2
        },
        message: {
          message: 'message1',
          query: 'question1',
          answer: 'answer1'
        }
      }, {
        id: 12,
        from_end_user_id: 'user 2',
        summary: 'summary2',
        created_at: '2023-10-01',
        annotated: false,
        message_count: 10,
        user_feedback_stats: {
          like: 2, dislike: 20
        },
        admin_feedback_stats: {
          like: 12, dislike: 21
        },
        message: {
          message: 'message2',
          query: 'question2',
          answer: 'answer2'
        }
      }, {
        id: 13,
        from_end_user_id: 'user 3',
        summary: 'summary3',
        created_at: '2023-10-11',
        annotated: false,
        message_count: 20,
        user_feedback_stats: {
          like: 2, dislike: 0
        },
        admin_feedback_stats: {
          like: 0, dislike: 21
        },
        message: {
          message: 'message3',
          query: 'question3',
          answer: 'answer3'
        }
      }],
      total: 200
    }
    res.send(data)
  })

  // get chat-conversations
  app.get('/apps/:id/chat-conversations', async (req, res) => {
    const data = {
      data: [{
        id: 1,
        from_end_user_id: 'user 1',
        summary: 'summary1',
        created_at: '2023-10-11',
        read_at: '2023-10-12',
        annotated: true,
        message_count: 100,
        user_feedback_stats: {
          like: 4, dislike: 5
        },
        admin_feedback_stats: {
          like: 1, dislike: 2
        },
        message: {
          message: 'message1',
          query: 'question1',
          answer: 'answer1'
        }
      }, {
        id: 12,
        from_end_user_id: 'user 2',
        summary: 'summary2',
        created_at: '2023-10-01',
        annotated: false,
        message_count: 10,
        user_feedback_stats: {
          like: 2, dislike: 20
        },
        admin_feedback_stats: {
          like: 12, dislike: 21
        },
        message: {
          message: 'message2',
          query: 'question2',
          answer: 'answer2'
        }
      }, {
        id: 13,
        from_end_user_id: 'user 3',
        summary: 'summary3',
        created_at: '2023-10-11',
        annotated: false,
        message_count: 20,
        user_feedback_stats: {
          like: 2, dislike: 0
        },
        admin_feedback_stats: {
          like: 0, dislike: 21
        },
        message: {
          message: 'message3',
          query: 'question3',
          answer: 'answer3'
        }
      }],
      total: 200
    }
    res.send(data)
  })

  // get completion-conversation detail
  app.get('/apps/:id/completion-conversations/:cid', async (req, res) => {
    const data =
    {
      id: 1,
      from_end_user_id: 'user 1',
      summary: 'summary1',
      created_at: '2023-10-11',
      annotated: true,
      message: {
        message: 'question1',
        // query: 'question1',
        answer: 'answer1',
        annotation: {
          content: '这是一段纠正的内容'
        }
      },
      model_config: {
        provider: 'openai',
        model_id: 'model_id',
        configs: {
          prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:{{content}}'
        }
      }
    }
    res.send(data)
  })

  // get chat-conversation detail
  app.get('/apps/:id/chat-conversations/:cid', async (req, res) => {
    const data =
    {
      id: 1,
      from_end_user_id: 'user 1',
      summary: 'summary1',
      created_at: '2023-10-11',
      annotated: true,
      message: {
        message: 'question1',
        // query: 'question1',
        answer: 'answer1',
        created_at: '2023-08-09 13:00',
        provider_response_latency: 130,
        message_tokens: 230
      },
      model_config: {
        provider: 'openai',
        model_id: 'model_id',
        configs: {
          prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:{{content}}'
        }
      }
    }
    res.send(data)
  })

  // get chat-conversation message list
  app.get('/apps/:id/chat-messages', async (req, res) => {
    const data = {
      data: [{
        id: 1,
        created_at: '2023-10-11 07:09',
        message: '请说说人为什么会做梦?' + req.query.conversation_id,
        answer: '梦境通常是个人内心深处的反映,很难确定每个人梦境的确切含义,因为它们可能会受到梦境者的文化背景、生活经验和情感状态等多种因素的影响。',
        provider_response_latency: 450,
        answer_tokens: 200,
        annotation: {
          content: 'string',
          account: {
            id: 'string',
            name: 'string',
            email: 'string'
          }
        },
        feedbacks: {
          rating: 'like',
          content: 'string',
          from_source: 'log'
        }
      }, {
        id: 2,
        created_at: '2023-10-11 8:23',
        message: '夜里经常做梦会影响次日的精神状态吗?',
        answer: '总之,这个梦境可能与梦境者的个人经历和情感状态有关,但在一般情况下,它可能表示一种强烈的情感反应,包括愤怒、不满和对于正义和自由的渴望。',
        provider_response_latency: 400,
        answer_tokens: 250,
        annotation: {
          content: 'string',
          account: {
            id: 'string',
            name: 'string',
            email: 'string'
          }
        },
        // feedbacks: {
        //   rating: 'like',
        //   content: 'string',
        //   from_source: 'log'
        // }
      }, {
        id: 3,
        created_at: '2023-10-11 10:20',
        message: '梦见在山上手撕鬼子,大师解解梦',
        answer: '但是,一般来说,“手撕鬼子”这个场景可能是梦境者对于过去历史上的战争、侵略以及对于自己国家和族群的保护与维护的情感反应。在梦中,你可能会感到自己充满力量和勇气,去对抗那些看似强大的侵略者。',
        provider_response_latency: 288,
        answer_tokens: 100,
        annotation: {
          content: 'string',
          account: {
            id: 'string',
            name: 'string',
            email: 'string'
          }
        },
        feedbacks: {
          rating: 'dislike',
          content: 'string',
          from_source: 'log'
        }
      }],
      limit: 20,
      has_more: true
    }
    res.send(data)
  })

  app.post('/apps/:id/annotations', async (req, res) => {
    res.send({ result: 'success' })
  })

  app.post('/apps/:id/feedbacks', async (req, res) => {
    res.send({ result: 'success' })
  })

}

module.exports = registerAPI
@@ -1,38 +0,0 @@

const registerAPI = function (app) {
  app.post('/login', async (req, res) => {
    res.send({
      result: 'success'
    })
  })

  // get user info
  app.get('/account/profile', async (req, res) => {
    res.send({
      id: '11122222',
      name: 'Joel',
      email: 'iamjoel007@gmail.com'
    })
  })

  // logout
  app.get('/logout', async (req, res) => {
    res.send({
      result: 'success'
    })
  })

  // Langgenius version
  app.get('/version', async (req, res) => {
    res.send({
      current_version: 'v1.0.0',
      latest_version: 'v1.0.0',
      upgradeable: true,
      compatible_upgrade: true
    })
  })

}

module.exports = registerAPI
@@ -1,249 +0,0 @@
const registerAPI = function (app) {
  app.get("/datasets/:id/documents", async (req, res) => {
    if (req.params.id === "0") res.send({ data: [] });
    else {
      res.send({
        data: [
          {
            id: 1,
            name: "Steve Jobs' life",
            words: "70k",
            word_count: 100,
            updated_at: 1681801029,
            indexing_status: "completed",
            archived: true,
            enabled: false,
            data_source_info: {
              upload_file: {
                // id: string
                // name: string
                // size: number
                // mime_type: string
                // created_at: number
                // created_by: string
                extension: "pdf",
              },
            },
          },
          {
            id: 2,
            name: "Steve Jobs' life",
            word_count: "10k",
            hit_count: 10,
            updated_at: 1681801029,
            indexing_status: "waiting",
            archived: true,
            enabled: false,
            data_source_info: {
              upload_file: {
                extension: "json",
              },
            },
          },
          {
            id: 3,
            name: "Steve Jobs' life xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            word_count: "100k",
            hit_count: 0,
            updated_at: 1681801029,
            indexing_status: "indexing",
            archived: false,
            enabled: true,
            data_source_info: {
              upload_file: {
                extension: "txt",
              },
            },
          },
          {
            id: 4,
            name: "Steve Jobs' life xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            word_count: "100k",
            hit_count: 0,
            updated_at: 1681801029,
            indexing_status: "splitting",
            archived: false,
            enabled: true,
            data_source_info: {
              upload_file: {
                extension: "md",
              },
            },
          },
          {
            id: 5,
            name: "Steve Jobs' life",
            word_count: "100k",
            hit_count: 0,
            updated_at: 1681801029,
            indexing_status: "error",
            archived: false,
            enabled: false,
            data_source_info: {
              upload_file: {
                extension: "html",
              },
            },
          },
        ],
        total: 100,
        id: req.params.id,
      });
    }
  });

  app.get("/datasets/:id/documents/:did/segments", async (req, res) => {
    if (req.params.id === "0") res.send({ data: [] });
    else {
      res.send({
        data: new Array(100).fill({
          id: 1234,
          content: `His insistence put me in a difficult position. He was famously protective of his privacy, and as far as I knew he had never read any of my books. Maybe someday, I kept saying. But in 2009 his wife, Laurene Powell, told me bluntly: "If you are ever going to write a book about Steve, you had better start now." He had just taken his second medical leave. I admitted to Laurene that when Jobs first raised the idea, I had not known he was ill. Almost nobody knew, she said. He had called me just before his cancer surgery, and to this day he still keeps it a secret, she explained.\n
His insistence put me in a difficult position. He was famously protective of his privacy, and as far as I knew he had never read any of my books. Maybe someday, I kept saying. But in 2009 his wife, Laurene Powell, told me bluntly: "If you are ever going to write a book about Steve, you had better start now." He had just taken his second medical leave. I admitted to Laurene that when Jobs first raised the idea, I had not known he was ill. Almost nobody knew, she said. He had called me just before his cancer surgery, and to this day he still keeps it a secret, she explained.`,
          enabled: true,
          keyWords: [
            "Laurene Powell",
            "Laurene Powell",
            "surgery",
            "secret",
            "cancer",
            "Jobs",
            "Steve",
            "book",
            "leave",
            "insistence",
            "privacy",
          ],
          word_count: 120,
          hit_count: 100,
          status: "ok",
          index_node_hash: "index_node_hash value",
        }),
        limit: 100,
        has_more: true,
      });
    }
  });

  // get doc detail
  app.get("/datasets/:id/documents/:did", async (req, res) => {
    // Note: fixedParams is defined but not used in the response below.
    const fixedParams = {
      // originInfo: {
      originalFilename: "Original filename",
      originalFileSize: "16mb",
      uploadDate: "2023-01-01",
      lastUpdateDate: "2023-01-05",
      source: "Source",
      // },
      // technicalParameters: {
      segmentSpecification: "909090",
      segmentLength: 100,
      avgParagraphLength: 130,
    };
    const bookData = {
      doc_type: "book",
      doc_metadata: {
        title: "Machine Learning in Action",
        language: "zh",
        author: "Peter Harrington",
        publisher: "Posts & Telecom Press",
        publicationDate: "2013-01-01",
        ISBN: "9787115335500",
        category: "Technology",
      },
    };
    const webData = {
      doc_type: "webPage",
      doc_metadata: {
        title: "An Introductory Deep Learning Tutorial",
        url: "https://www.example.com/deep-learning-tutorial",
        language: "zh",
        publishDate: "2020-05-01",
        authorPublisher: "Zhang San",
        topicsKeywords: "deep learning, artificial intelligence, tutorial",
        description:
          "A detailed introductory deep learning tutorial, aimed at beginners interested in artificial intelligence and deep learning.",
      },
    };
    const postData = {
      doc_type: "socialMediaPost",
      doc_metadata: {
        platform: "Twitter",
        authorUsername: "example_user",
        publishDate: "2021-08-15",
        postURL: "https://twitter.com/example_user/status/1234567890",
        topicsTags:
          "AI, DeepLearning, Tutorial, Example, Example2, Example3, AI, DeepLearning, Tutorial, Example, Example2, Example3, AI, DeepLearning, Tutorial, Example, Example2, Example3,",
      },
    };
    res.send({
      id: "550e8400-e29b-41d4-a716-446655440000",
      position: 1,
      dataset_id: "550e8400-e29b-41d4-a716-446655440002",
      data_source_type: "upload_file",
      data_source_info: {
        upload_file: {
          extension: "html",
          id: "550e8400-e29b-41d4-a716-446655440003",
        },
      },
      dataset_process_rule_id: "550e8400-e29b-41d4-a716-446655440004",
      batch: "20230410123456123456",
      name: "example_document",
      created_from: "web",
      created_by: "550e8400-e29b-41d4-a716-446655440005",
      created_api_request_id: "550e8400-e29b-41d4-a716-446655440006",
      created_at: 1671269696,
      processing_started_at: 1671269700,
      word_count: 11,
      parsing_completed_at: 1671269710,
      cleaning_completed_at: 1671269720,
      splitting_completed_at: 1671269730,
      tokens: 10,
      indexing_latency: 5.0,
      completed_at: 1671269740,
      paused_by: null,
      paused_at: null,
      error: null,
      stopped_at: null,
      indexing_status: "completed",
      enabled: true,
      disabled_at: null,
      disabled_by: null,
      archived: false,
      archived_reason: null,
      archived_by: null,
      archived_at: null,
      updated_at: 1671269740,
      ...(req.params.did === "book"
        ? bookData
        : req.params.did === "web"
          ? webData
          : req.params.did === "post"
            ? postData
            : {}),
      segment_count: 10,
      hit_count: 9,
      status: "ok",
    });
  });

  // // logout
  // app.get("/logout", async (req, res) => {
  //   res.send({
  //     result: "success",
  //   });
  // });

  // // Langgenius version
  // app.get("/version", async (req, res) => {
  //   res.send({
  //     current_version: "v1.0.0",
  //     latest_version: "v1.0.0",
  //     upgradeable: true,
  //     compatible_upgrade: true,
  //   });
  // });
};

module.exports = registerAPI;
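A detail worth noting in the doc-detail handler above: the object spread merges bookData, webData, or postData into the fixed payload depending on the :did path segment. A quick way to see the selection in action — a minimal sketch, assuming the mock server is running on its default port 3001 and Node 18+ for the built-in fetch:

// doc-detail.mjs — the :did segment picks which doc_metadata preset is merged in
const detail = await fetch('http://localhost:3001/datasets/1/documents/book').then(res => res.json())
console.log(detail.doc_type)           // "book"
console.log(detail.doc_metadata.title) // "Machine Learning in Action"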
@@ -1,119 +0,0 @@
const registerAPI = function (app) {
  const coversationList = [
    {
      id: '1',
      name: 'The Interpretation of Dreams',
      inputs: {
        book: '"The Interpretation of Dreams"',
        callMe: 'Master',
      },
      chats: []
    },
    {
      id: '2',
      name: 'The Origin of Life',
      inputs: {
        book: '"x x x"',
      }
    },
  ]
  // site info
  app.get('/apps/site/info', async (req, res) => {
    // const id = req.params.id
    res.send({
      enable_site: true,
      appId: '1',
      site: {
        title: 'Story Bot',
        description: 'This is a dream-interpretation chatbot. You can choose your favorite interpreter to interpret your dreams. This sentence is the client app description.',
      },
      prompt_public: true, // id === '1',
      prompt_template: 'You are my dream-interpretation assistant. Please answer my questions about dreams with reference to {{book}}. Please address me as {{myName}} before answering.',
    })
  })

  app.post('/apps/:id/chat-messages', async (req, res) => {
    const conversationId = req.body.conversation_id ? req.body.conversation_id : Date.now() + ''
    res.send({
      id: Date.now() + '',
      conversation_id: conversationId,
      answer: 'balabababab'
    })
  })

  app.post('/apps/:id/completion-messages', async (req, res) => {
    res.send({
      id: Date.now() + '',
      answer: `As an AI assistant, I can give you randomly generated paragraphs for testing, placeholders, or other purposes. Here is one:

"As technology keeps advancing, more and more people are realizing the importance of artificial intelligence. AI has become an indispensable part of our lives: it can take over many tedious tasks and provide smarter, more convenient services. While AI brings many benefits, it also faces many challenges. For example, AI algorithms can exhibit bias and treat certain groups unfairly, and the development of AI may also cost some people their jobs. We therefore need to keep studying its development to make sure it brings more benefits to humanity."`
    })
  })

  // share api
  // chat list
  app.get('/apps/:id/coversations', async (req, res) => {
    res.send({
      data: coversationList
    })
  })

  app.get('/apps/:id/variables', async (req, res) => {
    res.send({
      variables: [
        {
          key: 'book',
          name: 'Book',
          value: '"Dream Analysis"',
          type: 'string'
        },
        {
          key: 'myName',
          name: 'Name',
          value: '',
          type: 'string'
        }
      ],
    })
  })
}

module.exports = registerAPI

// const chatList = [
//   {
//     id: 1,
//     content: 'AI opening line',
//     isAnswer: true,
//   },
//   {
//     id: 2,
//     content: 'I dreamed I was fighting invaders bare-handed in the mountains. Master, please interpret this dream.',
//     more: { time: '5.6 s' },
//   },
//   {
//     id: 3,
//     content: 'Dreams are usually a reflection of a person\'s inner world. It is hard to pin down the exact meaning of any one dream, because dreams can be shaped by many factors, such as the dreamer\'s cultural background, life experience, and emotional state.',
//     isAnswer: true,
//     more: { time: '99 s' },
//   },
//   {
//     id: 4,
//     content: 'I dreamed I was fighting invaders bare-handed in the mountains. Master, please interpret this dream.',
//     more: { time: '5.6 s' },
//   },
//   {
//     id: 5,
//     content: 'I dreamed I was fighting invaders bare-handed in the mountains. Master, please interpret this dream.',
//     more: { time: '5.6 s' },
//   },
//   {
//     id: 6,
//     content: 'I dreamed I was fighting invaders bare-handed in the mountains. Master, please interpret this dream.',
//     more: { time: '5.6 s' },
//   },
// ]
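The {{book}} and {{myName}} slots in prompt_template above line up with the key values served by /apps/:id/variables. A sketch of the substitution a client might perform — the rendering helper here is illustrative only and not part of this mock:

// illustrative: fill the served template from the served variables
const template = 'Please answer my questions about dreams with reference to {{book}}. Please address me as {{myName}}.'
const inputs = { book: '"Dream Analysis"', myName: 'Master' }
const rendered = template.replace(/\{\{(\w+)\}\}/g, (_, key) => inputs[key] ?? '')
console.log(rendered)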
@@ -1,15 +0,0 @@
const registerAPI = function (app) {
  app.get('/demo', async (req, res) => {
    res.send({
      des: 'get res'
    })
  })

  app.post('/demo', async (req, res) => {
    res.send({
      des: 'post res'
    })
  })
}

module.exports = registerAPI
@@ -1,42 +0,0 @@
const express = require('express')
const app = express()
const bodyParser = require('body-parser')
var cors = require('cors')

const commonAPI = require('./api/common')
const demoAPI = require('./api/demo')
const appsApi = require('./api/apps')
const debugAPI = require('./api/debug')
const datasetsAPI = require('./api/datasets')

const port = 3001

app.use(bodyParser.json()) // for parsing application/json
app.use(bodyParser.urlencoded({ extended: true })) // for parsing application/x-www-form-urlencoded

const corsOptions = {
  origin: true,
  credentials: true,
}
app.use(cors(corsOptions)) // for cross origin
app.options('*', cors(corsOptions)) // include before other routes

demoAPI(app)
commonAPI(app)
appsApi(app)
debugAPI(app)
datasetsAPI(app)

app.get('/', (req, res) => {
  res.send('rootpath')
})

app.listen(port, () => {
  console.log(`Mock run on port ${port}`)
})

// currently unused helper, presumably kept for simulating slow responses
const sleep = (ms) => {
  return new Promise(resolve => setTimeout(resolve, ms))
}
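app.js wires up JSON body parsing, credentialed CORS, and the five route modules, then listens on port 3001. For a quick smoke test once it is running (npm start or npm run dev, per the package.json below) — a minimal sketch, assuming Node 18+ for the built-in fetch:

// smoke-test.mjs — exercise a couple of the mock endpoints registered above
const base = 'http://localhost:3001'

const profile = await fetch(`${base}/account/profile`).then(res => res.json())
console.log(profile.name) // "Joel"

const messages = await fetch(`${base}/apps/1/chat-messages?conversation_id=1`).then(res => res.json())
console.log(messages.data.length, messages.has_more) // 3 true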
@@ -1,26 +0,0 @@
{
  "name": "server",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "dev": "nodemon node app.js",
    "start": "node app.js",
    "tcp": "node tcp.js"
  },
  "keywords": [],
  "author": "",
  "license": "MIT",
  "engines": {
    "node": ">=16.0.0"
  },
  "dependencies": {
    "body-parser": "^1.20.2",
    "cors": "^2.8.5",
    "express": "4.18.2",
    "express-jwt": "8.4.1"
  },
  "devDependencies": {
    "nodemon": "2.0.21"
  }
}
@@ -13,7 +13,7 @@ After installing the SDK, you can use it in your project like this:
 import { DifyClient, ChatClient, CompletionClient } from 'dify-client'
 
 const API_KEY = 'your-api-key-here';
-const user = `random-user-id`:
+const user = `random-user-id`;
 
 // Create a completion client
 const completionClient = new CompletionClient(API_KEY)
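To carry the README example one step further, here is how a chat message could be sent through the low-level request helper that the new tests below exercise — a hedged sketch: the routes re-export and the sendRequest(method, endpoint, data) shape are inferred from index.test.js in this diff, and the request-body fields follow the public Dify chat API rather than anything shown here:

import { DifyClient, routes } from 'dify-client'

const API_KEY = 'your-api-key-here'
const user = `random-user-id`

const client = new DifyClient(API_KEY)

// POST /chat-messages — the route path this release fixes
const response = await client.sendRequest(
  routes.createChatMessage.method, // 'POST'
  routes.createChatMessage.url(),  // '/chat-messages'
  { inputs: {}, query: 'ping', user, response_mode: 'blocking' }, // assumed Dify chat fields
)
console.log(response.data.answer) // assuming the axios response object is returned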
sdks/nodejs-client/babel.config.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
  "presets": [
    "@babel/preset-env"
  ]
}
@@ -1,8 +1,8 @@
 import axios from 'axios'
 
-const BASE_URL = 'https://api.dify.ai/v1'
+export const BASE_URL = 'https://api.dify.ai/v1'
 
-const routes = {
+export const routes = {
   application: {
     method: 'GET',
     url: () => `/parameters`
@@ -17,7 +17,7 @@ const routes = {
   },
   createChatMessage: {
     method: 'POST',
-    url: () => `/chat-message`,
+    url: () => `/chat-messages`,
   },
   getConversationMessages: {
     method: 'GET',
sdks/nodejs-client/index.test.js (new file, 66 lines)
@@ -0,0 +1,66 @@
import { DifyClient, BASE_URL, routes } from ".";

import axios from 'axios'

jest.mock('axios')

describe('Client', () => {
  let difyClient
  beforeEach(() => {
    difyClient = new DifyClient('test')
  })

  test('should create a client', () => {
    expect(difyClient).toBeDefined();
  })
  // test updateApiKey
  test('should update the api key', () => {
    difyClient.updateApiKey('test2');
    expect(difyClient.apiKey).toBe('test2');
  })
});

describe('Send Requests', () => {
  let difyClient

  beforeEach(() => {
    difyClient = new DifyClient('test')
  })

  afterEach(() => {
    jest.resetAllMocks()
  })

  it('should make a successful request to the application parameter', async () => {
    const method = 'GET'
    const endpoint = routes.application.url() // call the route helper to get the path string
    const expectedResponse = { data: 'response' }
    axios.mockResolvedValue(expectedResponse)

    await difyClient.sendRequest(method, endpoint)

    expect(axios).toHaveBeenCalledWith({
      method,
      url: `${BASE_URL}${endpoint}`,
      data: null,
      params: null,
      headers: {
        Authorization: `Bearer ${difyClient.apiKey}`,
        'Content-Type': 'application/json',
      },
      responseType: 'json',
    })
  })

  it('should handle errors from the API', async () => {
    const method = 'GET'
    const endpoint = '/test-endpoint'
    const errorMessage = 'Request failed with status code 404'
    axios.mockRejectedValue(new Error(errorMessage))

    await expect(difyClient.sendRequest(method, endpoint)).rejects.toThrow(
      errorMessage
    )
  })
})
@@ -1,6 +1,6 @@
 {
   "name": "dify-client",
-  "version": "1.0.2",
+  "version": "1.0.3",
   "description": "This is the Node.js SDK for the Dify.AI API, which allows you to easily integrate Dify.AI into your Node.js applications.",
   "main": "index.js",
   "type": "module",
@@ -14,7 +14,21 @@
     "crazywoola <427733928@qq.com> (https://github.com/crazywoola)"
   ],
   "license": "MIT",
+  "scripts": {
+    "test": "jest"
+  },
+  "jest": {
+    "transform": {
+      "^.+\\.[t|j]sx?$": "babel-jest"
+    }
+  },
   "dependencies": {
     "axios": "^1.3.5"
   },
+  "devDependencies": {
+    "@babel/core": "^7.21.8",
+    "@babel/preset-env": "^7.21.5",
+    "babel-jest": "^29.5.0",
+    "jest": "^29.5.0"
+  }
 }
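With the new scripts and jest entries in place, npm test runs the suite added in index.test.js above: the transform pattern hands .js/.jsx/.ts/.tsx sources to babel-jest, which picks up the @babel/preset-env config from babel.config.json so Jest can execute this ES-module package ("type": "module").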
Some files were not shown because too many files have changed in this diff.