Compare commits

545 Commits

Author SHA1 Message Date
takatost
12c815c597 fix: ExtractSetting optional value missing None as default val (#5238) 2024-06-15 02:58:47 +08:00
takatost
d098bdc59b version to 0.6.11 (#5224) 2024-06-15 02:46:24 +08:00
Jyong
ba5f8afaa8 Feat/firecrawl data source (#5232)
Co-authored-by: Nicolas <nicolascamara29@gmail.com>
Co-authored-by: chenhe <guchenhe@gmail.com>
Co-authored-by: takatost <takatost@gmail.com>
2024-06-15 02:46:02 +08:00
Chenhe Gu
918ebe1620 update tooltip (#5235) 2024-06-15 02:21:46 +08:00
zxhlyh
6be0027853 fix: note editor italic (#5230) 2024-06-14 22:31:39 +08:00
crazywoola
bc757f1ddc fix: z-index (#5229) 2024-06-14 22:31:19 +08:00
takatost
8da035aac6 Update README.md (#5228) 2024-06-14 22:31:01 +08:00
kurokobo
ef6034abfd fix: allow the name and icon of the web app to be set independently of that of the bot itself (#5225) 2024-06-14 22:16:11 +08:00
kurokobo
0391282b5e fix: initialize site with customized icon and icon_background (#5227) 2024-06-14 22:15:50 +08:00
Joel
28554350de feat: support firecrawl frontend code (#5226) 2024-06-14 22:02:41 +08:00
走在修行的大街上
8d1386df0f feat(Tools): Add Feishu multi-dimensional table operation function (#5213)
Co-authored-by: 黎斌 <libin.23@bytedance.com>
Co-authored-by: takatost <takatost@gmail.com>
2024-06-14 21:19:20 +08:00
Bowen Liang
e7752e8135 chore: development script for syncing Poetry lockfile (#5170) 2024-06-14 20:54:07 +08:00
DomKing
43c19007e0 fix: workspace member's last_active should be last_active_time, but not last_login_time (#4906) 2024-06-14 20:49:19 +08:00
rerorero
c6b791d070 fix: number variable cause type error in openai moderation (#5222) 2024-06-14 20:43:03 +08:00
Charles Zhou
8bcc5a36bb feat: new editor user permission profile (#4435)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-06-14 20:34:25 +08:00
th3n00b13
cdb6c801c1 Fix: http_request delete method not working (#4975) 2024-06-14 20:07:22 +08:00
Winson Li
511ead4b8d Update README, deploy dify with YAML file on Kubernetes (#5131) 2024-06-14 19:53:40 +08:00
quicksand
4080f7b8ad feat: support tencent vector db (#3568) 2024-06-14 19:25:17 +08:00
gongzhongqiang
9ed21737d5 fix: add repo check for build-push.yml (#5141) 2024-06-14 19:15:27 +08:00
Jaxon Ley
337bad8525 feat: Add Optional API Key, Proxy Server, and Bypass Cache Parameters to Jina Tools (#5197) 2024-06-14 19:09:25 +08:00
Bin
0f35d07052 support ERNIE-4.0-8K-Latest (#5216) 2024-06-14 18:45:24 +08:00
-LAN-
7f44e88eda fix(model_providers/ollama): Fix OllamaLargeLanguageModel to correctly set the stop option (#5217) 2024-06-14 18:26:14 +08:00
Jason
b7ff765d8d Add novita.ai as model provider (#4961) 2024-06-14 18:23:06 +08:00
zxhlyh
c28d709d7f feat: workflow add note node (#5164) 2024-06-14 17:08:11 +08:00
Jyong
d7fbae286a add aws s3 iam check (#5174) 2024-06-14 15:19:59 +08:00
Masashi Tomooka
0633aae7dc feat: allow to use IAM Role for Bedrock (#5188) 2024-06-14 15:18:42 +08:00
doufa
f87f11e92c chore: make the Celery command more noticeable (#5203) 2024-06-14 15:06:07 +08:00
Bowen Liang
2b04388361 chore: remove bump-pydantic dependency (#5177) 2024-06-14 15:05:17 +08:00
takatost
3c0f21d174 fix: workflow as tool create error by type misuse (#5205) 2024-06-14 15:01:09 +08:00
Hanqing Zhao
8e2f8ffb9e Modify docs in JP (#5185) 2024-06-14 14:06:23 +08:00
KVOJJJin
e68d1b88de Fix: conversation id display & support copy (#5195) 2024-06-14 13:58:51 +08:00
-LAN-
ed53ef29f4 fix(core/tools): Fix the issue with iterating over None in _transform_tool_parameters_type. (#5190) 2024-06-14 11:25:48 +08:00
KVOJJJin
4289f17be2 Chore: refactor embedded chatbot (#5125) 2024-06-14 08:42:41 +08:00
dependabot[bot]
54e02b8147 chore(deps): bump authlib from 1.2.0 to 1.3.1 in /api (#5115)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: takatost <takatost@gmail.com>
2024-06-14 03:55:40 +08:00
Summer-Gu
7f98c2ea3f refactor: Delete the dataset to verify whether it is in use (#5112) 2024-06-14 03:25:38 +08:00
dependabot[bot]
7189a4c379 chore(deps): bump azure-identity from 1.15.0 to 1.16.1 in /api (#5116)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: takatost <takatost@gmail.com>
2024-06-14 03:24:32 +08:00
takatost
415022aa14 fix: pydantic2 error (#5172) 2024-06-14 03:05:04 +08:00
saga.rey
edf2047f04 fix: milvus_vector default dataset index_struct type from weaviate to milvus (#5098) 2024-06-14 02:36:01 +08:00
rerorero
b85ae146a7 fix: JSON mode with an image doesn't work for Gemini (#5169) 2024-06-14 02:32:09 +08:00
takatost
5ec7d85629 fix: issues by pydantic2 upgrade (#5171) 2024-06-14 02:28:28 +08:00
Pan, Wen-Ming
f13af5a811 fix(model_providers/vertex_ai): Vertex AI Anthropic models authentication failed (#4971) 2024-06-14 01:34:31 +08:00
Bowen Liang
f976740b57 improve: modernizing validation by migrating pydantic from 1.x to 2.x (#4592) 2024-06-14 01:05:37 +08:00
Yeuoly
e8afc416dd improve: CI experience (#5168) 2024-06-13 23:16:28 +08:00
Yeuoly
0cccf9c67d feat: introduce APP_MAX_EXECUTION_TIME (#5167) 2024-06-13 23:08:05 +08:00
Bowen Liang
cdc08a434f feat: support Chroma vector store (#5015) 2024-06-13 18:02:18 +08:00
Kazuki Takamatsu
3f18369ad2 Fix: google storage init with sa and download (#5054) 2024-06-13 17:36:34 +08:00
Kazuki Hasegawa
db976a1f74 Upgrade boto3 library to support EKS Pod Identity. (#5064) 2024-06-13 17:36:14 +08:00
kurokobo
e61f5d029a chore(docs): fix minor small typos (#5124) 2024-06-13 17:36:01 +08:00
Charles Zhou
eaca892c4e fix: front end error when same tool is called twice at once (#5068) 2024-06-13 17:16:59 +08:00
非法操作
015c26d303 fix: style misalignment and inconsistency (#5149) 2024-06-13 16:32:42 +08:00
sino
8210637bc5 feat: support jina-clip-v1 embedding model (#5146) 2024-06-13 16:31:18 +08:00
呆萌闷油瓶
790543131a chore:add some new api version for azure openai (#5142) 2024-06-13 16:30:47 +08:00
Ikko Eltociear Ashimine
a40f68cf94 chore: update qdrant_vector.py (#5128) 2024-06-13 15:35:14 +08:00
yanghx
adc948e87c fix(api/core/model_runtime/model_providers/baichuan,localai): Parse ToolPromptMessage. #4943 (#5138)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-06-13 13:08:30 +08:00
smoky
742b08e1d5 chore: update question classifier prompt (#5137)
Signed-off-by: 0xff-dev <stevenshuang521@gmail.com>
2024-06-13 13:04:51 +08:00
orangeclk
79e8489942 feat: support siliconflow (#5129) 2024-06-13 12:59:41 +08:00
Charlie.Wei
d6fa130cb5 remove dalle3 seed (#5136)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-13 08:05:55 +08:00
takatost
0c92f81efc chore: sync pyproject.toml from requirements.txt (#5130) 2024-06-13 00:06:05 +08:00
fishisnow
11fd4a5dcc Fix: fix load_yaml logging, Avoid setting the log level to warning (#5019)
Co-authored-by: huangyusong <huangyusong@yingzi.com>
2024-06-12 19:27:01 +08:00
Harry Wang
b399e8a359 fixed a typo and grammar error in sampled app (#5061) 2024-06-12 18:02:22 +08:00
非法操作
e04fc9b304 fix: select field not work when it is not required (#5101) 2024-06-12 17:46:53 +08:00
xielong
ea69dc2a7e feat: support hunyuan llm models (#5013)
Co-authored-by: takatost <takatost@users.noreply.github.com>
Co-authored-by: Bowen Liang <bowenliang@apache.org>
2024-06-12 17:24:23 +08:00
Pika
ecc7f130b4 fix(typo): misspelling (#5094) 2024-06-12 17:01:21 +08:00
zxhlyh
95443bd551 chore: workflow syncing modal (#5108) 2024-06-12 16:35:19 +08:00
sino
0ce97e6315 feat: support doubao llm function calling (#5100) 2024-06-12 15:43:50 +08:00
Bowen Liang
25b0a97851 build: use Poetry as default build system for dependency installation in CI jobs (#5088) 2024-06-12 14:43:03 +08:00
rerorero
28997772a5 fix: remote_url doesn't work for gemini (#5090) 2024-06-12 13:14:53 +08:00
Charlie.Wei
b7c72f7a97 dalle3 add style consistency parameter (#5067)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-12 12:59:03 +08:00
Joel
9f7b38c068 fix: #4970 (#5093) 2024-06-12 11:29:38 +08:00
非法操作
3b36ba797f feat: add duckduckgo img search, translate, ai chat (#5074) 2024-06-12 10:04:10 +08:00
ugyuji
4d2e6c3391 fix: Google HL Parameter for SearchApi (#5071) 2024-06-12 08:30:01 +08:00
Nam Vu
3520d35f38 fix: autoHeightTextarea dimensions in Firefox (#4891) 2024-06-12 08:24:58 +08:00
KVOJJJin
5f104bab57 Fix: infinite loading not work when message is too short (#5075) 2024-06-12 08:23:39 +08:00
orangeclk
2050a8b8f0 feat: add glm4 new models and zhipu embedding-2 (#5089) 2024-06-12 08:22:17 +08:00
takatost
e3544c6ef7 fix: dependency package versions are not synchronized to requirements.txt (#5084) 2024-06-11 22:21:18 +08:00
ra2230
472b976946 Arabic README.md (#5078) 2024-06-11 18:43:54 +08:00
Matri
f62f71a81a build: initial support for poetry build tool (#4513)
Co-authored-by: Bowen Liang <bowenliang@apache.org>
2024-06-11 13:11:28 +08:00
Takuya Ono
f426e1b3bd 🔧 Fix(docker/volumes/ssrf_proxy/squid.conf): The squid process on ssrf_proxy docker service crashes at startup (#5050) 2024-06-11 12:32:05 +08:00
sino
5f870ac950 chore: update maas model provider description (#5056) 2024-06-11 11:22:22 +08:00
Pascal M
415816cf35 feat: add dataset delete endpoint (#5048) 2024-06-11 11:21:38 +08:00
Pascal M
9103112555 fix: wrong link to web app repo in chatflow mode (#5062) 2024-06-11 11:20:52 +08:00
takatost
5986841e27 fix: issue where an error occurs when invoking TTS without selecting a voice (#5046) 2024-06-09 20:28:24 +08:00
Jaxon Ley
2573b138bf fix: update presence_penalty configuration for wenxin AI ernie-4.0-8k and ernie-3.5-8k models (#5039) 2024-06-09 14:44:11 +08:00
Takuya Ono
308ce66af5 🔧 fix docker-compose ssrf_proxy service WARNING: You should probably remove '::/0' from the ACL named 'all' (#5005) 2024-06-09 14:39:52 +08:00
Bowen Liang
bdad993901 improve: generalize vector factory classes and vector type (#5033) 2024-06-08 22:29:24 +08:00
zxhlyh
3b62ab564a feat: feature modal style (#5032) 2024-06-08 07:32:34 +08:00
Pika
d319d9fc5e fix(style): some style issues (#5029) 2024-06-07 20:59:39 +08:00
doufa
ea5c8a72e2 Fix language setting not success (#5023) 2024-06-07 20:02:08 +08:00
Jyong
3b60c28b3a handle external images when extracting docx images (#5024) 2024-06-07 20:00:39 +08:00
KVOJJJin
ea0219a5d5 Fix: z-index in header (#5017) 2024-06-07 16:01:33 +08:00
Jyong
481e7bc6b9 Fix/azure blob new version (#5004) 2024-06-06 23:36:13 +08:00
Charles Zhou
1ccba85c91 fix: modal z-index and cleanup (#4978) 2024-06-06 22:28:13 +08:00
doufa
2539e56514 fix: some base models cannot be selected in Azure OpenAI Service setting page (#4985) 2024-06-06 22:27:57 +08:00
takatost
3929d289e0 feat: set default memory messages limit to infinite (#5002) 2024-06-06 17:39:44 +08:00
Yeuoly
52585aea74 fix: typo in sd3 (#5000) 2024-06-06 17:08:49 +08:00
Yeuoly
73dee84cab fix: add handling for non-string type in variable template parser (#4996) 2024-06-06 16:38:13 +08:00
Joel
efecdccf35 feat: support login by given mail (#4991) 2024-06-06 15:01:58 +08:00
Joel
da5f2e168a fix: llm selector position is incorrect in non-workflow app (#4982) 2024-06-06 10:47:36 +08:00
Joe
5cdb95be1f fix: gemini timeout error (#4955) 2024-06-06 10:19:03 +08:00
Bowen Liang
7fa735a43b chore: rename vdb tests for PGVector and PGvectoRS (#4973) 2024-06-06 07:22:49 +08:00
takatost
3579fd1b09 feat: add create tenant command (#4974) 2024-06-06 00:42:00 +08:00
Jyong
237b8fe3d9 add meta.doc_id index for tidb (#4963) 2024-06-05 20:45:43 +08:00
Jyong
02e4de5166 fix some tidb bugs (#4960) 2024-06-05 19:14:18 +08:00
Mark Sun
64c8093c1e Typo in Knowledge settings (#4958) 2024-06-05 18:31:24 +08:00
Weaxs
0797f9bc05 feat: support tidb vector (#4588) 2024-06-05 18:19:53 +08:00
非法操作
602c4e51ec fix: duckduckgo search does not work (#4949)
Co-authored-by: Jyong <76649700+johnjyong@users.noreply.github.com>
2024-06-05 17:33:58 +08:00
YC
9f8ca75a81 fixing a bug of handling header row when parsing xls file, and tune xls/xlsx parsing result to be more structured (#3600) 2024-06-05 15:28:43 +08:00
Yeuoly
80a87f36ea fix: missing iterator in task pipeline (#4948) 2024-06-05 15:10:20 +08:00
Charles Zhou
63addc9258 fix: missing dataset patch parameters in settings modal (#4901) 2024-06-05 14:21:59 +08:00
Bowen Liang
f32b440c4a chore: fix indentation violations by applying E111 to E117 ruff rules (#4925) 2024-06-05 14:05:15 +08:00
crazywoola
6b6afb7708 fix: import error in web/app/components/header/account-setting/model-provider-page/declarations.ts (#4944) 2024-06-05 14:01:12 +08:00
zxhlyh
a4041cb40b fix: end node limit in next step (#4945) 2024-06-05 14:00:47 +08:00
JasonVV
7749b71fff Optimize knowledge retrieval performance by batching dataset queries. (#4917) 2024-06-05 13:30:32 +08:00
Joel
3006124e6d feat: pricing page add llm load balancing info (#4942) 2024-06-05 11:31:44 +08:00
Harry Wang
3d276f4a7f change "Import from text file" to "Import from file" (#4935) 2024-06-05 09:29:29 +08:00
takatost
b20d173324 perf: optimize feature model_load_balancing_enabled value fetch speed… (#4933) 2024-06-05 02:06:19 +08:00
takatost
f44d1e62d2 fix: bedrock get_num_tokens prompt_messages parameter name err (#4932) 2024-06-05 01:53:05 +08:00
takatost
21ac2afb3a fix: question classifier instruction npe (#4931) 2024-06-05 01:27:58 +08:00
takatost
f7dd327bc2 version to 0.6.10 (#4929) 2024-06-05 01:12:20 +08:00
takatost
09298a32e7 fix: vanna CVE-2024-5565 by disable visualize of ask func (#4930) 2024-06-05 00:46:22 +08:00
Nite Knite
37f292ea91 feat: model load balancing (#4926) 2024-06-05 00:13:29 +08:00
takatost
d1dbbc1e33 feat: backend model load balancing support (#4927) 2024-06-05 00:13:04 +08:00
Yeuoly
52ec152dd3 fix: incorrect parameters transforming while validating (#4928) 2024-06-05 00:01:30 +08:00
Jyong
c7bddb637b support instruction in classifier node (#4913) 2024-06-04 20:07:54 +08:00
Jyong
4e3b0c5aea support rename document (#4915) 2024-06-04 20:07:40 +08:00
Jyong
b6631cd878 modify rerank and splitter code directory (#4924) 2024-06-04 20:07:25 +08:00
Nam Vu
c212700341 fix: router replace in Explore page (#4918) 2024-06-04 19:41:54 +08:00
非法操作
e121788ff5 chore: make the error msg more clear when validate app token (#4919)
Co-authored-by: Jyong <76649700+johnjyong@users.noreply.github.com>
2024-06-04 18:04:10 +08:00
Joel
96460d5ea3 feat: document support rename in dataset (#4732) 2024-06-04 15:10:34 +08:00
Jyong
9cf9720efa Fix/azure blob token expire (#4914) 2024-06-04 14:30:23 +08:00
Henry Lu
2d9f55b632 feat: Add Vanna.AI as a builtin tool (#4878)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-06-04 14:05:29 +08:00
非法操作
7133a16511 chore: refactor the serpapi's google search tool (#4834) 2024-06-04 14:05:05 +08:00
Joel
a38dfc006e feat: question classify node support use var in instruction (#4710) 2024-06-04 14:01:40 +08:00
Moonlit
86e7c7321f Fixed a bug where any content in the 'fetch' was converted to True (#4400) 2024-06-04 13:27:23 +08:00
Bowen Liang
58db719a2c dep: bump pandas from 1.x to 2.x (#4820) 2024-06-04 13:24:28 +08:00
doufa
9abeb99b32 chore: modify tools/JinaReader label to Jina (#4908) 2024-06-04 13:21:05 +08:00
Jyong
d828a7fc35 fix azure blob token expire (#4911) 2024-06-04 13:04:56 +08:00
Ikko Eltociear Ashimine
c6f9ea4434 chore: update page.tsx (#4897) 2024-06-04 10:19:49 +08:00
Bowen Liang
fb6843815c chore: separate style checks into multiple jobs triggering on file changes (#4876) 2024-06-04 03:03:18 +08:00
dependabot[bot]
b97181a793 chore(deps): bump azure-storage-blob from 12.9.0 to 12.13.0 in /api (#4695)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 02:57:33 +08:00
Bowen Liang
5d15aca85f chore: remove unused code and class in text splitter (#4864) 2024-06-04 02:54:09 +08:00
Pan, Wen-Ming
b98a1a3303 feat: added Anthropic Claude3 models to Google Cloud Vertex AI (#4870)
Co-authored-by: pwm <pwm@google.com>
2024-06-04 02:52:46 +08:00
takatost
696c5308a9 chore: optimize nvidia nim credential schema and info (#4898) 2024-06-04 02:26:26 +08:00
Bowen Liang
3542d55e67 improve: generalize tool parameter converter (#4786) 2024-06-03 21:26:58 +08:00
Joshua
3c8a120e51 add-nvidia-nim (#4882) 2024-06-03 21:10:18 +08:00
Bowen Liang
cd24308f20 chore: add issue link template for IDEA (#4866) 2024-06-03 13:39:54 +08:00
Joel
69190e088e fix: update npm version to fix Incorrect argument types in createChatMessage (#4865) 2024-06-03 08:22:27 +08:00
Charlie.Wei
d058a234ba Fixed workflow tts feature audition (#4867) 2024-06-03 00:22:14 +08:00
Kishida Takashi
41e536109b fix: Incorrect argument types in createChatMessage (#4861) 2024-06-02 21:03:42 +08:00
Yeuoly
f916aa0f92 chore: upgrade sandbox (#4839) 2024-06-02 11:30:14 +08:00
Pan, Wen-Ming
cdbc260571 Bugfix: Vertex AI vision model not support image (#4853) 2024-06-02 11:11:09 +08:00
Bowen Liang
b234710af9 chore: fix invalid escape sequences by applying W605 rule (#4851) 2024-06-02 10:02:37 +08:00
Bowen Liang
23498883d4 chore: skip explicit installing jinja2 as testing dependency (#4845) 2024-06-02 09:49:20 +08:00
Bowen Liang
a47e8d0da2 test: CI test for db migration scripts on changes (#4739) 2024-05-31 16:45:34 +08:00
Bowen Liang
6dd0e07af8 test: triggering tests on changes and allow cancelling in-progress CI test jobs (#4743) 2024-05-31 16:42:14 +08:00
doufa
b1c9671a60 fix: status query not stop when leaving document embedding detail page (#4754) 2024-05-31 16:07:48 +08:00
Yeuoly
7aaa1ff270 chore: increase workflow max steps to 500 (#4835) 2024-05-31 15:16:35 +08:00
Yeuoly
85698ca4f7 chore: cleanup tools, remove useless code (#4833) 2024-05-31 14:19:59 +08:00
Oliver Lee
176d91937d fix 'NoneType' and new ContentType supported. (#4818) 2024-05-31 14:19:33 +08:00
Yash Parmar
e0da0744b5 add: ollama keep alive parameter added. issue #4024 (#4655) 2024-05-31 12:22:02 +08:00
zxhlyh
0b4902bdc2 fix: workflow app run (#4831) 2024-05-31 12:15:25 +08:00
xielong
e9904e66e6 chore: Enable case-insensitive search for large models (#4817) 2024-05-31 08:55:37 +08:00
crazywoola
3de8e8fd6a Feat/i18n workflow (#4819) 2024-05-30 21:03:32 +08:00
DomKing
38a470a873 fix: app_count of dataset is wrong when apps were deleted (#4810) 2024-05-30 19:23:46 +08:00
Whitewater
4308a79e89 fix: revision styles for workflow (#4087) 2024-05-30 19:10:14 +08:00
Krasus.Chen
93d3350c8c update sd-webui api parameters to v1.9.3 (#4798)
Co-authored-by: Your Name <chen@krasus.red>
2024-05-30 19:04:47 +08:00
Lesenelir
615c009c42 fix: remove redundant props (#4787) 2024-05-30 18:58:08 +08:00
Charles Zhou
a325a294bd feat: opportunistic tls flag for smtp (#4794) 2024-05-30 18:56:46 +08:00
zxhlyh
4b91383efc feat: workflow variable aggregator support group (#4811)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-05-30 18:54:58 +08:00
paragonnov
18ab63bd37 add: i18n: update korean (#4813) 2024-05-30 17:40:35 +08:00
Joel
a7fb1ffcd8 feat: show more usage info in billing page (#4808) 2024-05-30 16:15:38 +08:00
Joel
11f173693b fix: some fields in model param selector have no left spacing (#4803) 2024-05-30 14:49:41 +08:00
zxhlyh
5b2cd8d03a chore: node help link (#4795) 2024-05-30 14:24:53 +08:00
SebastjanPrachovskij
b10e67be3b Add SearchApi tools (#4648) 2024-05-30 11:11:17 +08:00
Joel
d41c077fac chore: improve node user experience (#4792) 2024-05-30 10:53:02 +08:00
Joel
3175a2c76a fix: tool and http node inside iteration can not show item var correctly (#4791) 2024-05-30 10:40:27 +08:00
Kota-Yamaguchi
3b60b712ec feat: Add logging warning when MAIL_TYPE is not set (#4771) 2024-05-29 18:06:16 +08:00
zeroameli
afed3610fc fix organize agent's history messages without recalculating tokens (#4324)
Co-authored-by: chenyongzhao <chenyz@mama.cn>
2024-05-29 15:25:20 +08:00
Yeuoly
74f38eacda feat: support define tags in tool yaml (#4763) 2024-05-29 15:19:14 +08:00
Weaxs
b189faca52 feat: update ernie model (#4756) 2024-05-29 14:57:23 +08:00
Yeuoly
d4cd6149ac fix: incorrect workflow max call depth (#4759) 2024-05-29 14:52:28 +08:00
xielong
e1cd9aef8f feat: support baichuan3 turbo, baichuan3 turbo 128k, and baichuan4 (#4762) 2024-05-29 14:46:04 +08:00
Yeuoly
ba37275503 fix: confusing chart description (#4760) 2024-05-29 14:36:33 +08:00
非法操作
e01b44af61 style: fix annotation panel display misalignment (#4750) 2024-05-29 14:23:44 +08:00
majian
72a90074bc Add WORKFLOW_CALL_MAX_DEPTH env var. (#4713) 2024-05-29 13:39:11 +08:00
crazywoola
705a6e3a8e Fix/4742 ollama num gpu option not consistent with allowed values (#4751) 2024-05-29 13:33:35 +08:00
非法操作
f4a240d225 style: the 'all' of add tool panel should contain workflow tools (#4755) 2024-05-29 13:04:23 +08:00
xielong
793f0c1dd6 fix: Corrected schema link in model_runtime's README.md (#4757) 2024-05-29 13:03:21 +08:00
Charles Zhou
008edd0eeb fix: optimize sticky header styles z-index in tools - ProviderList component (#4746) 2024-05-29 08:36:11 +08:00
takatost
9e6b6e7b82 fix: workflow run sequence number slow sql (#4737) 2024-05-28 20:41:52 +08:00
xxhong
164d6e47b9 Show tool i18n name on chat panel (#4724) 2024-05-28 18:58:02 +08:00
xielong
88b4d69278 fix: Correct context size for baichuan2-53b and baichuan2-turbo (#4721) 2024-05-28 16:37:44 +08:00
Joel
5bcbcd3c57 fix: retrieval value greater than 1 caused ui problem (#4718) 2024-05-28 16:01:19 +08:00
Jyong
1b2d862973 add error msg for hit test (#4704) 2024-05-28 14:54:53 +08:00
Hash Brown
e6f6a59f3b style: update VarPanel to use whitespace-pre-wrap for value display (#4684) 2024-05-28 14:54:29 +08:00
Yeuoly
e198bc9b9a fix: workflow as tool garbled (#4707) 2024-05-28 14:51:42 +08:00
非法操作
b7f81f0999 fix: the new node name is generated based on the original node when duplicating (#4675) 2024-05-28 13:50:43 +08:00
doufa
eb8dc15ad6 fix: Input fields in the model provider's settings modal do not switch sequence via keyboard navigation (Tab key) (#4662) 2024-05-28 11:34:44 +08:00
Pika
2ee3a1b6f3 fix: key-value-table styles (#4678) 2024-05-28 10:57:40 +08:00
岩本宙士
0960b17fbc Add workflow translations for ja-JP (#4698)
Co-authored-by: crazywoola <427733928@qq.com>
2024-05-28 10:27:35 +08:00
crazywoola
6534566b7e feat: add América/São Paulo tz (#4701) 2024-05-28 10:12:18 +08:00
takatost
e60350d95d version to 0.6.9 (#4692) 2024-05-27 22:48:34 +08:00
Patryk Garstecki
f40743183e 🔧 Add env variable for time signature (#4650) 2024-05-27 22:20:49 +08:00
Yeuoly
e852a21634 Feat/workflow phase2 (#4687) 2024-05-27 22:01:11 +08:00
zxhlyh
45deaee762 feat: workflow new nodes (#4683)
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Patryk Garstecki <patryk20120@yahoo.pl>
Co-authored-by: Sebastian.W <thiner@gmail.com>
Co-authored-by: 呆萌闷油瓶 <253605712@qq.com>
Co-authored-by: takatost <takatost@users.noreply.github.com>
Co-authored-by: rechardwang <wh_goodjob@163.com>
Co-authored-by: Nite Knite <nkCoding@gmail.com>
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
Co-authored-by: Joshua <138381132+joshua20231026@users.noreply.github.com>
Co-authored-by: Weaxs <459312872@qq.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: leejoo0 <81673835+leejoo0@users.noreply.github.com>
Co-authored-by: JzoNg <jzongcode@gmail.com>
Co-authored-by: sino <sino2322@gmail.com>
Co-authored-by: Vikey Chen <vikeytk@gmail.com>
Co-authored-by: wanghl <Wang-HL@users.noreply.github.com>
Co-authored-by: Haolin Wang-汪皓临 <haolin.wang@atlaslovestravel.com>
Co-authored-by: Zixuan Cheng <61724187+Theysua@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Bowen Liang <bowenliang@apache.org>
Co-authored-by: Bowen Liang <liangbowen@gf.com.cn>
Co-authored-by: fanghongtai <42790567+fanghongtai@users.noreply.github.com>
Co-authored-by: wxfanghongtai <wxfanghongtai@gf.com.cn>
Co-authored-by: Matri <qjp@bithuman.io>
Co-authored-by: Benjamin <benjaminx@gmail.com>
2024-05-27 21:57:08 +08:00
Leheng Lu
444fdb79dc fix typo: stopParameerRule -> stopParameterRule (#4681) 2024-05-27 20:42:07 +08:00
Bowen Liang
140dd873f1 fix: show exception message when sandbox execution fails (#4663) 2024-05-27 18:06:15 +08:00
crazywoola
27dae156db fix: colon in file mistral.mistral-small-2402-v1:0 (#4673) 2024-05-27 13:15:20 +08:00
Giovanny Gutiérrez
2deb23e00e fix: Show rerank in system for localai (#4652) 2024-05-27 12:09:51 +08:00
crazywoola
8152bc6fbf Feat/upgrade check i18n scripts (#4671) 2024-05-27 10:36:34 +08:00
Valerio Narcisi
af026c5953 fix: node.js sdk if request is a get data must not exist (#4618) 2024-05-27 08:48:07 +08:00
Nam Vu
11275cbaaf fix: z-index (#4065) 2024-05-27 08:47:27 +08:00
longzhihun
fe9bf5fc4a [seanguo] add support of amazon titan v2 and modify the price of amazon titan v1 (#4643)
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
2024-05-26 23:30:22 +08:00
Charlie.Wei
cd4924d472 Fix tts audition (#4656)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-05-26 21:00:36 +08:00
kronus-ho
f56b984d97 Fix Unnecessary Newline Characters in Extracted Tool Response Text (#4646)
Co-authored-by: kronus <kronus@istarshine.com>
2024-05-25 15:24:59 +08:00
miendinh
f804adbff3 feat: Support for Vertex AI - load Default Application Configuration (#4641)
Co-authored-by: miendinh <miendinh@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-05-25 13:40:25 +08:00
10YearsDiary
109aabc6f2 fix: incorrect handling when http header value contain multiple colons. (#4574) 2024-05-25 13:20:06 +08:00
liuzhenghua
ad620f02c7 fix: the date is incorrect if the db field is timestamp and the TZ is not the UTC (#4624)
Co-authored-by: liuzhenghua-jk <liuzhenghua-jk@360shuke.com>
2024-05-25 13:11:18 +08:00
Charlie.Wei
5bd432a85f Fix tts audition (#4637)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-05-25 12:55:04 +08:00
yalei
026175c8f7 feat: update notion extractor (#3898)
Co-authored-by: duyalei <>
2024-05-24 20:30:48 +08:00
Krasus.Chen
f156014daa update lite8k/speed8k/128k max_token to newest (#4636)
Co-authored-by: Your Name <chen@krasus.red>
2024-05-24 19:33:42 +08:00
takatost
2cd55e456a fix: WORKFLOW_MAX_EXECUTION_STEPS spell error in config.py (#4642) 2024-05-24 16:32:26 +08:00
majian
8c2ca60c8b feat: Add WORKFLOW_MAX_EXECUTION_TIME env var (#4632) 2024-05-24 15:27:12 +08:00
Bowen Liang
3fda2245a4 improve: extract method for safe loading yaml file and avoid using PyYaml's FullLoader (#4031) 2024-05-24 12:08:12 +08:00
Patryk Garstecki
296887754f Support for Vertex AI (#4586) 2024-05-24 12:01:40 +08:00
QuietRocket
9ae72cdcf4 feat: Add Gemini Flash (#4616) 2024-05-24 11:43:06 +08:00
crazywoola
16fec084f5 Fix/4630 bug api suggested (#4633) 2024-05-24 11:11:12 +08:00
crazywoola
10c61da686 feat: add confirm ui (#4625) 2024-05-23 20:15:51 +08:00
Jyong
24624491cd add qdrant metadata.doc_id index when create qdrant collection (#4570) 2024-05-23 18:11:01 +08:00
Jyong
233c4150d1 support images and tables extract from docx (#4619) 2024-05-23 18:05:23 +08:00
Yeuoly
5893ebec55 fix: code node garbled in Javascript (#4615) 2024-05-23 17:18:57 +08:00
takatost
11642192d1 chore: add https://api.openai.com placeholder in OpenAI api base (#4604) 2024-05-23 12:56:05 +08:00
呆萌闷油瓶
e57bdd4e58 chore:update gpt-3.5-turbo and gpt-4-turbo parameter for azure (#4596) 2024-05-23 11:51:38 +08:00
somethingwentwell
461488e9bf Add Azure OpenAI API version for GPT4o support (#4569)
Co-authored-by: wwwc <wwwc@outlook.com>
2024-05-22 17:43:16 +08:00
sino
2988b67c24 fix: hide automatic button on automatic result page (#4494) 2024-05-22 16:44:20 +08:00
zxhlyh
4f62541bfb chore: remove model provider free token link (#4579) 2024-05-22 16:42:49 +08:00
非法操作
24576a39e5 fix: some google search result raise exception (#4567) 2024-05-22 14:28:52 +08:00
Justin Wu
3ab19be9ea Fix bedrock claude wrong pricing (#4572)
Co-authored-by: Justin Wu <justin.wu@ringcentral.com>
2024-05-22 14:28:28 +08:00
naporitan
5b009a5afb chore(api): Use channel from UI as API query parameter (#4562) 2024-05-22 14:28:03 +08:00
非法操作
3efb5fe7e2 Refactor part of the ProviderManager code to improve readability (#4524) 2024-05-22 11:18:03 +08:00
非法操作
ee53f98d8c Hide the copy button when there is no content to copy (#4546) 2024-05-22 11:15:13 +08:00
呆萌闷油瓶
d5a33a0323 feat:add gpt-4o for azure (#4568) 2024-05-22 11:02:43 +08:00
Jyong
f25927855e add qdrant metadata.doc_id index (#4559) 2024-05-22 01:42:08 +08:00
Garfield Dai
c873035084 oauth2 support. (#4551) 2024-05-21 17:52:41 +08:00
Li Yi
f32bba6531 Update requirements.txt with latest OSS package with AuthV4 support (#4425) 2024-05-20 17:26:07 +08:00
Benjamin
40bc936739 chore: update yfinance dependency to version 0.2.40 (#4517) 2024-05-20 16:47:05 +08:00
rennokki
6b5685ef0c feat: Jina Search & Jina Reader CSS selectors (#4523) 2024-05-20 16:40:46 +08:00
Bowen Liang
e8e213ad1e chore: apply and fix flake8-bugbear lint rules (#4496) 2024-05-20 16:34:13 +08:00
Bowen Liang
5f4df34829 improve: generalize transformations and scripts of runner and preloads into TemplateTransformer (#4487) 2024-05-20 15:56:26 +08:00
Rain Chen
c255a20d7c allow to config max segmentation tokens length for RAG document using environment variable (#4375) 2024-05-20 13:20:27 +08:00
majian
b5204111da Add UNSTRUCTURED_API_KEY env support (#4369) 2024-05-20 13:14:17 +08:00
Bowen Liang
3a51f2a778 fix: workaround db migration error when adding custom_disclaimer column to recommended_apps (#4518)
Co-authored-by: takatost <takatost@gmail.com>
2024-05-20 12:33:21 +08:00
Ever
4086f5051c feat:Provide parameter config for mask_sensitive_info of MiniMax mode… (#4294)
Co-authored-by: 老潮 <zhangyongsheng@3vjia.com>
Co-authored-by: takatost <takatost@users.noreply.github.com>
Co-authored-by: takatost <takatost@gmail.com>
2024-05-20 10:15:27 +08:00
sino
5440108431 fix: read llm node's first prompt role by optional chaining (#4510) 2024-05-20 08:13:37 +08:00
Benjamin
46bd53a929 chore: sort categories in recommended app service response (#4498) 2024-05-19 22:44:29 +08:00
Matri
a10c2ccd41 fix: files data missed for message (#4512) 2024-05-19 22:42:52 +08:00
fanghongtai
1cca100a48 fix:modify spelling errors: lanuage ->language in schema.md (#4499)
Co-authored-by: wxfanghongtai <wxfanghongtai@gf.com.cn>
2024-05-19 18:31:05 +08:00
Bowen Liang
04ad46dd31 chore: skip unnecessary key checks prior to accessing a dictionary (#4497) 2024-05-19 18:30:45 +08:00
Patryk Garstecki
aa13d14019 Feat/chat custom disclaimer (#4306) 2024-05-18 10:52:48 +08:00
Zixuan Cheng
b1f003646b Update docker-compose.yaml- New DEBUG variable (#4476)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Bowen Liang <bowenliang@apache.org>
2024-05-18 10:35:28 +08:00
wanghl
e90eccdf92 Fix: HTTP request node PARAMS parameters, if ':' appears in the value… (#4403)
Co-authored-by: Haolin Wang-汪皓临 <haolin.wang@atlaslovestravel.com>
2024-05-18 10:35:01 +08:00
Ikko Eltociear Ashimine
ba06447cd5 chore: update docker-compose.yaml (#4492) 2024-05-18 10:27:12 +08:00
Vikey Chen
9808520992 fix: copy button is always displayed on the chat logs page (#4488) 2024-05-17 22:26:19 +08:00
zxhlyh
8b931b085c fix: app logo (#4483) 2024-05-17 18:02:00 +08:00
sino
528faceb35 fix: cot agent token usage is empty (#4474) 2024-05-17 14:45:20 +08:00
Yeuoly
c2a8fa91b1 fix: cot agent duplicate messages (#4470) 2024-05-17 13:32:02 +08:00
Yeuoly
091fba74cb enhance: claude stream tool call (#4469) 2024-05-17 12:43:58 +08:00
Bowen Liang
083ef2e6fc improve: extract Code Node provider for each supported scripting language (#4164) 2024-05-17 11:58:12 +08:00
zxhlyh
de3a7603ac fix: workflow add next node from knowledge retrieval node (#4467) 2024-05-17 11:42:03 +08:00
jiaqianjing
0ac5d621b6 add llm: ernie-character-8k of wenxin (#4448) 2024-05-16 18:31:07 +08:00
Jyong
bdd409970f fix the wrong env variable AZURE_BLOB_CONTAINER_NAME (#4455) 2024-05-16 18:30:52 +08:00
takatost
d8f38f79f2 feat: add pre ping for sqlalchemy configuration (#4454) 2024-05-16 17:07:21 +08:00
Pika
0f1172f55b fix: self node type shouldn't show in the picker (#4445) 2024-05-16 13:25:09 +08:00
Han Fangyuan
3df47b7b59 fix: wrong category name in examples of question classifier completion prompt (#4421) 2024-05-16 13:04:57 +08:00
sino
6e9066ebf4 feat: support doubao llm and embedding models (#4431) 2024-05-16 11:41:24 +08:00
Garfield Dai
dd94931116 Remove useless code (#4416) 2024-05-15 16:14:49 +08:00
huchengyi
da81233d61 Custom sqlalchemy database uri scheme is supported (#4367) 2024-05-15 15:27:15 +08:00
VoidIsVoid
a76ae2d756 chore: remove useless code in knowledge_retrieval_node (#4412) 2024-05-15 15:24:40 +08:00
Charlie.Wei
97b65f9b4b Optimize webscraper (#4392)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-05-15 15:23:16 +08:00
zxhlyh
c0fe414e0a fix: workflow delete edge when node is selected (#4414) 2024-05-15 15:12:36 +08:00
Yeuoly
182dadd433 chore: remove model as tool (#4409) 2024-05-15 12:25:04 +08:00
非法操作
1d0f88264f Fix HTTP REQUEST NODE is always waiting but endpoint has responded (#4395) 2024-05-15 11:05:46 +08:00
Yash Parmar
332baca538 FIX: fix the temperature value of ollama model (#4027) 2024-05-15 08:05:54 +08:00
Kota Matsumoto
e2a78888b9 Fix: setup google-storage client (#4296)
Co-authored-by: kotamat <kota1681@gmail.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-05-15 08:05:41 +08:00
Shuto Otaki
5d6d0e63c5 docs: Add CONTRIBUTING_JA.md (#4383) 2024-05-15 07:48:19 +08:00
Charles Zhou
2eb468f885 fix: add timeout to SMTPClient to prevent worker blocking (#4352) 2024-05-14 23:44:53 +08:00
kerlion
98140ae5d9 fix the issue of MILVUS_DATABASE has no effect. (#4353) 2024-05-14 19:54:31 +08:00
rennokki
d1ccb22d8a feat: Use Romanian & other langs in QA (#4205)
Co-authored-by: crazywoola <427733928@qq.com>
2024-05-14 17:48:24 +08:00
Joel
66c8070da8 fix: Jinja switch not aligned in vertical direction (#4374) 2024-05-14 16:29:41 +08:00
非法操作
3271e3e803 improve the code readability of http_executor node (#4360) 2024-05-14 16:11:12 +08:00
Yeuoly
16d47923c3 fix: requests timeout (#4370) 2024-05-14 16:01:23 +08:00
zxhlyh
6f1633fa75 fix: delete end node (#4372) 2024-05-14 15:51:08 +08:00
leejoo0
08e4103fa1 Create README_KR.md (#4364) 2024-05-14 15:36:03 +08:00
takatost
eee95190cc version to 0.6.8 (#4347) 2024-05-14 03:18:26 +08:00
Yeuoly
e8311357ff feat: gpt-4o (#4346) 2024-05-14 02:52:41 +08:00
sino
0f14fdd4c9 fix: handleUpdateWorkflowCanvas is not a function (#4343) 2024-05-13 20:36:23 +08:00
orangeclk
ece0f08a2b add yi models (#4335)
Co-authored-by: 陈力坤 <likunchen@caixin.com>
2024-05-13 17:40:53 +08:00
paragonnov
5edb3d55e5 feat: i18n: add korean language (ko-KR) (#4333) 2024-05-13 15:20:44 +08:00
非法操作
63382f758e fix typo (#4329) 2024-05-13 15:20:16 +08:00
Yeuoly
bbef964eb5 improve: code upgrade (#4231) 2024-05-13 14:39:14 +08:00
Ikko Eltociear Ashimine
e6db7ad1d5 chore: update gmpy2_pkcs10aep_cipher.py (#4314) 2024-05-13 10:45:29 +08:00
Weaxs
8cc492721b fix: minimax streaming function_call message (#4271) 2024-05-11 21:07:22 +08:00
Joshua
a80fe20456 add-some-new-models-hosted-on-nvidia (#4303) 2024-05-11 21:05:31 +08:00
Chenhe Gu
f7986805c6 Update README.md to remove outdated badge (#4302) 2024-05-11 20:48:15 +08:00
Joel
aa5ca90f00 fix: text generation app not show copy button (#4304) 2024-05-11 20:39:17 +08:00
Joel
4af00e4a45 feat: support copy run text result in debug panel in workflow (#4300) 2024-05-11 16:59:17 +08:00
Joel
c01c95d77f fix: chatflow run progress problem (#4298) 2024-05-11 16:23:31 +08:00
Nite Knite
20a9037d5b fix: align versions of react typing package (#4297) 2024-05-11 15:39:56 +08:00
Joel
34d3998566 fix: webapps not show number type input field (#4292) 2024-05-11 14:42:04 +08:00
rechardwang
198d6c00d6 Update docker-compose.yaml (#4288) 2024-05-11 13:41:12 +08:00
Joel
1663df8a05 feat: hide run detail in webapps and installed apps (#4289) 2024-05-11 13:40:27 +08:00
takatost
d8926a2571 feat: hide node detail outputs in webapp & installed app in explore (#3954) 2024-05-11 13:40:11 +08:00
呆萌闷油瓶
4796f9d914 feat:add gpt-4-turbo for azure (#4287) 2024-05-11 13:02:56 +08:00
Sebastian.W
a588df4371 Add rerank model type for LocalAI provider (#3952) 2024-05-11 11:29:28 +08:00
Patryk Garstecki
2c1c660c6e fix(Backend:http_executor): 🔧 prevent splitting JSON data as v… (#4276) 2024-05-11 11:23:35 +08:00
zxhlyh
13f4ed6e0e fix: workflow zoomin/out shortcuts (#4283) 2024-05-11 10:38:12 +08:00
YidaHu
1e451991db fix: deutsch edit app (#4270) 2024-05-11 10:07:54 +08:00
Louie Long
749b236d3d fix: do nothing if switch to current app (#4249)
Co-authored-by: langyong <langyong@lixiang.com>
2024-05-11 08:50:46 +08:00
TinsFox
00ce372b71 fix: hook dependency (#4242) 2024-05-11 08:43:37 +08:00
Patryk Garstecki
370e1c1a17 fix(frontend): 🔧 add privacy policy spaces (#4277) 2024-05-11 08:42:03 +08:00
KiyotakaMatsushita
28495273b4 feat: Add storage type and Google Storage settings to worker (#4266) 2024-05-10 18:54:08 +08:00
Whitewater
36a9c5cc6b fix: remove unexpected zip and add FlipForward arrow icon (#4263) 2024-05-10 18:52:41 +08:00
Bowen Liang
228de1f12a fix: miss usage of os.path.join for URL assembly and add tests on yarl (#4224) 2024-05-10 18:14:48 +08:00
Joel
01555463d2 feat: llm support jinja fe (#4260) 2024-05-10 18:14:05 +08:00
GalvinYang
6b99075dc8 fix: system default model name length (#4245) (#4246)
Co-authored-by: takatost <takatost@gmail.com>
2024-05-10 18:12:18 +08:00
Yeuoly
8578ee0864 feat: support LLM jinja2 template prompt (#3968)
Co-authored-by: Joel <iamjoel007@gmail.com>
2024-05-10 18:08:32 +08:00
Jyong
897e07f639 question classifier prompt optimize (#4262) 2024-05-10 17:22:46 +08:00
LiuVaayne
875249eb00 Feat/vector db pgvector (#3879) 2024-05-10 17:20:30 +08:00
Chenhe Gu
4d5a4e4cef correct comparison chart (#4254) 2024-05-10 14:54:38 +08:00
takatost
86a6e6bd04 feat: increase max steps to 50 in workflow (#4252) 2024-05-10 14:50:00 +08:00
takatost
8f3042e5b3 feat: Add draft hash check in workflow (#4251) 2024-05-10 14:48:29 +08:00
zxhlyh
a1ab87107b chore: workflow sync with hash (#4250) 2024-05-10 14:48:20 +08:00
zxhlyh
f49c99937c fix: workflow end node deletion (#4240) 2024-05-10 10:38:05 +08:00
zxhlyh
9b24f12bf5 feat: workflow interaction (#4214) 2024-05-09 17:18:51 +08:00
crazywoola
487ce7c82a fix: add missing translations (#4212) 2024-05-09 15:38:51 +08:00
TinsFox
cc835d523c refactor: install form (#4154) 2024-05-09 15:38:09 +08:00
takatost
64c3bc070a version to 0.6.7 (#4208) 2024-05-09 13:58:25 +08:00
faukwaa
7405b2e819 modify spelling errors: bulild -> build (#4206) 2024-05-09 13:49:19 +08:00
Louie Long
ca5081e327 fix delete log annotation (#4201)
Co-authored-by: langyong <langyong@lixiang.com>
2024-05-09 12:53:06 +08:00
tomo
a79941df22 fix: button widths (#4145) 2024-05-09 12:52:07 +08:00
zxhlyh
8137d63000 fix: workflow http node timeout & url check (#4175) 2024-05-08 13:20:26 +08:00
sino
4aa21242b6 feat: add volcengine maas model provider (#4142) 2024-05-08 12:45:53 +08:00
Yong723
8ce93faf08 Typo on deepseek.yaml and yi.yaml (#4170) 2024-05-08 10:52:04 +08:00
SASAKI Haruki
903ece6160 Fix:typo Incorrect Japanese 2 (#4167) 2024-05-08 09:04:37 +08:00
Su Yang
9f440c11e0 feat: DeepSeek (#4162) 2024-05-08 00:28:16 +08:00
Joshua
58bd5627bf Add-Deepseek (#4157) 2024-05-07 22:45:38 +08:00
Whitewater
97dcb8977a fix: stop event propagation when deleting selected workflow var node (#4158) 2024-05-07 21:00:43 +08:00
Moonlit
2fdd64c1b5 feat: add proxy configuration for Cohere model (#4152) 2024-05-07 18:12:13 +08:00
Jyong
591b993685 fix dataset segment update api not effect issue (#4151) 2024-05-07 17:47:20 +08:00
VoidIsVoid
543a00e597 feat: update model_provider jina to support custom url and model (#4110)
Co-authored-by: Gimling <huangjl@ruyi.ai>
Co-authored-by: takatost <takatost@gmail.com>
2024-05-07 17:43:24 +08:00
Minamiyama
f361c7004d feat: support vision models from xinference (#4094)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-05-07 17:37:36 +08:00
Tomy
bb7c62777d Add support for local ai speech to text (#3921)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-05-07 17:14:24 +08:00
Yeuoly
d51f52a649 fix: http authorization leakage (#4146) 2024-05-07 16:56:25 +08:00
Jyong
e353809680 question classifier optimize (#4147) 2024-05-07 16:44:27 +08:00
takatost
c2f0f958ef fix: passing in 0 as a numeric variable will be converted to null (#4148) 2024-05-07 16:38:23 +08:00
Charlie.Wei
087b7a6607 azure_openai add gpt-4-turbo-2024-04-09 model (#4144)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-05-07 15:55:23 +08:00
Patryk Garstecki
6271463240 feat(Languages): 👽 add pl-PL language (#4128) 2024-05-07 15:41:57 +08:00
Weaxs
6f1911533c bug fix: update minimax model_apis (#4116) 2024-05-07 14:40:24 +08:00
Yeuoly
d5d8b98d82 feat: support openai stream usage (#4140) 2024-05-07 13:49:45 +08:00
Yeuoly
e7fe7ec0f6 feat: support time format (#4138) 2024-05-07 13:02:00 +08:00
Bowen Liang
049abd698f improve: test CodeExecutor with code templates and extract CodeLanguage enum (#4098) 2024-05-07 12:37:18 +08:00
Fyphen
45d21677a0 Improved Japanese translation (#4119) 2024-05-07 12:25:01 +08:00
Yeuoly
76bec6ce7f feat: add http node max size env (#4137) 2024-05-07 12:07:56 +08:00
Pascal M
6563cb6ec6 fix: prevent http node overwrite on open (#4127) 2024-05-07 10:08:18 +08:00
S96EA
13cd409575 feat: support aliyun oss auth v4 (#3886)
Co-authored-by: owen <owen@owen.hawk-toad.ts.net>
2024-05-06 11:56:04 +08:00
羊羽
13292ff73e 🦄 refactor(dataset svc): delete check none (#4101)
Co-authored-by: baxiang <baxiang@lixiang.com>
2024-05-06 11:45:26 +08:00
Ikko Eltociear Ashimine
3f8e2456f7 fix: typo in get-automatic-res.tsx (#4097) 2024-05-06 11:36:19 +08:00
Buddypia
822ee7db88 fix: correct the license link (#4093) 2024-05-06 11:35:16 +08:00
Rhon Joe
94a650475d improve: menu collapse readability (#4099)
Co-authored-by: rongjun.qiu <qiurj@hengtonggroup.com.cn>
2024-05-06 11:34:56 +08:00
Shoma Sakamoto
03cf00422a Urgent Correction: Resolving Critical License Documentation Error in Dify's Japanese README (#4075)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-05-06 11:28:32 +08:00
Joshua
51a9e678f0 Leptonai integrate (#4079) 2024-05-05 14:37:47 +08:00
chenx5
ad76ee76a8 Update bedrock.yaml add Region Asia Pacific (Sydney) (#4016) 2024-05-05 10:49:17 +08:00
TinsFox
630136b5b7 Revert "fix: hydration warning (#3897)" (#4059) 2024-05-04 18:00:23 +08:00
Yeuoly
b5f101bdac fix: transform None into correct dest type (#4077) 2024-05-04 16:34:42 +08:00
Pan YANG
5940564d84 feat: add a new built-in tool of Slack Incoming Webhook (#4067) 2024-05-04 16:17:34 +08:00
Yeuoly
67902b5da7 fix: agent log timezone (#4076) 2024-05-04 16:17:15 +08:00
KVOJJJin
c0476c7881 Feat: frontend support timezone of timestamp (#4070) 2024-05-04 16:15:32 +08:00
Shohei Tanabe
f68b6b0e5e Fix typo: writeOpner -> writeOpener (#4060) 2024-05-03 18:55:47 +08:00
Bowen Liang
44857702ae test: add integration tests on CodeExecutor with the sandbox service (#4015) 2024-05-03 08:54:40 +08:00
sino
b1399cd5f9 fix: unable to fetch CoT agent runner log (#4052) 2024-05-03 08:54:15 +08:00
Garfield Dai
6f1e4a19a2 fix: workflow avg user interaction. (#4056) 2024-05-02 20:24:40 +08:00
takatost
93393e005e version to 0.6.6 (#4050) 2024-05-02 16:06:40 +08:00
Bowen Liang
4ea2755fce test: remove explicit env settings for CI pytests (#4041) 2024-05-02 00:49:39 +08:00
dependabot[bot]
ecb51a83d4 chore(deps): bump semver from 5.7.1 to 5.7.2 in /web (#4022)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 18:47:05 +08:00
Bowen Liang
093b5c0e63 fix: typo of jinja2 (#4019) 2024-04-30 18:39:02 +08:00
Joel
bf42b0ae44 fix: lodash version has warning (#4020)
Co-authored-by: nite-knite <nkCoding@gmail.com>
2024-04-30 18:11:49 +08:00
dependabot[bot]
342b4fd19d chore(deps): bump word-wrap from 1.2.3 to 1.2.5 in /web
Signed-off-by: dependabot[bot] <support@github.com>
2024-04-30 09:39:10 +00:00
orangeclk
cbdb861ee4 add glm-3-turbo max_tokens parameter setting (#4017)
Co-authored-by: 陈力坤 <likunchen@caixin.com>
2024-04-30 17:08:04 +08:00
sino
da5a8b9a59 feat: support question classifier node output (#4000) 2024-04-30 17:07:29 +08:00
Weaxs
1e6e8b446d feat: support minimax abab6.5, abab6.5s (#4012) 2024-04-30 17:02:01 +08:00
Joel
c1fdaa6ae0 fix: prompt undefined caused match problem (#4010) 2024-04-30 16:31:36 +08:00
Bowen Liang
142814d451 chore: skip deprecated field_schema param in creating payload index on Qdrant (#3903) 2024-04-30 16:16:10 +08:00
crazywoola
704755d005 fix: submitCodeExecutionTask (#4006) 2024-04-30 16:01:03 +08:00
Richards Tu
d1263700c0 Update the description and labels in Judge0ce tool (#3990)
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-30 14:58:29 +08:00
Rhon Joe
0704fe9695 fix(web): copy button visible at chat page normally (#4005)
Co-authored-by: rongjun.qiu <qiurj@hengtonggroup.com.cn>
2024-04-30 14:55:57 +08:00
Kei YAMAZAKI
1d3f1d88ef Enabled Notion integration setup in Docker Compose Deployment (#3919) 2024-04-30 14:48:39 +08:00
zxhlyh
8b3edac091 fix: prompt editor insert quickly (#4004) 2024-04-30 14:25:21 +08:00
zxhlyh
05cab85579 fix: workflow disable shortcuts when feature panel occured (#4001) 2024-04-30 13:35:49 +08:00
Yeuoly
b72fbe200d chore: add sandbox tag (#3997) 2024-04-30 12:35:19 +08:00
crazywoola
b1194da6a5 fix: ci (#3983) 2024-04-29 18:59:37 +08:00
Jyong
338e4669e5 add storage factory (#3922) 2024-04-29 18:22:03 +08:00
crazywoola
c5e2659771 Feat/install process refinement (#3982) 2024-04-29 17:55:52 +08:00
Jyong
1d432728ac add default value for QDRANT_GRPC_PORT (#3976) 2024-04-29 15:28:34 +08:00
KVOJJJin
2fd702a319 Fix: password check in page of install (#3978) 2024-04-29 15:27:45 +08:00
Richards Tu
f26ad16af7 Add new tool: Firecrawl (#3819)
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-04-29 14:20:36 +08:00
majian
8f2ae51fe5 feat: add support for request timeout settings in the HTTP request node. (#3854)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-04-29 13:59:07 +08:00
Joshua
2f84d00300 fix-nvidia-llama3 (#3973) 2024-04-29 13:41:15 +08:00
takatost
b82a2d97ef fix: db connections not being released during workflow execution (#3971) 2024-04-29 12:42:09 +08:00
Jyong
3e9dbe3e0a add pgvecto_rs support and upgrade SQLAlchemy (#3833) 2024-04-29 11:58:17 +08:00
羊羽
975b2fb79e delete duplicate check get_dataset (#3966)
Co-authored-by: baxiang <baxiang@lixiang.com>
2024-04-29 11:57:26 +08:00
Joel
fa509ce64e feat: rename var name sync to used jinja code (#3964) 2024-04-29 11:34:30 +08:00
TinsFox
99292edd46 chore: update @types/react (#3939) 2024-04-28 19:01:09 +08:00
Joel
3e992cb23c feat: code transform node editor support insert var by add slash or left brace (#3946)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2024-04-28 17:51:58 +08:00
Yeuoly
e7b4d024ee optimize: code node has a bad error message (#3949) 2024-04-28 17:40:29 +08:00
takatost
ff67a6d338 feat: llm text stream support for workflow app (#3798)
Co-authored-by: JzoNg <jzongcode@gmail.com>
2024-04-28 17:37:00 +08:00
zxhlyh
8e4989ed03 feat: workflow remove preview mode (#3941) 2024-04-28 17:09:56 +08:00
呆萌闷油瓶
0940f01634 enhancement:support Qdrant gRPC mode (#3929) 2024-04-28 15:33:32 +08:00
majian
9d1cb1bc92 improvement: Optimizing the experience of the app list page (#3885) 2024-04-28 13:52:45 +08:00
Pascal M
0ca4e30b19 feat: add start commands to devcontainer (#3902) 2024-04-28 12:30:56 +08:00
Joel
ba88f8a6f0 fix: code full screen in web app cause error (#3935) 2024-04-28 11:59:57 +08:00
studyinglover
aefe0cbf51 fix: api doc example error (#3925) 2024-04-28 10:18:07 +08:00
miendinh
9ad489d133 feat: Add google storage support (#3887)
Co-authored-by: miendinh <miendinh@users.noreply.github.com>
2024-04-27 18:26:52 +08:00
Bowen Liang
661b30784e chore: skip warning messages when pytest auto-collecting the vdb test class by removing Test prefix (#3906) 2024-04-27 16:36:09 +08:00
longzhihun
43a5ba9415 feat: add support for Bedrock LLAMA3 (#3890) 2024-04-27 13:13:09 +08:00
TinsFox
08a65d74d5 fix: hydration warning (#3897) 2024-04-26 21:34:29 +08:00
Garfield Dai
cefe156811 feat: replicate supports default version. (#3884) 2024-04-26 21:16:22 +08:00
yongjer
3b5b4d628b Add support for Traditional Chinese language (#3899)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-26 21:10:23 +08:00
TinsFox
8746e48df0 chore: integrate code-inspector-plugin (#3900) 2024-04-26 21:00:29 +08:00
Jyong
0ec8b57825 add together ai model setting (#3895) 2024-04-26 20:43:17 +08:00
Bowen Liang
045827043d test: improve vector store tests (#3855) 2024-04-26 19:18:42 +08:00
yalei
4d66a86579 fix: fetch page name of notion wiki (#3847) 2024-04-26 18:04:37 +08:00
miendinh
2a8881d0e8 fix: tool webscraper - too many redirects in case target url does not… (#3831)
Co-authored-by: miendinh <miendinh@users.noreply.github.com>
2024-04-26 17:58:46 +08:00
akou
ffc60bb917 add the comment in entrypoint.sh (#3882) 2024-04-26 17:19:49 +08:00
Ever
2e454c770b fix: copy invite link for HTTPS has duplicate origin (#3877) 2024-04-26 15:19:30 +08:00
Joel
7d711135bc fix: full screen editor not follow panel width (#3876) 2024-04-26 14:23:13 +08:00
Charlie.Wei
f62b2b5b45 optimize the knowledge failed documents query (#3870)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-04-26 11:47:23 +08:00
Bowen Liang
7919596a21 fix: UP031 style rule violation (#3866) 2024-04-26 11:24:08 +08:00
Joel
9b4898efeb fix: chat api doc not show title in english version (#3864) 2024-04-26 10:32:45 +08:00
Bowen Liang
45dd1683fd test: add tests covering all methods of vector store (#3849) 2024-04-25 22:27:30 +08:00
takatost
8bca908f15 refactor: config file (#3852) 2024-04-25 22:26:45 +08:00
Garfield Dai
9cbb8ddd7f fix: billing tenant account role. (#3850) 2024-04-25 21:55:08 +08:00
Jingpan Xiong
1be222af2e fix: using api can not execute relyt vector database (#3766)
Co-authored-by: jingsi <jingsi@leadincloud.com>
2024-04-25 19:46:20 +08:00
Richards Tu
bf9fc8fef4 Reduce tool redundancy for [Judge0 CE] (#3837)
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-25 19:20:54 +08:00
Bowen Liang
86e7330fa2 test: refactor vdb tests by visitor design pattern (#3838) 2024-04-25 18:55:49 +08:00
takatost
34bfb715e1 fix: citations always appear in the chatflow app (#3844) 2024-04-25 18:31:38 +08:00
Joel
019d7069f8 fix: debug run not show total right tokens (#3843) 2024-04-25 18:22:30 +08:00
Bowen Liang
c54fcfb45d extract enum type for tenant account role (#3788) 2024-04-25 18:20:08 +08:00
zxhlyh
cde87cb225 fix: model parameter default value (#3841) 2024-04-25 18:04:37 +08:00
takatost
12435774ca feat: query prompt template support in chatflow (#3791)
Co-authored-by: Joel <iamjoel007@gmail.com>
2024-04-25 18:01:53 +08:00
Henrybit
80b9507e7a feat: add aliyun oss storage (#3690)
Co-authored-by: henrybit <qipenghui3056@sina.com>
2024-04-25 16:57:19 +08:00
takatost
0ac0f0ffd0 version to 0.6.5 (#3834) 2024-04-25 16:50:37 +08:00
KVOJJJin
3d14aba4b4 Fix: event of click away in message-log-modal (#3828) 2024-04-25 15:58:03 +08:00
Leon cap
64f694865c Update EN,KL,JA,FR,ES documentation Llama2 to Llama3 model support (#3827) 2024-04-25 15:52:00 +08:00
zxhlyh
d36b728088 fix: workflow sync data (#3824) 2024-04-25 14:02:06 +08:00
KVOJJJin
1a7b4c42ab fix: event of keyboard "enter" in text generator app (#3823) 2024-04-25 13:58:06 +08:00
Joel
2a64ce740e chore: remove anthropic pay entrance (#3822) 2024-04-25 13:18:59 +08:00
呆萌闷油瓶
78988ed60e fix:still enable SSL verification when using qdrant based on HTTP protocol (#3805) 2024-04-25 13:04:31 +08:00
Yeuoly
2832adda88 fix: missing url field when searching special keywords (#3820) 2024-04-25 12:33:58 +08:00
takatost
a4e4fb4094 fix: credentials validate failed for groqcloud model provider (#3817) 2024-04-25 12:09:44 +08:00
YidaHu
777ec64635 feat: add log_file environment variable (#3793) 2024-04-24 21:55:14 +08:00
Bowen Liang
9cec8c1750 test: add unit tests for vector stores of Milvus, Qdrant and Weaviate (#3688) 2024-04-24 21:52:42 +08:00
Bowen Liang
8ca5aa1190 use pymilvus 2.3.7 (#3790) 2024-04-24 18:37:08 +08:00
takatost
4d8f1b9ca4 feat: test all unit tests (#3787)
Co-authored-by: Joel <iamjoel007@gmail.com>
2024-04-24 17:33:01 +08:00
takatost
3da179f77b feat: add conversation_id and user_id in chatflow/workflow system vars (#3771)
Co-authored-by: Joel <iamjoel007@gmail.com>
2024-04-24 17:20:01 +08:00
Bowen Liang
a34e8cb0bd test: add test for PKCS1OAEP_Cipher with gmpy2 (#3760) 2024-04-24 17:15:31 +08:00
KVOJJJin
b249767c5c Fix: redirection of app remove (#3770) 2024-04-24 17:11:51 +08:00
Joel
89a7434565 fix: handle inputs show the focus ui together in tools node (#3763) 2024-04-24 15:53:07 +08:00
ugyuji
3b537cbdeb fix: endpoint for 'Update a document from a file' (#3751) 2024-04-24 15:25:53 +08:00
zxhlyh
731464f5b8 fix: workflow sync (#3756) 2024-04-24 15:19:19 +08:00
Joel
1ad70f8721 feat: support prompt messages sorting (#3757) 2024-04-24 15:09:01 +08:00
takatost
2ea8c73cd8 fix: type num of variable converted to str (#3758) 2024-04-24 15:07:56 +08:00
Jyong
f257f2c396 Knowledge optimization (#3755)
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: JzoNg <jzongcode@gmail.com>
2024-04-24 15:02:29 +08:00
Joel
3cd8e6f5c6 fix: llm editor readonly cover error (#3752) 2024-04-24 13:28:22 +08:00
KVOJJJin
0715db7681 chore: add selector for use app store (#3746) 2024-04-24 13:07:20 +08:00
zxhlyh
a39de8a686 fix: workflow restore (#3750) 2024-04-24 13:05:33 +08:00
Bowen Liang
ccaf335466 fix: rollback gmpy2 to 2.1.5 (#3745) 2024-04-24 12:53:23 +08:00
legao
40e36e9b52 fix: toggling AppDetailNav causes unnecessary component rerenders (#3718) 2024-04-24 12:07:28 +08:00
zxhlyh
9eebe9d54e fix: workflow node variable (#3743) 2024-04-24 11:41:12 +08:00
crazywoola
a23a191615 feat: add copy button to code (#3719) 2024-04-24 09:34:51 +08:00
Leo Q
7d9c5586f9 Update "@formatjs/intl-localematcher" to version 0.5.4 in package.json (#3726) 2024-04-24 09:06:23 +08:00
Ikko Eltociear Ashimine
f07c89bba4 Update README_JA.md (#3727) 2024-04-24 09:04:27 +08:00
1102
59cba930e5 bedrock llm Model file name change (#3714)
Co-authored-by: heshunchang <shuncanghe@clouditera.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-23 18:57:34 +08:00
zxhlyh
39ae56e136 fix: workflow connection (#3713) 2024-04-23 18:02:15 +08:00
Joel
f92130338b feat: prompt editor support auto height by content height and fix some bugs (#3712) 2024-04-23 17:46:59 +08:00
Bowen Liang
2867d29021 fix: milvus usage with create_collection (#3683) 2024-04-23 17:37:40 +08:00
呆萌闷油瓶
f76ac8bdee enhance:speedup xinference audio transcription (#3636) 2024-04-23 17:09:30 +08:00
zxhlyh
83caffe000 fix: workflow restore (#3711) 2024-04-23 17:02:23 +08:00
Luvian77
96160837d2 fix: cannot change file uploader method (#3710) 2024-04-23 17:02:12 +08:00
Yeuoly
3480f1c59e refactor: tool parameter cache (#3703) 2024-04-23 15:22:42 +08:00
zxhlyh
65ac4f69af fix: workflow shortcuts (#3701) 2024-04-23 14:45:57 +08:00
Yeuoly
2c50fab3dd fix: skip dataset icon (#3696) 2024-04-23 12:41:41 +08:00
Carson Kahn
9525ccac4f Localize links to localized READMEs (#3689) 2024-04-23 09:30:32 +08:00
Richards Tu
ff76c4bd5d Add new tool: Judge0 CE (#3684)
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-23 09:07:21 +08:00
Luvian77
5dacf77627 fix: Added prevention of click event propagation for overlay layer (#3666)
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-22 19:53:20 +08:00
Yeuoly
2a213c6af7 fix: incorrect type parser (#3682) 2024-04-22 19:32:41 +08:00
Bowen Liang
b2535e7db6 chore: update description of code interpreter tool (#3679)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-04-22 19:19:16 +08:00
longzhihun
28236147ee feat: add support for bedrock Mistral AI model (#3676)
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
2024-04-22 17:24:02 +08:00
Chenhe Gu
4969783383 add groq llama3 (#3673) 2024-04-22 15:21:09 +08:00
takatost
b64080be1b version to 0.6.4 (#3670) 2024-04-22 12:13:31 +08:00
Bowen Liang
aadebd6d23 python 3.12 support (#3652) 2024-04-22 11:41:13 +08:00
xin.gao
71cc0074ef fix: delete tool parameters cache when sync draft workflow for run workflow use new parameter change in draft workflow (#3637) 2024-04-22 11:12:00 +08:00
takatost
d77f52bf85 Optimize README_CN (#3660) 2024-04-21 17:59:53 +08:00
Joel
b71163706b fix: workflow_run_id not log_id in workflow api doc (#3658) 2024-04-21 14:48:07 +08:00
saga.rey
1fb7df12d7 fix: in alembic's offline mode (db migrate with --sql option), skip data operations (#3533) 2024-04-21 09:44:35 +08:00
rmmedia
b3996b3221 Fix problem with scroll inside chat window (#3578) 2024-04-21 09:39:24 +08:00
YidaHu
7251748d59 fix: validate languages (#3638) 2024-04-20 10:50:10 +08:00
liuzhenghua
73e9f35ab1 feat: add file log (#3612)
Co-authored-by: liuzhenghua-jk <liuzhenghua-jk@360shuke.com>
2024-04-20 08:59:49 +08:00
Richards Tu
d7f0056e2d Fix error in [Update yaml and py file in Tavily Tool] (#3465)
Co-authored-by: Yeuoly <admin@srmxy.cn>
2024-04-19 16:51:51 +08:00
fuckqqcom
9b7b133cbc content fix to continue (#3633)
Co-authored-by: xiaohan <fuck@qq.com>
2024-04-19 16:51:38 +08:00
Joshua
7545e5de6c add-llama3-for-nvidia-api-catalog (#3631) 2024-04-19 14:51:22 +08:00
Yeuoly
a0c30702c1 feat: moonshot fc (#3629) 2024-04-19 14:04:30 +08:00
zxhlyh
03c988388e fix: chat rename (#3627) 2024-04-19 13:29:25 +08:00
sqj8899
0a56c522eb get dict key indexing_technique in DocumentAddByFileApi (#3615)
Co-authored-by: songqijun <songqijun@qipeng.com>
2024-04-19 09:37:11 +08:00
jeessy2
646858ea08 feat: Vision switch functionality is provided on OpenRouter (#3564) 2024-04-19 09:13:25 +08:00
Bowen Liang
d9b821cecc chore: apply ruff rules on tests and app.py (#3605) 2024-04-18 20:24:05 +08:00
Yeuoly
d5448e07ab seucirty: http smuggling (#3609) 2024-04-18 18:18:42 +08:00
Joel
3aa182e26a fix: copy invite link has duplicated origin (#3608) 2024-04-18 17:56:07 +08:00
Joshua
de3b490f8e Add mixtral 8x22b (#3606) 2024-04-18 17:44:22 +08:00
Garfield Dai
4481906be2 Feat/enterprise sso (#3602) 2024-04-18 17:33:32 +08:00
Yeuoly
d9f1a8ce9f feat: stable diffusion 3 (#3599) 2024-04-18 16:54:37 +08:00
aniaan
aa6d2e3035 fix(openai_api_compatible): fixing the error when converting chunk to json (#3570) 2024-04-18 16:54:16 +08:00
呆萌闷油瓶
4365843c20 enhance:speedup xinference embedding & rerank (#3587) 2024-04-18 16:54:00 +08:00
Matheus Mondaini
b4d2d635f7 docs: Update README.md (#3577) 2024-04-18 13:55:42 +08:00
Joshua
b9b28900b1 add-open-mixtral-8x22b (#3591) 2024-04-18 13:48:32 +08:00
Bowen Liang
d463b82aba test: add scripts for running tests on api module both locally and CI jobs (#3497) 2024-04-18 13:43:15 +08:00
Joel
ed861ff782 fix: json in raw text sometimes changed back to key value in HTTP node (#3586) 2024-04-18 12:08:18 +08:00
KVOJJJin
8cc1944160 Fix: use debounce for switch (#3585) 2024-04-18 11:54:54 +08:00
Joel
80e390b906 feat: add workflow api in Node.js sdk (#3584) 2024-04-18 11:23:18 +08:00
Yeuoly
c2acb2be60 feat: code (#3557) 2024-04-18 08:00:02 +08:00
Siddharth Jain
8ba95c08a1 added claude 3 opus (#3545) 2024-04-17 20:53:59 +08:00
Yeuoly
c7de51ca9a enhance: preload general packages (#3567) 2024-04-17 19:49:53 +08:00
liuzhenghua
e02ee3bb2e fix event/stream ping (#3553) 2024-04-17 18:28:24 +08:00
Jyong
394ceee141 optimize question classifier prompt and support keyword hit test (#3565) 2024-04-17 17:40:40 +08:00
Joel
40b48510f4 feat: economical index support retrieval testing (#3563) 2024-04-17 17:40:28 +08:00
Joel
be3b37114c fix: tool node show output text variable type error (#3556) 2024-04-17 15:26:18 +08:00
Yeuoly
e212a87b86 fix: json-reader-json-output (#3552) 2024-04-17 14:09:42 +08:00
takatost
b890c11c14 feat: filter empty content messages in llm node (#3547) 2024-04-17 13:30:33 +08:00
zxhlyh
2e27425e93 fix: workflow delete edge (#3541) 2024-04-17 11:09:43 +08:00
Bowen Liang
6269e011db fix: typo of PublishConfig (#3540) 2024-04-17 10:45:26 +08:00
KVOJJJin
e70482dfc0 feat: agent log (#3537)
Co-authored-by: jyong <718720800@qq.com>
2024-04-17 10:30:52 +08:00
takatost
9b8861e3e1 feat: increase read timeout of OpenAI Compatible API, Ollama, Nvidia LLM (#3538) 2024-04-17 09:25:50 +08:00
LeePui
38ca3b29b5 add support for swagger object type (#3426)
Co-authored-by: lipeikui <lipeikui@3vjia.com>
2024-04-16 19:54:17 +08:00
Bowen Liang
066076b157 chore: lint .env file templates (#3507) 2024-04-16 19:53:54 +08:00
buu
be27ac0e69 fix: the hover style of the card-item operation button container (#3520) 2024-04-16 18:09:06 +08:00
Jyong
9e6d4eeb92 fix the return with wrong datatype of segment (#3525) 2024-04-16 17:09:15 +08:00
1812 changed files with 84459 additions and 13102 deletions

View File

@@ -1,4 +1,4 @@
# Devlopment with devcontainer
# Development with devcontainer
This project includes a devcontainer configuration that allows you to open the project in a container with a fully configured development environment.
Both frontend and backend environments are initialized when the container is started.
## GitHub Codespaces
@@ -33,5 +33,5 @@ Performance Impact: While usually minimal, programs running inside a devcontaine
if you see such error message when you open this project in codespaces:
![Alt text](troubleshooting.png)
a simple workaround is change `/signin` endpoint into another one, then login with github account and close the tab, then change it back to `/signin` endpoint. Then all things will be fine.
a simple workaround is change `/signin` endpoint into another one, then login with GitHub account and close the tab, then change it back to `/signin` endpoint. Then all things will be fine.
The reason is `signin` endpoint is not allowed in codespaces, details can be found [here](https://github.com/orgs/community/discussions/5204)

View File

@@ -32,8 +32,8 @@
]
}
},
"postStartCommand": "cd api && pip install -r requirements.txt",
"postCreateCommand": "cd web && npm install"
"postStartCommand": "./.devcontainer/post_start_command.sh",
"postCreateCommand": "./.devcontainer/post_create_command.sh"
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},

View File

@@ -0,0 +1,10 @@
#!/bin/bash
cd web && npm install
echo 'alias start-api="cd /workspaces/dify/api && flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
source /home/vscode/.bashrc
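Once the container is up, the aliases defined above can be used directly. A minimal usage sketch (assuming the default `/workspaces/dify` workspace path and that each long-running command gets its own terminal):

```bash
# Usage sketch for the aliases installed by post_start_command.sh (assumptions noted above)
source ~/.bashrc     # load the newly added aliases in the current shell
start-containers     # start the middleware services via docker-compose
start-api            # terminal 1: Flask API on port 5001 in debug mode
start-worker         # terminal 2: Celery worker for the dataset, generation and mail queues
start-web            # terminal 3: Next.js dev server
```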

View File

@@ -0,0 +1,3 @@
#!/bin/bash
cd api && pip install -r requirements.txt

View File

@@ -8,13 +8,13 @@ body:
label: Self Checks
description: "To make sure we get to you in time, please check the following :)"
options:
- label: This is only for bug report, if you would like to ask a quesion, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
- label: This is only for bug report, if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
required: true
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: input

View File

@@ -1,7 +1,7 @@
name: "📚 Documentation Issue"
description: Report issues in our documentation
labels:
- ducumentation
- documentation
body:
- type: checkboxes
attributes:
@@ -12,7 +12,7 @@ body:
required: true
- label: I confirm that I am using English to submit report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:

View File

@@ -12,7 +12,7 @@ body:
required: true
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:

View File

@@ -12,7 +12,7 @@ body:
required: true
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
- label: "Please do not modify this template :) and fill in all the required fields."
required: true
- type: input
attributes:

View File

@@ -4,43 +4,31 @@ on:
pull_request:
branches:
- main
paths:
- api/**
- docker/**
concurrency:
group: api-tests-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test:
runs-on: ubuntu-latest
env:
OPENAI_API_KEY: sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii
AZURE_OPENAI_API_BASE: https://difyai-openai.openai.azure.com
AZURE_OPENAI_API_KEY: xxxxb1707exxxxxxxxxxaaxxxxxf94
ANTHROPIC_API_KEY: sk-ant-api11-IamNotARealKeyJustForMockTestKawaiiiiiiiiii-NotBaka-ASkksz
CHATGLM_API_BASE: http://a.abc.com:11451
XINFERENCE_SERVER_URL: http://a.abc.com:11451
XINFERENCE_GENERATION_MODEL_UID: generate
XINFERENCE_CHAT_MODEL_UID: chat
XINFERENCE_EMBEDDINGS_MODEL_UID: embedding
XINFERENCE_RERANK_MODEL_UID: rerank
GOOGLE_API_KEY: abcdefghijklmnopqrstuvwxyz
HUGGINGFACE_API_KEY: hf-awuwuwuwuwuwuwuwuwuwuwuwuwuwuwuwuwu
HUGGINGFACE_TEXT_GEN_ENDPOINT_URL: a
HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL: b
HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL: c
MOCK_SWITCH: true
CODE_MAX_STRING_LENGTH: 80000
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install APT packages
uses: awalsh128/cache-apt-pkgs-action@v1
with:
packages: ffmpeg
- name: Set up Python
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: ${{ matrix.python-version }}
cache: 'pip'
cache-dependency-path: |
./api/requirements.txt
@@ -49,11 +37,123 @@ jobs:
- name: Install dependencies
run: pip install -r ./api/requirements.txt -r ./api/requirements-dev.txt
- name: Run Unit tests
run: dev/pytest/pytest_unit_tests.sh
- name: Run ModelRuntime
run: pytest api/tests/integration_tests/model_runtime/anthropic api/tests/integration_tests/model_runtime/azure_openai api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py
run: dev/pytest/pytest_model_runtime.sh
- name: Run Tool
run: pytest api/tests/integration_tests/tools/test_all_provider.py
run: dev/pytest/pytest_tools.sh
- name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
sandbox
ssrf_proxy
- name: Run Workflow
run: pytest api/tests/integration_tests/workflow
run: dev/pytest/pytest_workflow.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma)
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
docker/docker-compose.qdrant.yaml
docker/docker-compose.milvus.yaml
docker/docker-compose.pgvecto-rs.yaml
docker/docker-compose.pgvector.yaml
docker/docker-compose.chroma.yaml
services: |
weaviate
qdrant
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
- name: Test Vector Stores
run: dev/pytest/pytest_vdb.sh
test-in-poetry:
name: API Tests
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'poetry'
cache-dependency-path: |
api/pyproject.toml
api/poetry.lock
- name: Poetry check
run: |
poetry check -C api
poetry show -C api
- name: Install dependencies
run: poetry install -C api --with dev
- name: Run Unit tests
run: poetry run -C api bash dev/pytest/pytest_unit_tests.sh
- name: Run ModelRuntime
run: poetry run -C api bash dev/pytest/pytest_model_runtime.sh
- name: Run Tool
run: poetry run -C api bash dev/pytest/pytest_tools.sh
- name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
sandbox
ssrf_proxy
- name: Run Workflow
run: poetry run -C api bash dev/pytest/pytest_workflow.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma)
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
docker/docker-compose.qdrant.yaml
docker/docker-compose.milvus.yaml
docker/docker-compose.pgvecto-rs.yaml
docker/docker-compose.pgvector.yaml
docker/docker-compose.chroma.yaml
services: |
weaviate
qdrant
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

View File

@@ -17,7 +17,7 @@ env:
jobs:
build-and-push:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
if: github.repository == 'langgenius/dify'
strategy:
matrix:
include:

.github/workflows/db-migration-test.yml (vendored, new file, 57 lines)
View File

@@ -0,0 +1,57 @@
name: DB Migration Test
on:
pull_request:
branches:
- main
paths:
- api/migrations/**
concurrency:
group: db-migration-test-${{ github.ref }}
cancel-in-progress: true
jobs:
db-migration-test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.10"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'poetry'
cache-dependency-path: |
api/pyproject.toml
api/poetry.lock
- name: Install dependencies
run: poetry install -C api
- name: Set up Middleware
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
db
- name: Prepare configs
run: |
cd api
cp .env.example .env
- name: Run DB Migration
run: |
cd api
poetry run python -m flask db upgrade
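A sketch of reproducing this migration check locally (assuming Poetry is installed and the `db` service from `docker/docker-compose.middleware.yaml` is available):

```bash
# Approximate local equivalent of the DB Migration Test job, run from the repository root
docker compose -f docker/docker-compose.middleware.yaml up -d db
poetry install -C api
cd api
cp .env.example .env
poetry run python -m flask db upgrade
```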

View File

@@ -6,7 +6,7 @@ on:
- main
concurrency:
group: dep-${{ github.head_ref || github.run_id }}
group: style-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
@@ -18,51 +18,93 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
with:
files: api/**
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Set up Python
uses: actions/setup-python@v5
if: steps.changed-files.outputs.any_changed == 'true'
with:
python-version: '3.10'
- name: Python dependencies
run: pip install ruff
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry install -C api --only lint
- name: Ruff check
run: ruff check ./api
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api ruff check --preview ./api
- name: Dotenv check
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
- name: Lint hints
if: failure()
run: echo "Please run 'dev/reformat' to fix the fixable linting errors."
test:
name: ESLint and SuperLinter
web-style:
name: Web Style
runs-on: ubuntu-latest
needs: python-style
defaults:
run:
working-directory: ./web
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
with:
fetch-depth: 0
files: web/**
- name: Setup NodeJS
uses: actions/setup-node@v4
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 20
cache: yarn
cache-dependency-path: ./web/package.json
- name: Web dependencies
run: |
cd ./web
yarn install --frozen-lockfile
if: steps.changed-files.outputs.any_changed == 'true'
run: yarn install --frozen-lockfile
- name: Web style check
run: |
cd ./web
yarn run lint
if: steps.changed-files.outputs.any_changed == 'true'
run: yarn run lint
superlinter:
name: SuperLinter
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
with:
files: |
**.sh
**.yaml
**.yml
Dockerfile
dev/**
- name: Super-linter
uses: super-linter/super-linter/slim@v6
if: steps.changed-files.outputs.any_changed == 'true'
env:
BASH_SEVERITY: warning
DEFAULT_BRANCH: main
@@ -73,4 +115,5 @@ jobs:
VALIDATE_BASH_EXEC: true
VALIDATE_GITHUB_ACTIONS: true
VALIDATE_DOCKERFILE_HADOLINT: true
VALIDATE_XML: true
VALIDATE_YAML: true
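The same checks can be approximated locally (a sketch based on the workflow above; assumes Poetry, Node.js 20, and Yarn are installed):

```bash
# Backend style checks
poetry install -C api --only lint
poetry run -C api ruff check --preview ./api
poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
# Frontend style checks
cd web && yarn install --frozen-lockfile && yarn run lint
# dev/reformat fixes the auto-fixable linting errors
```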

View File

@@ -4,6 +4,13 @@ on:
pull_request:
branches:
- main
paths:
- sdks/**
concurrency:
group: sdk-tests-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
name: unit test for Node.js SDK

.gitignore (vendored, 4 lines changed)
View File

@@ -134,7 +134,8 @@ dmypy.json
web/.vscode/settings.json
# Intellij IDEA Files
.idea/
.idea/*
!.idea/vcs.xml
.ideaDataSources/
api/.env
@@ -148,6 +149,7 @@ docker/volumes/qdrant/*
docker/volumes/etcd/*
docker/volumes/minio/*
docker/volumes/milvus/*
docker/volumes/chroma/*
sdks/python-client/build
sdks/python-client/dist

.idea/vcs.xml (generated, new file, 16 lines)
View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="IssueNavigationConfiguration">
<option name="links">
<list>
<IssueNavigationLink>
<option name="issueRegexp" value="#(\d+)" />
<option name="linkRegexp" value="https://github.com/langgenius/dify/issues/$1" />
</IssueNavigationLink>
</list>
</option>
</component>
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>
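With this mapping, IntelliJ-based IDEs turn issue references such as `#3790` in commit messages into links to the matching GitHub issue (here, `https://github.com/langgenius/dify/issues/3790`).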

View File

@@ -4,7 +4,7 @@ We need to be nimble and ship fast given where we are, but we also want to make
This guide, like Dify itself, is a constant work in progress. We highly appreciate your understanding if at times it lags behind the actual project, and welcome any feedback for us to improve.
In terms of licensing, please take a minute to read our short [License and Contributor Agreement](./license). The community also adheres to the [code of conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
In terms of licensing, please take a minute to read our short [License and Contributor Agreement](./LICENSE). The community also adheres to the [code of conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
## Before you jump in

View File

@@ -4,7 +4,7 @@
这份指南,就像 Dify 本身一样,是一个不断改进的工作。如果有时它落后于实际项目,我们非常感谢你的理解,并欢迎任何反馈以供我们改进。
在许可方面,请花一分钟阅读我们简短的[许可证和贡献者协议](./license)。社区还遵守[行为准则](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。
在许可方面,请花一分钟阅读我们简短的[许可证和贡献者协议](./LICENSE)。社区还遵守[行为准则](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。
## 在开始之前

CONTRIBUTING_JA.md (new file, 160 lines)
View File

@@ -0,0 +1,160 @@
Dify にコントリビュートしたいとお考えなのですね。それは素晴らしいことです。
私たちは、LLM アプリケーションの構築と管理のための最も直感的なワークフローを設計するという壮大な野望を持っています。人数も資金も限られている新興企業として、コミュニティからの支援は本当に重要です。
私たちは現状を鑑み、機敏かつ迅速に開発をする必要がありますが、同時にあなたのようなコントリビューターの方々に、可能な限りスムーズな貢献体験をしていただきたいと思っています。そのためにこのコントリビュートガイドを作成しました。
コードベースやコントリビュータの方々と私たちがどのように仕事をしているのかに慣れていただき、楽しいパートにすぐに飛び込めるようにすることが目的です。
このガイドは Dify そのものと同様に、継続的に改善されています。実際のプロジェクトに遅れをとることがあるかもしれませんが、ご理解のほどよろしくお願いいたします。
ライセンスに関しては、私たちの短い[ライセンスおよびコントリビューター規約](./LICENSE)をお読みください。また、コミュニティは[行動規範](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)を遵守しています。
## 飛び込む前に
[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。
### 機能リクエスト
* 新しい機能要望を出す場合は、提案する機能が何を実現するものなのかを説明し、可能な限り多くのコンテキストを含めてください。[@perzeusss](https://github.com/perzeuss)は、あなたの要望を書き出すのに役立つ [Feature Request Copilot](https://udify.app/chat/MK2kVSnw1gakVwMX) を作ってくれました。気軽に試してみてください。
* 既存の課題から 1 つ選びたい場合は、その下にコメントを書いてください。
関連する方向で作業しているチームメンバーが参加します。すべてが良好であれば、コーディングを開始する許可が与えられます。私たちが変更を提案した場合にあなたの作業が無駄になることがないよう、それまでこの機能の作業を控えていただくようお願いいたします。
提案された機能がどの分野に属するかによって、あなたは異なるチーム・メンバーと話をするかもしれません。以下は、各チームメンバーが現在取り組んでいる分野の概要です。
| Member | Scope |
| --------------------------------------------------------------------------------------- | ------------------------------------ |
| [@yeuoly](https://github.com/Yeuoly) | エージェントアーキテクチャ |
| [@jyong](https://github.com/JohnJyong) | RAG パイプライン設計 |
| [@GarfieldDai](https://github.com/GarfieldDai) | workflow orchestrations の構築 |
| [@iamjoel](https://github.com/iamjoel) & [@zxhlyh](https://github.com/zxhlyh) | フロントエンドを使いやすくする |
| [@guchenhe](https://github.com/guchenhe) & [@crazywoola](https://github.com/crazywoola) | 開発者体験、何でも相談できる窓口 |
| [@takatost](https://github.com/takatost) | 全体的な製品の方向性とアーキテクチャ |
優先順位の付け方:
| Feature Type | Priority |
| --------------------------------------------------------------------------------------------------------------------- | --------------- |
| チームメンバーによってラベル付けされた優先度の高い機能 | High Priority |
| [community feedback board](https://github.com/langgenius/dify/discussions/categories/feedbacks)の人気の機能リクエスト | Medium Priority |
| 非コア機能とマイナーな機能強化 | Low Priority |
| 価値はあるが即効性はない | Future-Feature |
### その他 (バグレポート、パフォーマンスの最適化、誤字の修正など)
* すぐにコーディングを始めてください
優先順位の付け方:
| Issue Type | Priority |
| -------------------------------------------------------------------------------------- | --------------- |
| コア機能のバグ(ログインできない、アプリケーションが動作しない、セキュリティの抜け穴) | Critical |
| 致命的でないバグ、パフォーマンス向上 | Medium Priority |
| 細かな修正(誤字脱字、機能はするが分かりにくい UI | Low Priority |
## インストール
以下の手順で 、Difyのセットアップをしてください。
### 1. このリポジトリをフォークする
### 2. リポジトリをクローンする
フォークしたリポジトリをターミナルからクローンします。
```
git clone git@github.com:<github_username>/dify.git
```
### 3. 依存関係の確認
Dify を構築するには次の依存関係が必要です。それらがシステムにインストールされていることを確認してください。
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.10.x
### 4. インストール
Dify はバックエンドとフロントエンドから構成されています。
まず`cd api/`でバックエンドのディレクトリに移動し、[Backend README](api/README.md)に従ってインストールします。
次に別のターミナルで、`cd web/`でフロントエンドのディレクトリに移動し、[Frontend README](web/README.md)に従ってインストールしてください。
よくある問題とトラブルシューティングの手順については、[installation FAQ](https://docs.dify.ai/getting-started/faq/install-faq) を確認してください。
### 5. ブラウザで dify にアクセスする
設定を確認するために、ブラウザで[http://localhost:3000](http://localhost:3000)(デフォルト、または自分で設定した URL とポート)にアクセスしてください。Dify が起動して実行中であることが確認できるはずです。
## 開発中
モデルプロバイダーを追加する場合は、[このガイド](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/README.md)が役立ちます。
Agent や Workflow にツールプロバイダーを追加する場合は、[このガイド](./api/core/tools/README.md)が役立ちます。
Dify のバックエンドとフロントエンドの概要を簡単に説明します。
### バックエンド
Dify のバックエンドは[Flask](https://flask.palletsprojects.com/en/3.0.x/)を使って Python で書かれています。ORM には[SQLAlchemy](https://www.sqlalchemy.org/)を、タスクキューには[Celery](https://docs.celeryq.dev/en/stable/getting-started/introduction.html)を使っています。認証ロジックは Flask-login 経由で行われます。
```
[api/]
├── constants // コードベース全体で使用される定数設定
├── controllers // APIルート定義とリクエスト処理ロジック
├── core // アプリケーションの中核的な管理、モデル統合、およびツール
├── docker // Dockerおよびコンテナ関連の設定
├── events // イベントのハンドリングと処理
├── extensions // 第三者のフレームワーク/プラットフォームとの拡張
├── fields // シリアライゼーション/マーシャリング用のフィールド定義
├── libs // 再利用可能なライブラリとヘルパー
├── migrations // データベースマイグレーションスクリプト
├── models // データベースモデルとスキーマ定義
├── services // ビジネスロジックの定義
├── storage // 秘密鍵の保存
├── tasks // 非同期タスクとバックグラウンドジョブの処理
└── tests // テスト関連のファイル
```
### フロントエンド
このウェブサイトは、Typescriptベースの[Next.js](https://nextjs.org/)テンプレートを使ってブートストラップされ、[Tailwind CSS](https://tailwindcss.com/)を使ってスタイリングされています。国際化には[React-i18next](https://react.i18next.com/)を使用しています。
```
[web/]
├── app // レイアウト、ページ、コンポーネント
│ ├── (commonLayout) // アプリ全体で共通のレイアウト
│ ├── (shareLayout) // トークン特有のセッションで共有されるレイアウト
│ ├── activate // アクティベートページ
│ ├── components // ページやレイアウトで共有されるコンポーネント
│ ├── install // インストールページ
│ ├── signin // サインインページ
│ └── styles // グローバルに共有されるスタイル
├── assets // 静的アセット
├── bin // ビルドステップで実行されるスクリプト
├── config // 調整可能な設定とオプション
├── context // アプリの異なる部分で使用される共有コンテキスト
├── dictionaries // 言語別の翻訳ファイル
├── docker // コンテナ設定
├── hooks // 再利用可能なフック
├── i18n // 国際化設定
├── models // データモデルとAPIレスポンスの形状を記述
├── public // ファビコンなどのメタアセット
├── service // APIアクションの形状を指定
├── test
├── types // 関数のパラメータと戻り値の記述
└── utils // 共有ユーティリティ関数
```
## PR を投稿する
いよいよ、私たちのリポジトリにプルリクエスト (PR) を提出する時が来ました。主要な機能については、まず `deploy/dev` ブランチにマージしてテストしてから `main` ブランチにマージします。
マージ競合などの問題が発生した場合、またはプル リクエストを開く方法がわからない場合は、[GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) をチェックしてみてください。
これで完了です!あなたの PR がマージされると、[README](https://github.com/langgenius/dify/blob/main/README.md) にコントリビューターとして紹介されます。
## ヘルプを得る
コントリビュート中に行き詰まったり、疑問が生じたりした場合は、GitHub の関連する issue から質問していただくか、[Discord](https://discord.gg/8Tpq4AcN9c)でチャットしてください。

View File

@@ -29,19 +29,17 @@
</p>
<p align="center">
<a href="./README.md"><img alt="Commits last month" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="Commits last month" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="Commits last month" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="Commits last month" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_KL.md"><img alt="Commits last month" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_FR.md"><img alt="Commits last month" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
</p>
#
<p align="center">
<a href="https://trendshift.io/repositories/2152" target="_blank"><img src="https://trendshift.io/api/badge/repositories/2152" alt="langgenius%2Fdify | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production. Here's a list of the core features:
</br> </br>
@@ -54,7 +52,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
**2. Comprehensive model support**:
Seamless integration with hundreds of proprietary / open-source LLMs from dozens of inference providers and self-hosted solutions, covering GPT, Mistral, Llama2, and any OpenAI API-compatible models. A full list of supported model providers can be found [here](https://docs.dify.ai/getting-started/readme/model-providers).
Seamless integration with hundreds of proprietary / open-source LLMs from dozens of inference providers and self-hosted solutions, covering GPT, Mistral, Llama3, and any OpenAI API-compatible models. A full list of supported model providers can be found [here](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
@@ -109,7 +107,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
<td align="center">Agent</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅</td>
</tr>
<tr>
@@ -127,7 +125,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
<td align="center">❌</td>
</tr>
<tr>
<td align="center">Enterprise Feature (SSO/Access control)</td>
<td align="center">Enterprise Features (SSO/Access control)</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">❌</td>
@@ -187,10 +185,11 @@ After running, you can access the Dify dashboard in your browser at [http://loca
If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) which allow Dify to be deployed on Kubernetes.
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## Contributing

README_AR.md (new file, 226 lines)
View File

@@ -0,0 +1,226 @@
![cover-v5-optimized](https://github.com/langgenius/dify/assets/13230914/f9e19af5-61ba-4119-b926-d10c4c06ebab)
<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
<a href="https://docs.dify.ai/getting-started/install-self-hosted">الاستضافة الذاتية</a> ·
<a href="https://docs.dify.ai">التوثيق</a> ·
<a href="https://cal.com/guchenhe/60-min-meeting">استفسارات الشركات</a>
</p>
<p align="center">
<a href="https://dify.ai" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Product-F04438"></a>
<a href="https://dify.ai/pricing" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/free-pricing?logo=free&color=%20%23155EEF&label=pricing&labelColor=%20%23528bff"></a>
<a href="https://discord.gg/FngNHpbcY7" target="_blank">
<img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
alt="chat on Discord"></a>
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on Twitter"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
<img alt="Commits last month" src="https://img.shields.io/github/commit-activity/m/langgenius/dify?labelColor=%20%2332b583&color=%20%2312b76a"></a>
<a href="https://github.com/langgenius/dify/" target="_blank">
<img alt="Issues closed" src="https://img.shields.io/github/issues-search?query=repo%3Alanggenius%2Fdify%20is%3Aclosed&label=issues%20closed&labelColor=%20%237d89b0&color=%20%235d6b98"></a>
<a href="https://github.com/langgenius/dify/discussions/" target="_blank">
<img alt="Discussion posts" src="https://img.shields.io/github/discussions/langgenius/dify?labelColor=%20%239b8afb&color=%20%237a5af8"></a>
</p>
<p align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
</p>
<div style="text-align: right;">
مشروع Dify هو منصة تطوير تطبيقات الذكاء الصناعي مفتوحة المصدر. تجمع واجهته البديهية بين سير العمل الذكي بالذكاء الاصطناعي وخط أنابيب RAG وقدرات الوكيل وإدارة النماذج وميزات الملاحظة وأكثر من ذلك، مما يتيح لك الانتقال بسرعة من المرحلة التجريبية إلى الإنتاج. إليك قائمة بالميزات الأساسية:
</br> </br>
**1. سير العمل**: قم ببناء واختبار سير عمل الذكاء الاصطناعي القوي على قماش بصري، مستفيدًا من جميع الميزات التالية وأكثر.
https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
**2. الدعم الشامل للنماذج**: تكامل سلس مع مئات من LLMs الخاصة / مفتوحة المصدر من عشرات من موفري التحليل والحلول المستضافة ذاتيًا، مما يغطي GPT و Mistral و Llama3 وأي نماذج متوافقة مع واجهة OpenAI API. يمكن العثور على قائمة كاملة بمزودي النموذج المدعومين [هنا](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
**3. بيئة التطوير للأوامر**: واجهة بيئة التطوير المبتكرة لصياغة الأمر ومقارنة أداء النموذج، وإضافة ميزات إضافية مثل تحويل النص إلى كلام إلى تطبيق قائم على الدردشة.
**4. خط أنابيب RAG**: قدرات RAG الواسعة التي تغطي كل شيء من استيعاب الوثائق إلى الاسترجاع، مع الدعم الفوري لاستخراج النص من ملفات PDF و PPT وتنسيقات الوثائق الشائعة الأخرى.
**5. قدرات الوكيل**: يمكنك تعريف الوكلاء بناءً على أمر وظيفة LLM أو ReAct، وإضافة أدوات مدمجة أو مخصصة للوكيل. توفر Dify أكثر من 50 أداة مدمجة لوكلاء الذكاء الاصطناعي، مثل البحث في Google و DELL·E وStable Diffusion و WolframAlpha.
**6. الـ LLMOps**: راقب وتحلل سجلات التطبيق والأداء على مر الزمن. يمكنك تحسين الأوامر والبيانات والنماذج باستمرار استنادًا إلى البيانات الإنتاجية والتعليقات.
**7.الواجهة الخلفية (Backend) كخدمة**: تأتي جميع عروض Dify مع APIs مطابقة، حتى يمكنك دمج Dify بسهولة في منطق أعمالك الخاص.
## مقارنة الميزات
<table style="width: 100%;">
<tr>
<th align="center">الميزة</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">نهج البرمجة</td>
<td align="center">موجّه لـ تطبيق + واجهة برمجة تطبيق (API)</td>
<td align="center">برمجة Python</td>
<td align="center">موجه لتطبيق</td>
<td align="center">واجهة برمجة تطبيق (API)</td>
</tr>
<tr>
<td align="center">LLMs المدعومة</td>
<td align="center">تنوع غني</td>
<td align="center">تنوع غني</td>
<td align="center">تنوع غني</td>
<td align="center">فقط OpenAI</td>
</tr>
<tr>
<td align="center">محرك RAG</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
</tr>
<tr>
<td align="center">الوكيل</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
</tr>
<tr>
<td align="center">سير العمل</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">الملاحظة</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">ميزات الشركات (SSO / مراقبة الوصول)</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">❌</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">نشر محلي</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
</tr>
</table>
## استخدام Dify
- **سحابة </br>**
نحن نستضيف [خدمة Dify Cloud](https://dify.ai) لأي شخص لتجربتها بدون أي إعدادات. توفر كل قدرات النسخة التي تمت استضافتها ذاتيًا، وتتضمن 200 أمر GPT-4 مجانًا في خطة الصندوق الرملي.
- **استضافة ذاتية لنسخة المجتمع Dify</br>**
ابدأ سريعًا في تشغيل Dify في بيئتك باستخدام [دليل البدء السريع](#البدء السريع).
استخدم [توثيقنا](https://docs.dify.ai) للمزيد من المراجع والتعليمات الأعمق.
- **مشروع Dify للشركات / المؤسسات</br>**
نحن نوفر ميزات إضافية مركزة على الشركات. [جدول اجتماع معنا](https://cal.com/guchenhe/30min) أو [أرسل لنا بريدًا إلكترونيًا](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) لمناقشة احتياجات الشركات. </br>
> بالنسبة للشركات الناشئة والشركات الصغيرة التي تستخدم خدمات AWS، تحقق من [Dify Premium على AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) ونشرها في شبكتك الخاصة على AWS VPC بنقرة واحدة. إنها عرض AMI بأسعار معقولة مع خيار إنشاء تطبيقات بشعار وعلامة تجارية مخصصة.
## البقاء قدمًا
قم بإضافة نجمة إلى Dify على GitHub وتلق تنبيهًا فوريًا بالإصدارات الجديدة.
![نجمنا](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
## البداية السريعة
> قبل تثبيت Dify، تأكد من أن جهازك يلبي الحد الأدنى من متطلبات النظام التالية:
>
>- معالج >= 2 نواة
>- ذاكرة وصول عشوائي (RAM) >= 4 جيجابايت
</br>
أسهل طريقة لبدء تشغيل خادم Dify هي تشغيل ملف [docker-compose.yml](docker/docker-compose.yaml) الخاص بنا. قبل تشغيل أمر التثبيت، تأكد من تثبيت [Docker](https://docs.docker.com/get-docker/) و [Docker Compose](https://docs.docker.com/compose/install/) على جهازك:
```bash
cd docker
docker compose up -d
```
بعد التشغيل، يمكنك الوصول إلى لوحة تحكم Dify في متصفحك على [http://localhost/install](http://localhost/install) وبدء عملية التهيئة.
> إذا كنت ترغب في المساهمة في Dify أو القيام بتطوير إضافي، فانظر إلى [دليلنا للنشر من الشفرة (code) المصدرية](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
## الخطوات التالية
إذا كنت بحاجة إلى تخصيص التكوين، يرجى الرجوع إلى التعليقات في ملف [docker-compose.yml](docker/docker-compose.yaml) لدينا وتعيين التكوينات البيئية يدويًا. بعد إجراء التغييرات، يرجى تشغيل `docker-compose up -d` مرة أخرى. يمكنك رؤية قائمة كاملة بالمتغيرات البيئية [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
يوجد مجتمع خاص بـ [Helm Charts](https://helm.sh/) وملفات YAML التي تسمح بتنفيذ Dify على Kubernetes للنظام من الإيجابيات العلوية.
- [رسم بياني Helm من قبل @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [رسم بياني Helm من قبل @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [ملف YAML من قبل @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## المساهمة
لأولئك الذين يرغبون في المساهمة، انظر إلى [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) لدينا.
في الوقت نفسه، يرجى النظر في دعم Dify عن طريق مشاركته على وسائل التواصل الاجتماعي وفي الفعاليات والمؤتمرات.
> نحن نبحث عن مساهمين لمساعدة في ترجمة Dify إلى لغات أخرى غير اللغة الصينية المندرين أو الإنجليزية. إذا كنت مهتمًا بالمساعدة، يرجى الاطلاع على [README للترجمة](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) لمزيد من المعلومات، واترك لنا تعليقًا في قناة `global-users` على [خادم المجتمع على Discord](https://discord.gg/8Tpq4AcN9c).
**المساهمون**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
## المجتمع والاتصال
* [مناقشة Github](https://github.com/langgenius/dify/discussions). الأفضل لـ: مشاركة التعليقات وطرح الأسئلة.
* [المشكلات على GitHub](https://github.com/langgenius/dify/issues). الأفضل لـ: الأخطاء التي تواجهها في استخدام Dify.AI، واقتراحات الميزات. انظر [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [البريد الإلكتروني](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). الأفضل لـ: الأسئلة التي تتعلق باستخدام Dify.AI.
* [Discord](https://discord.gg/FngNHpbcY7). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
* [تويتر](https://twitter.com/dify_ai). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
أو، قم بجدولة اجتماع مباشرة مع أحد أعضاء الفريق:
<table>
<tr>
<th>نقطة الاتصال</th>
<th>الغرض</th>
</tr>
<tr>
<td><a href='https://cal.com/guchenhe/15min' target='_blank'><img class="schedule-button" src='https://github.com/langgenius/dify/assets/13230914/9ebcd111-1205-4d71-83d5-948d70b809f5' alt='Git-Hub-README-Button-3x' style="width: 180px; height: auto; object-fit: contain;"/></a></td>
<td>استفسارات الأعمال واقتراحات حول المنتج</td>
</tr>
<tr>
<td><a href='https://cal.com/pinkbanana' target='_blank'><img class="schedule-button" src='https://github.com/langgenius/dify/assets/13230914/d1edd00a-d7e4-4513-be6c-e57038e143fd' alt='Git-Hub-README-Button-2x' style="width: 180px; height: auto; object-fit: contain;"/></a></td>
<td>المساهمات والمشكلات وطلبات الميزات</td>
</tr>
</table>
## تاريخ النجمة
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
## الكشف عن الأمان
لحماية خصوصيتك، يرجى تجنب نشر مشكلات الأمان على GitHub. بدلاً من ذلك، أرسل أسئلتك إلى security@dify.ai وسنقدم لك إجابة أكثر تفصيلاً.
## الرخصة
هذا المستودع متاح تحت [رخصة البرنامج الحر Dify](LICENSE)، والتي تعتبر بشكل أساسي Apache 2.0 مع بعض القيود الإضافية.

View File

@@ -35,6 +35,7 @@
<a href="./README_ES.md"><img alt="上个月的提交次数" src="https://img.shields.io/badge/西班牙语-d9d9d9"></a>
<a href="./README_KL.md"><img alt="上个月的提交次数" src="https://img.shields.io/badge/法语-d9d9d9"></a>
<a href="./README_FR.md"><img alt="上个月的提交次数" src="https://img.shields.io/badge/克林贡语-d9d9d9"></a>
<a href="./README_KR.md"><img alt="上个月的提交次数" src="https://img.shields.io/badge/韓國語-d9d9d9"></a>
</div>
@@ -44,11 +45,11 @@
<a href="https://trendshift.io/repositories/2152" target="_blank"><img src="https://trendshift.io/api/badge/repositories/2152" alt="langgenius%2Fdify | 趋势转变" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>
Dify 是一个开源的LLM应用开发平台。其直观的界面结合了AI工作流、RAG管道、代理功能、模型管理、可观性功能等,让您可以快速从原型到生产。以下是其核心功能列表:
Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI 工作流、RAG 管道、Agent、模型管理、可观性功能等,让您可以快速从原型到生产。以下是其核心功能列表:
</br> </br>
**1. 工作流**:
视觉画布上构建和测试功能强大的AI工作流程利用以下所有功能以及更多功能。
在画布上构建和测试功能强大的 AI 工作流程,利用以下所有功能以及更多功能。
https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
@@ -56,7 +57,7 @@ Dify 是一个开源的LLM应用开发平台。其直观的界面结合了AI工
**2. 全面的模型支持**:
与数百种专有/开源LLMs以及数十种推理提供商和自托管解决方案无缝集成涵盖GPT、Mistral、Llama2以及任何与OpenAI API兼容的模型。完整的支持模型提供商列表可在[此处](https://docs.dify.ai/getting-started/readme/model-providers)找到。
与数百种专有/开源 LLMs 以及数十种推理提供商和自托管解决方案无缝集成,涵盖 GPT、Mistral、Llama3 以及任何与 OpenAI API 兼容的模型。完整的支持模型提供商列表可在[此处](https://docs.dify.ai/getting-started/readme/model-providers)找到。
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
@@ -65,16 +66,16 @@ Dify 是一个开源的LLM应用开发平台。其直观的界面结合了AI工
用于制作提示、比较模型性能以及向基于聊天的应用程序添加其他功能(如文本转语音)的直观界面。
**4. RAG Pipeline**:
广泛的RAG功能涵盖从文档摄入到检索的所有内容支持从PDF、PPT和其他常见文档格式中提取文本的开箱即用的支持。
广泛的 RAG 功能,涵盖从文档摄入到检索的所有内容,支持从 PDF、PPT 和其他常见文档格式中提取文本的开箱即用的支持。
**5. Agent 智能体**:
您可以基于LLM函数调用或ReAct定义代理,并为代理添加预构建或自定义工具。Dify为AI代理提供了50多种内置工具如谷歌搜索、DELL·E、稳定扩散和WolframAlpha等。
您可以基于 LLM 函数调用或 ReAct 定义 Agent并为 Agent 添加预构建或自定义工具。Dify 为 AI Agent 提供了50多种内置工具如谷歌搜索、DELL·E、Stable Diffusion 和 WolframAlpha 等。
**6. LLMOps**:
随时间监视和分析应用程序日志和性能。您可以根据生产数据和注持续改进提示、数据集和模型。
随时间监视和分析应用程序日志和性能。您可以根据生产数据和注持续改进提示、数据集和模型。
**7. 后端即服务**:
所有Dify的功能都带有相应的API因此您可以轻松地将Dify集成到自己的业务逻辑中。
所有 Dify 的功能都带有相应的 API因此您可以轻松地将 Dify 集成到自己的业务逻辑中。
## 功能比较
@@ -84,21 +85,21 @@ Dify 是一个开源的LLM应用开发平台。其直观的界面结合了AI工
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI助理API</th>
<th align="center">OpenAI Assistant API</th>
</tr>
<tr>
<td align="center">编程方法</td>
<td align="center">API + 应用程序导向</td>
<td align="center">Python代码</td>
<td align="center">Python 代码</td>
<td align="center">应用程序导向</td>
<td align="center">API导向</td>
<td align="center">API 导向</td>
</tr>
<tr>
<td align="center">支持的LLMs</td>
<td align="center">支持的 LLMs</td>
<td align="center">丰富多样</td>
<td align="center">丰富多样</td>
<td align="center">丰富多样</td>
<td align="center">仅限OpenAI</td>
<td align="center">仅限 OpenAI</td>
</tr>
<tr>
<td align="center">RAG引擎</td>
@@ -108,21 +109,21 @@ Dify 是一个开源的LLM应用开发平台。其直观的界面结合了AI工
<td align="center">✅</td>
</tr>
<tr>
<td align="center">代理</td>
<td align="center">✅</td>
<td align="center">Agent</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
</tr>
<tr>
<td align="center">工作流</td>
<td align="center">工作流</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">可观性</td>
<td align="center">可观性</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
@@ -185,10 +186,11 @@ docker compose up -d
#### 使用 Helm Chart 部署
使用 [Helm Chart](https://helm.sh/) 版本,可以在 Kubernetes 上部署 Dify。
使用 [Helm Chart](https://helm.sh/) 版本或者 YAML 文件,可以在 Kubernetes 上部署 Dify。
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML 文件 by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
### 配置
@@ -202,7 +204,7 @@ docker compose up -d
## Contributing
对于那些想要贡献代码的人,请参阅我们的[贡献指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。
同时请考虑通过社交媒体、活动和会议来支持Dify的分享。
同时,请考虑通过社交媒体、活动和会议来支持 Dify 的分享。
> 我们正在寻找贡献者来帮助将Dify翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)获取更多信息,并在我们的[Discord社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。

View File

@@ -35,6 +35,7 @@
<a href="./README_ES.md"><img alt="Actividad de Commits el último mes" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_KL.md"><img alt="Actividad de Commits el último mes" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_FR.md"><img alt="Actividad de Commits el último mes" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="Actividad de Commits el último mes" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
</p>
#
@@ -54,7 +55,7 @@ Dify es una plataforma de desarrollo de aplicaciones de LLM de código abierto.
**2. Soporte de modelos completo**:
Integración perfecta con cientos de LLMs propietarios / de código abierto de docenas de proveedores de inferencia y soluciones auto-alojadas, que cubren GPT, Mistral, Llama2 y cualquier modelo compatible con la API de OpenAI. Se puede encontrar una lista completa de proveedores de modelos admitidos [aquí](https://docs.dify.ai/getting-started/readme/model-providers).
Integración perfecta con cientos de LLMs propietarios / de código abierto de docenas de proveedores de inferencia y soluciones auto-alojadas, que cubren GPT, Mistral, Llama3 y cualquier modelo compatible con la API de OpenAI. Se puede encontrar una lista completa de proveedores de modelos admitidos [aquí](https://docs.dify.ai/getting-started/readme/model-providers).
![proveedores-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
@@ -111,7 +112,7 @@ es basados en LLM Function Calling o ReAct, y agregar herramientas preconstruida
<td align="center">Agente</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅</td>
</tr>
<tr>
@@ -191,10 +192,11 @@ Si necesitas personalizar la configuración, consulta los comentarios en nuestro
. Después de realizar los cambios, ejecuta `docker-compose up -d` nuevamente. Puedes ver la lista completa de variables de entorno [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Si deseas configurar una instalación altamente disponible, hay [Gráficos Helm](https://helm.sh/) contribuidos por la comunidad que permiten implementar Dify en Kubernetes.
Si desea configurar una configuración de alta disponibilidad, la comunidad proporciona [Gráficos Helm](https://helm.sh/) y archivos YAML, a través de los cuales puede desplegar Dify en Kubernetes.
- [Gráfico Helm por @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Gráfico Helm por @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [Ficheros YAML por @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## Contribuir

View File

@@ -35,6 +35,7 @@
<a href="./README_ES.md"><img alt="Commits le mois dernier" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_KL.md"><img alt="Commits le mois dernier" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_FR.md"><img alt="Commits le mois dernier" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="Commits le mois dernier" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
</p>
#
@@ -54,7 +55,7 @@ Dify est une plateforme de développement d'applications LLM open source. Son in
**2. Prise en charge complète des modèles**:
Intégration transparente avec des centaines de LLM propriétaires / open source provenant de dizaines de fournisseurs d'inférence et de solutions auto-hébergées, couvrant GPT, Mistral, Llama2, et tous les modèles compatibles avec l'API OpenAI. Une liste complète des fournisseurs de modèles pris en charge se trouve [ici](https://docs.dify.ai/getting-started/readme/model-providers).
Intégration transparente avec des centaines de LLM propriétaires / open source provenant de dizaines de fournisseurs d'inférence et de solutions auto-hébergées, couvrant GPT, Mistral, Llama3, et tous les modèles compatibles avec l'API OpenAI. Une liste complète des fournisseurs de modèles pris en charge se trouve [ici](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
@@ -111,7 +112,7 @@ ités d'agent**:
<td align="center">Agent</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅</td>
</tr>
<tr>
@@ -191,10 +192,11 @@ Si vous devez personnaliser la configuration, veuillez
vous référer aux commentaires dans notre fichier [docker-compose.yml](docker/docker-compose.yaml) et définir manuellement la configuration de l'environnement. Après avoir apporté les modifications, veuillez exécuter à nouveau `docker-compose up -d`. Vous pouvez voir la liste complète des variables d'environnement [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Si vous souhaitez configurer une installation hautement disponible, il existe des [Helm Charts](https://helm.sh/) contribués par la communauté qui permettent de déployer Dify sur Kubernetes.
Si vous souhaitez configurer une configuration haute disponibilité, la communauté fournit des [Helm Charts](https://helm.sh/) et des fichiers YAML, à travers lesquels vous pouvez déployer Dify sur Kubernetes.
- [Helm Chart par @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart par @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [Fichier YAML par @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## Contribuer

View File

@@ -2,9 +2,9 @@
<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
<a href="https://docs.dify.ai/getting-started/install-self-hosted">自己ホスティング</a> ·
<a href="https://docs.dify.ai/getting-started/install-self-hosted">セルフホスティング</a> ·
<a href="https://docs.dify.ai">ドキュメント</a> ·
<a href="https://cal.com/guchenhe/dify-demo">デモのスケジュール</a>
<a href="https://cal.com/guchenhe/dify-demo">デモの予約</a>
</p>
<p align="center">
@@ -35,6 +35,7 @@
<a href="./README_ES.md"><img alt="先月のコミット" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_KL.md"><img alt="先月のコミット" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_FR.md"><img alt="先月のコミット" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="先月のコミット" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
</p>
#
@@ -43,39 +44,37 @@
<a href="https://trendshift.io/repositories/2152" target="_blank"><img src="https://trendshift.io/api/badge/repositories/2152" alt="langgenius%2Fdify | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
DifyはオープンソースのLLMアプリケーション開発プラットフォームです。直感的なインターフェスには、AIワークフロー、RAGパイプライン、エージェント機能、モデル管理、観測機能などが組み合わさっており、プロトタイプから本番までの移行を迅速に行うことができます。以下は、主要機能のリストです:
DifyはオープンソースのLLMアプリケーション開発プラットフォームです。直感的なインターフェスには、AIワークフロー、RAGパイプライン、エージェント機能、モデル管理、観測機能などが組み合わさっており、プロトタイプから生産まで迅速に進めることができます。以下の機能が含まれます:
</br> </br>
**1. ワークフロー**:
ビジュアルキャンバス上で強力なAIワークフローを構築しテストし、以下の機能を活用してプロトタイプを超えることができます。
強力なAIワークフローをビジュアルキャンバス上で構築しテストできます。すべての機能、および以下の機能を使用できます。
https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
**2. 網羅的なモデルサポート**:
数百のプロプライエタリ/オープンソースのLLMと、数十の推論プロバイダーおよびセルフホスティングソリューションとのシームレスな統合を提供します。GPT、Mistral、Llama2、およびOpenAI API互換のモデルをカバーします。サポートされているモデルプロバイダーの完全なリストは[こちら](https://docs
.dify.ai/getting-started/readme/model-providers)をご覧ください。
**2. 総合的なモデルサポート**:
数百のプロプライエタリ/オープンソースのLLMと、数十の推論プロバイダーおよびセルフホスティングソリューションとのシームレスな統合を提供します。GPT、Mistral、Llama3、OpenAI API互換性のあるすべてのモデルを統合されています。サポートされているモデルプロバイダーの完全なリストは[こちら](https://docs.dify.ai/getting-started/readme/model-providers)をご覧ください。
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
**3. プロンプトIDE**:
チャットベースのアプリにテキスト読み上げなどの追加機能を追加するプロンプト作成、モデルパフォーマンス比較する直感的なインターフェース
プロンプト作成、モデルパフォーマンス比較が行え、チャットベースのアプリに音声合成などの機能も追加できます
**4. RAGパイプライン**:
文書の取り込みから取得までをカバーする幅広いRAG機能で、PDF、PPTなどの一般的なドキュメント形式からのテキスト抽出に対するアウトオブボックスのサポートを提供します。
ドキュメントの取り込みから検索までをカバーする広範なRAG機能ができます。ほかにもPDF、PPT、その他の一般的なドキュメントフォーマットからのテキスト抽出のサーポイントも提供します。
**5. エージェント機能**:
LLM関数呼び出しまたはReActに基づいてエージェント定義し、エージェント向けの事前構築済みまたはカスタムツールを追加できます。Difyには、Google検索、DELL·E、Stable Diffusion、WolframAlphaなどのAIエージェント用の50以上の組み込みツールが用意されています。
LLM Function CallingやReActに基づエージェント定義が可能で、AIエージェント用のプリビルトまたはカスタムツールを追加できます。Difyには、Google検索、DELL·E、Stable Diffusion、WolframAlphaなどのAIエージェント用の50以上の組み込みツールが提供します。
**6. LLMOps**:
アプリケーションログパフォーマンスを時間の経過とともにモニタリングおよび分析します。本番データと注釈に基づいて、プロンプト、データセット、およびモデルを継続的に改善できます。
アプリケーションログパフォーマンスを監視と分析し、生産のデータと注釈に基づいて、プロンプト、データセット、モデルを継続的に改善できます。
**7. Backend-as-a-Service**:
Difyのすべての提供には、それに対応するAPIが付属しており、独自のビジネスロジックにDifyをシームレスに統合できます。
すべての機能はAPIを提供されており、Difyを自分のビジネスロジックに簡単に統合できます。
## 機能比較
@@ -96,9 +95,9 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
</tr>
<tr>
<td align="center">サポートされているLLM</td>
<td align="center">豊富なバリエーション</td>
<td align="center">豊富なバリエーション</td>
<td align="center">豊富なバリエーション</td>
<td align="center">バラエティ豊か</td>
<td align="center">バラエティ豊か</td>
<td align="center">バラエティ豊か</td>
<td align="center">OpenAIのみ</td>
</tr>
<tr>
@@ -112,7 +111,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
<td align="center">エージェント</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅</td>
</tr>
<tr>
@@ -148,36 +147,34 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
## Difyの使用方法
- **クラウド </br>**
[こちら](https://dify.ai)のDify Cloudサービスを利用して、セットアップ不要で誰でも試すことができます。サンドボックスプランは、200回の無料のGPT-4呼び出しが含まれています。
[こちら](https://dify.ai)のDify Cloudサービスを利用して、セットアップ不要で試すことができます。サンドボックスプランは、200回のGPT-4呼び出しが無料で含まれています。
- **Dify Community Editionのセルフホスティング</br>**
この[スターターガイド](#quick-start)を使用して、環境でDifyをすばやく実行できます。
さらなる参照や詳細な手順については、[ドキュメント](https://docs.dify.ai)をご覧ください。
この[スターガイド](#quick-start)を使用して、ローカル環境でDifyを簡単に実行できます。
詳しくは[ドキュメント](https://docs.dify.ai)をご覧ください。
- **エンタープライズ/組織向けのDify</br>**
追加のエンタープライズ向け機能を提供しています。[こちらからミーティ
ングを予約](https://cal.com/guchenhe/30min)したり、[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)してエンタープライズのニーズについて相談してください。 </br>
> AWSを使用しているスタートアップや中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで独自のAWS VPCにデプロイできます。カスタムロゴとブランディングでアプリを作成するオプションを備えた手頃な価格のAMIオファリングです。
- **企業/組織向けのDify</br>**
企業中心の機能を提供しています。[こちらからミーティングを予約](https://cal.com/guchenhe/30min)したり、[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。 </br>
> AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングとして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。
## 先を見る
## 最新の情報を入手
GitHubでDifyにスターを付けると、新しいリリースがすぐに通知されます。
GitHubでDifyにスターを付けることで、Difyに関する新しいニュースを受け取れます。
![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
## クイックスタート
> Difyをインストールする前に、マシンが以下の最小システム要件を満たしていることを確認してください
> Difyをインストールする前に、お使いのマシンが以下の最小システム要件を満たしていることを確認してください:
>
>- CPU >= 2コア
>- RAM >= 4GB
</br>
Difyサーバーを起動する最も簡単な方法は、当社の[docker-compose.yml](docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。
Difyサーバーを起動する最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。
```bash
cd docker
docker compose up -d
```
@@ -192,10 +189,11 @@ docker compose up -d
環境設定をカスタマイズする場合は、[docker-compose.yml](docker/docker-compose.yaml)ファイル内のコメントを参照して、環境設定を手動で設定してください。変更を加えた後は、再び `docker-compose up -d` を実行してください。環境変数の完全なリストは[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)をご覧ください。
高可用性のセットアップを構成する場合、コミュニティによって提供されている[Helm Charts](https://helm.sh/)があり、これによりKubernetes上にDifyを展開できます。
高可用性のセットアップが必要な場合は、コミュニティ提供の[Helm Charts](https://helm.sh/)とYAMLファイルにより、DifyをKubernetesにデプロイすることができます。
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## 貢献
@@ -215,12 +213,12 @@ docker compose up -d
## コミュニティ & お問い合わせ
* [Github Discussion](https://github.com/langgenius/dify/discussions). 主に: フィードバックの共有や質問。
* [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AI使用中に遭遇したバグや機能提案。
* [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AIを使用する際に発生するバグの報告や機能提案。[貢献ガイド](CONTRIBUTING_JA.md)も参照してください
* [Email](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). 主に: Dify.AIの使用に関する質問。
* [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。
* [Twitter](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。
または、直接チームメンバーとミーティングをスケジュールします
または、直接チームメンバーとミーティングをスケジュール:
<table>
<tr>
@@ -231,7 +229,7 @@ docker compose up -d
<td><a href='https://cal.com/guchenhe/30min'>ミーティング</a></td>
<td>無料の30分間のミーティングをスケジュールしてください。</td>
<td>無料の30分間のミーティングをスケジュール</td>
</tr>
<tr>
<td><a href='mailto:support@dify.ai?subject=[GitHub]Technical%20Support'>技術サポート</a></td>
@@ -246,4 +244,4 @@ docker compose up -d
## ライセンス
プロジェクトはMITライセンスの下で利用可能です。[LICENSE](LICENSE)をご参照ください
このリポジトリは、Apache 2.0にいくつかの追加制限を加えた[Difyオープンソースライセンス](LICENSE)の下で利用可能です


@@ -35,6 +35,7 @@
<a href="./README_ES.md"><img alt="Commits last month" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_KL.md"><img alt="Commits last month" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_FR.md"><img alt="Commits last month" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="Commits last month" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
</p>
#
@@ -54,7 +55,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
**2. Comprehensive model support**:
Seamless integration with hundreds of proprietary / open-source LLMs from dozens of inference providers and self-hosted solutions, covering GPT, Mistral, Llama2, and any OpenAI API-compatible models. A full list of supported model providers can be found [here](https://docs.dify.ai/getting-started/readme/model-providers).
Seamless integration with hundreds of proprietary / open-source LLMs from dozens of inference providers and self-hosted solutions, covering GPT, Mistral, Llama3, and any OpenAI API-compatible models. A full list of supported model providers can be found [here](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
@@ -111,7 +112,7 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
<td align="center">Agent</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅</td>
</tr>
<tr>
@@ -189,11 +190,11 @@ After running, you can access the Dify dashboard in your browser at [http://loca
If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker-compose up -d` again. You can see the full list of environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) which allow Dify to be deployed on Kubernetes.
If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## Contributing

243 README_KR.md Normal file

@@ -0,0 +1,243 @@
![cover-v5-optimized](https://github.com/langgenius/dify/assets/13230914/f9e19af5-61ba-4119-b926-d10c4c06ebab)
<p align="center">
<a href="https://cloud.dify.ai">Dify 클라우드</a> ·
<a href="https://docs.dify.ai/getting-started/install-self-hosted">셀프-호스팅</a> ·
<a href="https://docs.dify.ai">문서</a> ·
<a href="https://cal.com/guchenhe/60-min-meeting">기업 문의</a>
</p>
<p align="center">
<a href="https://dify.ai" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Product-F04438"></a>
<a href="https://dify.ai/pricing" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/free-pricing?logo=free&color=%20%23155EEF&label=pricing&labelColor=%20%23528bff"></a>
<a href="https://discord.gg/FngNHpbcY7" target="_blank">
<img src="https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb"
alt="chat on Discord"></a>
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on Twitter"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
<img alt="Commits last month" src="https://img.shields.io/github/commit-activity/m/langgenius/dify?labelColor=%20%2332b583&color=%20%2312b76a"></a>
<a href="https://github.com/langgenius/dify/" target="_blank">
<img alt="Issues closed" src="https://img.shields.io/github/issues-search?query=repo%3Alanggenius%2Fdify%20is%3Aclosed&label=issues%20closed&labelColor=%20%237d89b0&color=%20%235d6b98"></a>
<a href="https://github.com/langgenius/dify/discussions/" target="_blank">
<img alt="Discussion posts" src="https://img.shields.io/github/discussions/langgenius/dify?labelColor=%20%239b8afb&color=%20%237a5af8"></a>
</p>
<p align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="한국어 README" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
</p>
Dify는 오픈 소스 LLM 앱 개발 플랫폼입니다. 직관적인 인터페이스를 통해 AI 워크플로우, RAG 파이프라인, 에이전트 기능, 모델 관리, 관찰 기능 등을 결합하여 프로토타입에서 프로덕션까지 빠르게 전환할 수 있습니다. 주요 기능 목록은 다음과 같습니다:</br> </br>
**1. 워크플로우**:
다음 기능들을 비롯한 다양한 기능을 활용하여 시각적 캔버스에서 강력한 AI 워크플로우를 구축하고 테스트하세요.
https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
**2. 포괄적인 모델 지원**:
수십 개의 추론 제공업체와 자체 호스팅 솔루션에서 제공하는 수백 개의 독점 및 오픈 소스 LLM과 원활하게 통합되며, GPT, Mistral, Llama3 및 모든 OpenAI API 호환 모델을 포함합니다. 지원되는 모델 제공업체의 전체 목록은 [여기](https://docs.dify.ai/getting-started/readme/model-providers)에서 확인할 수 있습니다.
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
**3. 통합 개발환경**:
프롬프트를 작성하고, 모델 성능을 비교하며, 텍스트-음성 변환과 같은 추가 기능을 채팅 기반 앱에 추가할 수 있는 직관적인 인터페이스를 제공합니다.
**4. RAG 파이프라인**:
문서 수집부터 검색까지 모든 것을 다루며, PDF, PPT 및 기타 일반적인 문서 형식에서 텍스트 추출을 위한 기본 지원이 포함되어 있는 광범위한 RAG 기능을 제공합니다.
**5. 에이전트 기능**:
LLM 함수 호출 또는 ReAct를 기반으로 에이전트를 정의하고 에이전트에 대해 사전 구축된 도구나 사용자 정의 도구를 추가할 수 있습니다. Dify는 Google Search, DALL·E, Stable Diffusion, WolframAlpha 등 AI 에이전트를 위한 50개 이상의 내장 도구를 제공합니다.
**6. LLMOps**:
시간 경과에 따른 애플리케이션 로그와 성능을 모니터링하고 분석합니다. 생산 데이터와 주석을 기반으로 프롬프트, 데이터세트, 모델을 지속적으로 개선할 수 있습니다.
**7. Backend-as-a-Service**:
Dify의 모든 제품에는 해당 API가 함께 제공되므로 Dify를 자신의 비즈니스 로직에 쉽게 통합할 수 있습니다.
## 기능 비교
<table style="width: 100%;">
<tr>
<th align="center">기능</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">프로그래밍 접근 방식</td>
<td align="center">API + 앱 중심</td>
<td align="center">Python 코드</td>
<td align="center">앱 중심</td>
<td align="center">API 중심</td>
</tr>
<tr>
<td align="center">지원되는 LLMs</td>
<td align="center">다양한 종류</td>
<td align="center">다양한 종류</td>
<td align="center">다양한 종류</td>
<td align="center">OpenAI 전용</td>
</tr>
<tr>
<td align="center">RAG 엔진</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
</tr>
<tr>
<td align="center">에이전트</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
</tr>
<tr>
<td align="center">워크플로우</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">✅</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">가시성</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">기업용 기능 (SSO/접근 제어)</td>
<td align="center">✅</td>
<td align="center">❌</td>
<td align="center">❌</td>
<td align="center">❌</td>
</tr>
<tr>
<td align="center">로컬 배포</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">✅</td>
<td align="center">❌</td>
</tr>
</table>
## Dify 사용하기
- **클라우드 </br>**
우리는 누구나 설정이 필요 없이 사용해 볼 수 있도록 [Dify 클라우드](https://dify.ai) 서비스를 호스팅합니다. 이는 자체 배포 버전의 모든 기능을 제공하며, 샌드박스 플랜에서 무료로 200회의 GPT-4 호출을 포함합니다.
- **셀프-호스팅 Dify 커뮤니티 에디션</br>**
환경에서 Dify를 빠르게 실행하려면 이 [스타터 가이드를](#quick-start) 참조하세요.
추가 참조 및 더 심층적인 지침은 [문서](https://docs.dify.ai)를 사용하세요.
- **기업 / 조직을 위한 Dify</br>**
우리는 추가적인 기업 중심 기능을 제공합니다. 당사와 [미팅일정](https://cal.com/guchenhe/30min)을 잡거나 [이메일 보내기](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)를 통해 기업 요구 사항을 논의하십시오. </br>
> AWS를 사용하는 스타트업 및 중소기업의 경우 [AWS Marketplace에서 Dify Premium](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)을 확인하고 한 번의 클릭으로 자체 AWS VPC에 배포하십시오. 맞춤형 로고와 브랜딩이 포함된 앱을 생성할 수 있는 옵션이 포함된 저렴한 AMI 제품입니다.
## 앞서가기
GitHub에서 Dify에 별표를 찍어 새로운 릴리스를 즉시 알림 받으세요.
![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
## 빠른 시작
>Dify를 설치하기 전에 컴퓨터가 다음과 같은 최소 시스템 요구 사항을 충족하는지 확인하세요 :
>- CPU >= 2 Core
>- RAM >= 4GB
</br>
Dify 서버를 시작하는 가장 쉬운 방법은 [docker-compose.yml](docker/docker-compose.yaml) 파일을 실행하는 것입니다. 설치 명령을 실행하기 전에 [Docker](https://docs.docker.com/get-docker/) 및 [Docker Compose](https://docs.docker.com/compose/install/)가 머신에 설치되어 있는지 확인하세요.
```bash
cd docker
docker compose up -d
```
실행 후 브라우저의 [http://localhost/install](http://localhost/install) 에서 Dify 대시보드에 액세스하고 초기화 프로세스를 시작할 수 있습니다.
> Dify에 기여하거나 추가 개발을 하고 싶다면 소스 코드에서 [배포에 대한 가이드](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)를 참조하세요.
## 다음 단계
구성 커스터마이징이 필요한 경우, [docker-compose.yml](docker/docker-compose.yaml) 파일의 코멘트를 참조하여 환경 구성을 수동으로 설정하십시오. 변경 후 `docker-compose up -d` 를 다시 실행하십시오. 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 확인할 수 있습니다.
고가용성 설정을 구성하고 싶다면, 커뮤니티가 제공하는 [Helm Charts](https://helm.sh/)와 YAML 파일을 통해 Dify를 Kubernetes에 배포할 수 있습니다.
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
- [YAML file by @Winson-030](https://github.com/Winson-030/dify-kubernetes)
## 기여
코드에 기여하고 싶은 분들은 [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요.
동시에 Dify를 소셜 미디어와 행사 및 컨퍼런스에 공유하여 지원하는 것을 고려해 주시기 바랍니다.
> 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요.
**기여자**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
## 커뮤니티 & 연락처
* [Github 토론](https://github.com/langgenius/dify/discussions). 피드백 공유 및 질문하기에 적합합니다.
* [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요.
* [이메일](mailto:support@dify.ai?subject=[GitHub]Questions%20About%20Dify). Dify.AI 사용에 대한 질문하기에 적합합니다.
* [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
* [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
또는 팀원과 직접 미팅을 예약하세요:
<table>
<tr>
<th>연락처</th>
<th>목적</th>
</tr>
<tr>
<td><a href='https://cal.com/guchenhe/15min' target='_blank'><img class="schedule-button" src='https://github.com/langgenius/dify/assets/13230914/9ebcd111-1205-4d71-83d5-948d70b809f5' alt='Git-Hub-README-Button-3x' style="width: 180px; height: auto; object-fit: contain;"/></a></td>
<td>비즈니스 문의 및 제품 피드백</td>
</tr>
<tr>
<td><a href='https://cal.com/pinkbanana' target='_blank'><img class="schedule-button" src='https://github.com/langgenius/dify/assets/13230914/d1edd00a-d7e4-4513-be6c-e57038e143fd' alt='Git-Hub-README-Button-2x' style="width: 180px; height: auto; object-fit: contain;"/></a></td>
<td>기여, 이슈 및 기능 요청</td>
</tr>
</table>
## Star 히스토리
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
## 보안 공개
개인정보 보호를 위해 보안 문제를 GitHub에 게시하지 마십시오. 대신 security@dify.ai로 질문을 보내주시면 더 자세한 답변을 드리겠습니다.
## 라이선스
이 저장소는 기본적으로 몇 가지 추가 제한 사항이 있는 Apache 2.0인 [Dify 오픈 소스 라이선스](LICENSE)에 따라 사용할 수 있습니다.


@@ -1,6 +1,3 @@
# Server Edition
EDITION=SELF_HOSTED
# Your App secret key will be used for securely signing the session cookie
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
@@ -20,6 +17,9 @@ APP_WEB_URL=http://127.0.0.1:3000
# Files URL
FILES_URL=http://127.0.0.1:5001
# The time in seconds after the signature is rejected
FILES_ACCESS_TIMEOUT=300
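`FILES_ACCESS_TIMEOUT` bounds how long a signed file URL stays valid. As a rough sketch of the idea (an HMAC-signed timestamp; illustrative only, not Dify's actual implementation):
```python
import hashlib
import hmac
import time

FILES_ACCESS_TIMEOUT = 300  # seconds, mirroring the variable above
SECRET_KEY = b"replace-with-a-strong-secret"  # placeholder

def sign(path: str, issued_at: int) -> str:
    # Sign the file path together with its issue time.
    msg = f"{path}:{issued_at}".encode()
    return hmac.new(SECRET_KEY, msg, hashlib.sha256).hexdigest()

def verify(path: str, issued_at: int, signature: str) -> bool:
    # Reject the signature once it is older than FILES_ACCESS_TIMEOUT.
    if time.time() - issued_at > FILES_ACCESS_TIMEOUT:
        return False
    return hmac.compare_digest(sign(path, issued_at), signature)
```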
# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
@@ -42,6 +42,7 @@ DB_DATABASE=dify
# storage type: local, s3, azure-blob
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
S3_USE_AWS_MANAGED_IAM=false
S3_ENDPOINT=https://your-bucket-name.storage.s3.cloudflare.com
S3_BUCKET_NAME=your-bucket-name
S3_ACCESS_KEY=your-access-key
@@ -52,12 +53,23 @@ AZURE_BLOB_ACCOUNT_NAME=your-account-name
AZURE_BLOB_ACCOUNT_KEY=your-account-key
AZURE_BLOB_CONTAINER_NAME=your-container-name
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
# Aliyun oss Storage configuration
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=your-endpoint
ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON=your-google-service-account-json-base64-string
# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
# Vector database configuration, support: weaviate, qdrant, milvus, relyt
# Vector database configuration, support: weaviate, qdrant, milvus, relyt, pgvecto_rs, pgvector
VECTOR_STORE=weaviate
# Weaviate configuration
@@ -70,6 +82,8 @@ WEAVIATE_BATCH_SIZE=100
QDRANT_URL=http://localhost:6333
QDRANT_API_KEY=difyai123456
QDRANT_CLIENT_TIMEOUT=20
QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
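For reference, these Qdrant settings map straightforwardly onto a client connection with the `qdrant-client` package (values hard-coded here only to mirror the example above):
```python
from qdrant_client import QdrantClient

# In real code these would be read from the environment variables above.
client = QdrantClient(
    url="http://localhost:6333",   # QDRANT_URL
    api_key="difyai123456",        # QDRANT_API_KEY
    timeout=20,                    # QDRANT_CLIENT_TIMEOUT
    prefer_grpc=False,             # QDRANT_GRPC_ENABLED
    grpc_port=6334,                # QDRANT_GRPC_PORT
)
print(client.get_collections())
```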
# Milvus configuration
MILVUS_HOST=127.0.0.1
@@ -85,6 +99,44 @@ RELYT_USER=postgres
RELYT_PASSWORD=postgres
RELYT_DATABASE=postgres
# Tencent configuration
TENCENT_VECTOR_DB_URL=http://127.0.0.1
TENCENT_VECTOR_DB_API_KEY=dify
TENCENT_VECTOR_DB_TIMEOUT=30
TENCENT_VECTOR_DB_USERNAME=dify
TENCENT_VECTOR_DB_DATABASE=dify
TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
# PGVECTO_RS configuration
PGVECTO_RS_HOST=localhost
PGVECTO_RS_PORT=5431
PGVECTO_RS_USER=postgres
PGVECTO_RS_PASSWORD=difyai123456
PGVECTO_RS_DATABASE=postgres
# PGVector configuration
PGVECTOR_HOST=127.0.0.1
PGVECTOR_PORT=5433
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=postgres
PGVECTOR_DATABASE=postgres
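The PGVector values above assemble into an ordinary PostgreSQL connection; a minimal SQLAlchemy sketch (not Dify's code) that also checks the `vector` extension:
```python
from sqlalchemy import create_engine, text

# Build a standard PostgreSQL URL from the PGVECTOR_* values above.
url = "postgresql+psycopg2://postgres:postgres@127.0.0.1:5433/postgres"
engine = create_engine(url)

with engine.connect() as conn:
    # The pgvector extension must be available in the target database.
    conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
    conn.commit()
```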
# Tidb Vector configuration
TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com
TIDB_VECTOR_PORT=4000
TIDB_VECTOR_USER=xxx.root
TIDB_VECTOR_PASSWORD=xxxxxx
TIDB_VECTOR_DATABASE=dify
# Chroma configuration
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
CHROMA_AUTH_CREDENTIALS=difyai123456
# Upload configuration
UPLOAD_FILE_SIZE_LIMIT=15
UPLOAD_FILE_BATCH_LIMIT=5
@@ -100,10 +152,11 @@ RESEND_API_KEY=
RESEND_API_URL=https://api.resend.com
# smtp configuration
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=587
SMTP_PORT=465
SMTP_USERNAME=123
SMTP_PASSWORD=abc
SMTP_USE_TLS=false
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
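The SMTP change above switches the example from STARTTLS on port 587 to implicit TLS on port 465, with `SMTP_OPPORTUNISTIC_TLS` covering the old behaviour. Roughly, the flags map onto `smtplib` like this (an illustration, not Dify's mail extension):
```python
import smtplib

SMTP_SERVER, SMTP_PORT = "smtp.gmail.com", 465
SMTP_USE_TLS, SMTP_OPPORTUNISTIC_TLS = True, False

if SMTP_USE_TLS and not SMTP_OPPORTUNISTIC_TLS:
    # Implicit TLS: the connection is encrypted from the first byte (port 465).
    server = smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT)
elif SMTP_USE_TLS:
    # Opportunistic STARTTLS: connect in plaintext, then upgrade (typically port 587).
    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    server.starttls()
else:
    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)

server.login("123", "abc")  # SMTP_USERNAME / SMTP_PASSWORD from the example above
```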
# Sentry configuration
SENTRY_DSN=
@@ -118,27 +171,9 @@ NOTION_CLIENT_SECRET=you-client-secret
NOTION_CLIENT_ID=you-client-id
NOTION_INTERNAL_SECRET=you-internal-secret
# Hosted Model Credentials
HOSTED_OPENAI_API_KEY=
HOSTED_OPENAI_API_BASE=
HOSTED_OPENAI_API_ORGANIZATION=
HOSTED_OPENAI_TRIAL_ENABLED=false
HOSTED_OPENAI_QUOTA_LIMIT=200
HOSTED_OPENAI_PAID_ENABLED=false
HOSTED_AZURE_OPENAI_ENABLED=false
HOSTED_AZURE_OPENAI_API_KEY=
HOSTED_AZURE_OPENAI_API_BASE=
HOSTED_AZURE_OPENAI_QUOTA_LIMIT=200
HOSTED_ANTHROPIC_API_BASE=
HOSTED_ANTHROPIC_API_KEY=
HOSTED_ANTHROPIC_TRIAL_ENABLED=false
HOSTED_ANTHROPIC_QUOTA_LIMIT=600000
HOSTED_ANTHROPIC_PAID_ENABLED=false
ETL_TYPE=dify
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL=
@@ -160,3 +195,25 @@ CODE_MAX_NUMBER_ARRAY_LENGTH=1000
# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60
# HTTP Node configuration
HTTP_REQUEST_MAX_CONNECT_TIMEOUT=300
HTTP_REQUEST_MAX_READ_TIMEOUT=600
HTTP_REQUEST_MAX_WRITE_TIMEOUT=600
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 # 10MB
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 # 1MB
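As a rough illustration of how timeout and response-size ceilings like these are typically enforced on an outbound request (shown with `httpx`; not Dify's exact HTTP-node code):
```python
import httpx

MAX_CONNECT, MAX_READ, MAX_WRITE = 300, 600, 600   # seconds
MAX_BINARY_SIZE = 10 * 1024 * 1024                 # 10MB

timeout = httpx.Timeout(connect=MAX_CONNECT, read=MAX_READ, write=MAX_WRITE, pool=None)

with httpx.Client(timeout=timeout) as client:
    response = client.get("https://example.com/")
    if len(response.content) > MAX_BINARY_SIZE:
        raise ValueError("response exceeds HTTP_REQUEST_NODE_MAX_BINARY_SIZE")
```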
# Log file path
LOG_FILE=
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
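These workflow guards usually translate into a simple step, clock, and recursion budget around the execution loop; a generic sketch (illustrative only, not Dify's engine):
```python
import time

MAX_STEPS, MAX_SECONDS, MAX_CALL_DEPTH = 500, 1200, 5

def run_workflow(nodes, depth: int = 0):
    # nodes: an ordered list of callables representing workflow nodes.
    if depth > MAX_CALL_DEPTH:
        raise RuntimeError("workflow call depth exceeded")
    started = time.monotonic()
    for step, node in enumerate(nodes, start=1):
        if step > MAX_STEPS:
            raise RuntimeError("maximum execution steps exceeded")
        if time.monotonic() - started > MAX_SECONDS:
            raise RuntimeError("maximum execution time exceeded")
        node()  # a nested workflow would call run_workflow(..., depth + 1)
```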
# App configuration
APP_MAX_EXECUTION_TIME=1200


@@ -17,7 +17,8 @@
"FLASK_DEBUG": "1",
"GEVENT_SUPPORT": "True"
},
"console": "integratedTerminal"
"console": "integratedTerminal",
"python": "${command:python.interpreterPath}"
},
{
"name": "Python: Flask",
@@ -36,7 +37,8 @@
"--debug"
],
"jinja": true,
"justMyCode": true
"justMyCode": true,
"python": "${command:python.interpreterPath}"
}
]
}


@@ -17,16 +17,105 @@
```bash
sed -i "/^SECRET_KEY=/c\SECRET_KEY=$(openssl rand -base64 42)" .env
```
4. If you use Anaconda, create a new environment and activate it
4. Create environment.
Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. You can execute `poetry shell` to activate the environment.
> Instructions for using pip can be found [below](#usage-with-pip).
6. Install dependencies
```bash
poetry install
```
If a contributor has not updated the dependencies in `pyproject.toml`, you can run the following shell commands instead.
```bash
poetry shell # activate current environment
poetry add $(cat requirements.txt) # install dependencies of production and update pyproject.toml
poetry add $(cat requirements-dev.txt) --group dev # install dependencies of development and update pyproject.toml
```
7. Run migrate
Before the first launch, migrate the database to the latest version.
```bash
poetry run python -m flask db upgrade
```
8. Start backend
```bash
poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug
```
9. Start Dify [web](../web) service.
10. Set up your application by visiting `http://localhost:3000`...
11. If you need to debug local async processing, please start the worker service.
```bash
poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail
```
The Celery worker started this way handles async tasks, e.g. dataset importing and document indexing.
## Testing
1. Install dependencies for both the backend and the test environment
```bash
poetry install --with dev
```
2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`
```bash
cd ../
poetry run -C api bash dev/pytest/pytest_all_tests.sh
```
## Usage with pip
> [!NOTE]
> In the next version, pip will be deprecated as the primary package management tool for the Dify API service; for now, Poetry and pip coexist.
1. Start the docker-compose stack
The backend requires some middleware, including PostgreSQL, Redis, and Weaviate, which can be started together using `docker-compose`.
```bash
cd ../docker
docker-compose -f docker-compose.middleware.yaml -p dify up -d
cd ../api
```
2. Copy `.env.example` to `.env`
3. Generate a `SECRET_KEY` in the `.env` file.
```bash
sed -i "/^SECRET_KEY=/c\SECRET_KEY=$(openssl rand -base64 42)" .env
```
4. Create environment.
If you use Anaconda, create a new environment and activate it
```bash
conda create --name dify python=3.10
conda activate dify
```
5. Install dependencies
6. Install dependencies
```bash
pip install -r requirements.txt
```
6. Run migrate
7. Run migrate
Before the first launch, migrate the database to the latest version.
@@ -34,24 +123,27 @@
flask db upgrade
```
⚠️ If you encounter problems with jieba, for example
```
> flask db upgrade
Error: While importing 'app', an ImportError was raised:
```
Please run the following command instead.
```
pip install -r requirements.txt --upgrade --force-reinstall
```
7. Start backend:
8. Start backend:
```bash
flask run --host 0.0.0.0 --port=5001 --debug
```
8. Setup your application by visiting http://localhost:5001/console/api/setup or other apis...
9. If you need to debug local async processing, please start the worker service by running
`celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail`.
The started celery app handles the async tasks, e.g. dataset importing and documents indexing.
9. Setup your application by visiting http://localhost:5001/console/api/setup or other apis...
10. If you need to debug local async processing, please start the worker service.
```bash
celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail
```
The Celery worker started this way handles async tasks, e.g. dataset importing and document indexing.
## Testing
1. Install dependencies for both the backend and the test environment
```bash
pip install -r requirements.txt -r requirements-dev.txt
```
2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`
```bash
dev/pytest/pytest_all_tests.sh
```


@@ -4,23 +4,28 @@ if not os.environ.get("DEBUG") or os.environ.get("DEBUG").lower() != 'true':
from gevent import monkey
monkey.patch_all()
# if os.environ.get("VECTOR_STORE") == 'milvus':
import grpc.experimental.gevent
grpc.experimental.gevent.init_gevent()
import json
import logging
import sys
import threading
import time
import warnings
from logging.handlers import RotatingFileHandler
from flask import Flask, Response, request
from flask_cors import CORS
from werkzeug.exceptions import Unauthorized
from commands import register_commands
from config import CloudEditionConfig, Config
from config import Config
# DO NOT REMOVE BELOW
from events import event_handlers
from extensions import (
ext_celery,
ext_code_based_extension,
@@ -37,11 +42,8 @@ from extensions import (
from extensions.ext_database import db
from extensions.ext_login import login_manager
from libs.passport import PassportService
from services.account_service import AccountService
# DO NOT REMOVE BELOW
from events import event_handlers
from models import account, dataset, model, source, task, tool, tools, web
from services.account_service import AccountService
# DO NOT REMOVE ABOVE
@@ -73,20 +75,32 @@ config_type = os.getenv('EDITION', default='SELF_HOSTED') # ce edition first
# ----------------------------
def create_app(test_config=None) -> Flask:
def create_app() -> Flask:
app = DifyApp(__name__)
if test_config:
app.config.from_object(test_config)
else:
if config_type == "CLOUD":
app.config.from_object(CloudEditionConfig())
else:
app.config.from_object(Config())
app.config.from_object(Config())
app.secret_key = app.config['SECRET_KEY']
logging.basicConfig(level=app.config.get('LOG_LEVEL', 'INFO'))
log_handlers = None
log_file = app.config.get('LOG_FILE')
if log_file:
log_dir = os.path.dirname(log_file)
os.makedirs(log_dir, exist_ok=True)
log_handlers = [
RotatingFileHandler(
filename=log_file,
maxBytes=1024 * 1024 * 1024,
backupCount=5
),
logging.StreamHandler(sys.stdout)
]
logging.basicConfig(
level=app.config.get('LOG_LEVEL'),
format=app.config.get('LOG_FORMAT'),
datefmt=app.config.get('LOG_DATEFORMAT'),
handlers=log_handlers
)
initialize_extensions(app)
register_blueprints(app)
@@ -115,7 +129,7 @@ def initialize_extensions(app):
@login_manager.request_loader
def load_user_from_request(request_from_flask_login):
"""Load user based on the request."""
if request.blueprint == 'console':
if request.blueprint in ['console', 'inner_api']:
# Check if the user_id contains a dot, indicating the old format
auth_header = request.headers.get('Authorization', '')
if not auth_header:
@@ -151,6 +165,7 @@ def unauthorized_handler():
def register_blueprints(app):
from controllers.console import bp as console_app_bp
from controllers.files import bp as files_bp
from controllers.inner_api import bp as inner_api_bp
from controllers.service_api import bp as service_api_bp
from controllers.web import bp as web_bp
@@ -188,6 +203,8 @@ def register_blueprints(app):
)
app.register_blueprint(files_bp)
app.register_blueprint(inner_api_bp)
# create app
app = create_app()


@@ -1,12 +1,15 @@
import base64
import json
import secrets
from typing import Optional
import click
from flask import current_app
from werkzeug.exceptions import NotFound
from constants.languages import languages
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.models.document import Document
from extensions.ext_database import db
from libs.helper import email as email_validate
@@ -17,6 +20,7 @@ from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
from models.provider import Provider, ProviderModel
from services.account_service import RegisterService, TenantService
@click.command('reset-password', help='Reset the account password.')
@@ -57,7 +61,7 @@ def reset_password(email, new_password, password_confirm):
account.password = base64_password_hashed
account.password_salt = base64_salt
db.session.commit()
click.echo(click.style('Congratulations!, password has been reset.', fg='green'))
click.echo(click.style('Congratulations! Password has been reset.', fg='green'))
@click.command('reset-email', help='Reset the account email.')
@@ -263,15 +267,15 @@ def migrate_knowledge_vector_database():
skipped_count = skipped_count + 1
continue
collection_name = ''
if vector_type == "weaviate":
if vector_type == VectorType.WEAVIATE:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"type": VectorType.WEAVIATE,
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == "qdrant":
elif vector_type == VectorType.QDRANT:
if dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == dataset.collection_binding_id). \
@@ -284,20 +288,20 @@ def migrate_knowledge_vector_database():
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'qdrant',
"type": VectorType.QDRANT,
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == "milvus":
elif vector_type == VectorType.MILVUS:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"type": VectorType.MILVUS,
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == "relyt":
elif vector_type == VectorType.RELYT:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
@@ -305,8 +309,24 @@ def migrate_knowledge_vector_database():
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.TENCENT:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": VectorType.TENCENT,
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.PGVECTOR:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": VectorType.PGVECTOR,
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
raise ValueError(f"Vector store {vector_type} is not supported.")
vector = Vector(dataset)
click.echo(f"Start to migrate dataset {dataset.id}.")
@@ -440,9 +460,105 @@ def convert_to_agent_apps():
click.echo(click.style('Congratulations! Converted {} agent apps.'.format(len(proceeded_app_ids)), fg='green'))
@click.command('add-qdrant-doc-id-index', help='add qdrant doc_id index.')
@click.option('--field', default='metadata.doc_id', prompt=False, help='index field , default is metadata.doc_id.')
def add_qdrant_doc_id_index(field: str):
click.echo(click.style('Start add qdrant doc_id index.', fg='green'))
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if vector_type != "qdrant":
click.echo(click.style('Sorry, only support qdrant vector store.', fg='red'))
return
create_count = 0
try:
bindings = db.session.query(DatasetCollectionBinding).all()
if not bindings:
click.echo(click.style('Sorry, no dataset collection bindings found.', fg='red'))
return
import qdrant_client
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig
for binding in bindings:
qdrant_config = QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT'),
grpc_port=config.get('QDRANT_GRPC_PORT'),
prefer_grpc=config.get('QDRANT_GRPC_ENABLED')
)
try:
client = qdrant_client.QdrantClient(**qdrant_config.to_qdrant_params())
# create payload index
client.create_payload_index(binding.collection_name, field,
field_schema=PayloadSchemaType.KEYWORD)
create_count += 1
except UnexpectedResponse as e:
# Collection does not exist, so return
if e.status_code == 404:
click.echo(click.style(f'Collection not found, collection_name:{binding.collection_name}.', fg='red'))
continue
# Some other error occurred, so re-raise the exception
else:
click.echo(click.style(f'Failed to create qdrant index, collection_name:{binding.collection_name}.', fg='red'))
except Exception as e:
click.echo(click.style('Failed to create qdrant client.', fg='red'))
click.echo(
click.style(f'Congratulations! Create {create_count} collection indexes.',
fg='green'))
@click.command('create-tenant', help='Create account and tenant.')
@click.option('--email', prompt=True, help='The email address of the tenant account.')
@click.option('--language', prompt=True, help='Account language, default: en-US.')
def create_tenant(email: str, language: Optional[str] = None):
"""
Create tenant account
"""
if not email:
click.echo(click.style('Sorry, email is required.', fg='red'))
return
# Create account
email = email.strip()
if '@' not in email:
click.echo(click.style('Sorry, invalid email address.', fg='red'))
return
account_name = email.split('@')[0]
if language not in languages:
language = 'en-US'
# generate random password
new_password = secrets.token_urlsafe(16)
# register account
account = RegisterService.register(
email=email,
name=account_name,
password=new_password,
language=language
)
TenantService.create_owner_tenant_if_not_exist(account)
click.echo(click.style('Congratulations! Account and tenant created.\n'
'Account: {}\nPassword: {}'.format(email, new_password), fg='green'))
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
app.cli.add_command(reset_encrypt_key_pair)
app.cli.add_command(vdb_migrate)
app.cli.add_command(convert_to_agent_apps)
app.cli.add_command(add_qdrant_doc_id_index)
app.cli.add_command(create_tenant)


@@ -5,6 +5,7 @@ import dotenv
dotenv.load_dotenv()
DEFAULTS = {
'EDITION': 'SELF_HOSTED',
'DB_USERNAME': 'postgres',
'DB_PASSWORD': '',
'DB_HOST': 'localhost',
@@ -22,22 +23,31 @@ DEFAULTS = {
'SERVICE_API_URL': 'https://api.dify.ai',
'APP_WEB_URL': 'https://udify.app',
'FILES_URL': '',
'FILES_ACCESS_TIMEOUT': 300,
'S3_USE_AWS_MANAGED_IAM': 'False',
'S3_ADDRESS_STYLE': 'auto',
'STORAGE_TYPE': 'local',
'STORAGE_LOCAL_PATH': 'storage',
'CHECK_UPDATE_URL': 'https://updates.dify.ai',
'DEPLOY_ENV': 'PRODUCTION',
'SQLALCHEMY_DATABASE_URI_SCHEME': 'postgresql',
'SQLALCHEMY_POOL_SIZE': 30,
'SQLALCHEMY_MAX_OVERFLOW': 10,
'SQLALCHEMY_POOL_RECYCLE': 3600,
'SQLALCHEMY_POOL_PRE_PING': 'False',
'SQLALCHEMY_ECHO': 'False',
'SENTRY_TRACES_SAMPLE_RATE': 1.0,
'SENTRY_PROFILES_SAMPLE_RATE': 1.0,
'WEAVIATE_GRPC_ENABLED': 'True',
'WEAVIATE_BATCH_SIZE': 100,
'QDRANT_CLIENT_TIMEOUT': 20,
'QDRANT_GRPC_ENABLED': 'False',
'QDRANT_GRPC_PORT': '6334',
'CELERY_BACKEND': 'database',
'LOG_LEVEL': 'INFO',
'LOG_FILE': '',
'LOG_FORMAT': '%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] [%(filename)s:%(lineno)d] - %(message)s',
'LOG_DATEFORMAT': '%Y-%m-%d %H:%M:%S',
'HOSTED_OPENAI_QUOTA_LIMIT': 200,
'HOSTED_OPENAI_TRIAL_ENABLED': 'False',
'HOSTED_OPENAI_TRIAL_MODELS': 'gpt-3.5-turbo,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-16k,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-0125,text-davinci-003',
@@ -61,6 +71,7 @@ DEFAULTS = {
'INVITE_EXPIRY_HOURS': 72,
'BILLING_ENABLED': 'False',
'CAN_REPLACE_LOGO': 'False',
'MODEL_LB_ENABLED': 'False',
'ETL_TYPE': 'dify',
'KEYWORD_STORE': 'jieba',
'BATCH_UPLOAD_LIMIT': 20,
@@ -69,6 +80,13 @@ DEFAULTS = {
'TOOL_ICON_CACHE_MAX_AGE': 3600,
'MILVUS_DATABASE': 'default',
'KEYWORD_DATA_SOURCE_TYPE': 'database',
'INNER_API': 'False',
'ENTERPRISE_ENABLED': 'False',
'INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH': 1000,
'WORKFLOW_MAX_EXECUTION_STEPS': 500,
'WORKFLOW_MAX_EXECUTION_TIME': 1200,
'WORKFLOW_CALL_MAX_DEPTH': 5,
'APP_MAX_EXECUTION_TIME': 1200,
}
@@ -99,12 +117,16 @@ class Config:
# ------------------------
# General Configurations.
# ------------------------
self.CURRENT_VERSION = "0.6.3"
self.CURRENT_VERSION = "0.6.11"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.EDITION = get_env('EDITION')
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
self.TESTING = False
self.LOG_LEVEL = get_env('LOG_LEVEL')
self.LOG_FILE = get_env('LOG_FILE')
self.LOG_FORMAT = get_env('LOG_FORMAT')
self.LOG_DATEFORMAT = get_env('LOG_DATEFORMAT')
self.API_COMPRESSION_ENABLED = get_bool_env('API_COMPRESSION_ENABLED')
# The backend URL prefix of the console API.
# used to concatenate the login authorization callback or notion integration callback.
@@ -127,12 +149,21 @@ class Config:
# Url is signed and has expiration time.
self.FILES_URL = get_env('FILES_URL') if get_env('FILES_URL') else self.CONSOLE_API_URL
# File Access Time specifies a time interval in seconds for the file to be accessed.
# The default value is 300 seconds.
self.FILES_ACCESS_TIMEOUT = int(get_env('FILES_ACCESS_TIMEOUT'))
# Your App secret key will be used for securely signing the session cookie
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
# Alternatively you can set it with `SECRET_KEY` environment variable.
self.SECRET_KEY = get_env('SECRET_KEY')
# Enable or disable the inner API.
self.INNER_API = get_bool_env('INNER_API')
# The inner API key is used to authenticate the inner API.
self.INNER_API_KEY = get_env('INNER_API_KEY')
# cors settings
self.CONSOLE_CORS_ALLOW_ORIGINS = get_cors_allow_origins(
'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_WEB_URL)
@@ -149,14 +180,17 @@ class Config:
key: get_env(key) for key in
['DB_USERNAME', 'DB_PASSWORD', 'DB_HOST', 'DB_PORT', 'DB_DATABASE', 'DB_CHARSET']
}
self.SQLALCHEMY_DATABASE_URI_SCHEME = get_env('SQLALCHEMY_DATABASE_URI_SCHEME')
db_extras = f"?client_encoding={db_credentials['DB_CHARSET']}" if db_credentials['DB_CHARSET'] else ""
self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}"
self.SQLALCHEMY_DATABASE_URI = f"{self.SQLALCHEMY_DATABASE_URI_SCHEME}://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}"
self.SQLALCHEMY_ENGINE_OPTIONS = {
'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')),
'max_overflow': int(get_env('SQLALCHEMY_MAX_OVERFLOW')),
'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE'))
'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE')),
'pool_pre_ping': get_bool_env('SQLALCHEMY_POOL_PRE_PING'),
'connect_args': {'options': '-c timezone=UTC'},
}
self.SQLALCHEMY_ECHO = get_bool_env('SQLALCHEMY_ECHO')
@@ -180,32 +214,58 @@ class Config:
if self.CELERY_BACKEND == 'database' else self.CELERY_BROKER_URL
self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://')
# ------------------------
# Code Execution Sandbox Configurations.
# ------------------------
self.CODE_EXECUTION_ENDPOINT = get_env('CODE_EXECUTION_ENDPOINT')
self.CODE_EXECUTION_API_KEY = get_env('CODE_EXECUTION_API_KEY')
# ------------------------
# File Storage Configurations.
# ------------------------
self.STORAGE_TYPE = get_env('STORAGE_TYPE')
self.STORAGE_LOCAL_PATH = get_env('STORAGE_LOCAL_PATH')
# S3 Storage settings
self.S3_USE_AWS_MANAGED_IAM = get_bool_env('S3_USE_AWS_MANAGED_IAM')
self.S3_ENDPOINT = get_env('S3_ENDPOINT')
self.S3_BUCKET_NAME = get_env('S3_BUCKET_NAME')
self.S3_ACCESS_KEY = get_env('S3_ACCESS_KEY')
self.S3_SECRET_KEY = get_env('S3_SECRET_KEY')
self.S3_REGION = get_env('S3_REGION')
self.S3_ADDRESS_STYLE = get_env('S3_ADDRESS_STYLE')
# Azure Blob Storage settings
self.AZURE_BLOB_ACCOUNT_NAME = get_env('AZURE_BLOB_ACCOUNT_NAME')
self.AZURE_BLOB_ACCOUNT_KEY = get_env('AZURE_BLOB_ACCOUNT_KEY')
self.AZURE_BLOB_CONTAINER_NAME = get_env('AZURE_BLOB_CONTAINER_NAME')
self.AZURE_BLOB_ACCOUNT_URL = get_env('AZURE_BLOB_ACCOUNT_URL')
# Aliyun Storage settings
self.ALIYUN_OSS_BUCKET_NAME = get_env('ALIYUN_OSS_BUCKET_NAME')
self.ALIYUN_OSS_ACCESS_KEY = get_env('ALIYUN_OSS_ACCESS_KEY')
self.ALIYUN_OSS_SECRET_KEY = get_env('ALIYUN_OSS_SECRET_KEY')
self.ALIYUN_OSS_ENDPOINT = get_env('ALIYUN_OSS_ENDPOINT')
self.ALIYUN_OSS_REGION = get_env('ALIYUN_OSS_REGION')
self.ALIYUN_OSS_AUTH_VERSION = get_env('ALIYUN_OSS_AUTH_VERSION')
# Google Cloud Storage settings
self.GOOGLE_STORAGE_BUCKET_NAME = get_env('GOOGLE_STORAGE_BUCKET_NAME')
self.GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 = get_env('GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64')
# ------------------------
# Vector Store Configurations.
# Currently, only support: qdrant, milvus, zilliz, weaviate, relyt
# Currently, only support: qdrant, milvus, zilliz, weaviate, relyt, pgvector
# ------------------------
self.VECTOR_STORE = get_env('VECTOR_STORE')
self.KEYWORD_STORE = get_env('KEYWORD_STORE')
# qdrant settings
self.QDRANT_URL = get_env('QDRANT_URL')
self.QDRANT_API_KEY = get_env('QDRANT_API_KEY')
self.QDRANT_CLIENT_TIMEOUT = get_env('QDRANT_CLIENT_TIMEOUT')
self.QDRANT_GRPC_ENABLED = get_env('QDRANT_GRPC_ENABLED')
self.QDRANT_GRPC_PORT = get_env('QDRANT_GRPC_PORT')
# milvus / zilliz setting
self.MILVUS_HOST = get_env('MILVUS_HOST')
@@ -228,6 +288,45 @@ class Config:
self.RELYT_PASSWORD = get_env('RELYT_PASSWORD')
self.RELYT_DATABASE = get_env('RELYT_DATABASE')
# tencent settings
self.TENCENT_VECTOR_DB_URL = get_env('TENCENT_VECTOR_DB_URL')
self.TENCENT_VECTOR_DB_API_KEY = get_env('TENCENT_VECTOR_DB_API_KEY')
self.TENCENT_VECTOR_DB_TIMEOUT = get_env('TENCENT_VECTOR_DB_TIMEOUT')
self.TENCENT_VECTOR_DB_USERNAME = get_env('TENCENT_VECTOR_DB_USERNAME')
self.TENCENT_VECTOR_DB_DATABASE = get_env('TENCENT_VECTOR_DB_DATABASE')
self.TENCENT_VECTOR_DB_SHARD = get_env('TENCENT_VECTOR_DB_SHARD')
self.TENCENT_VECTOR_DB_REPLICAS = get_env('TENCENT_VECTOR_DB_REPLICAS')
# pgvecto rs settings
self.PGVECTO_RS_HOST = get_env('PGVECTO_RS_HOST')
self.PGVECTO_RS_PORT = get_env('PGVECTO_RS_PORT')
self.PGVECTO_RS_USER = get_env('PGVECTO_RS_USER')
self.PGVECTO_RS_PASSWORD = get_env('PGVECTO_RS_PASSWORD')
self.PGVECTO_RS_DATABASE = get_env('PGVECTO_RS_DATABASE')
# pgvector settings
self.PGVECTOR_HOST = get_env('PGVECTOR_HOST')
self.PGVECTOR_PORT = get_env('PGVECTOR_PORT')
self.PGVECTOR_USER = get_env('PGVECTOR_USER')
self.PGVECTOR_PASSWORD = get_env('PGVECTOR_PASSWORD')
self.PGVECTOR_DATABASE = get_env('PGVECTOR_DATABASE')
# tidb-vector settings
self.TIDB_VECTOR_HOST = get_env('TIDB_VECTOR_HOST')
self.TIDB_VECTOR_PORT = get_env('TIDB_VECTOR_PORT')
self.TIDB_VECTOR_USER = get_env('TIDB_VECTOR_USER')
self.TIDB_VECTOR_PASSWORD = get_env('TIDB_VECTOR_PASSWORD')
self.TIDB_VECTOR_DATABASE = get_env('TIDB_VECTOR_DATABASE')
# chroma settings
self.CHROMA_HOST = get_env('CHROMA_HOST')
self.CHROMA_PORT = get_env('CHROMA_PORT')
self.CHROMA_TENANT = get_env('CHROMA_TENANT')
self.CHROMA_DATABASE = get_env('CHROMA_DATABASE')
self.CHROMA_AUTH_PROVIDER = get_env('CHROMA_AUTH_PROVIDER')
self.CHROMA_AUTH_CREDENTIALS = get_env('CHROMA_AUTH_CREDENTIALS')
# ------------------------
# Mail Configurations.
# ------------------------
@@ -241,9 +340,10 @@ class Config:
self.SMTP_USERNAME = get_env('SMTP_USERNAME')
self.SMTP_PASSWORD = get_env('SMTP_PASSWORD')
self.SMTP_USE_TLS = get_bool_env('SMTP_USE_TLS')
self.SMTP_OPPORTUNISTIC_TLS = get_bool_env('SMTP_OPPORTUNISTIC_TLS')
# ------------------------
# Workpace Configurations.
# Workspace Configurations.
# ------------------------
self.INVITE_EXPIRY_HOURS = int(get_env('INVITE_EXPIRY_HOURS'))
@@ -268,6 +368,24 @@ class Config:
self.UPLOAD_FILE_SIZE_LIMIT = int(get_env('UPLOAD_FILE_SIZE_LIMIT'))
self.UPLOAD_FILE_BATCH_LIMIT = int(get_env('UPLOAD_FILE_BATCH_LIMIT'))
self.UPLOAD_IMAGE_FILE_SIZE_LIMIT = int(get_env('UPLOAD_IMAGE_FILE_SIZE_LIMIT'))
self.BATCH_UPLOAD_LIMIT = get_env('BATCH_UPLOAD_LIMIT')
# RAG ETL Configurations.
self.ETL_TYPE = get_env('ETL_TYPE')
self.UNSTRUCTURED_API_URL = get_env('UNSTRUCTURED_API_URL')
self.UNSTRUCTURED_API_KEY = get_env('UNSTRUCTURED_API_KEY')
self.KEYWORD_DATA_SOURCE_TYPE = get_env('KEYWORD_DATA_SOURCE_TYPE')
# Indexing Configurations.
self.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH = get_env('INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH')
# Tool Configurations.
self.TOOL_ICON_CACHE_MAX_AGE = get_env('TOOL_ICON_CACHE_MAX_AGE')
self.WORKFLOW_MAX_EXECUTION_STEPS = int(get_env('WORKFLOW_MAX_EXECUTION_STEPS'))
self.WORKFLOW_MAX_EXECUTION_TIME = int(get_env('WORKFLOW_MAX_EXECUTION_TIME'))
self.WORKFLOW_CALL_MAX_DEPTH = int(get_env('WORKFLOW_CALL_MAX_DEPTH'))
self.APP_MAX_EXECUTION_TIME = int(get_env('APP_MAX_EXECUTION_TIME'))
# Moderation in app Configurations.
self.OUTPUT_MODERATION_BUFFER_SIZE = int(get_env('OUTPUT_MODERATION_BUFFER_SIZE'))
@@ -282,6 +400,12 @@ class Config:
# ------------------------
# Platform Configurations.
# ------------------------
self.GITHUB_CLIENT_ID = get_env('GITHUB_CLIENT_ID')
self.GITHUB_CLIENT_SECRET = get_env('GITHUB_CLIENT_SECRET')
self.GOOGLE_CLIENT_ID = get_env('GOOGLE_CLIENT_ID')
self.GOOGLE_CLIENT_SECRET = get_env('GOOGLE_CLIENT_SECRET')
self.OAUTH_REDIRECT_PATH = get_env('OAUTH_REDIRECT_PATH')
self.HOSTED_OPENAI_API_KEY = get_env('HOSTED_OPENAI_API_KEY')
self.HOSTED_OPENAI_API_BASE = get_env('HOSTED_OPENAI_API_BASE')
self.HOSTED_OPENAI_API_ORGANIZATION = get_env('HOSTED_OPENAI_API_ORGANIZATION')
@@ -313,30 +437,15 @@ class Config:
self.HOSTED_FETCH_APP_TEMPLATES_MODE = get_env('HOSTED_FETCH_APP_TEMPLATES_MODE')
self.HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN = get_env('HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN')
self.ETL_TYPE = get_env('ETL_TYPE')
self.UNSTRUCTURED_API_URL = get_env('UNSTRUCTURED_API_URL')
# Model Load Balancing Configurations.
self.MODEL_LB_ENABLED = get_bool_env('MODEL_LB_ENABLED')
# Platform Billing Configurations.
self.BILLING_ENABLED = get_bool_env('BILLING_ENABLED')
# ------------------------
# Enterprise feature Configurations.
# **Before using, please contact business@dify.ai by email to inquire about licensing matters.**
# ------------------------
self.ENTERPRISE_ENABLED = get_bool_env('ENTERPRISE_ENABLED')
self.CAN_REPLACE_LOGO = get_bool_env('CAN_REPLACE_LOGO')
self.BATCH_UPLOAD_LIMIT = get_env('BATCH_UPLOAD_LIMIT')
self.CODE_EXECUTION_ENDPOINT = get_env('CODE_EXECUTION_ENDPOINT')
self.CODE_EXECUTION_API_KEY = get_env('CODE_EXECUTION_API_KEY')
self.API_COMPRESSION_ENABLED = get_bool_env('API_COMPRESSION_ENABLED')
self.TOOL_ICON_CACHE_MAX_AGE = get_env('TOOL_ICON_CACHE_MAX_AGE')
self.KEYWORD_DATA_SOURCE_TYPE = get_env('KEYWORD_DATA_SOURCE_TYPE')
class CloudEditionConfig(Config):
def __init__(self):
super().__init__()
self.EDITION = "CLOUD"
self.GITHUB_CLIENT_ID = get_env('GITHUB_CLIENT_ID')
self.GITHUB_CLIENT_SECRET = get_env('GITHUB_CLIENT_SECRET')
self.GOOGLE_CLIENT_ID = get_env('GOOGLE_CLIENT_ID')
self.GOOGLE_CLIENT_SECRET = get_env('GOOGLE_CLIENT_SECRET')
self.OAUTH_REDIRECT_PATH = get_env('OAUTH_REDIRECT_PATH')


@@ -1,10 +1,11 @@
languages = ['en-US', 'zh-Hans', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN']
languages = ['en-US', 'zh-Hans', 'zh-Hant', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN', 'pl-PL']
language_timezone_mapping = {
'en-US': 'America/New_York',
'zh-Hans': 'Asia/Shanghai',
'zh-Hant': 'Asia/Taipei',
'pt-BR': 'America/Sao_Paulo',
'es-ES': 'Europe/Madrid',
'fr-FR': 'Europe/Paris',
@@ -15,6 +16,8 @@ language_timezone_mapping = {
'it-IT': 'Europe/Rome',
'uk-UA': 'Europe/Kyiv',
'vi-VN': 'Asia/Ho_Chi_Minh',
'ro-RO': 'Europe/Bucharest',
'pl-PL': 'Europe/Warsaw',
}

File diff suppressed because one or more lines are too long


@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-


@@ -1,22 +1,57 @@
from flask import Blueprint
from libs.external_api import ExternalApi
bp = Blueprint('console', __name__, url_prefix='/console/api')
api = ExternalApi(bp)
# Import other controllers
from . import admin, apikey, extension, feature, setup, version, ping
from . import admin, apikey, extension, feature, ping, setup, version
# Import app controllers
from .app import (advanced_prompt_template, annotation, app, audio, completion, conversation, generator, message,
model_config, site, statistic, workflow, workflow_run, workflow_app_log, workflow_statistic, agent)
from .app import (
advanced_prompt_template,
agent,
annotation,
app,
audio,
completion,
conversation,
generator,
message,
model_config,
site,
statistic,
workflow,
workflow_app_log,
workflow_run,
workflow_statistic,
)
# Import auth controllers
from .auth import activate, data_source_oauth, login, oauth
from .auth import activate, data_source_bearer_auth, data_source_oauth, login, oauth
# Import billing controllers
from .billing import billing
# Import datasets controllers
from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing
from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing, website
# Import explore controllers
from .explore import (audio, completion, conversation, installed_app, message, parameter, recommended_app,
saved_message, workflow)
from .explore import (
audio,
completion,
conversation,
installed_app,
message,
parameter,
recommended_app,
saved_message,
workflow,
)
# Import tag controllers
from .tag import tags
# Import workspace controllers
from .workspace import account, members, model_providers, models, tool_providers, workspace
from .workspace import account, load_balancing_config, members, model_providers, models, tool_providers, workspace


@@ -48,6 +48,7 @@ class InsertExploreAppListApi(Resource):
parser.add_argument('desc', type=str, location='json')
parser.add_argument('copyright', type=str, location='json')
parser.add_argument('privacy_policy', type=str, location='json')
parser.add_argument('custom_disclaimer', type=str, location='json')
parser.add_argument('language', type=supported_language, required=True, nullable=False, location='json')
parser.add_argument('category', type=str, required=True, nullable=False, location='json')
parser.add_argument('position', type=int, required=True, nullable=False, location='json')
@@ -62,6 +63,7 @@ class InsertExploreAppListApi(Resource):
desc = args['desc'] if args['desc'] else ''
copy_right = args['copyright'] if args['copyright'] else ''
privacy_policy = args['privacy_policy'] if args['privacy_policy'] else ''
custom_disclaimer = args['custom_disclaimer'] if args['custom_disclaimer'] else ''
else:
desc = site.description if site.description else \
args['desc'] if args['desc'] else ''
@@ -69,6 +71,8 @@ class InsertExploreAppListApi(Resource):
args['copyright'] if args['copyright'] else ''
privacy_policy = site.privacy_policy if site.privacy_policy else \
args['privacy_policy'] if args['privacy_policy'] else ''
custom_disclaimer = site.custom_disclaimer if site.custom_disclaimer else \
args['custom_disclaimer'] if args['custom_disclaimer'] else ''
recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
@@ -78,6 +82,7 @@ class InsertExploreAppListApi(Resource):
description=desc,
copyright=copy_right,
privacy_policy=privacy_policy,
custom_disclaimer=custom_disclaimer,
language=args['language'],
category=args['category'],
position=args['position']
@@ -93,6 +98,7 @@ class InsertExploreAppListApi(Resource):
recommended_app.description = desc
recommended_app.copyright = copy_right
recommended_app.privacy_policy = privacy_policy
recommended_app.custom_disclaimer = custom_disclaimer
recommended_app.language = args['language']
recommended_app.category = args['category']
recommended_app.position = args['position']


@@ -1,26 +1,25 @@
import json
import uuid
from flask_login import current_user
from flask_restful import Resource, inputs, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, BadRequest
from flask_restful import Resource, inputs, marshal, marshal_with, reqparse
from werkzeug.exceptions import BadRequest, Forbidden, abort
from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.agent.entities import AgentToolEntity
from extensions.ext_database import db
from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ToolParameterConfigurationManager
from fields.app_fields import (
app_detail_fields,
app_detail_fields_with_site,
app_pagination_fields,
)
from libs.login import login_required
from models.model import App, AppMode, AppModelConfig
from services.app_service import AppService
from models.model import App, AppModelConfig, AppMode
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.tools.tool_manager import ToolManager
from services.tag_service import TagService
ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']
@@ -30,21 +29,29 @@ class AppListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(app_pagination_fields)
def get(self):
"""Get app list"""
def uuid_list(value):
try:
return [str(uuid.UUID(v)) for v in value.split(',')]
except ValueError:
abort(400, message="Invalid UUID format in tag_ids.")
parser = reqparse.RequestParser()
parser.add_argument('page', type=inputs.int_range(1, 99999), required=False, default=1, location='args')
parser.add_argument('limit', type=inputs.int_range(1, 100), required=False, default=20, location='args')
parser.add_argument('mode', type=str, choices=['chat', 'workflow', 'agent-chat', 'channel', 'all'], default='all', location='args', required=False)
parser.add_argument('name', type=str, location='args', required=False)
parser.add_argument('tag_ids', type=uuid_list, location='args', required=False)
args = parser.parse_args()
# get app list
app_service = AppService()
app_pagination = app_service.get_paginate_apps(current_user.current_tenant_id, args)
if not app_pagination:
return {'data': [], 'total': 0, 'page': 1, 'limit': 20, 'has_more': False}
return app_pagination
return marshal(app_pagination, app_pagination_fields)
@setup_required
@login_required
@@ -61,8 +68,8 @@ class AppListApi(Resource):
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
if 'mode' not in args or args['mode'] is None:
@@ -82,8 +89,8 @@ class AppImportApi(Resource):
@cloud_edition_billing_resource_check('apps')
def post(self):
"""Import app"""
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
@@ -109,43 +116,9 @@ class AppApi(Resource):
@marshal_with(app_detail_fields_with_site)
def get(self, app_model):
"""Get app detail"""
# get original app model config
if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
model_config: AppModelConfig = app_model.app_model_config
agent_mode = model_config.agent_mode_dict
# decrypt agent tool parameters if it's secret-input
for tool in agent_mode.get('tools') or []:
if not isinstance(tool, dict) or len(tool.keys()) <= 3:
continue
agent_tool_entity = AgentToolEntity(**tool)
# get tool
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
agent_tool=agent_tool_entity,
)
manager = ToolParameterConfigurationManager(
tenant_id=current_user.current_tenant_id,
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
)
app_service = AppService()
# get decrypted parameters
if agent_tool_entity.tool_parameters:
parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
masked_parameter = manager.mask_tool_parameters(parameters or {})
else:
masked_parameter = {}
# override tool parameters
tool['tool_parameters'] = masked_parameter
except Exception as e:
pass
# override agent mode
model_config.agent_mode = json.dumps(agent_mode)
db.session.commit()
app_model = app_service.get_app(app_model)
return app_model
@@ -174,7 +147,7 @@ class AppApi(Resource):
@get_app_model
def delete(self, app_model):
"""Delete app"""
if not current_user.is_admin_or_owner:
if not current_user.is_editor:
raise Forbidden()
app_service = AppService()
@@ -191,8 +164,8 @@ class AppCopyApi(Resource):
@marshal_with(app_detail_fields_with_site)
def post(self, app_model):
"""Copy app"""
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
@@ -265,6 +238,9 @@ class AppSiteStatus(Resource):
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_model):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('enable_site', type=bool, required=True, location='json')
args = parser.parse_args()
@@ -282,6 +258,9 @@ class AppApiStatus(Resource):
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_model):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('enable_api', type=bool, required=True, location='json')
args = parser.parse_args()

View File

@@ -85,7 +85,7 @@ class ChatMessageTextApi(Resource):
response = AudioService.transcript_tts(
app_model=app_model,
text=request.form['text'],
voice=request.form.get('voice'),
voice=request.form['voice'],
streaming=False
)

View File

@@ -6,7 +6,7 @@ from flask_restful import Resource, marshal_with, reqparse
from flask_restful.inputs import int_range
from sqlalchemy import func, or_
from sqlalchemy.orm import joinedload
from werkzeug.exceptions import NotFound
from werkzeug.exceptions import Forbidden, NotFound
from controllers.console import api
from controllers.console.app.wraps import get_app_model
@@ -33,6 +33,8 @@ class CompletionConversationApi(Resource):
@get_app_model(mode=AppMode.COMPLETION)
@marshal_with(conversation_pagination_fields)
def get(self, app_model):
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -106,6 +108,8 @@ class CompletionConversationDetailApi(Resource):
@get_app_model(mode=AppMode.COMPLETION)
@marshal_with(conversation_message_detail_fields)
def get(self, app_model, conversation_id):
if not current_user.is_admin_or_owner:
raise Forbidden()
conversation_id = str(conversation_id)
return _get_conversation(app_model, conversation_id)
@@ -115,6 +119,8 @@ class CompletionConversationDetailApi(Resource):
@account_initialization_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def delete(self, app_model, conversation_id):
if not current_user.is_admin_or_owner:
raise Forbidden()
conversation_id = str(conversation_id)
conversation = db.session.query(Conversation) \
@@ -137,6 +143,8 @@ class ChatConversationApi(Resource):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_with_summary_pagination_fields)
def get(self, app_model):
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@@ -225,6 +233,8 @@ class ChatConversationDetailApi(Resource):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_detail_fields)
def get(self, app_model, conversation_id):
if not current_user.is_admin_or_owner:
raise Forbidden()
conversation_id = str(conversation_id)
return _get_conversation(app_model, conversation_id)
@@ -234,6 +244,8 @@ class ChatConversationDetailApi(Resource):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@account_initialization_required
def delete(self, app_model, conversation_id):
if not current_user.is_admin_or_owner:
raise Forbidden()
conversation_id = str(conversation_id)
conversation = db.session.query(Conversation) \

View File

@@ -91,3 +91,9 @@ class DraftWorkflowNotExist(BaseHTTPException):
error_code = 'draft_workflow_not_exist'
description = "Draft workflow need to be initialized."
code = 400
class DraftWorkflowNotSync(BaseHTTPException):
error_code = 'draft_workflow_not_sync'
description = "Workflow graph might have been modified, please refresh and resubmit."
code = 400

View File

@@ -57,6 +57,7 @@ class ModelConfigResource(Resource):
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
app_id=app_model.id,
agent_tool=agent_tool_entity,
)
manager = ToolParameterConfigurationManager(
@@ -64,6 +65,7 @@ class ModelConfigResource(Resource):
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
identity_id=f'AGENT.{app_model.id}'
)
except Exception as e:
continue
@@ -94,6 +96,7 @@ class ModelConfigResource(Resource):
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
app_id=app_model.id,
agent_tool=agent_tool_entity,
)
except Exception as e:
@@ -104,6 +107,7 @@ class ModelConfigResource(Resource):
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
identity_id=f'AGENT.{app_model.id}'
)
manager.delete_tool_parameters_cache()
@@ -111,9 +115,11 @@ class ModelConfigResource(Resource):
if agent_tool_entity.tool_parameters:
if key not in masked_parameter_map:
continue
if agent_tool_entity.tool_parameters == masked_parameter_map[key]:
agent_tool_entity.tool_parameters = parameter_map[key]
for masked_key, masked_value in masked_parameter_map[key].items():
if masked_key in agent_tool_entity.tool_parameters and \
agent_tool_entity.tool_parameters[masked_key] == masked_value:
agent_tool_entity.tool_parameters[masked_key] = parameter_map[key].get(masked_key)
# encrypt parameters
if agent_tool_entity.tool_parameters:

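A minimal sketch (not the actual ToolParameterConfigurationManager API) of the per-key merge the hunk above switches to: only parameters that still carry the masked placeholder are restored from the stored originals, so a key the user actually edited is no longer discarded just because the rest of the dict matches. All values below are hypothetical.

def merge_masked_parameters(submitted: dict, masked: dict, original: dict) -> dict:
    # keep edited values; restore only the keys that still equal their masked form
    merged = dict(submitted)
    for key, masked_value in masked.items():
        if merged.get(key) == masked_value:
            merged[key] = original.get(key)
    return merged

# hypothetical example: api_key left masked, endpoint edited by the user
print(merge_masked_parameters(
    {'api_key': '***', 'endpoint': 'https://new.example.com'},
    {'api_key': '***', 'endpoint': 'https://old.example.com'},
    {'api_key': 'sk-secret', 'endpoint': 'https://old.example.com'},
))
# -> {'api_key': 'sk-secret', 'endpoint': 'https://new.example.com'}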
View File

@@ -23,6 +23,7 @@ def parse_app_site_args():
parser.add_argument('customize_domain', type=str, required=False, location='json')
parser.add_argument('copyright', type=str, required=False, location='json')
parser.add_argument('privacy_policy', type=str, required=False, location='json')
parser.add_argument('custom_disclaimer', type=str, required=False, location='json')
parser.add_argument('customize_token_strategy', type=str, choices=['must', 'allow', 'not_allow'],
required=False,
location='json')
@@ -39,8 +40,8 @@ class AppSite(Resource):
def post(self, app_model):
args = parse_app_site_args()
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be editor, admin, or owner
if not current_user.is_editor:
raise Forbidden()
site = db.session.query(Site). \
@@ -56,6 +57,7 @@ class AppSite(Resource):
'customize_domain',
'copyright',
'privacy_policy',
'custom_disclaimer',
'customize_token_strategy',
'prompt_public'
]:
@@ -63,13 +65,6 @@ class AppSite(Resource):
if value is not None:
setattr(site, attr_name, value)
if attr_name == 'title':
app_model.name = value
elif attr_name == 'icon':
app_model.icon = value
elif attr_name == 'icon_background':
app_model.icon_background = value
db.session.commit()
return site

View File

@@ -7,7 +7,7 @@ from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist
from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist, DraftWorkflowNotSync
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
@@ -20,6 +20,7 @@ from libs.helper import TimestampField, uuid_value
from libs.login import current_user, login_required
from models.model import App, AppMode
from services.app_generate_service import AppGenerateService
from services.errors.app import WorkflowHashNotEqualError
from services.workflow_service import WorkflowService
logger = logging.getLogger(__name__)
@@ -59,6 +60,7 @@ class DraftWorkflowApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('graph', type=dict, required=True, nullable=False, location='json')
parser.add_argument('features', type=dict, required=True, nullable=False, location='json')
parser.add_argument('hash', type=str, required=False, location='json')
args = parser.parse_args()
elif 'text/plain' in content_type:
try:
@@ -71,7 +73,8 @@ class DraftWorkflowApi(Resource):
args = {
'graph': data.get('graph'),
'features': data.get('features')
'features': data.get('features'),
'hash': data.get('hash')
}
except json.JSONDecodeError:
return {'message': 'Invalid JSON data'}, 400
@@ -79,15 +82,21 @@ class DraftWorkflowApi(Resource):
abort(415)
workflow_service = WorkflowService()
workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=args.get('graph'),
features=args.get('features'),
account=current_user
)
try:
workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=args.get('graph'),
features=args.get('features'),
unique_hash=args.get('hash'),
account=current_user
)
except WorkflowHashNotEqualError:
raise DraftWorkflowNotSync()
return {
"result": "success",
"hash": workflow.unique_hash,
"updated_at": TimestampField().format(workflow.updated_at or workflow.created_at)
}
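A hedged sketch of how a client would use the hash returned above: resubmit the value from the last successful save, and treat the draft_workflow_not_sync error as a signal to reload the draft before retrying. The base URL, Authorization header, and the exact error body shape are assumptions, not part of this diff.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

def save_draft(app_id, graph, features, last_hash):
    resp = requests.post(f'{BASE}/apps/{app_id}/workflows/draft', headers=HEADERS,
                         json={'graph': graph, 'features': features, 'hash': last_hash})
    # assumed to echo the error_code defined for DraftWorkflowNotSync above
    if resp.status_code == 400 and resp.json().get('code') == 'draft_workflow_not_sync':
        raise RuntimeError('draft changed elsewhere - refresh the graph and resubmit')
    return resp.json()['hash']   # remember for the next save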
@@ -128,6 +137,71 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
logging.exception("internal server error.")
raise InternalServerError()
class AdvancedChatDraftRunIterationNodeApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT])
def post(self, app_model: App, node_id: str):
"""
Run draft workflow iteration node
"""
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate_single_iteration(
app_model=app_model,
user=current_user,
node_id=node_id,
args=args,
streaming=True
)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class WorkflowDraftRunIterationNodeApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.WORKFLOW])
def post(self, app_model: App, node_id: str):
"""
Run draft workflow iteration node
"""
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate_single_iteration(
app_model=app_model,
user=current_user,
node_id=node_id,
args=args,
streaming=True
)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
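For reference, a hedged example of invoking one of these new single-iteration endpoints; the route comes from the add_resource registrations below, while the host, auth header, and the concrete inputs are assumptions.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

# 'inputs' is the only body field the parser above reads; the endpoint streams events back
resp = requests.post(
    f'{BASE}/apps/<app-id>/workflows/draft/iteration/nodes/<node-id>/run',
    headers=HEADERS,
    json={'inputs': {'items': ['a', 'b', 'c']}},          # hypothetical iteration input
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    print(line)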
class DraftWorkflowRunApi(Resource):
@setup_required
@@ -317,6 +391,8 @@ api.add_resource(AdvancedChatDraftWorkflowRunApi, '/apps/<uuid:app_id>/advanced-
api.add_resource(DraftWorkflowRunApi, '/apps/<uuid:app_id>/workflows/draft/run')
api.add_resource(WorkflowTaskStopApi, '/apps/<uuid:app_id>/workflow-runs/tasks/<string:task_id>/stop')
api.add_resource(DraftWorkflowNodeRunApi, '/apps/<uuid:app_id>/workflows/draft/nodes/<string:node_id>/run')
api.add_resource(AdvancedChatDraftRunIterationNodeApi, '/apps/<uuid:app_id>/advanced-chat/workflows/draft/iteration/nodes/<string:node_id>/run')
api.add_resource(WorkflowDraftRunIterationNodeApi, '/apps/<uuid:app_id>/workflows/draft/iteration/nodes/<string:node_id>/run')
api.add_resource(PublishedWorkflowApi, '/apps/<uuid:app_id>/workflows/publish')
api.add_resource(DefaultBlockConfigsApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs')
api.add_resource(DefaultBlockConfigApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs'

View File

@@ -227,7 +227,7 @@ class WorkflowAverageAppInteractionStatistic(Resource):
{{start}}
{{end}}
GROUP BY date, c.created_by) sub
GROUP BY sub.created_by, sub.date
GROUP BY sub.date
"""
arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}

View File

@@ -0,0 +1,67 @@
from flask_login import current_user
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.auth.error import ApiKeyAuthFailedError
from libs.login import login_required
from services.auth.api_key_auth_service import ApiKeyAuthService
from ..setup import setup_required
from ..wraps import account_initialization_required
class ApiKeyAuthDataSource(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
# The role of the current user in the table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
data_source_api_key_bindings = ApiKeyAuthService.get_provider_auth_list(current_user.current_tenant_id)
if data_source_api_key_bindings:
return {
'settings': [data_source_api_key_binding.to_dict() for data_source_api_key_binding in
data_source_api_key_bindings]}
return {'settings': []}
class ApiKeyAuthDataSourceBinding(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('category', type=str, required=True, nullable=False, location='json')
parser.add_argument('provider', type=str, required=True, nullable=False, location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
ApiKeyAuthService.validate_api_key_auth_args(args)
try:
ApiKeyAuthService.create_provider_auth(current_user.current_tenant_id, args)
except Exception as e:
raise ApiKeyAuthFailedError(str(e))
return {'result': 'success'}, 200
class ApiKeyAuthDataSourceBindingDelete(Resource):
@setup_required
@login_required
@account_initialization_required
def delete(self, binding_id):
# The role of the current user in the table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
ApiKeyAuthService.delete_provider_auth(current_user.current_tenant_id, binding_id)
return {'result': 'success'}, 200
api.add_resource(ApiKeyAuthDataSource, '/api-key-auth/data-source')
api.add_resource(ApiKeyAuthDataSourceBinding, '/api-key-auth/data-source/binding')
api.add_resource(ApiKeyAuthDataSourceBindingDelete, '/api-key-auth/data-source/<uuid:binding_id>')
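A hedged example of binding a data-source API key through the routes registered above. Host and auth header are assumptions, and the exact credentials layout expected by ApiKeyAuthService is not shown in this diff, so the keys below are illustrative only.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

requests.post(f'{BASE}/api-key-auth/data-source/binding', headers=HEADERS, json={
    'category': 'website',                                # illustrative values
    'provider': 'firecrawl',
    'credentials': {'auth_type': 'bearer', 'config': {'api_key': 'fc-...', 'base_url': 'https://api.firecrawl.dev'}},
})

# list current bindings
print(requests.get(f'{BASE}/api-key-auth/data-source', headers=HEADERS).json())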

View File

@@ -0,0 +1,7 @@
from libs.exception import BaseHTTPException
class ApiKeyAuthFailedError(BaseHTTPException):
error_code = 'auth_failed'
description = "{message}"
code = 500

View File

@@ -26,10 +26,13 @@ class LoginApi(Resource):
try:
account = AccountService.authenticate(args['email'], args['password'])
except services.errors.account.AccountLoginError:
return {'code': 'unauthorized', 'message': 'Invalid email or password'}, 401
except services.errors.account.AccountLoginError as e:
return {'code': 'unauthorized', 'message': str(e)}, 401
TenantService.create_owner_tenant_if_not_exist(account)
# SELF_HOSTED only have one workspace
tenants = TenantService.get_join_tenants(account)
if len(tenants) == 0:
return {'result': 'fail', 'data': 'workspace not found, please contact system admin to invite you to join in a workspace'}
AccountService.update_last_login(account, request)

View File

@@ -16,7 +16,7 @@ from extensions.ext_database import db
from fields.data_source_fields import integrate_list_fields, integrate_notion_info_list_fields
from libs.login import login_required
from models.dataset import Document
from models.source import DataSourceBinding
from models.source import DataSourceOauthBinding
from services.dataset_service import DatasetService, DocumentService
from tasks.document_indexing_sync_task import document_indexing_sync_task
@@ -29,9 +29,9 @@ class DataSourceApi(Resource):
@marshal_with(integrate_list_fields)
def get(self):
# get workspace data source integrates
data_source_integrates = db.session.query(DataSourceBinding).filter(
DataSourceBinding.tenant_id == current_user.current_tenant_id,
DataSourceBinding.disabled == False
data_source_integrates = db.session.query(DataSourceOauthBinding).filter(
DataSourceOauthBinding.tenant_id == current_user.current_tenant_id,
DataSourceOauthBinding.disabled == False
).all()
base_url = request.url_root.rstrip('/')
@@ -71,7 +71,7 @@ class DataSourceApi(Resource):
def patch(self, binding_id, action):
binding_id = str(binding_id)
action = str(action)
data_source_binding = DataSourceBinding.query.filter_by(
data_source_binding = DataSourceOauthBinding.query.filter_by(
id=binding_id
).first()
if data_source_binding is None:
@@ -124,7 +124,7 @@ class DataSourceNotionListApi(Resource):
data_source_info = json.loads(document.data_source_info)
exist_page_ids.append(data_source_info['notion_page_id'])
# get all authorized pages
data_source_bindings = DataSourceBinding.query.filter_by(
data_source_bindings = DataSourceOauthBinding.query.filter_by(
tenant_id=current_user.current_tenant_id,
provider='notion',
disabled=False
@@ -163,12 +163,12 @@ class DataSourceNotionApi(Resource):
def get(self, workspace_id, page_id, page_type):
workspace_id = str(workspace_id)
page_id = str(page_id)
data_source_binding = DataSourceBinding.query.filter(
data_source_binding = DataSourceOauthBinding.query.filter(
db.and_(
DataSourceBinding.tenant_id == current_user.current_tenant_id,
DataSourceBinding.provider == 'notion',
DataSourceBinding.disabled == False,
DataSourceBinding.source_info['workspace_id'] == f'"{workspace_id}"'
DataSourceOauthBinding.tenant_id == current_user.current_tenant_id,
DataSourceOauthBinding.provider == 'notion',
DataSourceOauthBinding.disabled == False,
DataSourceOauthBinding.source_info['workspace_id'] == f'"{workspace_id}"'
)
).first()
if not data_source_binding:

View File

@@ -8,13 +8,14 @@ import services
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from fields.app_fields import related_app_list
@@ -48,11 +49,14 @@ class DatasetListApi(Resource):
limit = request.args.get('limit', default=20, type=int)
ids = request.args.getlist('ids')
provider = request.args.get('provider', default="vendor")
search = request.args.get('keyword', default=None, type=str)
tag_ids = request.args.getlist('tag_ids')
if ids:
datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
else:
datasets, total = DatasetService.get_datasets(page, limit, provider,
current_user.current_tenant_id, current_user)
current_user.current_tenant_id, current_user, search, tag_ids)
# check embedding setting
provider_manager = ProviderManager()
@@ -103,8 +107,8 @@ class DatasetListApi(Resource):
help='Invalid indexing technique.')
args = parser.parse_args()
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
try:
@@ -184,11 +188,15 @@ class DatasetApi(Resource):
help='Invalid indexing technique.')
parser.add_argument('permission', type=str, location='json', choices=(
'only_me', 'all_team_members'), help='Invalid permission.')
parser.add_argument('embedding_model', type=str,
location='json', help='Invalid embedding model.')
parser.add_argument('embedding_model_provider', type=str,
location='json', help='Invalid embedding model provider.')
parser.add_argument('retrieval_model', type=dict, location='json', help='Invalid retrieval model.')
args = parser.parse_args()
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
dataset = DatasetService.update_dataset(
@@ -205,14 +213,17 @@ class DatasetApi(Resource):
def delete(self, dataset_id):
dataset_id_str = str(dataset_id)
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
if DatasetService.delete_dataset(dataset_id_str, current_user):
return {'result': 'success'}, 204
else:
raise NotFound("Dataset not found.")
try:
if DatasetService.delete_dataset(dataset_id_str, current_user):
return {'result': 'success'}, 204
else:
raise NotFound("Dataset not found.")
except services.errors.dataset.DatasetInUseError:
raise DatasetInUseError()
class DatasetQueryApi(Resource):
@@ -304,6 +315,22 @@ class DatasetIndexingEstimateApi(Resource):
document_model=args['doc_form']
)
extract_settings.append(extract_setting)
elif args['info_list']['data_source_type'] == 'website_crawl':
website_info_list = args['info_list']['website_info_list']
for url in website_info_list['urls']:
extract_setting = ExtractSetting(
datasource_type="website_crawl",
website_info={
"provider": website_info_list['provider'],
"job_id": website_info_list['job_id'],
"url": url,
"tenant_id": current_user.current_tenant_id,
"mode": 'crawl',
"only_main_content": website_info_list['only_main_content']
},
document_model=args['doc_form']
)
extract_settings.append(extract_setting)
else:
raise ValueError('Data source type not support')
indexing_runner = IndexingRunner()
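For clarity, a sketch of an info_list payload that reaches the new website_crawl branch above; the field names mirror the keys the code reads, while the provider, job id, and URLs are placeholders.

info_list = {
    'data_source_type': 'website_crawl',
    'website_info_list': {
        'provider': 'firecrawl',
        'job_id': '<crawl-job-id>',
        'urls': ['https://docs.example.com/intro', 'https://docs.example.com/setup'],
        'only_main_content': True,
    },
}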
@@ -469,20 +496,21 @@ class DatasetRetrievalSettingApi(Resource):
@account_initialization_required
def get(self):
vector_type = current_app.config['VECTOR_STORE']
if vector_type == 'milvus':
return {
'retrieval_method': [
'semantic_search'
]
}
elif vector_type == 'qdrant' or vector_type == 'weaviate':
return {
'retrieval_method': [
'semantic_search', 'full_text_search', 'hybrid_search'
]
}
else:
raise ValueError("Unsupported vector db type.")
match vector_type:
case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT:
return {
'retrieval_method': [
'semantic_search'
]
}
case VectorType.QDRANT | VectorType.WEAVIATE:
return {
'retrieval_method': [
'semantic_search', 'full_text_search', 'hybrid_search'
]
}
case _:
raise ValueError(f"Unsupported vector db type {vector_type}.")
class DatasetRetrievalSettingMockApi(Resource):
@@ -490,26 +518,45 @@ class DatasetRetrievalSettingMockApi(Resource):
@login_required
@account_initialization_required
def get(self, vector_type):
match vector_type:
case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT:
return {
'retrieval_method': [
'semantic_search'
]
}
case VectorType.QDRANT | VectorType.WEAVIATE:
return {
'retrieval_method': [
'semantic_search', 'full_text_search', 'hybrid_search'
]
}
case _:
raise ValueError(f"Unsupported vector db type {vector_type}.")
if vector_type == 'milvus':
return {
'retrieval_method': [
'semantic_search'
]
}
elif vector_type == 'qdrant' or vector_type == 'weaviate':
return {
'retrieval_method': [
'semantic_search', 'full_text_search', 'hybrid_search'
]
}
else:
raise ValueError("Unsupported vector db type.")
class DatasetErrorDocs(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)
return {
'data': [marshal(item, document_status_fields) for item in results],
'total': len(results)
}, 200
api.add_resource(DatasetListApi, '/datasets')
api.add_resource(DatasetApi, '/datasets/<uuid:dataset_id>')
api.add_resource(DatasetQueryApi, '/datasets/<uuid:dataset_id>/queries')
api.add_resource(DatasetErrorDocs, '/datasets/<uuid:dataset_id>/error-docs')
api.add_resource(DatasetIndexingEstimateApi, '/datasets/indexing-estimate')
api.add_resource(DatasetRelatedAppListApi, '/datasets/<uuid:dataset_id>/related-apps')
api.add_resource(DatasetIndexingStatusApi, '/datasets/<uuid:dataset_id>/indexing-status')

View File

@@ -1,9 +1,12 @@
import logging
from argparse import ArgumentTypeError
from datetime import datetime, timezone
from flask import request
from flask_login import current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import asc, desc
from transformers.hf_argparser import string_to_bool
from werkzeug.exceptions import Forbidden, NotFound
import services
@@ -140,7 +143,11 @@ class DatasetDocumentListApi(Resource):
limit = request.args.get('limit', default=20, type=int)
search = request.args.get('keyword', default=None, type=str)
sort = request.args.get('sort', default='-created_at', type=str)
fetch = request.args.get('fetch', default=False, type=bool)
# "yes", "true", "t", "y", "1" convert to True, while others convert to False.
try:
fetch = string_to_bool(request.args.get('fetch', default='false'))
except (ArgumentTypeError, ValueError, Exception) as e:
fetch = False
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
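As a reference for the fetch parsing above, transformers' string_to_bool accepts a fixed set of truthy/falsy strings and raises ArgumentTypeError for anything else, which the except clause converts to fetch = False.

from transformers.hf_argparser import string_to_bool

for raw in ('yes', 'true', 't', 'y', '1', 'no', 'false', '0'):
    print(raw, '->', string_to_bool(raw))
# any other string raises argparse.ArgumentTypeError, handled above as fetch = False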
@@ -219,8 +226,8 @@ class DatasetDocumentListApi(Resource):
if not dataset:
raise NotFound('Dataset not found.')
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
try:
@@ -233,7 +240,7 @@ class DatasetDocumentListApi(Resource):
location='json')
parser.add_argument('data_source', type=dict, required=False, location='json')
parser.add_argument('process_rule', type=dict, required=False, location='json')
parser.add_argument('duplicate', type=bool, nullable=False, location='json')
parser.add_argument('duplicate', type=bool, default=True, nullable=False, location='json')
parser.add_argument('original_document_id', type=str, required=False, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
@@ -271,8 +278,8 @@ class DatasetInitApi(Resource):
@marshal_with(dataset_and_document_fields)
@cloud_edition_billing_resource_check('vector_space')
def post(self):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
@@ -393,9 +400,6 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
def get(self, dataset_id, batch):
dataset_id = str(dataset_id)
batch = str(batch)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
documents = self.get_batch_documents(dataset_id, batch)
response = {
"tokens": 0,
@@ -461,6 +465,20 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
document_model=document.doc_form
)
extract_settings.append(extract_setting)
elif document.data_source_type == 'website_crawl':
extract_setting = ExtractSetting(
datasource_type="website_crawl",
website_info={
"provider": data_source_info['provider'],
"job_id": data_source_info['job_id'],
"url": data_source_info['url'],
"tenant_id": current_user.current_tenant_id,
"mode": data_source_info['mode'],
"only_main_content": data_source_info['only_main_content']
},
document_model=document.doc_form
)
extract_settings.append(extract_setting)
else:
raise ValueError('Data source type not support')
@@ -628,8 +646,8 @@ class DocumentProcessingApi(DocumentResource):
document_id = str(document_id)
document = self.get_document(dataset_id, document_id)
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
if action == "pause":
@@ -692,8 +710,8 @@ class DocumentMetadataApi(DocumentResource):
doc_type = req_data.get('doc_type')
doc_metadata = req_data.get('doc_metadata')
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
if doc_type is None or doc_metadata is None:
@@ -739,8 +757,8 @@ class DocumentStatusApi(DocumentResource):
document = self.get_document(dataset_id, document_id)
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
indexing_cache_key = 'document_{}_indexing'.format(document.id)
@@ -883,6 +901,98 @@ class DocumentRecoverApi(DocumentResource):
return {'result': 'success'}, 204
class DocumentRetryApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def post(self, dataset_id):
"""retry document."""
parser = reqparse.RequestParser()
parser.add_argument('document_ids', type=list, required=True, nullable=False,
location='json')
args = parser.parse_args()
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
retry_documents = []
if not dataset:
raise NotFound('Dataset not found.')
for document_id in args['document_ids']:
try:
document_id = str(document_id)
document = DocumentService.get_document(dataset.id, document_id)
# 404 if document not found
if document is None:
raise NotFound("Document Not Exists.")
# 403 if document is archived
if DocumentService.check_archived(document):
raise ArchivedDocumentImmutableError()
# 400 if document is completed
if document.indexing_status == 'completed':
raise DocumentAlreadyFinishedError()
retry_documents.append(document)
except Exception as e:
logging.error(f"Document {document_id} retry failed: {str(e)}")
continue
# retry document
DocumentService.retry_document(dataset_id, retry_documents)
return {'result': 'success'}, 204
class DocumentRenameApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(document_fields)
def post(self, dataset_id, document_id):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, nullable=False, location='json')
args = parser.parse_args()
try:
document = DocumentService.rename_document(dataset_id, document_id, args['name'])
except services.errors.document.DocumentIndexingError:
raise DocumentIndexingError('Cannot rename document during indexing.')
return document
class WebsiteDocumentSyncApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id, document_id):
"""sync website document."""
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
document_id = str(document_id)
document = DocumentService.get_document(dataset.id, document_id)
if not document:
raise NotFound('Document not found.')
if document.tenant_id != current_user.current_tenant_id:
raise Forbidden('No permission.')
if document.data_source_type != 'website_crawl':
raise ValueError('Document is not a website document.')
# 403 if document is archived
if DocumentService.check_archived(document):
raise ArchivedDocumentImmutableError()
# sync document
DocumentService.sync_website_document(dataset_id, document)
return {'result': 'success'}, 200
api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
'/datasets/<uuid:dataset_id>/documents')
@@ -908,3 +1018,8 @@ api.add_resource(DocumentStatusApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
api.add_resource(DocumentRetryApi, '/datasets/<uuid:dataset_id>/retry')
api.add_resource(DocumentRenameApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/rename')
api.add_resource(WebsiteDocumentSyncApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/website-sync')

View File

@@ -126,8 +126,8 @@ class DatasetDocumentSegmentApi(Resource):
raise NotFound('Dataset not found.')
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
try:
@@ -302,8 +302,8 @@ class DatasetDocumentSegmentUpdateApi(Resource):
).first()
if not segment:
raise NotFound('Segment not found.')
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)

View File

@@ -71,3 +71,15 @@ class InvalidMetadataError(BaseHTTPException):
error_code = 'invalid_metadata'
description = "The metadata content is incorrect. Please check and verify."
code = 400
class WebsiteCrawlError(BaseHTTPException):
error_code = 'crawl_failed'
description = "{message}"
code = 500
class DatasetInUseError(BaseHTTPException):
error_code = 'dataset_in_use'
description = "The dataset is being used by some apps. Please remove the dataset from the apps before deleting it."
code = 409

View File

@@ -12,7 +12,7 @@ from controllers.console.app.error import (
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.datasets.error import DatasetNotInitializedError, HighQualityDatasetOnlyError
from controllers.console.datasets.error import DatasetNotInitializedError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import (
@@ -45,10 +45,6 @@ class HitTestingApi(Resource):
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
# only high quality dataset can be used for hit testing
if dataset.indexing_technique != 'high_quality':
raise HighQualityDatasetOnlyError()
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, location='json')
parser.add_argument('retrieval_model', type=dict, required=False, location='json')

View File

@@ -0,0 +1,49 @@
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.datasets.error import WebsiteCrawlError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required
from services.website_service import WebsiteService
class WebsiteCrawlApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('provider', type=str, choices=['firecrawl'],
required=True, nullable=True, location='json')
parser.add_argument('url', type=str, required=True, nullable=True, location='json')
parser.add_argument('options', type=dict, required=True, nullable=True, location='json')
args = parser.parse_args()
WebsiteService.document_create_args_validate(args)
# crawl url
try:
result = WebsiteService.crawl_url(args)
except Exception as e:
raise WebsiteCrawlError(str(e))
return result, 200
class WebsiteCrawlStatusApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, job_id: str):
parser = reqparse.RequestParser()
parser.add_argument('provider', type=str, choices=['firecrawl'], required=True, location='args')
args = parser.parse_args()
# get crawl status
try:
result = WebsiteService.get_crawl_status(job_id, args['provider'])
except Exception as e:
raise WebsiteCrawlError(str(e))
return result, 200
api.add_resource(WebsiteCrawlApi, '/website/crawl')
api.add_resource(WebsiteCrawlStatusApi, '/website/crawl/status/<string:job_id>')
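A hedged end-to-end example against the two routes registered above. Host and auth header are assumptions, and the options keys and the shape of the returned job id are illustrative rather than guaranteed by this diff.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

# provider, url, and options are the three body fields parsed above
job = requests.post(f'{BASE}/website/crawl', headers=HEADERS, json={
    'provider': 'firecrawl',
    'url': 'https://docs.example.com',
    'options': {'limit': 10, 'crawl_sub_pages': True, 'only_main_content': True},   # illustrative keys
}).json()

# poll status; provider is passed as a query argument
status = requests.get(f'{BASE}/website/crawl/status/{job.get("job_id", "<job-id>")}',
                      headers=HEADERS, params={'provider': 'firecrawl'}).json()
print(status)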

View File

@@ -76,7 +76,7 @@ class ChatTextApi(InstalledAppResource):
response = AudioService.transcript_tts(
app_model=app_model,
text=request.form['text'],
voice=request.form.get('voice'),
voice=request.form['voice'] if request.form.get('voice') else app_model.app_model_config.text_to_speech_dict.get('voice'),
streaming=False
)
return {'data': response.data.decode('latin1')}

View File

@@ -21,6 +21,7 @@ recommended_app_fields = {
'description': fields.String(attribute='description'),
'copyright': fields.String,
'privacy_policy': fields.String,
'custom_disclaimer': fields.String,
'category': fields.String,
'position': fields.Integer,
'is_listed': fields.Boolean

View File

@@ -1,17 +1,28 @@
from flask_login import current_user
from flask_restful import Resource
from libs.login import login_required
from services.feature_service import FeatureService
from . import api
from .wraps import cloud_utm_record
from .setup import setup_required
from .wraps import account_initialization_required, cloud_utm_record
class FeatureApi(Resource):
@setup_required
@login_required
@account_initialization_required
@cloud_utm_record
def get(self):
return FeatureService.get_features(current_user.current_tenant_id).dict()
return FeatureService.get_features(current_user.current_tenant_id).model_dump()
class SystemFeatureApi(Resource):
def get(self):
return FeatureService.get_system_features().model_dump()
api.add_resource(FeatureApi, '/features')
api.add_resource(SystemFeatureApi, '/system-features')

View File

@@ -58,6 +58,8 @@ class SetupApi(Resource):
password=args['password']
)
TenantService.create_owner_tenant_if_not_exist(account)
setup()
AccountService.update_last_login(account, request)

View File

@@ -0,0 +1,159 @@
from flask import request
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.tag_fields import tag_fields
from libs.login import login_required
from models.model import Tag
from services.tag_service import TagService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 50:
raise ValueError('Name must be between 1 to 50 characters.')
return name
class TagListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(tag_fields)
def get(self):
tag_type = request.args.get('type', type=str)
keyword = request.args.get('keyword', default=None, type=str)
tags = TagService.get_tags(tag_type, current_user.current_tenant_id, keyword)
return tags, 200
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False, required=True,
help='Name must be between 1 to 50 characters.',
type=_validate_name)
parser.add_argument('type', type=str, location='json',
choices=Tag.TAG_TYPE_LIST,
nullable=True,
help='Invalid tag type.')
args = parser.parse_args()
tag = TagService.save_tags(args)
response = {
'id': tag.id,
'name': tag.name,
'type': tag.type,
'binding_count': 0
}
return response, 200
class TagUpdateDeleteApi(Resource):
@setup_required
@login_required
@account_initialization_required
def patch(self, tag_id):
tag_id = str(tag_id)
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False, required=True,
help='Name must be between 1 to 50 characters.',
type=_validate_name)
args = parser.parse_args()
tag = TagService.update_tags(args, tag_id)
binding_count = TagService.get_tag_binding_count(tag_id)
response = {
'id': tag.id,
'name': tag.name,
'type': tag.type,
'binding_count': binding_count
}
return response, 200
@setup_required
@login_required
@account_initialization_required
def delete(self, tag_id):
tag_id = str(tag_id)
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
TagService.delete_tag(tag_id)
return 200
class TagBindingCreateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('tag_ids', type=list, nullable=False, required=True, location='json',
help='Tag IDs is required.')
parser.add_argument('target_id', type=str, nullable=False, required=True, location='json',
help='Target ID is required.')
parser.add_argument('type', type=str, location='json',
choices=Tag.TAG_TYPE_LIST,
nullable=True,
help='Invalid tag type.')
args = parser.parse_args()
TagService.save_tag_binding(args)
return 200
class TagBindingDeleteApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('tag_id', type=str, nullable=False, required=True,
help='Tag ID is required.')
parser.add_argument('target_id', type=str, nullable=False, required=True,
help='Target ID is required.')
parser.add_argument('type', type=str, location='json',
choices=Tag.TAG_TYPE_LIST,
nullable=True,
help='Invalid tag type.')
args = parser.parse_args()
TagService.delete_tag_binding(args)
return 200
api.add_resource(TagListApi, '/tags')
api.add_resource(TagUpdateDeleteApi, '/tags/<uuid:tag_id>')
api.add_resource(TagBindingCreateApi, '/tag-bindings/create')
api.add_resource(TagBindingDeleteApi, '/tag-bindings/remove')
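A hedged walk-through of the new tag endpoints registered above; the host and auth header are assumptions, and 'app' is used as the tag type on the assumption that it is one of Tag.TAG_TYPE_LIST.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

# create a tag (editor role or above, per the checks above)
tag = requests.post(f'{BASE}/tags', headers=HEADERS,
                    json={'name': 'internal', 'type': 'app'}).json()

# attach it to a target, then detach it again
requests.post(f'{BASE}/tag-bindings/create', headers=HEADERS,
              json={'tag_ids': [tag['id']], 'target_id': '<app-id>', 'type': 'app'})
requests.post(f'{BASE}/tag-bindings/remove', headers=HEADERS,
              json={'tag_id': tag['id'], 'target_id': '<app-id>', 'type': 'app'})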

View File

@@ -17,13 +17,19 @@ class VersionApi(Resource):
args = parser.parse_args()
check_update_url = current_app.config['CHECK_UPDATE_URL']
if not check_update_url:
return {
'version': '0.0.0',
'release_date': '',
'release_notes': '',
'can_auto_update': False
result = {
'version': current_app.config['CURRENT_VERSION'],
'release_date': '',
'release_notes': '',
'can_auto_update': False,
'features': {
'can_replace_logo': current_app.config['CAN_REPLACE_LOGO'],
'model_load_balancing_enabled': current_app.config['MODEL_LB_ENABLED']
}
}
if not check_update_url:
return result
try:
response = requests.get(check_update_url, {
@@ -31,20 +37,15 @@ class VersionApi(Resource):
})
except Exception as error:
logging.warning("Check update version error: {}.".format(str(error)))
return {
'version': args.get('current_version'),
'release_date': '',
'release_notes': '',
'can_auto_update': False
}
result['version'] = args.get('current_version')
return result
content = json.loads(response.content)
return {
'version': content['version'],
'release_date': content['releaseDate'],
'release_notes': content['releaseNotes'],
'can_auto_update': content['canAutoUpdate']
}
result['version'] = content['version']
result['release_date'] = content['releaseDate']
result['release_notes'] = content['releaseNotes']
result['can_auto_update'] = content['canAutoUpdate']
return result
api.add_resource(VersionApi, '/version')

View File

@@ -0,0 +1,106 @@
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from libs.login import current_user, login_required
from models.account import TenantAccountRole
from services.model_load_balancing_service import ModelLoadBalancingService
class LoadBalancingCredentialsValidateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider: str):
if not TenantAccountRole.is_privileged_role(current_user.current_tenant.current_role):
raise Forbidden()
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('model', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=[mt.value for mt in ModelType], location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
# validate model load balancing credentials
model_load_balancing_service = ModelLoadBalancingService()
result = True
error = None
try:
model_load_balancing_service.validate_load_balancing_credentials(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type'],
credentials=args['credentials']
)
except CredentialsValidateFailedError as ex:
result = False
error = str(ex)
response = {'result': 'success' if result else 'error'}
if not result:
response['error'] = error
return response
class LoadBalancingConfigCredentialsValidateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider: str, config_id: str):
if not TenantAccountRole.is_privileged_role(current_user.current_tenant.current_role):
raise Forbidden()
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('model', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=[mt.value for mt in ModelType], location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
# validate model load balancing config credentials
model_load_balancing_service = ModelLoadBalancingService()
result = True
error = None
try:
model_load_balancing_service.validate_load_balancing_credentials(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type'],
credentials=args['credentials'],
config_id=config_id,
)
except CredentialsValidateFailedError as ex:
result = False
error = str(ex)
response = {'result': 'success' if result else 'error'}
if not result:
response['error'] = error
return response
# Load Balancing Config
api.add_resource(LoadBalancingCredentialsValidateApi,
'/workspaces/current/model-providers/<string:provider>/models/load-balancing-configs/credentials-validate')
api.add_resource(LoadBalancingConfigCredentialsValidateApi,
'/workspaces/current/model-providers/<string:provider>/models/load-balancing-configs/<string:config_id>/credentials-validate')

View File

@@ -9,7 +9,7 @@ from controllers.console.wraps import account_initialization_required, cloud_edi
from extensions.ext_database import db
from fields.member_fields import account_with_role_list_fields
from libs.login import login_required
from models.account import Account
from models.account import Account, TenantAccountRole
from services.account_service import RegisterService, TenantService
from services.errors.account import AccountAlreadyInTenantError
@@ -43,7 +43,7 @@ class MemberInviteEmailApi(Resource):
invitee_emails = args['emails']
invitee_role = args['role']
interface_language = args['language']
if invitee_role not in ['admin', 'normal']:
if not TenantAccountRole.is_non_owner_role(invitee_role):
return {'code': 'invalid-role', 'message': 'Invalid role'}, 400
inviter = current_user
@@ -114,7 +114,7 @@ class MemberUpdateRoleApi(Resource):
args = parser.parse_args()
new_role = args['role']
if new_role not in ['admin', 'normal', 'owner']:
if not TenantAccountRole.is_valid_role(new_role):
return {'code': 'invalid-role', 'message': 'Invalid role'}, 400
member = Account.query.get(str(member_id))

View File

@@ -11,6 +11,7 @@ from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.utils.encoders import jsonable_encoder
from libs.login import login_required
from services.model_load_balancing_service import ModelLoadBalancingService
from services.model_provider_service import ModelProviderService
@@ -41,6 +42,9 @@ class DefaultModelApi(Resource):
@login_required
@account_initialization_required
def post(self):
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('model_settings', type=list, required=True, nullable=False, location='json')
args = parser.parse_args()
@@ -94,7 +98,7 @@ class ModelProviderModelApi(Resource):
@login_required
@account_initialization_required
def post(self, provider: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
if not current_user.is_admin_or_owner:
raise Forbidden()
tenant_id = current_user.current_tenant_id
@@ -103,21 +107,56 @@ class ModelProviderModelApi(Resource):
parser.add_argument('model', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=[mt.value for mt in ModelType], location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
parser.add_argument('credentials', type=dict, required=False, nullable=True, location='json')
parser.add_argument('load_balancing', type=dict, required=False, nullable=True, location='json')
parser.add_argument('config_from', type=str, required=False, nullable=True, location='json')
args = parser.parse_args()
model_provider_service = ModelProviderService()
model_load_balancing_service = ModelLoadBalancingService()
try:
model_provider_service.save_model_credentials(
if ('load_balancing' in args and args['load_balancing'] and
'enabled' in args['load_balancing'] and args['load_balancing']['enabled']):
if 'configs' not in args['load_balancing']:
raise ValueError('invalid load balancing configs')
# save load balancing configs
model_load_balancing_service.update_load_balancing_configs(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type'],
credentials=args['credentials']
configs=args['load_balancing']['configs']
)
except CredentialsValidateFailedError as ex:
raise ValueError(str(ex))
# enable load balancing
model_load_balancing_service.enable_model_load_balancing(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type']
)
else:
# disable load balancing
model_load_balancing_service.disable_model_load_balancing(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type']
)
if args.get('config_from', '') != 'predefined-model':
model_provider_service = ModelProviderService()
try:
model_provider_service.save_model_credentials(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type'],
credentials=args['credentials']
)
except CredentialsValidateFailedError as ex:
raise ValueError(str(ex))
return {'result': 'success'}, 200
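A sketch of the request body this handler now accepts when load balancing is turned on; the top-level keys mirror the parser above, while the per-config fields inside 'configs' are assumptions about what ModelLoadBalancingService expects.

payload = {
    'model': 'gpt-4o',                     # illustrative model
    'model_type': 'llm',
    'config_from': 'predefined-model',     # skips re-saving provider credentials, per the branch above
    'load_balancing': {
        'enabled': True,
        'configs': [                        # assumed shape of each balancing entry
            {'name': 'key-1', 'credentials': {'openai_api_key': 'sk-...'}, 'enabled': True},
            {'name': 'key-2', 'credentials': {'openai_api_key': 'sk-...'}, 'enabled': True},
        ],
    },
}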
@@ -125,7 +164,7 @@ class ModelProviderModelApi(Resource):
@login_required
@account_initialization_required
def delete(self, provider: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
if not current_user.is_admin_or_owner:
raise Forbidden()
tenant_id = current_user.current_tenant_id
@@ -169,11 +208,73 @@ class ModelProviderModelCredentialApi(Resource):
model=args['model']
)
model_load_balancing_service = ModelLoadBalancingService()
is_load_balancing_enabled, load_balancing_configs = model_load_balancing_service.get_load_balancing_configs(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type']
)
return {
"credentials": credentials
"credentials": credentials,
"load_balancing": {
"enabled": is_load_balancing_enabled,
"configs": load_balancing_configs
}
}
class ModelProviderModelEnableApi(Resource):
@setup_required
@login_required
@account_initialization_required
def patch(self, provider: str):
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('model', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=[mt.value for mt in ModelType], location='json')
args = parser.parse_args()
model_provider_service = ModelProviderService()
model_provider_service.enable_model(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type']
)
return {'result': 'success'}
class ModelProviderModelDisableApi(Resource):
@setup_required
@login_required
@account_initialization_required
def patch(self, provider: str):
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('model', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=[mt.value for mt in ModelType], location='json')
args = parser.parse_args()
model_provider_service = ModelProviderService()
model_provider_service.disable_model(
tenant_id=tenant_id,
provider=provider,
model=args['model'],
model_type=args['model_type']
)
return {'result': 'success'}
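For completeness, a hedged example of the new enable/disable endpoints; the routes appear in the registrations below, while the host and auth header are assumptions.

import requests

BASE = 'http://localhost:5001/console/api'               # assumed host/prefix
HEADERS = {'Authorization': 'Bearer <console-token>'}     # assumed auth

body = {'model': 'gpt-3.5-turbo', 'model_type': 'llm'}    # the two fields parsed above
requests.patch(f'{BASE}/workspaces/current/model-providers/openai/models/disable',
               headers=HEADERS, json=body)
requests.patch(f'{BASE}/workspaces/current/model-providers/openai/models/enable',
               headers=HEADERS, json=body)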
class ModelProviderModelValidateApi(Resource):
@setup_required
@@ -258,6 +359,10 @@ class ModelProviderAvailableModelApi(Resource):
api.add_resource(ModelProviderModelApi, '/workspaces/current/model-providers/<string:provider>/models')
api.add_resource(ModelProviderModelEnableApi, '/workspaces/current/model-providers/<string:provider>/models/enable',
endpoint='model-provider-model-enable')
api.add_resource(ModelProviderModelDisableApi, '/workspaces/current/model-providers/<string:provider>/models/disable',
endpoint='model-provider-model-disable')
api.add_resource(ModelProviderModelCredentialApi,
'/workspaces/current/model-providers/<string:provider>/models/credentials')
api.add_resource(ModelProviderModelValidateApi,

View File

@@ -9,8 +9,13 @@ from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.utils.encoders import jsonable_encoder
from libs.helper import alphanumeric, uuid_value
from libs.login import login_required
from services.tools_manage_service import ToolManageService
from services.tools.api_tools_manage_service import ApiToolManageService
from services.tools.builtin_tools_manage_service import BuiltinToolManageService
from services.tools.tool_labels_service import ToolLabelsService
from services.tools.tools_manage_service import ToolCommonService
from services.tools.workflow_tools_manage_service import WorkflowToolManageService
class ToolProviderListApi(Resource):
@@ -21,7 +26,11 @@ class ToolProviderListApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return ToolManageService.list_tool_providers(user_id, tenant_id)
req = reqparse.RequestParser()
req.add_argument('type', type=str, choices=['builtin', 'model', 'api', 'workflow'], required=False, nullable=True, location='args')
args = req.parse_args()
return ToolCommonService.list_tool_providers(user_id, tenant_id, args.get('type', None))
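A hypothetical call to the listing endpoint above with the new type filter; the /console/api prefix and the auth header are assumptions for illustration.
import requests

resp = requests.get(
    "https://dify.example.com/console/api/workspaces/current/tool-providers",
    params={"type": "api"},  # one of: builtin, model, api, workflow
    headers={"Authorization": "Bearer <console-access-token>"},
    timeout=10,
)
print(resp.json())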
class ToolBuiltinProviderListToolsApi(Resource):
@setup_required
@@ -31,7 +40,7 @@ class ToolBuiltinProviderListToolsApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder(ToolManageService.list_builtin_tool_provider_tools(
return jsonable_encoder(BuiltinToolManageService.list_builtin_tool_provider_tools(
user_id,
tenant_id,
provider,
@@ -48,7 +57,7 @@ class ToolBuiltinProviderDeleteApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return ToolManageService.delete_builtin_tool_provider(
return BuiltinToolManageService.delete_builtin_tool_provider(
user_id,
tenant_id,
provider,
@@ -70,7 +79,7 @@ class ToolBuiltinProviderUpdateApi(Resource):
args = parser.parse_args()
return ToolManageService.update_builtin_tool_provider(
return BuiltinToolManageService.update_builtin_tool_provider(
user_id,
tenant_id,
provider,
@@ -85,7 +94,7 @@ class ToolBuiltinProviderGetCredentialsApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return ToolManageService.get_builtin_tool_provider_credentials(
return BuiltinToolManageService.get_builtin_tool_provider_credentials(
user_id,
tenant_id,
provider,
@@ -94,35 +103,10 @@ class ToolBuiltinProviderGetCredentialsApi(Resource):
class ToolBuiltinProviderIconApi(Resource):
@setup_required
def get(self, provider):
icon_bytes, mimetype = ToolManageService.get_builtin_tool_provider_icon(provider)
icon_bytes, mimetype = BuiltinToolManageService.get_builtin_tool_provider_icon(provider)
icon_cache_max_age = int(current_app.config.get('TOOL_ICON_CACHE_MAX_AGE'))
return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age)
class ToolModelProviderIconApi(Resource):
@setup_required
def get(self, provider):
icon_bytes, mimetype = ToolManageService.get_model_tool_provider_icon(provider)
return send_file(io.BytesIO(icon_bytes), mimetype=mimetype)
class ToolModelProviderListToolsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('provider', type=str, required=True, nullable=False, location='args')
args = parser.parse_args()
return jsonable_encoder(ToolManageService.list_model_tool_provider_tools(
user_id,
tenant_id,
args['provider'],
))
class ToolApiProviderAddApi(Resource):
@setup_required
@login_required
@@ -141,10 +125,12 @@ class ToolApiProviderAddApi(Resource):
parser.add_argument('provider', type=str, required=True, nullable=False, location='json')
parser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
parser.add_argument('privacy_policy', type=str, required=False, nullable=True, location='json')
parser.add_argument('labels', type=list[str], required=False, nullable=True, location='json', default=[])
parser.add_argument('custom_disclaimer', type=str, required=False, nullable=True, location='json')
args = parser.parse_args()
return ToolManageService.create_api_tool_provider(
return ApiToolManageService.create_api_tool_provider(
user_id,
tenant_id,
args['provider'],
@@ -153,6 +139,8 @@ class ToolApiProviderAddApi(Resource):
args['schema_type'],
args['schema'],
args.get('privacy_policy', ''),
args.get('custom_disclaimer', ''),
args.get('labels', []),
)
class ToolApiProviderGetRemoteSchemaApi(Resource):
@@ -166,7 +154,7 @@ class ToolApiProviderGetRemoteSchemaApi(Resource):
args = parser.parse_args()
return ToolManageService.get_api_tool_provider_remote_schema(
return ApiToolManageService.get_api_tool_provider_remote_schema(
current_user.id,
current_user.current_tenant_id,
args['url'],
@@ -186,7 +174,7 @@ class ToolApiProviderListToolsApi(Resource):
args = parser.parse_args()
return jsonable_encoder(ToolManageService.list_api_tool_provider_tools(
return jsonable_encoder(ApiToolManageService.list_api_tool_provider_tools(
user_id,
tenant_id,
args['provider'],
@@ -211,10 +199,12 @@ class ToolApiProviderUpdateApi(Resource):
parser.add_argument('original_provider', type=str, required=True, nullable=False, location='json')
parser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
parser.add_argument('privacy_policy', type=str, required=True, nullable=True, location='json')
parser.add_argument('labels', type=list[str], required=False, nullable=True, location='json')
parser.add_argument('custom_disclaimer', type=str, required=True, nullable=True, location='json')
args = parser.parse_args()
return ToolManageService.update_api_tool_provider(
return ApiToolManageService.update_api_tool_provider(
user_id,
tenant_id,
args['provider'],
@@ -224,6 +214,8 @@ class ToolApiProviderUpdateApi(Resource):
args['schema_type'],
args['schema'],
args['privacy_policy'],
args['custom_disclaimer'],
args.get('labels', []),
)
class ToolApiProviderDeleteApi(Resource):
@@ -243,7 +235,7 @@ class ToolApiProviderDeleteApi(Resource):
args = parser.parse_args()
return ToolManageService.delete_api_tool_provider(
return ApiToolManageService.delete_api_tool_provider(
user_id,
tenant_id,
args['provider'],
@@ -263,7 +255,7 @@ class ToolApiProviderGetApi(Resource):
args = parser.parse_args()
return ToolManageService.get_api_tool_provider(
return ApiToolManageService.get_api_tool_provider(
user_id,
tenant_id,
args['provider'],
@@ -274,7 +266,7 @@ class ToolBuiltinProviderCredentialsSchemaApi(Resource):
@login_required
@account_initialization_required
def get(self, provider):
return ToolManageService.list_builtin_provider_credentials_schema(provider)
return BuiltinToolManageService.list_builtin_provider_credentials_schema(provider)
class ToolApiProviderSchemaApi(Resource):
@setup_required
@@ -287,7 +279,7 @@ class ToolApiProviderSchemaApi(Resource):
args = parser.parse_args()
return ToolManageService.parser_api_schema(
return ApiToolManageService.parser_api_schema(
schema=args['schema'],
)
@@ -307,7 +299,7 @@ class ToolApiProviderPreviousTestApi(Resource):
args = parser.parse_args()
return ToolManageService.test_api_tool_preview(
return ApiToolManageService.test_api_tool_preview(
current_user.current_tenant_id,
args['provider_name'] if args['provider_name'] else '',
args['tool_name'],
@@ -317,6 +309,153 @@ class ToolApiProviderPreviousTestApi(Resource):
args['schema'],
)
class ToolWorkflowProviderCreateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
if not current_user.is_admin_or_owner:
raise Forbidden()
user_id = current_user.id
tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser()
reqparser.add_argument('workflow_app_id', type=uuid_value, required=True, nullable=False, location='json')
reqparser.add_argument('name', type=alphanumeric, required=True, nullable=False, location='json')
reqparser.add_argument('label', type=str, required=True, nullable=False, location='json')
reqparser.add_argument('description', type=str, required=True, nullable=False, location='json')
reqparser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
reqparser.add_argument('parameters', type=list[dict], required=True, nullable=False, location='json')
reqparser.add_argument('privacy_policy', type=str, required=False, nullable=True, location='json', default='')
reqparser.add_argument('labels', type=list[str], required=False, nullable=True, location='json')
args = reqparser.parse_args()
return WorkflowToolManageService.create_workflow_tool(
user_id,
tenant_id,
args['workflow_app_id'],
args['name'],
args['label'],
args['icon'],
args['description'],
args['parameters'],
args['privacy_policy'],
args.get('labels', []),
)
class ToolWorkflowProviderUpdateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
if not current_user.is_admin_or_owner:
raise Forbidden()
user_id = current_user.id
tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser()
reqparser.add_argument('workflow_tool_id', type=uuid_value, required=True, nullable=False, location='json')
reqparser.add_argument('name', type=alphanumeric, required=True, nullable=False, location='json')
reqparser.add_argument('label', type=str, required=True, nullable=False, location='json')
reqparser.add_argument('description', type=str, required=True, nullable=False, location='json')
reqparser.add_argument('icon', type=dict, required=True, nullable=False, location='json')
reqparser.add_argument('parameters', type=list[dict], required=True, nullable=False, location='json')
reqparser.add_argument('privacy_policy', type=str, required=False, nullable=True, location='json', default='')
reqparser.add_argument('labels', type=list[str], required=False, nullable=True, location='json')
args = reqparser.parse_args()
if not args['workflow_tool_id']:
raise ValueError('incorrect workflow_tool_id')
return WorkflowToolManageService.update_workflow_tool(
user_id,
tenant_id,
args['workflow_tool_id'],
args['name'],
args['label'],
args['icon'],
args['description'],
args['parameters'],
args['privacy_policy'],
args.get('labels', []),
)
class ToolWorkflowProviderDeleteApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
if not current_user.is_admin_or_owner:
raise Forbidden()
user_id = current_user.id
tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser()
reqparser.add_argument('workflow_tool_id', type=uuid_value, required=True, nullable=False, location='json')
args = reqparser.parse_args()
return WorkflowToolManageService.delete_workflow_tool(
user_id,
tenant_id,
args['workflow_tool_id'],
)
class ToolWorkflowProviderGetApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('workflow_tool_id', type=uuid_value, required=False, nullable=True, location='args')
parser.add_argument('workflow_app_id', type=uuid_value, required=False, nullable=True, location='args')
args = parser.parse_args()
if args.get('workflow_tool_id'):
tool = WorkflowToolManageService.get_workflow_tool_by_tool_id(
user_id,
tenant_id,
args['workflow_tool_id'],
)
elif args.get('workflow_app_id'):
tool = WorkflowToolManageService.get_workflow_tool_by_app_id(
user_id,
tenant_id,
args['workflow_app_id'],
)
else:
raise ValueError('incorrect workflow_tool_id or workflow_app_id')
return jsonable_encoder(tool)
class ToolWorkflowProviderListToolApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('workflow_tool_id', type=uuid_value, required=True, nullable=False, location='args')
args = parser.parse_args()
return jsonable_encoder(WorkflowToolManageService.list_single_workflow_tools(
user_id,
tenant_id,
args['workflow_tool_id'],
))
class ToolBuiltinListApi(Resource):
@setup_required
@login_required
@@ -325,7 +464,7 @@ class ToolBuiltinListApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_builtin_tools(
return jsonable_encoder([provider.to_dict() for provider in BuiltinToolManageService.list_builtin_tools(
user_id,
tenant_id,
)])
@@ -338,20 +477,43 @@ class ToolApiListApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_api_tools(
return jsonable_encoder([provider.to_dict() for provider in ApiToolManageService.list_api_tools(
user_id,
tenant_id,
)])
class ToolWorkflowListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder([provider.to_dict() for provider in WorkflowToolManageService.list_tenant_workflow_tools(
user_id,
tenant_id,
)])
class ToolLabelsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
return jsonable_encoder(ToolLabelsService.list_tool_labels())
# tool provider
api.add_resource(ToolProviderListApi, '/workspaces/current/tool-providers')
# builtin tool provider
api.add_resource(ToolBuiltinProviderListToolsApi, '/workspaces/current/tool-provider/builtin/<provider>/tools')
api.add_resource(ToolBuiltinProviderDeleteApi, '/workspaces/current/tool-provider/builtin/<provider>/delete')
api.add_resource(ToolBuiltinProviderUpdateApi, '/workspaces/current/tool-provider/builtin/<provider>/update')
api.add_resource(ToolBuiltinProviderGetCredentialsApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials')
api.add_resource(ToolBuiltinProviderCredentialsSchemaApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials_schema')
api.add_resource(ToolBuiltinProviderIconApi, '/workspaces/current/tool-provider/builtin/<provider>/icon')
api.add_resource(ToolModelProviderIconApi, '/workspaces/current/tool-provider/model/<provider>/icon')
api.add_resource(ToolModelProviderListToolsApi, '/workspaces/current/tool-provider/model/tools')
# api tool provider
api.add_resource(ToolApiProviderAddApi, '/workspaces/current/tool-provider/api/add')
api.add_resource(ToolApiProviderGetRemoteSchemaApi, '/workspaces/current/tool-provider/api/remote')
api.add_resource(ToolApiProviderListToolsApi, '/workspaces/current/tool-provider/api/tools')
@@ -361,5 +523,15 @@ api.add_resource(ToolApiProviderGetApi, '/workspaces/current/tool-provider/api/g
api.add_resource(ToolApiProviderSchemaApi, '/workspaces/current/tool-provider/api/schema')
api.add_resource(ToolApiProviderPreviousTestApi, '/workspaces/current/tool-provider/api/test/pre')
# workflow tool provider
api.add_resource(ToolWorkflowProviderCreateApi, '/workspaces/current/tool-provider/workflow/create')
api.add_resource(ToolWorkflowProviderUpdateApi, '/workspaces/current/tool-provider/workflow/update')
api.add_resource(ToolWorkflowProviderDeleteApi, '/workspaces/current/tool-provider/workflow/delete')
api.add_resource(ToolWorkflowProviderGetApi, '/workspaces/current/tool-provider/workflow/get')
api.add_resource(ToolWorkflowProviderListToolApi, '/workspaces/current/tool-provider/workflow/tools')
api.add_resource(ToolBuiltinListApi, '/workspaces/current/tools/builtin')
api.add_resource(ToolApiListApi, '/workspaces/current/tools/api')
api.add_resource(ToolWorkflowListApi, '/workspaces/current/tools/workflow')
api.add_resource(ToolLabelsApi, '/workspaces/current/tool-labels')
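A hypothetical sketch of publishing a workflow app as a tool through the create endpoint registered above. Every value, the parameter shape and the URL prefix are illustrative placeholders.
import requests

payload = {
    "workflow_app_id": "11111111-2222-3333-4444-555555555555",  # placeholder UUID of an existing workflow app
    "name": "ordersearch",                                      # alphanumeric, per the request parser
    "label": "Order Search",
    "description": "Look up an order by its number.",
    "icon": {"type": "emoji", "icon": "🔍", "background": "#FFFFFF"},
    "parameters": [
        {"name": "order_no", "description": "Order number", "form": "llm"},  # assumed parameter shape
    ],
    "privacy_policy": "",
    "labels": ["search"],
}

resp = requests.post(
    "https://dify.example.com/console/api/workspaces/current/tool-provider/workflow/create",
    json=payload,
    headers={"Authorization": "Bearer <console-access-token>"},
    timeout=10,
)
print(resp.status_code, resp.json())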

View File

@@ -3,6 +3,7 @@ import logging
from flask import request
from flask_login import current_user
from flask_restful import Resource, fields, inputs, marshal, marshal_with, reqparse
from werkzeug.exceptions import Unauthorized
import services
from controllers.console import api
@@ -19,7 +20,7 @@ from controllers.console.wraps import account_initialization_required, cloud_edi
from extensions.ext_database import db
from libs.helper import TimestampField
from libs.login import login_required
from models.account import Tenant
from models.account import Tenant, TenantStatus
from services.account_service import TenantService
from services.file_service import FileService
from services.workspace_service import WorkspaceService
@@ -116,6 +117,16 @@ class TenantApi(Resource):
tenant = current_user.current_tenant
if tenant.status == TenantStatus.ARCHIVE:
tenants = TenantService.get_join_tenants(current_user)
# if there is any tenant, switch to the first one
if len(tenants) > 0:
TenantService.switch_tenant(current_user, tenants[0].id)
tenant = tenants[0]
# else, raise Unauthorized
else:
raise Unauthorized('workspace is archived')
return WorkspaceService.get_tenant_info(tenant), 200
@@ -150,13 +161,13 @@ class CustomConfigWorkspaceApi(Resource):
parser.add_argument('replace_webapp_logo', type=str, location='json')
args = parser.parse_args()
tenant = db.session.query(Tenant).filter(Tenant.id == current_user.current_tenant_id).one_or_404()
custom_config_dict = {
'remove_webapp_brand': args['remove_webapp_brand'],
'replace_webapp_logo': args['replace_webapp_logo'],
'replace_webapp_logo': args['replace_webapp_logo'] if args['replace_webapp_logo'] is not None else tenant.custom_config_dict.get('replace_webapp_logo'),
}
tenant = db.session.query(Tenant).filter(Tenant.id == current_user.current_tenant_id).one_or_404()
tenant.custom_config_dict = custom_config_dict
db.session.commit()

View File

@@ -1,5 +1,5 @@
# -*- coding:utf-8 -*-
from flask import Blueprint
from libs.external_api import ExternalApi
bp = Blueprint('files', __name__)

View File

@@ -0,0 +1,9 @@
from flask import Blueprint
from libs.external_api import ExternalApi
bp = Blueprint('inner_api', __name__, url_prefix='/inner/api')
api = ExternalApi(bp)
from .workspace import workspace

View File

@@ -0,0 +1,37 @@
from flask_restful import Resource, reqparse
from controllers.console.setup import setup_required
from controllers.inner_api import api
from controllers.inner_api.wraps import inner_api_only
from events.tenant_event import tenant_was_created
from models.account import Account
from services.account_service import TenantService
class EnterpriseWorkspace(Resource):
@setup_required
@inner_api_only
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('owner_email', type=str, required=True, location='json')
args = parser.parse_args()
account = Account.query.filter_by(email=args['owner_email']).first()
if account is None:
return {
'message': 'owner account not found.'
}, 404
tenant = TenantService.create_tenant(args['name'])
TenantService.create_tenant_member(tenant, account, role='owner')
tenant_was_created.send(tenant)
return {
'message': 'enterprise workspace created.'
}
api.add_resource(EnterpriseWorkspace, '/enterprise/workspace')
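A hypothetical call to the new enterprise workspace endpoint; it is reachable only when the INNER_API flag is enabled and the X-Inner-Api-Key header matches INNER_API_KEY (see the decorator in the next file). Host and key are placeholders.
import requests

resp = requests.post(
    "https://dify.example.com/inner/api/enterprise/workspace",
    json={"name": "Acme Inc.", "owner_email": "owner@acme.example"},
    headers={"X-Inner-Api-Key": "<INNER_API_KEY value>"},
    timeout=10,
)
print(resp.status_code, resp.json())  # 404 when INNER_API is disabled or the key does not match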

View File

@@ -0,0 +1,61 @@
from base64 import b64encode
from functools import wraps
from hashlib import sha1
from hmac import new as hmac_new
from flask import abort, current_app, request
from extensions.ext_database import db
from models.model import EndUser
def inner_api_only(view):
@wraps(view)
def decorated(*args, **kwargs):
if not current_app.config['INNER_API']:
abort(404)
# get header 'X-Inner-Api-Key'
inner_api_key = request.headers.get('X-Inner-Api-Key')
if not inner_api_key or inner_api_key != current_app.config['INNER_API_KEY']:
abort(404)
return view(*args, **kwargs)
return decorated
def inner_api_user_auth(view):
@wraps(view)
def decorated(*args, **kwargs):
if not current_app.config['INNER_API']:
return view(*args, **kwargs)
# get header 'X-Inner-Api-Key'
authorization = request.headers.get('Authorization')
if not authorization:
return view(*args, **kwargs)
parts = authorization.split(':')
if len(parts) != 2:
return view(*args, **kwargs)
user_id, token = parts
if ' ' in user_id:
user_id = user_id.split(' ')[1]
inner_api_key = request.headers.get('X-Inner-Api-Key')
data_to_sign = f'DIFY {user_id}'
signature = hmac_new(inner_api_key.encode('utf-8'), data_to_sign.encode('utf-8'), sha1)
signature = b64encode(signature.digest()).decode('utf-8')
if signature != token:
return view(*args, **kwargs)
kwargs['user'] = db.session.query(EndUser).filter(EndUser.id == user_id).first()
return view(*args, **kwargs)
return decorated
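To satisfy the verification above, a caller signs the string 'DIFY <user_id>' with the inner API key (HMAC-SHA1, base64-encoded) and sends '<user_id>:<signature>' in the Authorization header. A minimal client-side sketch with placeholder key and user id:
from base64 import b64encode
from hashlib import sha1
from hmac import new as hmac_new

inner_api_key = "<INNER_API_KEY value>"   # placeholder, must match the server configuration
user_id = "end-user-uuid"                 # placeholder EndUser id

digest = hmac_new(inner_api_key.encode("utf-8"), f"DIFY {user_id}".encode("utf-8"), sha1).digest()
signature = b64encode(digest).decode("utf-8")

headers = {
    "X-Inner-Api-Key": inner_api_key,
    "Authorization": f"{user_id}:{signature}",
}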

View File

@@ -1,5 +1,5 @@
# -*- coding:utf-8 -*-
from flask import Blueprint
from libs.external_api import ExternalApi
bp = Blueprint('service_api', __name__, url_prefix='/v1')

View File

@@ -97,7 +97,7 @@ class MessageListApi(Resource):
class MessageFeedbackApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser, message_id):
message_id = str(message_id)
@@ -114,7 +114,7 @@ class MessageFeedbackApi(Resource):
class MessageSuggestedApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY, required=True))
def get(self, app_model: App, end_user: EndUser, message_id):
message_id = str(message_id)
app_mode = AppMode.value_of(app_model.mode)

View File

@@ -1,9 +1,10 @@
from flask import request
from flask_restful import marshal, reqparse
from werkzeug.exceptions import NotFound
import services.dataset_service
from controllers.service_api import api
from controllers.service_api.dataset.error import DatasetNameDuplicateError
from controllers.service_api.dataset.error import DatasetInUseError, DatasetNameDuplicateError
from controllers.service_api.wraps import DatasetApiResource
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
@@ -19,15 +20,20 @@ def _validate_name(name):
return name
class DatasetApi(DatasetApiResource):
"""Resource for get datasets."""
class DatasetListApi(DatasetApiResource):
"""Resource for datasets."""
def get(self, tenant_id):
"""Resource for getting datasets."""
page = request.args.get('page', default=1, type=int)
limit = request.args.get('limit', default=20, type=int)
provider = request.args.get('provider', default="vendor")
search = request.args.get('keyword', default=None, type=str)
tag_ids = request.args.getlist('tag_ids')
datasets, total = DatasetService.get_datasets(page, limit, provider,
tenant_id, current_user)
tenant_id, current_user, search, tag_ids)
# check embedding setting
provider_manager = ProviderManager()
configurations = provider_manager.get_configurations(
@@ -62,9 +68,9 @@ class DatasetApi(DatasetApiResource):
}
return response, 200
"""Resource for datasets."""
def post(self, tenant_id):
"""Resource for creating datasets."""
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False, required=True,
help='type is required. Name must be between 1 and 40 characters.',
@@ -86,6 +92,34 @@ class DatasetApi(DatasetApiResource):
return marshal(dataset, dataset_detail_fields), 200
class DatasetApi(DatasetApiResource):
"""Resource for dataset."""
api.add_resource(DatasetApi, '/datasets')
def delete(self, _, dataset_id):
"""
Deletes a dataset given its ID.
Args:
dataset_id (UUID): The ID of the dataset to be deleted.
Returns:
dict: A dictionary with a key 'result' and a value 'success'
if the dataset was successfully deleted. Omitted in HTTP response.
int: HTTP status code 204 indicating that the operation was successful.
Raises:
NotFound: If the dataset with the given ID does not exist.
"""
dataset_id_str = str(dataset_id)
try:
if DatasetService.delete_dataset(dataset_id_str, current_user):
return {'result': 'success'}, 204
else:
raise NotFound("Dataset not found.")
except services.errors.dataset.DatasetInUseError:
raise DatasetInUseError()
api.add_resource(DatasetListApi, '/datasets')
api.add_resource(DatasetApi, '/datasets/<uuid:dataset_id>')
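A hypothetical sketch against the reworked service API routes above: listing datasets with the new keyword and tag_ids filters, then deleting one. Host, API key and IDs are placeholders.
import requests

base = "https://dify.example.com/v1"
headers = {"Authorization": "Bearer <dataset-api-key>"}

# List datasets filtered by keyword and tag ids.
resp = requests.get(
    f"{base}/datasets",
    params={"page": 1, "limit": 20, "keyword": "faq", "tag_ids": ["<tag-uuid>"]},
    headers=headers,
    timeout=10,
)
print(resp.json())

# Delete a dataset: 204 on success, 409 (dataset_in_use) if apps still use it, 404 if it does not exist.
resp = requests.delete(f"{base}/datasets/<dataset-uuid>", headers=headers, timeout=10)
print(resp.status_code)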

View File

@@ -174,7 +174,7 @@ class DocumentAddByFileApi(DatasetApiResource):
if not dataset:
raise ValueError('Dataset does not exist.')
if not dataset.indexing_technique and not args['indexing_technique']:
if not dataset.indexing_technique and not args.get('indexing_technique'):
raise ValueError('indexing_technique is required.')
# save file info

View File

@@ -71,3 +71,9 @@ class InvalidMetadataError(BaseHTTPException):
error_code = 'invalid_metadata'
description = "The metadata content is incorrect. Please check and verify."
code = 400
class DatasetInUseError(BaseHTTPException):
error_code = 'dataset_in_use'
description = "The dataset is being used by some apps. Please remove the dataset from the apps before deleting it."
code = 409

View File

@@ -8,11 +8,11 @@ from flask import current_app, request
from flask_login import user_logged_in
from flask_restful import Resource
from pydantic import BaseModel
from werkzeug.exceptions import Forbidden, NotFound, Unauthorized
from werkzeug.exceptions import Forbidden, Unauthorized
from extensions.ext_database import db
from libs.login import _get_user
from models.account import Account, Tenant, TenantAccountJoin
from models.account import Account, Tenant, TenantAccountJoin, TenantStatus
from models.model import ApiToken, App, EndUser
from services.feature_service import FeatureService
@@ -39,13 +39,17 @@ def validate_app_token(view: Optional[Callable] = None, *, fetch_user_arg: Optio
app_model = db.session.query(App).filter(App.id == api_token.app_id).first()
if not app_model:
raise NotFound()
raise Forbidden("The app no longer exists.")
if app_model.status != 'normal':
raise NotFound()
raise Forbidden("The app's status is abnormal.")
if not app_model.enable_api:
raise NotFound()
raise Forbidden("The app's API service has been disabled.")
tenant = db.session.query(Tenant).filter(Tenant.id == app_model.tenant_id).first()
if tenant.status == TenantStatus.ARCHIVE:
raise Forbidden("The workspace's status is archived.")
kwargs['app_model'] = app_model
@@ -137,6 +141,7 @@ def validate_dataset_token(view=None):
.filter(Tenant.id == api_token.tenant_id) \
.filter(TenantAccountJoin.tenant_id == Tenant.id) \
.filter(TenantAccountJoin.role.in_(['owner'])) \
.filter(Tenant.status == TenantStatus.NORMAL) \
.one_or_none() # TODO: only owner information is required, so only one is returned.
if tenant_account_join:
tenant, ta = tenant_account_join

View File

@@ -1,9 +1,9 @@
# -*- coding:utf-8 -*-
from flask import Blueprint
from libs.external_api import ExternalApi
bp = Blueprint('web', __name__, url_prefix='/api')
api = ExternalApi(bp)
from . import app, audio, completion, conversation, file, message, passport, saved_message, site, workflow
from . import app, audio, completion, conversation, feature, file, message, passport, saved_message, site, workflow

View File

@@ -1,14 +1,10 @@
import json
from flask import current_app
from flask_restful import fields, marshal_with
from controllers.web import api
from controllers.web.error import AppUnavailableError
from controllers.web.wraps import WebApiResource
from extensions.ext_database import db
from models.model import App, AppModelConfig, AppMode
from models.tools import ApiToolProvider
from models.model import App, AppMode
from services.app_service import AppService

View File

@@ -74,7 +74,7 @@ class TextApi(WebApiResource):
app_model=app_model,
text=request.form['text'],
end_user=end_user.external_user_id,
voice=request.form.get('voice'),
voice=request.form['voice'] if request.form.get('voice') else None,
streaming=False
)

View File

@@ -115,3 +115,9 @@ class UnsupportedFileTypeError(BaseHTTPException):
error_code = 'unsupported_file_type'
description = "File type not allowed."
code = 415
class WebSSOAuthRequiredError(BaseHTTPException):
error_code = 'web_sso_auth_required'
description = "Web SSO authentication required."
code = 401

View File

@@ -0,0 +1,12 @@
from flask_restful import Resource
from controllers.web import api
from services.feature_service import FeatureService
class SystemFeatureApi(Resource):
def get(self):
return FeatureService.get_system_features().model_dump()
api.add_resource(SystemFeatureApi, '/system-features')
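The new unauthenticated endpoint above lets the web app discover system features before login; a hypothetical check (the web blueprint is mounted at /api, see its __init__ earlier in this diff):
import requests

features = requests.get("https://dify.example.com/api/system-features", timeout=10).json()
print(features.get("sso_enforced_for_web"))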

View File

@@ -5,14 +5,21 @@ from flask_restful import Resource
from werkzeug.exceptions import NotFound, Unauthorized
from controllers.web import api
from controllers.web.error import WebSSOAuthRequiredError
from extensions.ext_database import db
from libs.passport import PassportService
from models.model import App, EndUser, Site
from services.feature_service import FeatureService
class PassportResource(Resource):
"""Base resource for passport."""
def get(self):
system_features = FeatureService.get_system_features()
if system_features.sso_enforced_for_web:
raise WebSSOAuthRequiredError()
app_code = request.headers.get('X-App-Code')
if app_code is None:
raise Unauthorized('X-App-Code header is missing.')
@@ -28,7 +35,7 @@ class PassportResource(Resource):
app_model = db.session.query(App).filter(App.id == site.app_id).first()
if not app_model or app_model.status != 'normal' or not app_model.enable_site:
raise NotFound()
end_user = EndUser(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
@@ -36,6 +43,7 @@ class PassportResource(Resource):
is_anonymous=True,
session_id=generate_session_id(),
)
db.session.add(end_user)
db.session.commit()
@@ -53,8 +61,10 @@ class PassportResource(Resource):
'access_token': tk,
}
api.add_resource(PassportResource, '/passport')
def generate_session_id():
"""
Generate a unique session ID.

View File

@@ -6,6 +6,7 @@ from werkzeug.exceptions import Forbidden
from controllers.web import api
from controllers.web.wraps import WebApiResource
from extensions.ext_database import db
from models.account import TenantStatus
from models.model import Site
from services.feature_service import FeatureService
@@ -30,6 +31,7 @@ class AppSiteApi(WebApiResource):
'description': fields.String,
'copyright': fields.String,
'privacy_policy': fields.String,
'custom_disclaimer': fields.String,
'default_language': fields.String,
'prompt_public': fields.Boolean
}
@@ -54,6 +56,9 @@ class AppSiteApi(WebApiResource):
if not site:
raise Forbidden()
if app_model.tenant.status == TenantStatus.ARCHIVE:
raise Forbidden()
can_replace_logo = FeatureService.get_features(app_model.tenant_id).can_replace_logo
return AppSiteInfo(app_model.tenant, app_model, site, end_user.id, can_replace_logo)

View File

@@ -2,11 +2,13 @@ from functools import wraps
from flask import request
from flask_restful import Resource
from werkzeug.exceptions import NotFound, Unauthorized
from werkzeug.exceptions import BadRequest, NotFound, Unauthorized
from controllers.web.error import WebSSOAuthRequiredError
from extensions.ext_database import db
from libs.passport import PassportService
from models.model import App, EndUser, Site
from services.feature_service import FeatureService
def validate_jwt_token(view=None):
@@ -21,34 +23,60 @@ def validate_jwt_token(view=None):
return decorator(view)
return decorator
def decode_jwt_token():
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise Unauthorized('Authorization header is missing.')
system_features = FeatureService.get_system_features()
if ' ' not in auth_header:
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
auth_scheme, tk = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
try:
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise Unauthorized('Authorization header is missing.')
if auth_scheme != 'bearer':
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
decoded = PassportService().verify(tk)
app_code = decoded.get('app_code')
app_model = db.session.query(App).filter(App.id == decoded['app_id']).first()
site = db.session.query(Site).filter(Site.code == app_code).first()
if not app_model:
raise NotFound()
if not app_code or not site:
raise Unauthorized('Site URL is no longer valid.')
if app_model.enable_site is False:
raise Unauthorized('Site is disabled.')
end_user = db.session.query(EndUser).filter(EndUser.id == decoded['end_user_id']).first()
if not end_user:
raise NotFound()
if ' ' not in auth_header:
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
auth_scheme, tk = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
if auth_scheme != 'bearer':
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
decoded = PassportService().verify(tk)
app_code = decoded.get('app_code')
app_model = db.session.query(App).filter(App.id == decoded['app_id']).first()
site = db.session.query(Site).filter(Site.code == app_code).first()
if not app_model:
raise NotFound()
if not app_code or not site:
raise BadRequest('Site URL is no longer valid.')
if app_model.enable_site is False:
raise BadRequest('Site is disabled.')
end_user = db.session.query(EndUser).filter(EndUser.id == decoded['end_user_id']).first()
if not end_user:
raise NotFound()
_validate_web_sso_token(decoded, system_features)
return app_model, end_user
except Unauthorized as e:
if system_features.sso_enforced_for_web:
raise WebSSOAuthRequiredError()
raise Unauthorized(e.description)
def _validate_web_sso_token(decoded, system_features):
# Check if SSO is enforced for web, and if the token source is not SSO, raise an error and redirect to SSO login
if system_features.sso_enforced_for_web:
source = decoded.get('token_source')
if not source or source != 'sso':
raise WebSSOAuthRequiredError()
# Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login
if not system_features.sso_enforced_for_web:
source = decoded.get('token_source')
if source and source == 'sso':
raise Unauthorized('SSO token expired.')
return app_model, end_user
class WebApiResource(Resource):
method_decorators = [validate_jwt_token]
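Read together, the decode path above enforces a two-way rule on token_source: when web SSO is enforced the token must originate from SSO, and when it is not enforced an SSO-issued token is rejected. A standalone sketch of just that rule, using a plain dict in place of the decoded JWT and a generic exception in place of the controller errors:
def check_token_source(decoded: dict, sso_enforced_for_web: bool) -> None:
    # Mirrors _validate_web_sso_token above; PermissionError stands in for
    # WebSSOAuthRequiredError / Unauthorized raised by the controller.
    source = decoded.get("token_source")
    if sso_enforced_for_web and source != "sso":
        raise PermissionError("web SSO authentication required")
    if not sso_enforced_for_web and source == "sso":
        raise PermissionError("SSO token expired")

check_token_source({"token_source": "sso"}, sso_enforced_for_web=True)   # passes
check_token_source({}, sso_enforced_for_web=False)                       # passes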

View File

@@ -39,6 +39,7 @@ from core.tools.entities.tool_entities import (
from core.tools.tool.dataset_retriever_tool import DatasetRetrieverTool
from core.tools.tool.tool import Tool
from core.tools.tool_manager import ToolManager
from core.tools.utils.tool_parameter_converter import ToolParameterConverter
from extensions.ext_database import db
from models.model import Conversation, Message, MessageAgentThought
from models.tools import ToolConversationVariables
@@ -128,6 +129,8 @@ class BaseAgentRunner(AppRunner):
self.files = application_generate_entity.files
else:
self.files = []
self.query = None
self._current_thoughts: list[PromptMessage] = []
def _repack_app_generate_entity(self, app_generate_entity: AgentChatAppGenerateEntity) \
-> AgentChatAppGenerateEntity:
@@ -163,7 +166,9 @@ class BaseAgentRunner(AppRunner):
"""
tool_entity = ToolManager.get_agent_tool_runtime(
tenant_id=self.tenant_id,
app_id=self.app_config.app_id,
agent_tool=tool,
invoke_from=self.application_generate_entity.invoke_from
)
tool_entity.load_variables(self.variables_pool)
@@ -182,21 +187,11 @@ class BaseAgentRunner(AppRunner):
if parameter.form != ToolParameter.ToolParameterForm.LLM:
continue
parameter_type = 'string'
parameter_type = ToolParameterConverter.get_parameter_type(parameter.type)
enum = []
if parameter.type == ToolParameter.ToolParameterType.STRING:
parameter_type = 'string'
elif parameter.type == ToolParameter.ToolParameterType.BOOLEAN:
parameter_type = 'boolean'
elif parameter.type == ToolParameter.ToolParameterType.NUMBER:
parameter_type = 'number'
elif parameter.type == ToolParameter.ToolParameterType.SELECT:
for option in parameter.options:
enum.append(option.value)
parameter_type = 'string'
else:
raise ValueError(f"parameter type {parameter.type} is not supported")
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
message_tool.parameters['properties'][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
@@ -277,20 +272,10 @@ class BaseAgentRunner(AppRunner):
if parameter.form != ToolParameter.ToolParameterForm.LLM:
continue
parameter_type = 'string'
parameter_type = ToolParameterConverter.get_parameter_type(parameter.type)
enum = []
if parameter.type == ToolParameter.ToolParameterType.STRING:
parameter_type = 'string'
elif parameter.type == ToolParameter.ToolParameterType.BOOLEAN:
parameter_type = 'boolean'
elif parameter.type == ToolParameter.ToolParameterType.NUMBER:
parameter_type = 'number'
elif parameter.type == ToolParameter.ToolParameterType.SELECT:
for option in parameter.options:
enum.append(option.value)
parameter_type = 'string'
else:
raise ValueError(f"parameter type {parameter.type} is not supported")
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
prompt_tool.parameters['properties'][parameter.name] = {
"type": parameter_type,
@@ -462,7 +447,7 @@ class BaseAgentRunner(AppRunner):
for message in messages:
if message.id == self.message.id:
continue
result.append(self.organize_agent_user_prompt(message))
agent_thoughts: list[MessageAgentThought] = message.agent_thoughts
if agent_thoughts:

View File

@@ -15,6 +15,7 @@ from core.model_runtime.entities.message_entities import (
ToolPromptMessage,
UserPromptMessage,
)
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool.tool import Tool
from core.tools.tool_engine import ToolEngine
@@ -42,9 +43,9 @@ class CotAgentRunner(BaseAgentRunner, ABC):
self._init_react_state(query)
# check model mode
if 'Observation' not in app_generate_entity.model_config.stop:
if app_generate_entity.model_config.provider not in self._ignore_observation_providers:
app_generate_entity.model_config.stop.append('Observation')
if 'Observation' not in app_generate_entity.model_conf.stop:
if app_generate_entity.model_conf.provider not in self._ignore_observation_providers:
app_generate_entity.model_conf.stop.append('Observation')
app_config = self.app_config
@@ -108,9 +109,9 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters=app_generate_entity.model_config.parameters,
model_parameters=app_generate_entity.model_conf.parameters,
tools=[],
stop=app_generate_entity.model_config.stop,
stop=app_generate_entity.model_conf.stop,
stream=True,
user=self.user_id,
callbacks=[],
@@ -121,7 +122,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
raise ValueError("failed to invoke llm")
usage_dict = {}
react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks)
react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict)
scratchpad = AgentScratchpadUnit(
agent_response='',
thought='',
@@ -140,8 +141,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
if isinstance(chunk, AgentScratchpadUnit.Action):
action = chunk
# detect action
scratchpad.agent_response += json.dumps(chunk.dict())
scratchpad.action_str = json.dumps(chunk.dict())
scratchpad.agent_response += json.dumps(chunk.model_dump())
scratchpad.action_str = json.dumps(chunk.model_dump())
scratchpad.action = action
else:
scratchpad.agent_response += chunk
@@ -189,7 +190,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
if not scratchpad.action:
# failed to extract action, return final answer directly
final_answer = scratchpad.agent_response or ''
final_answer = ''
else:
if scratchpad.action.action_name.lower() == "final answer":
# action is final answer, return final answer directly
@@ -219,7 +220,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
tool_input={scratchpad.action.action_name: scratchpad.action.action_input},
thought=scratchpad.thought,
observation={scratchpad.action.action_name: tool_invoke_response},
tool_invoke_meta=tool_invoke_meta.to_dict(),
tool_invoke_meta={scratchpad.action.action_name: tool_invoke_meta.to_dict()},
answer=scratchpad.agent_response,
messages_ids=message_file_ids,
llm_usage=usage_dict['usage']
@@ -373,7 +374,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
return message
def _organize_historic_prompt_messages(self) -> list[PromptMessage]:
def _organize_historic_prompt_messages(self, current_session_messages: list[PromptMessage] = None) -> list[PromptMessage]:
"""
organize historic prompt messages
"""
@@ -381,6 +382,13 @@ class CotAgentRunner(BaseAgentRunner, ABC):
scratchpad: list[AgentScratchpadUnit] = []
current_scratchpad: AgentScratchpadUnit = None
self.history_prompt_messages = AgentHistoryPromptTransform(
model_config=self.model_config,
prompt_messages=current_session_messages or [],
history_messages=self.history_prompt_messages,
memory=self.memory
).get_prompt()
for message in self.history_prompt_messages:
if isinstance(message, AssistantPromptMessage):
current_scratchpad = AgentScratchpadUnit(

View File

@@ -32,9 +32,6 @@ class CotChatAgentRunner(CotAgentRunner):
# organize system prompt
system_message = self._organize_system_prompt()
# organize historic prompt messages
historic_messages = self._historic_prompt_messages
# organize current assistant messages
agent_scratchpad = self._agent_scratchpad
if not agent_scratchpad:
@@ -57,6 +54,13 @@ class CotChatAgentRunner(CotAgentRunner):
query_messages = UserPromptMessage(content=self._query)
if assistant_messages:
# organize historic prompt messages
historic_messages = self._organize_historic_prompt_messages([
system_message,
query_messages,
*assistant_messages,
UserPromptMessage(content='continue')
])
messages = [
system_message,
*historic_messages,
@@ -65,6 +69,8 @@ class CotChatAgentRunner(CotAgentRunner):
UserPromptMessage(content='continue')
]
else:
# organize historic prompt messages
historic_messages = self._organize_historic_prompt_messages([system_message, query_messages])
messages = [system_message, *historic_messages, query_messages]
# join all messages

View File

@@ -19,11 +19,11 @@ class CotCompletionAgentRunner(CotAgentRunner):
return system_prompt
def _organize_historic_prompt(self) -> str:
def _organize_historic_prompt(self, current_session_messages: list[PromptMessage] = None) -> str:
"""
Organize historic prompt
"""
historic_prompt_messages = self._historic_prompt_messages
historic_prompt_messages = self._organize_historic_prompt_messages(current_session_messages)
historic_prompt = ""
for message in historic_prompt_messages:

View File

@@ -8,7 +8,7 @@ class AgentToolEntity(BaseModel):
"""
Agent Tool Entity.
"""
provider_type: Literal["builtin", "api"]
provider_type: Literal["builtin", "api", "workflow"]
provider_id: str
tool_name: str
tool_parameters: dict[str, Any] = {}

View File

@@ -17,6 +17,7 @@ from core.model_runtime.entities.message_entities import (
ToolPromptMessage,
UserPromptMessage,
)
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool_engine import ToolEngine
from models.model import Message
@@ -24,21 +25,18 @@ from models.model import Message
logger = logging.getLogger(__name__)
class FunctionCallAgentRunner(BaseAgentRunner):
def run(self,
message: Message, query: str, **kwargs: Any
) -> Generator[LLMResultChunk, None, None]:
"""
Run FunctionCall agent application
"""
self.query = query
app_generate_entity = self.application_generate_entity
app_config = self.app_config
prompt_template = app_config.prompt_template.simple_prompt_template or ''
prompt_messages = self.history_prompt_messages
prompt_messages = self._init_system_message(prompt_template, prompt_messages)
prompt_messages = self._organize_user_query(query, prompt_messages)
# convert tools into ModelRuntime Tool format
tool_instances, prompt_messages_tools = self._init_prompt_tools()
@@ -81,13 +79,14 @@ class FunctionCallAgentRunner(BaseAgentRunner):
)
# recalc llm max tokens
prompt_messages = self._organize_prompt_messages()
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters=app_generate_entity.model_config.parameters,
model_parameters=app_generate_entity.model_conf.parameters,
tools=prompt_messages_tools,
stop=app_generate_entity.model_config.stop,
stop=app_generate_entity.model_conf.stop,
stream=self.stream_tool_call,
user=self.user_id,
callbacks=[],
@@ -203,7 +202,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
else:
assistant_message.content = response
prompt_messages.append(assistant_message)
self._current_thoughts.append(assistant_message)
# save thought
self.save_agent_thought(
@@ -265,12 +264,14 @@ class FunctionCallAgentRunner(BaseAgentRunner):
}
tool_responses.append(tool_response)
prompt_messages = self._organize_assistant_message(
tool_call_id=tool_call_id,
tool_call_name=tool_call_name,
tool_response=tool_response['tool_response'],
prompt_messages=prompt_messages,
)
if tool_response['tool_response'] is not None:
self._current_thoughts.append(
ToolPromptMessage(
content=tool_response['tool_response'],
tool_call_id=tool_call_id,
name=tool_call_name,
)
)
if len(tool_responses) > 0:
# save agent thought
@@ -300,8 +301,6 @@ class FunctionCallAgentRunner(BaseAgentRunner):
iteration_step += 1
prompt_messages = self._clear_user_prompt_image_messages(prompt_messages)
self.update_db_variables(self.variables_pool, self.db_variables_pool)
# publish end event
self.queue_manager.publish(QueueMessageEndEvent(llm_result=LLMResult(
@@ -393,24 +392,6 @@ class FunctionCallAgentRunner(BaseAgentRunner):
return prompt_messages
def _organize_assistant_message(self, tool_call_id: str = None, tool_call_name: str = None, tool_response: str = None,
prompt_messages: list[PromptMessage] = None) -> list[PromptMessage]:
"""
Organize assistant message
"""
prompt_messages = deepcopy(prompt_messages)
if tool_response is not None:
prompt_messages.append(
ToolPromptMessage(
content=tool_response,
tool_call_id=tool_call_id,
name=tool_call_name,
)
)
return prompt_messages
def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
"""
As for now, gpt supports both fc and vision at the first iteration.
@@ -428,4 +409,26 @@ class FunctionCallAgentRunner(BaseAgentRunner):
for content in prompt_message.content
])
return prompt_messages
def _organize_prompt_messages(self):
prompt_template = self.app_config.prompt_template.simple_prompt_template or ''
self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages)
query_prompt_messages = self._organize_user_query(self.query, [])
self.history_prompt_messages = AgentHistoryPromptTransform(
model_config=self.model_config,
prompt_messages=[*query_prompt_messages, *self._current_thoughts],
history_messages=self.history_prompt_messages,
memory=self.memory
).get_prompt()
prompt_messages = [
*self.history_prompt_messages,
*query_prompt_messages,
*self._current_thoughts
]
if len(self._current_thoughts) != 0:
# clear messages after the first iteration
prompt_messages = self._clear_user_prompt_image_messages(prompt_messages)
return prompt_messages

View File

@@ -9,7 +9,7 @@ from core.model_runtime.entities.llm_entities import LLMResultChunk
class CotAgentOutputParser:
@classmethod
def handle_react_stream_output(cls, llm_response: Generator[LLMResultChunk, None, None]) -> \
def handle_react_stream_output(cls, llm_response: Generator[LLMResultChunk, None, None], usage_dict: dict) -> \
Generator[Union[str, AgentScratchpadUnit.Action], None, None]:
def parse_action(json_str):
try:
@@ -58,6 +58,8 @@ class CotAgentOutputParser:
thought_idx = 0
for response in llm_response:
if response.delta.usage:
usage_dict['usage'] = response.delta.usage
response = response.delta.message.content
if not isinstance(response, str):
continue

View File

@@ -11,7 +11,7 @@ class SensitiveWordAvoidanceConfigManager:
if not sensitive_word_avoidance_dict:
return None
if 'enabled' in sensitive_word_avoidance_dict and sensitive_word_avoidance_dict['enabled']:
if sensitive_word_avoidance_dict.get('enabled'):
return SensitiveWordAvoidanceEntity(
type=sensitive_word_avoidance_dict.get('type'),
config=sensitive_word_avoidance_dict.get('config'),

View File

@@ -1,7 +1,7 @@
from typing import Optional
from core.agent.entities import AgentEntity, AgentPromptEntity, AgentToolEntity
from core.tools.prompt.template import REACT_PROMPT_TEMPLATES
from core.agent.prompt.template import REACT_PROMPT_TEMPLATES
class AgentConfigManager:

View File

@@ -239,4 +239,4 @@ class WorkflowUIBasedAppConfig(AppConfig):
"""
Workflow UI Based App Config Entity.
"""
workflow_id: str

View File

@@ -14,7 +14,7 @@ class FileUploadConfigManager:
"""
file_upload_dict = config.get('file_upload')
if file_upload_dict:
if 'image' in file_upload_dict and file_upload_dict['image']:
if file_upload_dict.get('image'):
if 'enabled' in file_upload_dict['image'] and file_upload_dict['image']['enabled']:
image_config = {
'number_limits': file_upload_dict['image']['number_limits'],

View File

@@ -9,7 +9,7 @@ class MoreLikeThisConfigManager:
more_like_this = False
more_like_this_dict = config.get('more_like_this')
if more_like_this_dict:
if 'enabled' in more_like_this_dict and more_like_this_dict['enabled']:
if more_like_this_dict.get('enabled'):
more_like_this = True
return more_like_this
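The .get('enabled') rewrites in these config managers are safe because dict.get returns None for a missing key, which is falsy just like an explicit False, so the shorter form is truth-equivalent to the old membership check. A quick illustration:
for cfg in ({}, {'enabled': False}, {'enabled': True}, {'enabled': None}):
    old_style = 'enabled' in cfg and cfg['enabled']
    new_style = cfg.get('enabled')
    assert bool(old_style) == bool(new_style)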

View File

@@ -4,7 +4,7 @@ class RetrievalResourceConfigManager:
show_retrieve_source = False
retriever_resource_dict = config.get('retriever_resource')
if retriever_resource_dict:
if 'enabled' in retriever_resource_dict and retriever_resource_dict['enabled']:
if retriever_resource_dict.get('enabled'):
show_retrieve_source = True
return show_retrieve_source

View File

@@ -9,7 +9,7 @@ class SpeechToTextConfigManager:
speech_to_text = False
speech_to_text_dict = config.get('speech_to_text')
if speech_to_text_dict:
if 'enabled' in speech_to_text_dict and speech_to_text_dict['enabled']:
if speech_to_text_dict.get('enabled'):
speech_to_text = True
return speech_to_text

Some files were not shown because too many files have changed in this diff Show More