Compare commits

...

569 Commits

Author SHA1 Message Date
8bf892b306 feat: bump version to 0.3.25 (#1300) 2023-10-10 13:03:49 +08:00
8480b0197b fix: prompt for baichuan text generation models (#1299) 2023-10-10 13:01:18 +08:00
df07fb5951 feat: provider add baichuan (#1298) 2023-10-09 23:10:43 -05:00
4ab4bcc074 feat: support openllm embedding (#1293) 2023-10-09 23:09:35 -05:00
1d4f019de4 feat: add baichuan llm support (#1294)
Co-authored-by: zxhlyh <jasonapring2015@outlook.com>
2023-10-09 23:09:26 -05:00
677aacc8e3 feat: upgrade xinference client to 0.5.2 (#1292) 2023-10-09 08:12:58 -05:00
fda937175d feat: qdrant support in docker compose (#1286) 2023-10-08 12:04:04 -05:00
024250803a feat: move login_required wrapper outside (#1281) 2023-10-08 05:21:32 -05:00
b711ce33b7 Application share qrcode (#1277)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
2023-10-08 09:34:49 +08:00
52bec63275 chore(web): strong type (#1259) 2023-10-07 04:42:16 -05:00
657fa80f4d fix devcontainer issue (#1273) 2023-10-07 10:34:25 +08:00
373e90ee6d fix: detached model in completion thread (#1269) 2023-10-02 22:27:25 +08:00
41d4c5b424 fix: count down thread in completion db not commit (#1267) 2023-10-02 10:19:26 +08:00
86a9dea428 fix: db not commit when streaming output (#1266) 2023-10-01 16:41:52 +08:00
8606d80c66 fix: request timeout when openai completion (#1265) 2023-10-01 16:00:23 +08:00
5bffa1d918 feat: bump version to 0.3.24 (#1262) 2023-09-28 18:32:06 +08:00
c9b0fe47bf Fix/notion sync (#1258) 2023-09-28 14:39:13 +08:00
bcd744b6b7 fix: doc (#1256) 2023-09-28 11:26:04 +08:00
5e511e01bf Fix/dataset api key delete (#1255)
Co-authored-by: jyong <jyong@dify.ai>
2023-09-28 10:41:41 +08:00
52291c645e fix: dataset footer styles (#1254) 2023-09-28 10:06:52 +08:00
a31466d34e fix: db session not commit before long llm call running (#1251) 2023-09-27 21:40:26 +08:00
d38eac959b fix: wenxin model name invalid when llm call (#1248) 2023-09-27 16:29:13 +08:00
9dbb8acd4b Feat/dataset support api service (#1240)
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: crazywoola <427733928@qq.com>
2023-09-27 16:06:49 +08:00
46154c6705 Feat/dataset service api (#1245)
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-09-27 16:06:32 +08:00
54ff03c35d fix: dataset query error. (#1244) 2023-09-27 15:24:54 +08:00
18c710c906 feat: support binding context var (#1227)
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-09-27 14:53:22 +08:00
59236b789f Fix: dataset list refresh (#1216) 2023-09-27 10:31:46 +08:00
fd3d43cae1 Fix: debounce of dataset creation (#1237) 2023-09-27 10:31:27 +08:00
8eae643911 Fix App logs page modal show different model icon. (#1224) 2023-09-27 08:54:52 +08:00
fd9413874a fix: FATAL: role "root" does not exist. (#1233) 2023-09-26 10:20:00 +08:00
227f9fb77d Feat/api jwt (#1212) 2023-09-25 12:49:16 +08:00
c40ee7e629 feat: batch run support retry errors and decrease rate limit times (#1215) 2023-09-25 10:20:50 +08:00
841e967d48 Fix: add loading for dataset creation (#1214) 2023-09-24 01:35:20 -05:00
9df0dcedae fix: dataset eslint error (#1221) 2023-09-22 22:38:33 +08:00
724e053732 Fix/qdrant data issue (#1203)
Co-authored-by: jyong <jyong@dify.ai>
2023-09-22 14:21:26 +08:00
e409895c02 Feat/huggingface embedding support (#1211)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-09-22 13:59:02 +08:00
32d9b6181c fix: transaction not commit during long LLM calls (#1213) 2023-09-22 12:43:06 +08:00
2b018fade2 fix: transaction hangs due to message commit block during long LLM calls (#1206) 2023-09-21 11:22:10 +08:00
e65f9cb17a Complete type defined. (#1200) 2023-09-19 23:27:06 -05:00
1367f34398 fix: provider spark free quota text (#1201) 2023-09-20 11:46:25 +08:00
e47f6b879a add help wanted issue template (#1199) 2023-09-19 20:02:41 -05:00
5809edd74b feat: bump version to 0.3.23 (#1198) 2023-09-20 00:14:36 +08:00
05bfa11915 build: update devDependencies (#1125) 2023-09-19 13:31:48 +08:00
435f804c6f fix: gpt-3.5-turbo-instruct context size to 8192 (#1196) 2023-09-19 02:10:22 +08:00
ae3f1ac0a9 feat: support gpt-3.5-turbo-instruct model (#1195) 2023-09-19 02:05:04 +08:00
269a465fc4 Feat/improve vector database logic (#1193)
Co-authored-by: jyong <jyong@dify.ai>
2023-09-18 18:15:41 +08:00
60e0bbd713 Feat/provider add zhipuai (#1192)
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-09-18 18:02:05 +08:00
827c97f0d3 feat: add zhipuai (#1188) 2023-09-18 17:32:31 +08:00
c8bd76cd66 fix: inference embedding validate (#1187) 2023-09-16 03:09:36 +08:00
ec5f585df4 1111 wrong embedding model displayed in datasets (#1186) 2023-09-15 07:54:45 -05:00
1de48f33ca feat(web): service request return generics type (#1157) 2023-09-15 07:54:20 -05:00
6b41a9593e fix: text error (#1184) 2023-09-15 14:15:28 +08:00
82267083e8 fix: model param description error (#1183) 2023-09-15 11:36:01 +08:00
c385961d33 chore: Optimization model parameter description (#1181) 2023-09-15 11:14:14 +08:00
20bab6edec Restore the application template (#1174)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
2023-09-14 08:28:32 -05:00
67bed54f32 Mermaid front end rendering (#1166)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
2023-09-14 14:09:23 +08:00
leo 562a571281 fix: Improved fallback solution for avatar image loading failure (#1172) 2023-09-14 13:31:35 +08:00
fc68c81791 fix: correct invite url (#1173) 2023-09-14 12:07:34 +08:00
5d9070bc60 Feat/add blocking mode resource return (#1171)
Co-authored-by: jyong <jyong@dify.ai>
2023-09-13 18:53:35 +08:00
b11fb0dfd1 fix LocalAI is missing in lang/en (#1169) 2023-09-13 10:08:33 +08:00
d1c5c5f160 add video to cn readme (#1165) 2023-09-12 08:30:12 -05:00
0b1d1440aa Update README.md (#1164) 2023-09-12 07:48:35 -05:00
0c420d64b3 chore: hover conversation show option button (#1160) 2023-09-12 16:35:13 +08:00
f9082104ed feat: add hosted moderation (#1158) 2023-09-12 10:26:12 +08:00
983834cd52 feat: spark check (#1134) 2023-09-11 17:31:03 +08:00
96d10c8b39 feat: spark free quota verify (#1152) 2023-09-11 17:30:54 +08:00
24cb992843 feat: bump version to 0.3.22 (#1153) 2023-09-11 12:04:06 +08:00
7907c0bf58 Update bug_report.yml (#1151) 2023-09-11 10:48:37 +08:00
ebf4fd9a09 Update issue template (#1150) 2023-09-11 10:45:10 +08:00
38b9901274 fix(web): complete some ts type (#1148) 2023-09-11 09:30:17 +08:00
642842d61b Feat:dataset retiever resource (#1123)
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-09-10 15:17:43 +08:00
e161c511af Feat:csv & docx support (#1139)
Co-authored-by: jyong <jyong@dify.ai>
2023-09-10 15:17:22 +08:00
f29e82685e feat: bump version to 0.3.21 (#1145) 2023-09-10 12:34:54 +08:00
3a5ae96e7b fix: TRANSFORMERS_OFFLINE orders in Dockerfile (#1144) 2023-09-10 12:26:13 +08:00
b63a685386 feat: set transformers offline default true (#1143) 2023-09-10 12:20:58 +08:00
877da82b06 feat: cache huggingface gpt2 tokenizer files (#1138) 2023-09-10 12:16:21 +08:00
6637629045 fix: remove the deprecated depends_on.condition format (#1142) 2023-09-10 12:07:20 +08:00
e925b6c572 fix: log page compatible old query (#1141) 2023-09-10 11:29:25 +08:00
5412f4aba5 fix: in log page not show user query (#1140) 2023-09-10 09:30:30 +08:00
2d5ad0d208 feat: support optional query content (#1097)
Co-authored-by: Garfield Dai <dai.hai@foxmail.com>
2023-09-10 00:12:34 +08:00
1ade70aa1e feat: bump version to 0.3.20 (#1135) 2023-09-09 23:47:14 +08:00
2658c4d57b fix: answer returned null when response_mode was blocking (#1133) 2023-09-09 23:22:21 +08:00
84c76bc04a Feat/chat add origin (#1130) 2023-09-09 19:17:12 +08:00
6effcd3755 feat: optimize celery start cmd (#1129) 2023-09-09 13:48:29 +08:00
d9866489f0 feat: add health check and depend condition in docker compose (#1113) 2023-09-09 13:47:08 +08:00
c4d8bdc3db fix: hf hosted inference check (#1128) 2023-09-09 00:29:48 +08:00
681eb1cfcc fix: click inner link no jump (#1118) 2023-09-08 10:21:42 +08:00
a5d21f3b09 fix: shortening invite url (#1100)
Co-authored-by: MatriQi <matri@aifi.io>
2023-09-07 17:15:57 +08:00
7ba068c3e4 fix: self host embedding missing base url config (#1116) 2023-09-07 14:56:38 +08:00
b201eeedbd fix: optimize styles (#1112) 2023-09-07 14:24:09 +08:00
f28cb84977 fix(web): fix AppCard Menu popover open bug (#1107) 2023-09-07 09:47:31 +08:00
714872cd58 chore: enchancment frontend readme (#1110) 2023-09-07 09:43:24 +08:00
0708bd60ee fix: try to fix chunk load error (#1109) 2023-09-06 15:47:53 +08:00
23a6c85b80 chore: handle workspace apps scrollbar (#1101) 2023-09-05 15:56:21 +08:00
4a28599fbd fix: optimize feedback and app icon (#1099) 2023-09-05 09:13:59 +08:00
7c66d3c793 feat: Optimize the description for Azure deployment name (#1091) 2023-09-04 14:26:22 +08:00
cc9edfffd8 fix: markdown code lang capitalization and line number color (#1098) 2023-09-04 11:31:25 +08:00
6fa2454c9a fix: change frontend start script (#1096) 2023-09-04 11:10:32 +08:00
487e699021 fix: ui in chat openning statement (#1094) 2023-09-04 10:26:46 +08:00
a7cdb745c1 feat: support spark v2 validate (#1086) 2023-09-01 20:53:32 +08:00
73c86ee6a0 fix: prompt of title generation (#1084) 2023-09-01 14:55:58 +08:00
48eb590065 feat: optimize last_active_at update (#1083) 2023-09-01 13:58:26 +08:00
33562a9d8d feat: optimize prompt (#1080) 2023-09-01 11:46:06 +08:00
c9194ba382 chore(api): api image multistage build (#1069) 2023-09-01 11:13:22 +08:00
a199fa6388 feat: optimize high load sql query of document segment (#1078) 2023-09-01 10:52:39 +08:00
4c8608dc61 feat: optimize conversation title generation output must be a valid JSON (#1077) 2023-09-01 10:31:42 +08:00
a6b0f788e7 feat: add visual studio code debug config. (#1068)
Co-authored-by: Keruberosu <631677014@qq.com>
2023-09-01 09:15:06 +08:00
df6604a734 feat: optimize generation of conversation title (#1075) 2023-09-01 02:28:37 +08:00
1ca86cf9ce feat: bump version to 0.3.19 (#1074) 2023-08-31 21:42:58 +08:00
78e26f8b75 fix: summary no docs (#1073) 2023-08-31 20:19:26 +08:00
2191312bb9 fix: segments query missing idx hit (#1072) 2023-08-31 19:39:44 +08:00
fcc6b41ab7 feat: decrease claude model request time by set max top_k to 10 (#1071) 2023-08-31 18:23:44 +08:00
9458b8978f feat: siderbar operation support portal (#1061) 2023-08-31 17:46:51 +08:00
d75e8aeafa feat: disable anthropic retry (#1067) 2023-08-31 16:44:46 +08:00
2eba98a465 feat: optimize anthropic connection pool (#1066) 2023-08-31 16:18:59 +08:00
a7a7aab7a0 fix: csv import error (#1063) 2023-08-31 15:42:28 +08:00
86bfbb47d5 chore: doc issue (#1062) 2023-08-31 14:54:16 +08:00
d33a269548 refactor(file extractor): file extractor (#1059) 2023-08-31 14:45:31 +08:00
d3f8ea2df0 Feat/support to invite multiple users (#1011) 2023-08-31 01:18:31 +08:00
7df56ed617 fix error weaviate vector (#1058)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-30 20:34:17 +08:00
e34dcc0406 feat: code support copy (#1057) 2023-08-30 18:08:47 +08:00
a834ba8759 feat: support rename conversation (#1056) 2023-08-30 17:32:32 +08:00
c67f345d0e Fix: disable operations of dataset when embedding unavailable (#1055)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-30 17:27:19 +08:00
8b8e510bfe fix: handle AttributeError for datasets and index (#1052) 2023-08-30 11:14:16 +08:00
3db839a5cb 773 change embed title welcome to use (#1053) 2023-08-30 11:03:25 +08:00
417c19577a feat: add LocalAI local embedding model support (#1021)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-08-29 22:22:02 +08:00
b5953039de recreate qdrant vector (#1049)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-29 15:00:36 +08:00
a43e80dd9c add qdrant migration (#1046)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-29 10:37:04 +08:00
ad5f27bc5f fix openpyxl dimensions error (#1041) 2023-08-29 10:36:48 +08:00
05e0985f29 chore: match new dataset tool format (#1044) 2023-08-29 09:07:45 +08:00
7b3314c5db fix: dataset desc (#1045) 2023-08-29 09:07:27 +08:00
a55ba6e614 Fix/ignore economy dataset (#1043)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-29 03:37:45 +08:00
f9bec1edf8 chore: perfect type definition (#1003) 2023-08-28 19:48:53 +08:00
16199e968e fix notion import limit check (#1042)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-28 16:49:03 +08:00
02452421d5 fix: pub generate message text return null (#1037) 2023-08-28 16:43:54 +08:00
3a5c7c75ad Fix/model selector (#1032) 2023-08-28 10:54:41 +08:00
a7415ecfd8 Fix/upload document limit (#1033) 2023-08-28 10:53:45 +08:00
934def5fcc Fix: eslint (#1030) 2023-08-27 17:06:16 +08:00
0796791de5 feat: hf inference endpoint stream support (#1028) 2023-08-26 19:48:34 +08:00
6c148b223d fix: dataset query truncated (#1026) 2023-08-26 17:35:17 +08:00
4b168f4838 fix: maintenance notice (#1025) 2023-08-26 16:09:55 +08:00
1c114eaef3 feat: update contributing (#1020) 2023-08-25 21:19:13 +08:00
e053215155 fix document estimate parameter (#1019)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-25 20:10:08 +08:00
13482b0fc1 feat: maintenance notice (#1016) 2023-08-25 19:38:52 +08:00
38fa152cc4 fix update document index technique (#1018)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-25 18:29:55 +08:00
2d9616c29c fix: xinference last token being ignored (#1013) 2023-08-25 18:15:05 +08:00
915e26527b update dataset index struct (#1012)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-25 15:52:33 +08:00
2d604d9330 Fix/filter empty segment (#1004)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-25 15:50:29 +08:00
e7199826cc embedding model available check (#1009)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-25 00:25:16 +08:00
70e24b7594 fix: loading and calc rem (#1006) 2023-08-24 23:24:33 +08:00
c1602aafc7 refactor:cache in place & function name (#1001) 2023-08-24 22:54:21 +08:00
a3fec11438 fix: styles (#1005) 2023-08-24 22:37:46 +08:00
b1fd1b3ab3 Feat/vector db manage (#997)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-24 21:27:31 +08:00
5397799aac document limit (#999)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-24 21:27:13 +08:00
8e837dde1a feat: bump version to 0.3.18 (#1000) 2023-08-24 18:13:18 +08:00
9ae91a2ec3 feat: optimize xinference request max token key and stop reason (#998) 2023-08-24 18:11:15 +08:00
276d3d10a0 fix: apps loading issue (#994) 2023-08-24 17:57:38 +08:00
f13623184a fix style in app share (#995) 2023-08-24 17:57:25 +08:00
ef61e1487f fix: safetensor arm complie error (#996) 2023-08-24 17:38:10 +08:00
701e2b334f feat: remove unnecessary prompt of baichuan (#993) 2023-08-24 15:30:59 +08:00
6ebd6e7890 feat: bump version to 0.3.17 (#992) 2023-08-24 15:12:47 +08:00
bd3a9b2f8d fix: xinference-chat-stream-response (#991) 2023-08-24 14:39:34 +08:00
18d3877151 feat: optimize xinference stream (#989) 2023-08-24 13:58:34 +08:00
53e83d8697 feat: optimize baichuan prompt (#988) 2023-08-24 12:07:10 +08:00
6377fc75c6 chore: update lintrc config (#986) 2023-08-24 11:46:59 +08:00
2c30d19cbe feat: add baichuan prompt (#985) 2023-08-24 10:22:36 +08:00
9b247fccd4 feat: adjust hf max tokens (#979) 2023-08-23 22:24:50 +08:00
3d38aa7138 feat: bump version to 0.3.16 2023-08-23 20:16:54 +08:00
7d2552b3f2 feat: upgrade xinference to 0.2.1 which support stream response (#977) 2023-08-23 20:15:45 +08:00
117a209ad4 Fix:condition for dataset availability check (#973) 2023-08-23 19:57:27 +08:00
071e7800a0 fix: add hf task field (#976)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-08-23 19:48:31 +08:00
a76fde3d23 feat: optimize hf inference endpoint (#975) 2023-08-23 19:47:50 +08:00
1fc57d7358 normalize embedding (#974)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-23 19:10:11 +08:00
916d8be0ae fix: activation page reload issue after activating (#964) 2023-08-23 13:54:40 +08:00
a38412de7b update doc (#965) 2023-08-23 12:29:52 +08:00
9c9f0ddb93 fix: user activation request 404 issue (#963) 2023-08-23 08:57:25 +08:00
f8fbe96da4 feat: bump version to 0.3.15 (#959) 2023-08-22 18:20:33 +08:00
215a27fd95 Feat/add xinference openllm provider (#958) 2023-08-22 18:19:10 +08:00
5cba2e7087 fix: web reader tool retrieve content empty (#957) 2023-08-22 18:01:16 +08:00
5623839c71 update document segment (#950)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-22 17:59:24 +08:00
78d3aa5fcd fix: embedding init err (#956) 2023-08-22 17:43:59 +08:00
a7c78d2cd2 fix: spark provider field name (#955) 2023-08-22 17:28:18 +08:00
4db35fa375 chore: obsolete info api use new api (#954) 2023-08-22 16:59:57 +08:00
e67a1413b6 chore: create btn to first place (#953) 2023-08-22 16:20:56 +08:00
4f3053a8cc fix: xinference chat completion error (#952) 2023-08-22 15:58:04 +08:00
b3c2bf125f Feat/model providers (#951) 2023-08-22 15:38:12 +08:00
9d5299e9ec fix: segment error tip & save segment disable when loading (#949) 2023-08-22 15:22:16 +08:00
aee15adf1b update document segment (#948)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-22 15:19:09 +08:00
b185a70c21 Fix/speech to text button (#947) 2023-08-22 14:55:20 +08:00
a3aba7a9aa fix: provider model not delete when reset key pair (#946) 2023-08-22 13:48:58 +08:00
866ee5da91 fix: openllm generate cutoff (#945) 2023-08-22 13:43:36 +08:00
e8039a7da8 fix: add flex-wrap to categories container (#944) 2023-08-22 13:39:52 +08:00
5e0540077a chore: perfect type definition (#940) 2023-08-22 10:58:06 +08:00
b346bd9b83 fix: default language improvement in activation page (#942) 2023-08-22 09:28:37 +08:00
062e2e915b fix: login improvement (#941) 2023-08-21 21:26:32 +08:00
e0a48c4972 fix: xinference chat support (#939) 2023-08-21 20:44:29 +08:00
f53242c081 Feat/add document status tooltip (#937) 2023-08-21 18:07:51 +08:00
4b53bb1a32 Feat/token support (#909)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: jyong <jyong@dify.ai>
2023-08-21 13:57:18 +08:00
4c49ecedb5 feat: optimize web reader summary in 3.5 (#933) 2023-08-21 11:58:01 +08:00
4ff1870a4b fix: web reader tool missing nodejs (#932) 2023-08-21 11:26:11 +08:00
6c832ee328 fix: remove openllm pypi package because of this package too large (#931) 2023-08-21 02:12:28 +08:00
25264e7852 feat: add xinference embedding model support (#930) 2023-08-20 19:35:07 +08:00
18dd0d569d fix: xinference max_tokens alisa error (#929) 2023-08-20 19:12:52 +08:00
3ea8d7a019 feat: add openllm support (#928) 2023-08-20 19:04:33 +08:00
da3f10a55e feat: server xinference support (#927) 2023-08-20 17:46:41 +08:00
8c991b5b26 Fix Readme.md typo error. (#926) 2023-08-20 12:02:04 +08:00
22c1aafb9b fix: document paused at format error (#925) 2023-08-20 01:54:12 +08:00
8d6d1c442b feat: optimize generate name length (#924) 2023-08-19 23:34:38 +08:00
95b179fb39 fix: replicate text generation model validate (#923) 2023-08-19 21:40:42 +08:00
3a0a9e2d8f fix: embedding get price definition missing (#922) 2023-08-19 21:31:40 +08:00
0a0d63457d feat: record price unit in messages (#919) 2023-08-19 18:51:40 +08:00
920fb6d0e1 fix: embedding price config (#918) 2023-08-19 16:54:08 +08:00
fd0fc8f4fe Fix/price calc (#862) 2023-08-19 16:41:35 +08:00
1c552ff23a fix: azure embedding model credentials include base_model_name is invalid for openai sdk (#917) 2023-08-19 16:24:18 +08:00
5163dd38e5 fix: run extra model serval ex not return (#916) 2023-08-19 14:35:16 +08:00
2a27dad2fb fix: run model serval ex not return (#915) 2023-08-19 14:16:41 +08:00
930f74c610 feat: remove unuse envs (#912) 2023-08-18 21:34:28 +08:00
3f250c9e12 Update README_CN.md 2023-08-18 20:39:40 +08:00
fa408d264c Update README.md 2023-08-18 20:38:52 +08:00
09ea27f1ee feat: optimize service api authorization header invalid error (#910) 2023-08-18 20:32:44 +08:00
db7156dafd Feature/mutil embedding model (#908)
Co-authored-by: JzoNg <jzongcode@gmail.com>
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-08-18 17:37:31 +08:00
4420281d96 Feat/segment add tag (#907) 2023-08-18 17:18:58 +08:00
d9afebe216 feat: optimize output parse (#906) 2023-08-18 17:00:40 +08:00
1d9cc5ca05 fix: universal chat when default model invalid (#905) 2023-08-18 16:20:42 +08:00
edb06f6aed fix: react router agent direct output (#904) 2023-08-18 14:31:20 +08:00
6ca3bcbcfd fix: sensitive_word_avoidance npe (#902) 2023-08-18 11:43:56 +08:00
71a9d63232 fix entrypoint script line endings (#900) 2023-08-18 10:42:44 +08:00
fb62017e50 Fix/embedding chat (#899)
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-08-18 10:39:05 +08:00
9adbeadeec feat: claude paid optimize (#890) 2023-08-17 16:56:20 +08:00
2f7b234cc5 fix: max token not exist in generate summary when calc rest tokens (#891) 2023-08-17 16:33:32 +08:00
4f5f9506ab Feat/pay modal (#889) 2023-08-17 15:49:22 +08:00
0cc0b6e052 fix: error raise status code not exist (#888) 2023-08-17 15:33:35 +08:00
cd78adb0ab feat: support show model display name (#887) 2023-08-17 15:13:35 +08:00
f42e7d1a61 feat: add spark v2 support (#885) 2023-08-17 15:08:57 +08:00
c4d759dfba fix: wenxin error not raise when stream mode (#884) 2023-08-17 13:40:00 +08:00
a58f95fa91 fix: web dockfile (#883) 2023-08-17 13:07:07 +08:00
39574dcf6b feat: optimize prompt of suggested_questions_after_answer (#881) 2023-08-17 10:46:33 +08:00
5b06ded0b1 build: improve dockerfile (#851)
Co-authored-by: MatriQi <matri@aifi.io>
2023-08-17 10:25:11 +08:00
155a4733f6 Feat/customizable file upload config (#818) 2023-08-16 23:14:27 +08:00
b7c29ea1b6 feat: optimize model when app create (#875) 2023-08-16 22:29:18 +08:00
cc2d71c253 feat: optimize override app model config convert (#874) 2023-08-16 20:48:42 +08:00
cd11613952 Update README.md (#865) 2023-08-16 19:26:35 +08:00
e0d6d00a87 Update README_CN.md (#867) 2023-08-16 19:26:11 +08:00
2dfb3e95f6 feat: optimize error record in agent (#869) 2023-08-16 15:55:42 +08:00
f207e180df fix multi thread app context (#868)
Co-authored-by: jyong <jyong@dify.ai>
2023-08-16 15:39:31 +08:00
948d64bbef fix: get_num_tokens_from_messages params error (#866) 2023-08-16 14:58:44 +08:00
01e912e543 fix: promptEng menu in wrong place (#864) 2023-08-16 14:56:17 +08:00
f95f6db0e3 feat: support app rename and make app card ui better (#766)
Co-authored-by: Gillian97 <jinling.sunshine@gmail.com>
2023-08-16 10:31:08 +08:00
216fc5d312 feat: bump version 0.3.14 (#861) 2023-08-15 22:46:15 +08:00
7a8590980e fix: dataset direct output (#860) 2023-08-15 22:27:31 +08:00
e8c14bb732 feat: rename title in site both rename name in app (#857) 2023-08-15 20:42:32 +08:00
bf45f08e78 chore: handle provider name capitalization (#855) 2023-08-15 17:22:40 +08:00
2c77a74c40 fix: frontend permission check (#784) 2023-08-15 13:35:47 +08:00
440cf63317 fix: setting modal margin (#849) 2023-08-15 12:05:27 +08:00
50b11e925b fix: change config string variable limit (#837)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2023-08-15 11:26:58 +08:00
7cc81b4269 fix: var config content can not be saved (#841) 2023-08-15 09:51:43 +08:00
93b0813b73 Update README.md (#839) 2023-08-15 09:43:21 +08:00
649b44aefa Update README_CN.md (#840) 2023-08-15 09:43:11 +08:00
1e95d74ae2 update doc (#838) 2023-08-15 09:25:37 +08:00
700d5f2673 update llms (#835) 2023-08-14 22:41:40 +08:00
3b8234e486 feat: bump version to 0.3.13 (#830) 2023-08-14 16:36:49 +08:00
0feb0bf7c0 fix: free quota tip (#831) 2023-08-14 16:36:04 +08:00
c5d148bf94 fix #794 input bug (#801) 2023-08-14 15:29:18 +08:00
e5e86fc033 Feat/apply free quota (#828)
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-08-14 12:46:28 +08:00
cc52cdc2a9 Feat/add free provider apply (#829) 2023-08-14 12:44:35 +08:00
42a417167f feat: add system default model help tip (#827) 2023-08-13 22:50:31 +08:00
4b0d9272ef Fix 802 (#826) 2023-08-13 20:30:17 +08:00
48a303b8e9 Feature/fix disable site (#825) 2023-08-13 17:32:23 +08:00
8e15ba6cd6 Fix/no trial provider (#823) 2023-08-13 14:56:32 +08:00
7898937eae feat: optimize message return (#822) 2023-08-13 13:51:12 +08:00
1bd0a76a20 feat: optimize error raise (#820) 2023-08-13 00:59:36 +08:00
2f179d61dc fix: completion error when dataset was deleted (#819) 2023-08-13 00:25:05 +08:00
7457550673 feat: frontend remove gpt4 check (#815) 2023-08-12 15:05:51 +08:00
c13a90ee69 only admin and owner can delete app (#810) 2023-08-12 14:18:21 +08:00
5a7b51f809 fix: label (#809) 2023-08-12 10:41:05 +08:00
f18ce203b5 feat: optimize error logging (#808) 2023-08-12 02:22:43 +08:00
b81b8637ec feat: temp remove paid option of anthropic (#807) 2023-08-12 01:54:38 +08:00
0c6f92d9be Feat/only tag arm64 build (#806) 2023-08-12 01:44:18 +08:00
55b24c373f Revert "Fix/disable site when change code" (#805) 2023-08-12 01:38:53 +08:00
d10ef17f17 feat: frontend multi models support (#804)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-08-12 00:57:13 +08:00
5fa2161b05 feat: server multi models support (#799) 2023-08-12 00:57:00 +08:00
d8b712b325 fix bug desc/copyright/privacy_policy none (#796) 2023-08-11 18:21:11 +08:00
220f7c81e9 build: fix .dockerignore file (#800) 2023-08-11 18:19:44 +08:00
fc7e4ac75b fix: automatically create tenant for user (#793) 2023-08-11 18:18:11 +08:00
39933aeb62 feat: add readme (#791) 2023-08-09 20:15:24 +08:00
beb8065660 fix: remove ruby from repo due to main gitignore (#790) 2023-08-09 19:47:50 +08:00
36080fe352 fix: add missing code (#788) 2023-08-09 19:36:39 +08:00
a510f32124 Add Ruby's SDK implement code. (#786) 2023-08-09 19:21:52 +08:00
cc277227ad fix i is not incremented due to violating the uniqueness constraint w… (#771)
Co-authored-by: 李啸吟 <746963140@qq.com>
2023-08-08 21:19:06 +08:00
3d194787b4 Fix/disable site when change code (#775) 2023-08-08 10:00:00 +08:00
a8d5ef9894 fix: members page z-index bug (#768) 2023-08-08 09:17:31 +08:00
6242e91a6b Fix: Install page redirects to signin if Dify finished setup. (#762) 2023-08-07 13:19:47 +08:00
cc7b5d128b fix: doc issue in #757 (#767) 2023-08-07 11:30:39 +08:00
f914eb95eb fix: doc links (#763) 2023-08-07 10:50:45 +08:00
8ae1eb0ebb lint: frontend linting issues (#744) 2023-08-07 10:20:40 +08:00
2ba89d0deb fix: chatbot not show all in small screen (#765) 2023-08-07 09:40:16 +08:00
3b08bf1c6c feat: add app icon modify route (#760) 2023-08-06 16:21:35 +08:00
95689ec451 fix: modify app name & icon raise 401 (#759) 2023-08-06 16:11:04 +08:00
51554361fc refactor: Added project name to Docker Compose command (#753) 2023-08-05 21:54:42 +08:00
491d29cc87 feat: optimize multi platform image build (#754) 2023-08-05 17:23:57 +08:00
6a7a71af1f perf: operational feedback (#749) 2023-08-05 10:11:48 +08:00
a25e038a8b fix: text copy issue (#723) 2023-08-04 10:49:13 +08:00
5d783a4922 fix: wrong version tag of base docker image (#739) 2023-08-03 22:22:27 +08:00
f0eab73f3d Update README.md (#735) 2023-08-03 16:33:49 +08:00
a693569621 fix: unable to open switch (#726) 2023-08-03 16:33:30 +08:00
30c67dcd8c fix: package changed made build pipe fail again (#732) 2023-08-03 13:20:52 +08:00
2295cce489 Update README_CN.md (#730) 2023-08-03 13:18:03 +08:00
bfbaf2daa5 fix: package changed made build pipe fail (#731) 2023-08-03 12:25:33 +08:00
dfe10e9dfe fix: generate_more_like_this function issue (#722) 2023-08-03 11:37:09 +08:00
60ac915c9c Fix: hide qa in cloud version (#729) 2023-08-03 11:28:42 +08:00
b1b9e3ff53 refactor: move dev packages to devDependencies (#719) 2023-08-03 10:49:25 +08:00
c4c47ae8c6 feat: add doc (#728) 2023-08-03 10:40:36 +08:00
17c3a63e50 fix: explore app list grid style conflict and remove useless style (#725) 2023-08-03 09:51:00 +08:00
654985177f fix: segment resort in dataset retrieve by index_node_id_to_position (#721) 2023-08-02 21:31:54 +08:00
0d791839e6 perf:repeated select workspace (#710) 2023-08-02 17:33:45 +08:00
0fc76f7e17 fix(web): fix style override issue (#713) 2023-08-02 17:32:11 +08:00
41d33ee837 fix: abnormal styles (#711) 2023-08-02 17:31:30 +08:00
9485cc9308 fix: can not choose emoji (#716) 2023-08-02 15:22:27 +08:00
e18211ffea feat: fix azure completion choices return empty (#708) 2023-08-01 15:36:53 +08:00
a856ef387b feat: dashboard add tps chart (#706)
Co-authored-by: John Wang <takatost@gmail.com>
2023-08-01 15:17:20 +08:00
fa73aa8dbf add embedding max retries (#699) 2023-07-31 23:28:37 +08:00
c48ec1334e fix web style (#684) 2023-07-31 16:24:51 +08:00
1647970fb6 Add trobleshooting notes for devcontainer (#687) 2023-07-31 16:24:37 +08:00
12ecf89a87 feat: fix completion log error (#692) 2023-07-31 15:38:13 +08:00
a0bd15245a Fix/app logs today filter (#689) 2023-07-31 13:30:04 +08:00
0c18cab111 feat: add queue to celery task (#688) 2023-07-31 13:13:08 +08:00
396197e881 fix: not annotation error in log (#686) 2023-07-31 11:50:35 +08:00
6a564e2d5c fix: server side render trigger GitHub api rate limit (#685) 2023-07-31 11:07:44 +08:00
f369202c12 feat: remove llama index citation (#679) 2023-07-30 01:46:27 +08:00
a4678845dd feat: bump version to 0.3.12 (#674) 2023-07-29 17:49:35 +08:00
174ebb51db add qa thread control (#677) 2023-07-29 17:49:18 +08:00
626c78a690 fix: agent parse result error (#676) 2023-07-29 17:00:38 +08:00
9eaae770a6 Feat/add thread control (#675) 2023-07-29 17:00:21 +08:00
ca60610306 logging qa error (#672) 2023-07-29 01:51:18 +08:00
082f8b17ab Feat/milvus support (#671)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: JzoNg <jzongcode@gmail.com>
2023-07-28 22:19:39 +08:00
cf93d8d6e2 Feat: Q&A format segmentation support (#668)
Co-authored-by: jyong <718720800@qq.com>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-07-28 20:47:15 +08:00
aae2fb8a30 fix: dataset retrieve npe when dataset desc is null (#669) 2023-07-28 17:40:36 +08:00
23e52f14e3 feat: chat add page title (#667) 2023-07-28 14:44:45 +08:00
c5b68fb273 fix: app config speech-to-text feature (#665) 2023-07-28 14:02:32 +08:00
6f17c9b2fe fix: next version (#666) 2023-07-28 14:02:17 +08:00
c98311b325 Update LICENSE (#663) 2023-07-28 09:45:10 +08:00
d44d4bd6fd feat: support query date tool (#662) 2023-07-27 22:27:05 +08:00
2adaceab82 feat: bump version to 0.3.11 (#654) 2023-07-27 22:25:32 +08:00
d979955c8a feat: optimize current time (#661) 2023-07-27 22:15:07 +08:00
eae670ea4a feat: enchance chat user experience (#660) 2023-07-27 18:04:41 +08:00
b5825142d1 feat: add current time tool in universal chat agent (#659) 2023-07-27 17:39:36 +08:00
741e9303d4 fix: use sharp logo replace old logo (#658) 2023-07-27 16:34:30 +08:00
538e3fc256 fix: return message error in blocking mode (#657) 2023-07-27 16:14:45 +08:00
ba3dc8cae0 feat: fix dataset retrieve agent llm not support error (#656) 2023-07-27 15:45:52 +08:00
ae7c0380dc Feat/application api add speech to text (#655) 2023-07-27 14:53:19 +08:00
23e3413655 feat: chat in explore support agent (#647)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-07-27 13:27:34 +08:00
4fdb37771a feat: universal chat in explore (#649)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-07-27 13:08:57 +08:00
94b54b7ca9 feat: replace the end user column in the web page Log & Ann. with the… (#653)
Co-authored-by: Hao Fu <hao.fu@helloklarity.com>
2023-07-27 12:48:43 +08:00
f9412f5fdb fix: site enable check (#645) 2023-07-26 11:11:09 +08:00
1d6829f400 Feat/application config user input field collapse (#643) 2023-07-26 10:27:52 +08:00
f8bae897e5 fix: switch workspace (#642) 2023-07-26 10:25:35 +08:00
dd1172b57e Perf: Support for password display and hiding (#636)
Co-authored-by: Selenium39 <selenium39@qq.com>
2023-07-24 14:48:00 +08:00
67d326a558 fix(web): fix svg unrecognized props (#631) 2023-07-24 10:31:56 +08:00
fe747040bc downgrade next version (#626) 2023-07-21 12:27:23 +08:00
7d6c925cbc fix(web): using Tooltip unique selector key (#622) 2023-07-21 11:15:00 +08:00
f488d06b20 fix: Top P description error (#624) 2023-07-21 09:15:52 +08:00
c00a19ced3 fix(web): fix Embedded copy status when toggle options (#621) 2023-07-21 09:06:51 +08:00
e9810a6df2 fix: azure openai embedding model name error (#612) 2023-07-20 13:52:54 +08:00
cae15013e0 fix: azure openai deployment list was deprecated suddenly (#611) 2023-07-20 13:46:39 +08:00
52c84da051 add clean unused dataset command (#609) 2023-07-20 11:08:28 +08:00
026f0bfce9 Feat/clean vector dataset (#605) 2023-07-19 21:30:25 +08:00
d19181fb29 chore: minify embed js (#604) 2023-07-19 19:48:44 +08:00
2f9de2229f feat: embed into other site support set custom host (#580)
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-07-19 19:43:07 +08:00
34f55739e0 fix(web): fix #596 copy-to-clipboard issue (#602) 2023-07-19 19:29:37 +08:00
668b059c07 fix: quick switch and click create conversation button may caused fetch conversation list error (#603) 2023-07-19 17:17:29 +08:00
753e5f1500 Fix/application configuration preview style (#597) 2023-07-19 12:41:35 +08:00
a6af8e5d8f Fix/new conversation in mobile phone (#593) 2023-07-18 16:57:28 +08:00
3e1d5ac51b Feat/header ssr (#594) 2023-07-18 16:57:14 +08:00
b0091452ca feat: add bash before entrypoint.sh in Dockerfile (#592) 2023-07-18 16:22:34 +08:00
eff115267f fix: anthropic completion error in blocking mode (#591) 2023-07-18 15:12:52 +08:00
07cde4f8fe feat: bump 0.3.10 (#589) 2023-07-18 15:04:49 +08:00
9f28a48a92 index add to db when dataset updated (#588) 2023-07-18 15:02:33 +08:00
0d3cd3b16a fix: azure provider select error when use custom azure provider (#587) 2023-07-18 14:34:09 +08:00
3dc82fb044 feat: remove davinci required model from azure provider (#586) 2023-07-18 14:14:56 +08:00
cb6e73347e Feat/add ruby sdk (#583) 2023-07-18 10:18:58 +08:00
ecd6cbaee6 Fix/use embedded chatbot with no track mode (#582) 2023-07-18 09:45:17 +08:00
d54e942264 Feat: hide password setting and invitation link in cloud version (#581) 2023-07-18 08:54:14 +08:00
28ba721455 Update README_CN.md (#575) 2023-07-17 11:08:26 +08:00
784dd7848e Update README.md (#576) 2023-07-17 11:08:03 +08:00
e2a5f8ba1a feat: bump version to 0.3.9 (#574) 2023-07-17 09:47:23 +08:00
8e11200306 feat: frontend support claude (#573)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-07-17 00:14:32 +08:00
7599f79a17 feat: claude api support (#572) 2023-07-17 00:14:19 +08:00
510389909c fix: change chatbot avart to dify icon (#571) 2023-07-16 16:30:55 +08:00
2c6e00174b add document limit check (#570) 2023-07-16 13:21:56 +08:00
24f3456990 fix: account check in runtime (#569) 2023-07-15 23:58:15 +08:00
20514ff288 fix: table too wide fix text generation ui (#566) 2023-07-14 18:15:56 +08:00
381d255290 fix setting-modal provider encrypted tip style (#565) 2023-07-14 17:10:02 +08:00
7f320f9146 feat: bump version to 0.3.8 (#559) 2023-07-14 11:53:15 +08:00
cd51d3323b feat: member invitation and activation (#535)
Co-authored-by: John Wang <takatost@gmail.com>
2023-07-14 11:19:26 +08:00
004b3caa43 Feature/add delete to service (#555) 2023-07-14 10:37:33 +08:00
dbe10799e3 fix: user cancel conversation show error (#558) 2023-07-13 10:32:45 +08:00
054ba88434 fix: regeneration not clear like status and sub more items (#557) 2023-07-13 10:31:07 +08:00
da82a11b26 feat: batch run support export as csv file (#556) 2023-07-13 09:30:16 +08:00
fec607db81 Feat/embedding (#553)
Co-authored-by: Gillian97 <jinling.sunshine@gmail.com>
Co-authored-by: Joel <iamjoel007@gmail.com>
2023-07-12 17:27:50 +08:00
397a92f2ee convert audio wav to mp3 (#552) 2023-07-12 17:18:56 +08:00
b91e226063 fix: api doc update conversation list api to real response (#548) 2023-07-12 13:53:06 +08:00
da5782df92 fix: mobile not auto show generation res (#544) 2023-07-11 17:16:28 +08:00
9af0da4450 fix jwt in web (#545) 2023-07-11 17:07:52 +08:00
d49ac1e4ac Feature/use jwt in web (#533)
Co-authored-by: crazywoola <li.zheng@dentsplysirona.com>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2023-07-11 15:21:20 +08:00
57de19a5ca feat: bump version to 0.3.7 (#540) 2023-07-10 15:23:38 +08:00
7c00a0b6a3 fix voice input in safari (#537) 2023-07-10 10:16:38 +08:00
a93506df18 Fix/dataset clean task (#534) 2023-07-08 17:29:56 +08:00
a03a92e9db Feat/chat support voice input (#532) 2023-07-07 17:50:42 +08:00
feebb5dd1f feat: dataset list add order by created at (#531) 2023-07-07 11:51:48 +08:00
6eee7cb42c feat: fix azure embedding Too many inputs problem (#530) 2023-07-07 11:17:36 +08:00
11baff6740 feat: text generation application support run batch (#529) 2023-07-07 10:35:05 +08:00
cde1797cc0 feat: max token add tip (#525) 2023-07-06 15:57:04 +08:00
d143284d99 Fix: stop embedding status display (#523) 2023-07-06 10:51:30 +08:00
2b94545190 fix check version api (#520) 2023-07-05 11:11:38 +08:00
ed6648a41e feat: dataset list add order by created at (#487) 2023-07-05 11:00:21 +08:00
5e2c3eeac3 fix: chat app added new var old conversation not work (#511) 2023-07-04 14:33:41 +08:00
b23d8a912b fix: add missing like i18n (#512) 2023-07-04 14:21:51 +08:00
4f13f8fd0a fix: change langenius text to dify (#498) 2023-07-02 14:01:11 +08:00
561c9cabd5 fix: input text repeat (#492) 2023-06-29 17:27:48 +08:00
39ea967b30 refact common layout (#490) 2023-06-29 15:30:12 +08:00
da04ff040b fix: remove document from dataset error when vector index npe (#489) 2023-06-29 13:09:22 +08:00
b9b0866a46 fix: generate summary error when tokens=4097 (#488) 2023-06-29 12:54:50 +08:00
c6ab7eebd9 fix: delete operation style error (#485) 2023-06-29 09:24:31 +08:00
db4e6d81c5 fix: choose dataset not selected after one page (#481) 2023-06-29 09:22:42 +08:00
df68a7c82b feat: Optimize the quality of the title generate (#484) 2023-06-28 19:59:20 +08:00
838825d747 feat: optimize conversation operation (#479) 2023-06-28 17:53:23 +08:00
a87f6f2837 fix: modal disappear (#478) 2023-06-28 16:44:17 +08:00
9d98669e7d fix: dataset destination error (#477) 2023-06-28 15:51:07 +08:00
408fbb0c70 fix: title, summary, suggested questions generate (#476) 2023-06-28 15:43:33 +08:00
998f819b04 use sub to operate all (#475) 2023-06-28 14:58:40 +08:00
6194b82752 feat: bump to 0.3.6 (#474) 2023-06-28 14:23:20 +08:00
334f46d0b6 Fix/json format (#466) 2023-06-28 13:58:50 +08:00
2eea114ac0 fix special code (#473) 2023-06-28 13:58:36 +08:00
97e9ebd29a Feature/add is deleted to conversations (#470) 2023-06-28 13:31:51 +08:00
ec261aea54 feat: conversation app support pin and delete conversation (#467) 2023-06-28 11:16:54 +08:00
accc5faae3 fix: delete dataset not trigger show start new conversation message (#471) 2023-06-28 10:39:40 +08:00
0462f09ecc fix: app nav call detail match explore app detail page (#469) 2023-06-27 18:40:24 +08:00
1226d73159 Feat/refact header (#468) 2023-06-27 18:02:01 +08:00
c67ecff3fe Fix/json format (#465) 2023-06-27 17:15:03 +08:00
d5b42c09ee fix: template parse error when history include {{any}} (#463) 2023-06-27 16:35:50 +08:00
835bf9fd8d fix: template parse error when pre prompt include {{}} (#462) 2023-06-27 15:51:55 +08:00
c720f831af feat: optimize template parse (#460) 2023-06-27 15:30:38 +08:00
df5763be37 feat: optimize openai error raise (#459) 2023-06-27 12:34:47 +08:00
80eebc2414 feat: upgrade nextjs version (#457) 2023-06-27 12:12:41 +08:00
17d196126c Feat/add icons (#450) 2023-06-26 15:36:52 +08:00
addf150a9e fix: hove x scroll shake (#449) 2023-06-26 13:35:12 +08:00
cad1532f7c feat: optimize index_struct copy (#442) 2023-06-25 17:52:22 +08:00
951afcaaed feat: optimize weaviate error msg (#441) 2023-06-25 17:05:56 +08:00
3241e4015b feat: upgrade langchain (#430)
Co-authored-by: jyong <718720800@qq.com>
2023-06-25 16:49:14 +08:00
Bin 1dee5de9b4 bugfix: conversation parameters (#438) 2023-06-25 16:14:42 +08:00
742bad93b5 feat: bump version to 0.3.5 (#433) 2023-06-21 16:18:41 +08:00
bb3cc6bba6 fix: file size limit to 15M (#431) 2023-06-21 16:08:57 +08:00
23ef2262bd fix: filter empty value in xlsx to improve vector similarity hit (#422) 2023-06-21 11:25:52 +08:00
d637a147ee feat: support batch upload files (#419) 2023-06-21 09:44:01 +08:00
8a4d19d9ba fix: actions 2023-06-21 09:10:07 +08:00
bea382f0dc fix: dataset can only choose first page data (#425)
Support infinite scroll loader data.
2023-06-20 18:08:28 +08:00
8b39e48957 fix REDIS_USERNAME format (#414) 2023-06-19 22:14:47 +08:00
5b4538f021 feat: add more labels 2023-06-19 22:09:02 +08:00
36dc05c4da fix chinese encoding (#411) 2023-06-19 18:41:17 +08:00
54f3bbbf47 feat: bump version to 0.3.4 (#406) 2023-06-19 16:44:48 +08:00
f797fab206 Fix/dataset add pages tip (#410) 2023-06-19 16:32:25 +08:00
ce2996e7d4 Fix/dataset init (#409) 2023-06-19 16:32:03 +08:00
82d07ed2a8 doc: add annaconda info (#402) 2023-06-19 11:09:40 +08:00
c39d8f954e fix: word break in en and other languages (#385) 2023-06-19 09:36:05 +08:00
226f28edcb Feature/self host notion import (#397) 2023-06-17 19:50:21 +08:00
402b0b81d2 feat: add community helm support readme (#395) 2023-06-17 18:25:40 +08:00
b08c19d926 fix encoding is none (#394) 2023-06-17 15:21:48 +08:00
9253f72dea Feat/dataset notion import (#392)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: JzoNg <jzongcode@gmail.com>
2023-06-16 21:47:51 +08:00
f350948bde Fix the issue of decoding a non-UTF-8 encoded file using UTF-8 (#389) 2023-06-16 14:23:03 +08:00
eeb2c28526 Fix the issue of decoding a non-UTF-8 encoded file using UTF-8 encodi… (#378) 2023-06-16 14:12:07 +08:00
673288d58e fix(i18n): Make text gender neutral (#379) 2023-06-16 07:25:50 +08:00
772d67fd65 feat: suport var select options sortable (#376) 2023-06-15 17:07:17 +08:00
7552a6be36 feat: add last active at for accounts (#375) 2023-06-15 13:59:36 +08:00
33200090e8 feat: update actions 2023-06-15 12:51:51 +08:00
01a6c725fa fix: max token tooltip description (#370) 2023-06-15 10:06:43 +08:00
f6e04389e4 Community i18n doc (#365) 2023-06-15 09:39:56 +08:00
e22814b291 fix application model selector style (#360) 2023-06-14 14:23:41 +08:00
a66ef7210b feat: bump version to 0.3.3 (#359) 2023-06-14 12:17:56 +08:00
184afa69ff feat: add gpt-3.5-turbo-16k support and update openai gpt-3.5-turbo & Embedding Ada v2 unit price (#358) 2023-06-14 12:17:43 +08:00
ab115b5f87 fix: completion stop invalid (#355) 2023-06-13 17:47:42 +08:00
3bbc4ad3db fix: change default help link to english (#354) 2023-06-13 17:12:51 +08:00
87af414a52 feat: stop response enchancement (#352) 2023-06-13 16:34:53 +08:00
72555d5df8 feat: add frontend sentry docker compose config (#353) 2023-06-13 16:30:31 +08:00
fff39a307a feat: use react sentry to support pass config via runtime (#350) 2023-06-13 16:04:54 +08:00
a11f36ca60 fix: stop completion response not save to db (#351) 2023-06-13 15:47:58 +08:00
433f8cb57e Feature/add emoji to webapp (#345) 2023-06-13 14:54:12 +08:00
cd136fb293 feat: add WEAVIATE_BATCH_SIZE (#349) 2023-06-13 14:49:40 +08:00
6a3ab36101 feat: optimize weaviate batch size (#348) 2023-06-13 11:28:15 +08:00
1af968e73a feat: optimize api language support (#344) 2023-06-13 10:06:49 +08:00
94646f29c3 Update README_CN.md (#342) 2023-06-12 21:14:34 +08:00
e028a0595c Update README.md (#341) 2023-06-12 21:14:21 +08:00
b16a7b0b3b feat: stop response call api (#340) 2023-06-12 16:37:03 +08:00
e083a7067b Create README_ES.md (#335) 2023-06-10 18:25:13 +08:00
205459d54d fix: button abnormal style (#333) 2023-06-10 13:19:08 +08:00
3d14431b96 Fix/excel data format (#334) 2023-06-09 20:21:11 +08:00
2ba0ee989a feat: bump version to 0.3.2 (#330) 2023-06-09 16:25:26 +08:00
b055470147 Fix: xls not supported (#329) 2023-06-09 16:11:27 +08:00
5943385d42 Fix: the bug that allows regular users to add unregistered users to the workspace. (#328) 2023-06-09 16:07:53 +08:00
0abd67288b feat: support xlsx file parsing (#304)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2023-06-09 15:57:19 +08:00
bbe58327c8 feat: remove ph (#327) 2023-06-09 14:39:37 +08:00
299c51ebc4 feat: npm sdk to 2.0 to fix steaming problem (#326) 2023-06-09 14:36:48 +08:00
3a7f58d2a6 Feature/fix streaming mode (#324) 2023-06-09 14:24:59 +08:00
6123bba96d feat: add reset-encrypt-key-pair cmd for self hosted mode (#325) 2023-06-09 11:36:38 +08:00
d5ab3b5072 fix: output code too long break ui (#320) 2023-06-08 16:27:37 +08:00
df26f82536 Feature/support xlsx (#311) 2023-06-08 15:23:38 +08:00
dbe0c43515 Chore: support gradient border and text (#317) 2023-06-08 09:38:11 +08:00
f4052fdbc7 fix: analysis all time param (#316) 2023-06-07 22:18:21 +08:00
b5ade19c75 feat: fix frontend docker image build fail (#314) 2023-06-07 16:47:49 +08:00
040eacb8bd fix: safari 14 not show modal (#310) 2023-06-07 09:59:33 +08:00
20899c44ff fix: segment search by keyword (#303) 2023-06-07 00:45:25 +08:00
35a2beb195 delete segment not commit (#309) 2023-06-06 23:16:51 +08:00
2056093855 update docker compose cmd (#308) 2023-06-06 20:26:45 +08:00
2bf48514bc fix markdown parser (#230) 2023-06-06 19:51:40 +08:00
c109b1a920 fix: stale.yml 2023-06-06 15:27:04 +08:00
45499328b8 fix: actions 2023-06-06 15:22:20 +08:00
4c61aa399d Create stale.yml 2023-06-06 15:19:27 +08:00
3e380c082a fix: reset some config not work: like var required status, dataset, feature status (#305) 2023-06-06 14:58:56 +08:00
53db5bab36 Feat/add GitHub star icon (#302) 2023-06-06 11:22:00 +08:00
6483beb096 Feat/auto rule generate (#300) 2023-06-06 10:52:02 +08:00
e61c84ca72 fix: header nav load more app (#296) 2023-06-06 10:42:32 +08:00
d70086b841 feat: sentry to dify account (#299) 2023-06-06 10:29:38 +08:00
a3ee037d6d feat: optimize output parse failed error (#298) 2023-06-05 11:23:51 +08:00
2de18a6490 fix: ignore VSCode setting.json path (#297) 2023-06-05 10:54:09 +08:00
4134e915ce fix: tooltip covered by high z index element (#295) 2023-06-05 10:49:06 +08:00
a838ba7b46 Chore/ignore vscode setting (#293) 2023-06-05 10:15:16 +08:00
5f38214a41 chore: mute handle message cut off (#291) 2023-06-05 09:55:03 +08:00
19b5cb1e10 feat: fix json end with `` (#285) 2023-06-02 17:34:24 +08:00
2478c88e07 feat: increase dataset description length to 400 (#283) 2023-06-02 14:03:18 +08:00
59e59c19b2 fix: missing imports (#281) 2023-06-01 23:40:34 +08:00
c67f626b66 Feat: Support re-segmentation (#114)
Co-authored-by: John Wang <takatost@gmail.com>
Co-authored-by: Jyong <718720800@qq.com>
Co-authored-by: 金伟强 <iamjoel007@gmail.com>
2023-06-01 23:19:36 +08:00
f65a3ad1cc Feature/replace default icon in overview (#279) 2023-06-01 13:06:56 +08:00
490858a4d5 feat: auto rule generator (#273) 2023-05-31 22:03:15 +08:00
44a1aa5e44 fix: dataset_tool npe (#274) 2023-05-31 17:16:27 +08:00
a616bf3129 Fix/long more suggestion not see all (#272) 2023-05-31 17:09:55 +08:00
f2f19484b8 fix: text generation too long hide the operation btn (#271) 2023-05-31 16:24:30 +08:00
f572b55237 chore: link prefetch deprecated. Remove warning message. (#270) 2023-05-31 14:56:14 +08:00
554570dc22 feat: feature support UI preview (#269) 2023-05-31 14:10:59 +08:00
5239b2c7ab Feat/dashboard more chart (#266) 2023-05-31 11:21:30 +08:00
ae94b067b3 feat: new stats (#265) 2023-05-31 11:20:24 +08:00
5e772bd10b fix: stop response btn hide messages (#261) 2023-05-30 16:15:08 +08:00
91bcbd0b26 fix: svg attr in ts file (#260) 2023-05-30 15:26:26 +08:00
54bb309d87 fix: remove sentry for community edtion and dev (#259) 2023-05-30 15:09:25 +08:00
75f7a96025 feat: ignore validate failed error log (#256) 2023-05-30 12:25:42 +08:00
ccd80653ff fix: query empty not allow (#255) 2023-05-30 12:24:51 +08:00
5ca88a4fd9 fix: raw json parse in llm router chain (#254) 2023-05-30 12:16:45 +08:00
a1c6cecf10 feat: bump to 0.3.1 (#253) 2023-05-30 11:31:22 +08:00
c5ccf382df chore: input area highlight and moblie hide tooltip (#251) 2023-05-30 11:16:31 +08:00
8358d0abfa fix: config file lint error (#250) 2023-05-30 10:32:26 +08:00
bad3b14438 fix: member invite text (#249) 2023-05-30 09:59:05 +08:00
f42ef494f8 Fix: correct links in app list (#248) 2023-05-30 08:08:33 +08:00
bb7f454ecd fix: dataset desc npe (#246) 2023-05-29 19:56:36 +08:00
7f48fadd41 fix: prompt template parantheses select error (#244) 2023-05-29 19:10:31 +08:00
af2138e8b8 fix: json parse in router chain output (#243) 2023-05-29 18:25:01 +08:00
091beffae7 feat: add code style (#242) 2023-05-29 17:49:01 +08:00
408fb502a1 fix: no var text still show split line (#239) 2023-05-29 14:35:21 +08:00
7660539689 fix: markdown code always show scrollbar (#237) 2023-05-29 14:05:59 +08:00
5a6061ff61 chore: handle sentry warning (#236) 2023-05-29 13:58:32 +08:00
970950e3a8 feat: support select multi datasets (#235) 2023-05-29 13:52:56 +08:00
431b2fd4a8 Feat: add sentry (#234) 2023-05-29 11:38:24 +08:00
88545184be feat: support multi datasets router chain mode (#231) 2023-05-28 22:44:54 +08:00
2c23caacd4 fix: introduction key error (#221) 2023-05-26 20:49:38 +08:00
9edea9bc49 fix: one chinese character cost token nums (#219) 2023-05-26 16:24:59 +08:00
d43279a1cc fix: robot emoji (#217) 2023-05-26 15:26:56 +08:00
10848d74a0 fix: changelog link (#216) 2023-05-26 10:22:35 +08:00
f9df23a091 fix: default icon (#213) 2023-05-26 09:55:37 +08:00
17a1c05728 fix: var highlight problme (#214) 2023-05-25 23:38:06 +08:00
66782ef19c chore: title support i18n (#212) 2023-05-25 22:13:43 +08:00
fb7f509e5c chore: show explore entrance (#211) 2023-05-25 21:49:12 +08:00
1a5acf43aa Fix/shared lock (#210) 2023-05-25 21:31:11 +08:00
1426 changed files with 83229 additions and 15329 deletions

8 .devcontainer/Dockerfile Normal file

@ -0,0 +1,8 @@
FROM mcr.microsoft.com/devcontainers/python:3.10
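# Copy the repository into the image; the devcontainer build context is the repo root (see "context": ".." in devcontainer.json)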
COPY . .
# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>

37 .devcontainer/README.md Normal file

@ -0,0 +1,37 @@
# Development with devcontainer
This project includes a devcontainer configuration that allows you to open the project in a container with a fully configured development environment.
Both frontend and backend environments are initialized when the container is started.
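(Concretely, the `postStartCommand` and `postCreateCommand` in `devcontainer.json` install the API's Python dependencies and the web app's npm dependencies.)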
## GitHub Codespaces
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langgenius/dify)
You can simply click the button above to open this project in GitHub Codespaces.
For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
## VS Code Dev Containers
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langgenius/dify)
If you have VS Code installed, you can click the button above to open this project in VS Code Dev Containers.
You can learn more in the [Dev Containers documentation](https://code.visualstudio.com/docs/devcontainers/containers).
## Pros of Devcontainer
Unified Development Environment: By using devcontainers, you ensure that all developers work in the same environment, reducing the occurrence of "it works on my machine" issues.
Quick Start: New developers can set up their development environment in a few simple steps, without spending a lot of time on environment configuration.
Isolation: Devcontainers isolate your project from your host operating system, reducing the chance of OS updates or other application installations impacting the development environment.
## Cons of Devcontainer
Learning Curve: For developers unfamiliar with Docker and VS Code, using devcontainers may be somewhat complex.
Performance Impact: While usually minimal, programs running inside a devcontainer may be slightly slower than those running directly on the host.
## Troubleshooting
If you see an error message like the one below when you open this project in Codespaces:
![Alt text](troubleshooting.png)
A simple workaround is to change the `/signin` endpoint to another one, log in with your GitHub account, close the tab, and then change it back to the `/signin` endpoint. Everything will then work.
The reason is that the `signin` endpoint is not allowed in Codespaces; details can be found [here](https://github.com/orgs/community/discussions/5204)

52 .devcontainer/devcontainer.json Normal file

@ -0,0 +1,52 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/anaconda
{
"name": "Python 3.10",
"build": {
"context": "..",
"dockerfile": "Dockerfile"
},
"features": {
"ghcr.io/devcontainers/features/node:1": {
"nodeGypDependencies": true,
"version": "lts"
},
"ghcr.io/devcontainers-contrib/features/npm-package:1": {
"package": "typescript",
"version": "latest"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {
"moby": true,
"azureDnsAutoDetection": true,
"installDockerBuildx": true,
"version": "latest",
"dockerDashComposeVersion": "v2"
}
},
"customizations": {
"vscode": {
"extensions": [
"ms-python.pylint",
"GitHub.copilot",
"ms-python.python"
]
}
},
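// Install the API's Python dependencies on every container start, and the web app's npm dependencies once after creation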
"postStartCommand": "cd api && pip install -r requirements.txt",
"postCreateCommand": "cd web && npm install"
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "python --version",
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}

3 .devcontainer/noop.txt Normal file

@ -0,0 +1,3 @@
This file is copied into the container along with environment.yml* from the parent
folder. It is included to prevent the Dockerfile COPY instruction from
failing if no environment.yml is found.

.devcontainer/troubleshooting.png Binary file not shown (added; 14 KiB)

49 .github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file

@ -0,0 +1,49 @@
name: "🕷️ Bug report"
description: Report errors or unexpected behavior
labels:
- bug
body:
- type: markdown
attributes:
value: Please make sure to [search for existing issues](https://github.com/langgenius/dify/issues) before filing a new one!
- type: input
attributes:
label: Dify version
placeholder: 0.3.21
description: See about section in Dify console
validations:
required: true
- type: dropdown
attributes:
label: Cloud or Self Hosted
description: How / Where was Dify installed from?
multiple: true
options:
- Cloud
- Self Hosted
- Other (please specify in "Steps to Reproduce")
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: We highly suggest including screenshots and a bug report log.
placeholder: Having detailed steps helps us reproduce the bug.
validations:
required: true
- type: textarea
attributes:
label: ✔️ Expected Behavior
placeholder: What were you expecting?
validations:
required: false
- type: textarea
attributes:
label: ❌ Actual Behavior
placeholder: What happened instead?
validations:
required: false

8 .github/ISSUE_TEMPLATE/config.yml vendored Normal file

@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: "\U0001F4DA Dify user documentation"
url: https://docs.dify.ai/getting-started/readme
about: Documentation for users of Dify
- name: "\U0001F4DA Dify dev documentation"
url: https://docs.dify.ai/getting-started/install-self-hosted
about: Documentation for people interested in developing and contributing to Dify


@ -0,0 +1,11 @@
name: "📚 Documentation Issue"
description: Report issues in our documentation
labels:
- documentation
body:
- type: textarea
attributes:
label: Provide a description of requested docs changes
placeholder: Briefly describe which document needs to be corrected and why.
validations:
required: true


@ -0,0 +1,26 @@
name: "⭐ Feature or enhancement request"
description: Propose something new.
labels:
- enhancement
body:
- type: textarea
attributes:
label: Description of the new feature / enhancement
placeholder: What is the expected behavior of the proposed feature?
validations:
required: true
- type: textarea
attributes:
label: Scenario when this would be used?
placeholder: In what scenario would this be used? Why is it important to your workflow as a Dify user?
validations:
required: true
- type: textarea
attributes:
label: Supporting information
placeholder: "Having additional evidence, data, tweets, blog posts, research, ... anything is extremely helpful. This information provides context to the scenario that may otherwise be lost."
validations:
required: false
- type: markdown
attributes:
value: Please limit one request per issue.

11 .github/ISSUE_TEMPLATE/help_wanted.yml vendored Normal file

@ -0,0 +1,11 @@
name: "🤝 Help Wanted"
description: "Request help from the community"
labels:
- help-wanted
body:
- type: textarea
attributes:
label: Provide a description of the help you need
placeholder: Briefly describe what you need help with.
validations:
required: true


@ -0,0 +1,46 @@
name: "🌐 Localization/Translation issue"
description: Report incorrect translations.
labels:
- translation
body:
- type: markdown
attributes:
value: Please make sure to [search for existing issues](https://github.com/langgenius/dify/issues) before filing a new one!
- type: input
attributes:
label: Dify version
placeholder: 0.3.21
description: See the About section in the Dify console
validations:
required: true
- type: input
attributes:
label: Utility with translation issue
placeholder: Some area
description: Please enter the utility that has the translation issue
validations:
required: true
- type: input
attributes:
label: 🌐 Language affected
placeholder: "German"
validations:
required: true
- type: textarea
attributes:
label: ❌ Actual phrase(s)
placeholder: What is there? Please include a screenshot as that is extremely helpful.
validations:
required: true
- type: textarea
attributes:
label: ✔️ Expected phrase(s)
placeholder: What was expected?
validations:
required: true
- type: textarea
attributes:
label: Why is the current translation wrong?
placeholder: Why do you feel this is incorrect?
validations:
required: true


@ -1,32 +0,0 @@
---
name: "\U0001F41B Bug report"
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
<!--
Please provide a clear and concise description of what the bug is. Include
screenshots if needed. Please test using the latest version of the relevant
Dify packages to make sure your issue has not already been fixed.
-->
Dify version: Cloud | Self Host
## Steps To Reproduce
<!--
Your bug will get fixed much faster if we can run your code and it doesn't
have dependencies other than Dify. Issues without reproduction steps or
code examples may be immediately closed as not actionable.
-->
1.
2.
## The current behavior
## The expected behavior


@ -1,20 +0,0 @@
---
name: "\U0001F680 Feature request"
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@ -1,10 +0,0 @@
---
name: "\U0001F914 Questions and Help"
about: Ask a usage or consultation question
title: ''
labels: ''
assignees: ''
---

38
.github/workflows/api-unit-tests.yml vendored Normal file

@ -0,0 +1,38 @@
name: Run Pytest
on:
pull_request:
branches:
- main
push:
branches:
- deploy/dev
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.10'
- name: Cache pip dependencies
uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('api/requirements.txt') }}
restore-keys: ${{ runner.os }}-pip-
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pytest
pip install -r api/requirements.txt
- name: Run pytest
run: pytest api/tests/unit_tests


@ -42,12 +42,14 @@ jobs:
uses: docker/build-push-action@v4
with:
context: "{{defaultContext}}:api"
platforms: linux/amd64,linux/arm64
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'


@ -42,12 +42,14 @@ jobs:
uses: docker/build-push-action@v4
with:
context: "{{defaultContext}}:web"
platforms: linux/amd64,linux/arm64
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'


@ -19,7 +19,9 @@ def check_file_for_chinese_comments(file_path):
def main():
has_chinese = False
excluded_files = ["model_template.py", 'stopwords.py', 'commands.py', 'indexing_runner.py']
excluded_files = ["model_template.py", 'stopwords.py', 'commands.py',
'indexing_runner.py', 'web_reader_tool.py', 'spark_provider.py',
'prompts.py']
for root, _, files in os.walk("."):
for file in files:

30
.github/workflows/stale.yml vendored Normal file

@ -0,0 +1,30 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
- cron: '0 3 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: 30
days-before-issue-close: 3
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: "Closed because it has been inactive; if you have any questions, you can reopen it."
stale-pr-message: "Closed because it has been inactive; if you have any questions, you can reopen it."
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement'

7
.gitignore vendored

@ -109,6 +109,7 @@ venv/
ENV/
env.bak/
venv.bak/
.conda/
# Spyder project settings
.spyderproject
@ -130,7 +131,7 @@ dmypy.json
.idea/
.DS_Store
.vscode
web/.vscode/settings.json
# Intellij IDEA Files
.idea/
@ -143,7 +144,11 @@ docker/volumes/app/storage/*
docker/volumes/db/data/*
docker/volumes/redis/data/*
docker/volumes/weaviate/*
docker/volumes/qdrant/*
sdks/python-client/build
sdks/python-client/dist
sdks/python-client/dify_client.egg-info
.vscode/*
!.vscode/launch.json

27
.vscode/launch.json vendored Normal file

@ -0,0 +1,27 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python: Flask",
"type": "python",
"request": "launch",
"module": "flask",
"env": {
"FLASK_APP": "api/app.py",
"FLASK_DEBUG": "1",
"GEVENT_SUPPORT": "True"
},
"args": [
"run",
"--host=0.0.0.0",
"--port=5001",
"--debug"
],
"jinja": true,
"justMyCode": true
}
]
}


@ -53,4 +53,9 @@ Did you have an issue, like a merge conflict, or don't know how to open a pull r
## Community channels
Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/AhzKf7dNgk). We are here to help!
Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/j3XRWSPBf7). We are here to help!
### i18n (Internationalization) Support
We are looking for contributors to help with translations in other languages. If you are interested in helping, please join the [Discord Community Server](https://discord.gg/AhzKf7dNgk) and let us know.
Also check out the [Frontend i18n README](web/i18n/README_EN.md) for more information.


@ -16,15 +16,15 @@
## Local development
To set up a working development environment, just fork the project's git repository, install the backend and frontend dependencies with the appropriate package managers, then create and run the docker-compose stack
To set up a working development environment, just fork the project's git repository, install the backend and frontend dependencies with the appropriate package managers, then create and run docker-compose.
### Fork the repository
You need to fork the [repository](https://github.com/langgenius/dify).
You need to fork the [Git repository](https://github.com/langgenius/dify).
### Clone the repository
Clone the repository you forked on GitHub:
Clone your forked repository from GitHub:
```
git clone git@github.com:<github_username>/dify.git
@ -51,3 +51,7 @@ git clone git@github.com:<github_username>/dify.git
## Community channels
Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/AhzKf7dNgk); we are here to help!
### i18n (Internationalization) support
To contribute translations, please see the [Frontend i18n README](web/i18n/README_CN.md).


@ -52,4 +52,4 @@ git clone git@github.com:<github_username>/dify.git
## Community channels
Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/AhzKf7dNgk). We are here to help!
Stuck somewhere? Have any questions? Join the [Discord Community Server](https://discord.gg/j3XRWSPBf7). We are here to help!

36
LICENSE

@ -1,26 +1,26 @@
# Dify Open Source License
The Dify project uses a combination of the Apache License 2.0, MIT License, and an additional agreement to protect against direct competition with Dify Cloud services.
The Dify project is licensed under the Apache License 2.0, with the following additional conditions:
As a contributor, you should agree that your contributed code:
a. Might be subject to a more permissive open source license in the future.
1. Dify is permitted to be used for commercialization, such as using Dify as a "backend-as-a-service" for your other applications, or delivering it to enterprises as an application development platform. However, when the following conditions are met, you must contact the producer to obtain a commercial license:
a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify.AI source code to operate a multi-tenant SaaS service that is similar to the Dify.AI service edition.
b. LOGO and copyright information: In the process of using Dify, you may not remove or modify the LOGO or copyright information in the Dify console.
Please contact business@dify.ai by email to inquire about licensing matters.
2. As a contributor, you should agree that your contributed code:
a. The producer can adjust the open-source agreement to be more strict or relaxed.
b. Can be used for commercial purposes, such as Dify's cloud business.
The following components are open source under the MIT license, allowing you to build and develop applications based on them:
- WebApp elements, e.g., web/app/components/share
- Derived WebApp Template projects
The remaining parts of the project are open source under the Apache License 2.0.
With the Apache License 2.0, MIT License, and this supplementary agreement, anyone can freely use, modify, and distribute Dify, provided that:
- If you use Dify solely as a backend service for other applications, no authorization is needed for commercial or closed source purposes.
- If you wish to use Dify for commercial and closed source SaaS services similar to Dify Cloud, please contact us for authorization.
Apart from this, all other rights and restrictions follow the Apache License 2.0. If you need more detailed information, you can refer to the full version of Apache License 2.0.
The interactive design of this product is protected by appearance patent.
© 2023 LangGenius, Inc.
----------
Licensed under the Apache License, Version 2.0 (the "License");
@ -34,13 +34,3 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -2,13 +2,11 @@
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_CN.md">简体中文</a> |
<a href="./README_JA.md">日本語</a>
<a href="./README_JA.md">日本語</a> |
<a href="./README_ES.md">Español</a>
</p>
[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>
#### [Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Deployment Docs](https://docs.dify.ai/getting-started/install-self-hosted) • [FAQ](https://docs.dify.ai/getting-started/faq) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers out-of-the-box, ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugins and datasets integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement.
@ -17,11 +15,48 @@ Applications created with Dify include:
- Out-of-the-box web sites supporting form mode and chat conversation mode
- A single API encompassing plugin capabilities, context enhancement, and more, saving you backend coding effort
- Visual data analysis, log review, and annotation for applications
Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs, currently supported:
- GPT 3 (text-davinci-003)
- GPT 3.5 Turbo(ChatGPT)
- GPT-4
https://github.com/langgenius/dify/assets/100913391/f6e658d5-31b3-4c16-a0af-9e191da4d0f6
## Highlighted Features
**1. LLMs support:** Choose capabilities based on different models when building your Dify AI apps. Dify is compatible with Langchain, meaning it will support various LLMs. Currently supported:
- [x] **OpenAI**: GPT4, GPT3.5-turbo, GPT3.5-turbo-16k, text-davinci-003
- [x] **Azure OpenAI Service**
- [x] **Anthropic**: Claude2, Claude-instant
- [x] **Replicate**
- [x] **Hugging Face Hub**
- [x] **ChatGLM**
- [x] **Llama2**
- [x] **MiniMax**
- [x] **Spark**
- [x] **Wenxin**
- [x] **Tongyi**
We provide the following free resources for registered Dify cloud users (sign up at [dify.ai](https://dify.ai)):
* 600,000 free Claude model tokens to build Claude-powered apps
* 200 free OpenAI queries to build OpenAI-based apps
**2. Visual orchestration:** Build an AI app in minutes by writing and debugging prompts visually.
**3. Text embedding:** Fully automated text preprocessing embeds your data as context without complex concepts. Supports PDF, TXT, and syncing data from Notion, webpages, APIs.
**4. API-based:** Backend-as-a-service. Access web apps directly or integrate via APIs without complex backend setup.
**5. Plugins:** Dify "Smart Chat" now supports first-party plugins such as web browsing, Google Search, and Wikipedia, enabling online lookup, web-content analysis, and conversational explanation of the AI's reasoning process.
**6. Team workspaces:** Team members can join workspaces to collaboratively edit, manage, and use team AI apps.
**7. Data labeling and improvement:** Visually inspect AI app logs and improve data via labeling. Observe the AI's reasoning process to continuously enhance performance. (Coming soon)
## Use cases
* [Create an AI ChatBot with Business Data in Minutes.](https://docs.dify.ai/use-cases/create-an-ai-chatbot-with-business-data-in-minutes)
* [How to Build a Notion AI Assistant Based on Your Own Notes?](https://docs.dify.ai/use-cases/build-an-notion-ai-assistant)
* [Create a Midjourney Prompt Bot Without Code in Just a Few Minutes.](https://docs.dify.ai/use-cases/create-a-midjoureny-prompt-bot-with-dify)
## Use Cloud Services
@ -33,7 +68,7 @@ Visit [Dify.ai](https://dify.ai)
Before installing Dify, make sure your machine meets the following minimum system requirements:
- CPU >= 1 Core
- CPU >= 2 Core
- RAM >= 4GB
### Quick Start
@ -42,11 +77,16 @@ The easiest way to start the Dify server is to run our [docker-compose.yml](dock
```bash
cd docker
docker-compose up -d
docker compose up -d
```
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.
### Helm Chart
A big thanks to @BorisPolonsky for providing us with a [Helm Chart](https://helm.sh/) version, which allows Dify to be deployed on Kubernetes.
You can go to https://github.com/BorisPolonsky/dify-helm for deployment information.
### Configuration
If you need to customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and manually set the environment configuration. After making the changes, please run `docker compose up -d` again.
@ -59,8 +99,6 @@ Features under development:
We will support more datasets, including text, webpages, and even Notion content. Users can build AI applications based on their own data sources.
- **Plugins**, introducing ChatGPT Plugin-standard plugins for applications, or using Dify-produced plugins
We will release plugins complying with ChatGPT standard, or Dify's own plugins to enable more capabilities in applications.
- **Open-source models**, e.g. adopting Llama as a model provider or for further fine-tuning
We will work with excellent open-source models like Llama, by providing them as model options in our platform, or using them for further fine-tuning.
## Q&A
@ -85,6 +123,32 @@ A: English and Chinese are currently supported, and you can contribute language
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
## Contributing
We welcome you to contribute to Dify and help make it better. Contributions can take many forms: submitting code, issues, or new ideas, or sharing the interesting and useful AI applications you have built on Dify. We also welcome you to share Dify at events and conferences and on social media.
### Submit a Pull Request
To ensure proper review, all code contributions, including those from contributors with direct commit access, must be submitted as pull requests and approved by core developers before branches are merged.
We welcome PRs from everyone! If you're willing to help out, you can learn more about how to contribute code to the project in the [Contribution Guide](CONTRIBUTING.md).
### Submit issues or ideas
You can submit your issues or ideas by opening issues in the Dify repository. If you encounter an issue, please describe the steps you took in as much detail as possible so we can reproduce it. If you have any new ideas for our product, we welcome your feedback as well; please share as much insight as you can so we can gather more feedback and discuss further in the community.
### Share your applications
We encourage all community members to share their AI applications built on Dify, which can be applied to different scenarios or different users. This will provide powerful inspiration for people who want to create AI capabilities! You can share your experience by [submitting an issue in the Dify-user-case repository](https://github.com/langgenius/dify-user-case/issues).
### Share Dify with others
We encourage community contributors to actively demonstrate different aspects of using Dify. You can give a talk or share any Dify feature at meetups and conferences, in blogs, or on social media; we believe your unique perspective will be a great help to others! Mention @Dify.AI on Twitter and/or reach out on [Discord](https://discord.gg/FngNHpbcY7) so we can give pointers and tips and help you spread the word by promoting your content on the different Dify communication channels.
### Help others
You can also help people on Discord, in GitHub issues, or on other social platforms by guiding others through problems they encounter and sharing usage experience. This is also a great contribution! If you want to become a maintainer of the Dify community, please contact the official team via [Discord](https://discord.gg/FngNHpbcY7) or email us at support@dify.ai.
## Contact Us
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:
@ -95,12 +159,6 @@ If you have any questions, suggestions, or partnership inquiries, feel free to c
We're eager to assist you and together create more fun and useful AI applications!
## Contributing
To ensure proper review, all code contributions - including those from contributors with direct commit access - must be submitted via pull requests and approved by the core development team prior to being merged.
We welcome all pull requests! If you'd like to help, check out the [Contribution Guide](CONTRIBUTING.md) for more information on how to get started.
## Security
To protect your privacy, please avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will provide you with a more detailed answer.
@ -110,7 +168,6 @@ To protect your privacy, please avoid posting security issues on GitHub. Instead
This software uses the following open-source software:
- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234.
For more information, please refer to the official website or license text of the respective software.


@ -2,16 +2,14 @@
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_CN.md">简体中文</a> |
<a href="./README_JA.md">日本語</a>
<a href="./README_JA.md">日本語</a> |
<a href="./README_ES.md">Español</a>
</p>
[Website](https://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
#### [Website](https://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) · [Deployment Docs](https://docs.dify.ai/v/zh-hans/getting-started/install-self-hosted) · [FAQ](https://docs.dify.ai/v/zh-hans/getting-started/faq) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>
**Dify** is an easy-to-use LLMOps platform designed to let more people create sustainable, AI-native applications. Dify offers visual orchestration for many application types; apps work out of the box and can also serve as "Backend-as-a-Service" APIs.
**Dify** is an easy-to-use LLMOps platform that builds on the capabilities of different large language models to let more people easily create sustainable, AI-native applications. Dify offers visual orchestration for many application types; apps work out of the box and can also serve as "Backend-as-a-Service" APIs.
Applications created with Dify include:
@ -19,15 +17,45 @@
- A single API covering plugins, context enhancement, and more, saving you the work of writing backend code
- Visual data analysis, log review, and annotation for applications
Dify is compatible with Langchain, meaning we will gradually support multiple LLMs. Currently supported:
https://github.com/langgenius/dify/assets/100913391/f6e658d5-31b3-4c16-a0af-9e191da4d0f6
## Core capabilities
1. **Model support:** You can choose capabilities based on different models to build your AI apps on Dify. Dify is compatible with Langchain, meaning we will gradually support multiple LLMs. Currently supported model providers:
- [x] **OpenAI**: GPT4, GPT3.5-turbo, GPT3.5-turbo-16k, text-davinci-003
- [x] **Azure OpenAI Service**
- [x] **Anthropic**: Claude2, Claude-instant
- [x] **Replicate**
- [x] **Hugging Face Hub**
- [x] **ChatGLM**
- [x] **Llama2**
- [x] **MiniMax**
- [x] **iFLYTEK Spark**
- [x] **Wenxin**
- [x] **Tongyi**
We provide the following free resources to all registered cloud-version users (sign in at [dify.ai](https://cloud.dify.ai)):
* 600,000 Claude model tokens for building Claude-powered AI apps
* 200 OpenAI model calls for building OpenAI-based AI apps
* 3,000,000 iFLYTEK Spark tokens for building Spark-powered AI apps
* 1,000,000 MiniMax tokens for building MiniMax-powered AI apps
2. **Visual prompt orchestration:** Write and debug prompts in the UI and publish an AI app in just a few minutes.
3. **Text embedding (datasets):** Fully automated text preprocessing uses your data as context, with no need to understand obscure concepts or technical processing. Supports PDF, txt, and other file formats, plus syncing data from Notion, webpages, and APIs.
4. **API-based development:** Backend-as-a-service. Access the web app directly or integrate into your own application via APIs, without worrying about complex backend architecture or deployment.
5. **Plugin capabilities:** The Dify "Smart Chat" platform already supports first-party plugins such as web browsing, Google Search, and Wikipedia, enabling online search, web-content analysis, and display of the AI's reasoning process within a conversation.
6. **Team workspaces:** Team members can join a workspace to edit, manage, and use the team's AI apps.
7. **Data labeling and improvement:** Visually review AI app logs and improve the data through labeling; observe the AI's reasoning process to continuously improve its performance. (Coming soon)
-----------------------------
## Use cases
* [Create an AI customer-service chatbot for your website with business data in minutes](https://docs.dify.ai/v/zh-hans/use-cases/create-an-ai-chatbot-with-business-data-in-minutes)
* [Build a Notion AI assistant](https://docs.dify.ai/v/zh-hans/use-cases/build-an-notion-ai-assistant)
* [Create a Midjourney prompt bot](https://docs.dify.ai/v/zh-hans/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code)
- GPT 3 (text-davinci-003)
- GPT 3.5 Turbo(ChatGPT)
- GPT-4
## Use cloud services
Visit [Dify.ai](https://cloud.dify.ai)
Visit [Dify.ai](https://cloud.dify.ai) to use the cloud version.
## Install the Community Edition
@ -35,7 +63,7 @@ Dify is compatible with Langchain, meaning we will gradually support multiple LLMs. Currently
Before installing Dify, make sure your machine meets the following minimum system requirements:
- CPU >= 1 Core
- CPU >= 2 Core
- RAM >= 4GB
### Quick start
@ -44,11 +72,16 @@ Dify is compatible with Langchain, meaning we will gradually support multiple LLMs. Currently
```bash
cd docker
docker-compose up -d
docker compose up -d
```
Once running, open [http://localhost/install](http://localhost/install) in your browser to enter the Dify console and begin the initialization process.
### Helm Chart
A big thanks to @BorisPolonsky for providing a [Helm Chart](https://helm.sh/) version that lets Dify be deployed on Kubernetes.
See https://github.com/BorisPolonsky/dify-helm for deployment information.
### Configuration
To customize the configuration, please refer to the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and set the environment configuration manually. After making changes, run `docker compose up -d` again.
@ -57,12 +90,8 @@ docker-compose up -d
Features we are working on:
- **Datasets**, supporting more datasets, e.g. syncing content from Notion or webpages
We will support more datasets, including text and webpages, and even Notion content. Users can build AI applications on top of their own data sources
- **Plugins**, introducing ChatGPT-standard plugins for applications, or using Dify-produced plugins
We will release plugins complying with the ChatGPT standard, or Dify's own plugins, to enable more capabilities in applications.
- **Open-source models**, e.g. adopting Llama as a model provider or for further fine-tuning
We will work with excellent open-source models such as Llama, by providing them as model options on our platform or using them for further fine-tuning.
- **Datasets**, supporting more datasets and syncing content via webpages and APIs. Users can build AI applications on top of their own data sources.
- **Plugins**, releasing ChatGPT-standard plugins, supporting more Dify-native plugins, and supporting user-defined plugin capabilities to enable more features in applications, such as goal-oriented task decomposition and reasoning
## Q&A
@ -76,16 +105,39 @@ A: A valuable application consists of Prompt Engineering, context enhancement, and Fine-tune
**Q: What do I need to prepare if I want to create my own application?**
A: We assume you already have an OpenAI API Key; if not, please go register one. It's great if you already have some content that can serve as training context!
A: We assume you already have an API Key for a model such as OpenAI or Claude; if not, please go register one. It's great if you already have some content that can serve as training context!
**Q: What interface languages are available?**
A: English and Chinese are currently supported, and you can contribute language packs to us.
A: English and Chinese are supported; you can contribute language packs and help maintain them.
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
## Contributing
We welcome you to contribute to Dify and help make it better. Contributions can take many forms: submitting code, issues, or new ideas, or sharing the interesting and useful AI applications you have built on Dify. We also welcome you to share Dify at events, workshops, and on social media.
### Contribute code
To ensure proper review, all code contributions - including those from contributors with direct commit access - must be submitted as pull requests and approved by core developers before branches are merged.
We welcome pull requests from everyone! If you are willing to help, you can learn more about contributing code to the project in the [Contribution Guide](CONTRIBUTING_CN.md).
### Submit issues or ideas
You can submit your issues or ideas by opening issues in the Dify repository. If you run into a problem, please describe the steps you took in as much detail as possible so we can find it more easily. If you have new ideas for our product, we welcome your feedback too; please share as much insight as you can so we can gather more feedback and discuss further in the community.
### Share your applications
We encourage all community members to share the AI applications they have built on Dify, whether for different scenarios or different users; this will provide powerful inspiration for people who want to create with AI capabilities! You can share your use case by [submitting an issue in the Dify-user-case repository](https://github.com/langgenius/dify-user-case).
### Share Dify with others
We encourage community contributors to actively show the different ways they use Dify. You can talk about or share any Dify feature at offline workshops, in blogs, or on social media; your unique perspective will be a great help to others! If you need any guidance, contact us at support@dify.ai, mention @Dify.AI on Twitter, or chat in the [Discord community](https://discord.gg/FngNHpbcY7) to help spread the word.
### Help others
You can also help people on Discord, in GitHub issues, or on other social platforms by guiding others through problems they encounter and sharing usage experience. This too is a great contribution! If you want to become a maintainer of the Dify community, please contact the official team via the [Discord community](https://discord.gg/FngNHpbcY7) or email us at support@dify.ai.
## Contact us
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:
@ -94,12 +146,6 @@ A: English and Chinese are supported, and you can contribute language packs.
- Join the discussion in our [Discord community](https://discord.gg/FngNHpbcY7)
- Email hello@dify.ai
## Contribute code
To ensure proper review, all code contributions - including those from contributors with direct commit access - must be submitted as pull requests and approved by core developers before branches are merged.
We welcome pull requests from everyone! If you are willing to help, you can learn more about contributing to the project in the [Contribution Guide](CONTRIBUTING_CN.md).
## Security
To protect your privacy, please avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will give you a more detailed answer.
@ -109,7 +155,6 @@ A: English and Chinese are supported, and you can contribute language packs.
This software uses the following open-source software:
- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234.
For more information, please refer to the official website or license text of the respective software.

123
README_ES.md Normal file

@ -0,0 +1,123 @@
![](./images/describe-en.png)
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_CN.md">简体中文</a> |
<a href="./README_JA.md">日本語</a> |
<a href="./README_ES.md">Español</a>
</p>
[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-based applications. With visual orchestration for various application types, Dify offers ready-to-use applications that can also work as Backend-as-a-Service APIs. Unify your development process with one API for plugin and dataset integration, and streamline your operations with a single interface for prompt engineering, visual analytics, and continuous improvement.
Applications created with Dify include:
- Ready-to-use websites supporting form mode and chat conversation mode.
- A single API covering plugin capabilities, context enhancement, and more, saving you backend programming effort.
- Visual data analysis, log review, and annotation for applications.
Dify is compatible with Langchain, which means we will gradually support multiple LLMs. Currently supported:
- GPT 3 (text-davinci-003)
- GPT 3.5 Turbo (ChatGPT)
- GPT-4
## Use cloud services
Visit [Dify.ai](https://dify.ai)
## Install the Community Edition
### System requirements
Before installing Dify, make sure your machine meets the following minimum system requirements:
- CPU >= 2 Core
- RAM >= 4GB
### Quick start
The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
```bash
cd docker
docker compose up -d
```
After running it, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and begin the initialization process.
### Helm Chart
A big thanks to @BorisPolonsky for providing a [Helm Chart](https://helm.sh/) version that allows Dify to be deployed on Kubernetes.
See https://github.com/BorisPolonsky/dify-helm for deployment information.
### Configuration
If you need to customize the configuration, check the comments in our [docker-compose.yml](docker/docker-compose.yaml) file and set the environment configuration manually. After making changes, run `docker compose up -d` again.
## Roadmap
Features under development:
- **Datasets**, supporting more datasets, e.g. syncing content from Notion or webpages.
We will support more datasets, including text, webpages, and even Notion content. Users will be able to build AI applications based on their own data sources
- **Plugins**, introducing ChatGPT-standard plugins for applications, or using Dify-produced plugins.
We will release plugins complying with the ChatGPT standard, or Dify's own plugins, to enable more capabilities in applications.
- **Open-source models**, e.g. adopting Llama as a model provider or for further fine-tuning.
We will work with excellent open-source models like Llama, providing them as model options on our platform or using them for further fine-tuning.
## Q&A
**Q: What can I do with Dify?**
A: Dify is a simple yet powerful LLM development and operations tool. You can use it to build commercial-grade applications and personal assistants. If you want to develop your own applications, LangDifyGenius can save you backend work by integrating with OpenAI and offering visual operations capabilities, allowing you to continuously improve and train your GPT model.
**Q: How do I use Dify to "train" my own model?**
A: A valuable application consists of prompt engineering, context enhancement, and fine-tuning. We have created a hybrid programming approach that combines prompts with programming languages (similar to a template engine), making it easy to embed long text or capture subtitles from a user-submitted YouTube video, all of which is sent as context for the LLMs to process. We place great importance on application operability: data generated by users while using the application is available for analysis, annotation, and continuous training. Without the right tools, these steps can be very time-consuming.
**Q: What do I need to prepare if I want to create my own application?**
A: We assume you already have an OpenAI API key; if not, please register one. If you already have content that can serve as training context, great!
**Q: What interface languages are available?**
A: English and Chinese are currently supported, and you can contribute language packs.
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
## Contact us
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:
- File an issue or pull request in our GitHub repository.
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) community.
- Send an email to hello@dify.ai.
We're eager to help you and together create more fun and useful AI applications!
## Contributing
To ensure proper review, all code contributions, including those from contributors with direct commit access, must be submitted as pull requests and approved by the core development team before merging.
We welcome all pull requests! If you'd like to help, check out the [Contribution Guide](CONTRIBUTING.md) for more information on how to get started.
## Security
To protect your privacy, avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will give you a more detailed answer.
## Citation
This software uses the following open-source software:
- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
For more information, see the official website or license text of the respective software.
## License
This repository is available under the [Dify Open Source License](LICENSE).


@ -2,14 +2,12 @@
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_CN.md">简体中文</a> |
<a href="./README_JA.md">日本語</a>
<a href="./README_JA.md">日本語</a> |
<a href="./README_ES.md">Español</a>
</p>
[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)
Vote for us on Product Hunt ↓
<a href="https://www.producthunt.com/posts/dify-ai"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?sanitize=true&post_id=dify-ai&theme=light" alt="Product Hunt Badge" width="250" height="54"></a>
**Dify** is an easy-to-use LLMOps platform designed to let more people create sustainable, AI-native applications. With visual orchestration for various application types, Dify provides ready-to-use applications that can also work as Backend-as-a-Service APIs. It unifies your development process with one API for plugin and dataset integration, and streamlines operations with a single interface for prompt engineering, visual analytics, and continuous improvement.
@ -43,11 +41,16 @@ The easiest way to start the Dify server is to run [docker-compose.yml](
```bash
cd docker
docker-compose up -d
docker compose up -d
```
Once running, open [http://localhost/install](http://localhost/install) in your browser to begin the initialization process.
### Helm Chart
Many thanks to @BorisPolonsky, who provided a [Helm Chart](https://helm.sh/) version for deploying Dify on Kubernetes.
See https://github.com/BorisPolonsky/dify-helm for deployment information.
### Configuration
If customization is needed, refer to the comments in the [docker-compose.yml](docker/docker-compose.yaml) file and set the environment configuration manually. After making changes, run `docker compose up -d` again.
@ -111,7 +114,6 @@ A: Currently English and Chinese are supported, and you can contribute language packs.
This software uses the following open-source software:
- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234.
For details, please refer to each software's official website or license text.


@ -1,2 +1,11 @@
.env
storage/privkeys/*
*.env.*
storage/privkeys/*
# Logs
logs
*.log*
# jetbrains
.idea


@ -8,13 +8,15 @@ EDITION=SELF_HOSTED
SECRET_KEY=
# Console API base URL
CONSOLE_URL=http://127.0.0.1:5001
CONSOLE_API_URL=http://127.0.0.1:5001
CONSOLE_WEB_URL=http://127.0.0.1:3000
# Service API base URL
API_URL=http://127.0.0.1:5001
SERVICE_API_URL=http://127.0.0.1:5001
# Web APP base URL
APP_URL=http://127.0.0.1:3000
APP_API_URL=http://127.0.0.1:5001
APP_WEB_URL=http://127.0.0.1:3000
# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
@ -22,6 +24,7 @@ CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
# redis configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=difyai123456
REDIS_DB=0
@ -47,24 +50,6 @@ S3_REGION=your-region
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
# Cookie configuration
COOKIE_HTTPONLY=true
COOKIE_SAMESITE=None
COOKIE_SECURE=true
# Session configuration
SESSION_PERMANENT=true
SESSION_USE_SIGNER=true
## support redis, sqlalchemy
SESSION_TYPE=redis
# session redis configuration
SESSION_REDIS_HOST=localhost
SESSION_REDIS_PORT=6379
SESSION_REDIS_PASSWORD=difyai123456
SESSION_REDIS_DB=2
# Vector database configuration, support: weaviate, qdrant
VECTOR_STORE=weaviate
@ -72,10 +57,16 @@ VECTOR_STORE=weaviate
WEAVIATE_ENDPOINT=http://localhost:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_GRPC_ENABLED=false
WEAVIATE_BATCH_SIZE=100
# Qdrant configuration, use `path:` prefix for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=path:storage/qdrant
QDRANT_API_KEY=your-qdrant-api-key
# Qdrant configuration, use `http://localhost:6333` for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=http://localhost:6333
QDRANT_API_KEY=difyai123456
# Mail configuration, support: resend
MAIL_TYPE=
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@dify.ai>
RESEND_API_KEY=
# Sentry configuration
SENTRY_DSN=
@ -83,3 +74,37 @@ SENTRY_DSN=
# DEBUG
DEBUG=false
SQLALCHEMY_ECHO=false
# Notion import configuration, support public and internal
NOTION_INTEGRATION_TYPE=public
NOTION_CLIENT_SECRET=your-client-secret
NOTION_CLIENT_ID=your-client-id
NOTION_INTERNAL_SECRET=your-internal-secret
# Hosted Model Credentials
HOSTED_OPENAI_ENABLED=false
HOSTED_OPENAI_API_KEY=
HOSTED_OPENAI_API_BASE=
HOSTED_OPENAI_API_ORGANIZATION=
HOSTED_OPENAI_QUOTA_LIMIT=200
HOSTED_OPENAI_PAID_ENABLED=false
HOSTED_OPENAI_PAID_STRIPE_PRICE_ID=
HOSTED_OPENAI_PAID_INCREASE_QUOTA=1
HOSTED_AZURE_OPENAI_ENABLED=false
HOSTED_AZURE_OPENAI_API_KEY=
HOSTED_AZURE_OPENAI_API_BASE=
HOSTED_AZURE_OPENAI_QUOTA_LIMIT=200
HOSTED_ANTHROPIC_ENABLED=false
HOSTED_ANTHROPIC_API_BASE=
HOSTED_ANTHROPIC_API_KEY=
HOSTED_ANTHROPIC_QUOTA_LIMIT=600000
HOSTED_ANTHROPIC_PAID_ENABLED=false
HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID=
HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA=1000000
HOSTED_ANTHROPIC_PAID_MIN_QUANTITY=20
HOSTED_ANTHROPIC_PAID_MAX_QUANTITY=100
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=
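
As a quick sanity check of the Qdrant settings above, here is a minimal sketch (assuming the local-mode defaults from this file and the `qdrant-client` Python package; adjust the URL and key to your own values):

```python
# Verify the connection configured by QDRANT_URL / QDRANT_API_KEY above.
from qdrant_client import QdrantClient

client = QdrantClient(
    url="http://localhost:6333",  # QDRANT_URL (local mode)
    api_key="difyai123456",       # QDRANT_API_KEY default from this file
)
print(client.get_collections())   # lists collections once Qdrant is reachable
```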


@ -1,28 +1,46 @@
FROM langgenius/base:1.0.0-bullseye-slim as langgenius-api
# packages install stage
FROM python:3.10-slim AS base
LABEL maintainer="takatost@gmail.com"
RUN apt-get update \
&& apt-get install -y --no-install-recommends gcc g++ python3-dev libc-dev libffi-dev
COPY requirements.txt /requirements.txt
RUN pip install --prefix=/pkg -r requirements.txt
# build stage
FROM python:3.10-slim AS builder
ENV FLASK_APP app.py
ENV EDITION SELF_HOSTED
ENV DEPLOY_ENV PRODUCTION
ENV CONSOLE_URL http://127.0.0.1:5001
ENV API_URL http://127.0.0.1:5001
ENV APP_URL http://127.0.0.1:5001
ENV CONSOLE_API_URL http://127.0.0.1:5001
ENV CONSOLE_WEB_URL http://127.0.0.1:3000
ENV SERVICE_API_URL http://127.0.0.1:5001
ENV APP_API_URL http://127.0.0.1:5001
ENV APP_WEB_URL http://127.0.0.1:3000
EXPOSE 5001
WORKDIR /app/api
COPY requirements.txt /app/api/requirements.txt
RUN pip install -r requirements.txt
RUN apt-get update \
&& apt-get install -y --no-install-recommends bash curl wget vim nodejs \
&& apt-get autoremove \
&& rm -rf /var/lib/apt/lists/*
COPY --from=base /pkg /usr/local
COPY . /app/api/
RUN python -c "from transformers import GPT2TokenizerFast; GPT2TokenizerFast.from_pretrained('gpt2')"
ENV TRANSFORMERS_OFFLINE true
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ARG COMMIT_SHA
ENV COMMIT_SHA ${COMMIT_SHA}
ENTRYPOINT ["/entrypoint.sh"]
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]


@ -8,7 +8,7 @@
```bash
cd ../docker
docker-compose -f docker-compose.middleware.yaml up -d
docker-compose -f docker-compose.middleware.yaml -p dify up -d
cd ../api
```
2. Copy `.env.example` to `.env`
@ -17,6 +17,11 @@
```bash
openssl rand -base64 42
```
3.5 If you use Anaconda, create a new environment and activate it
```bash
conda create --name dify python=3.10
conda activate dify
```
4. Install dependencies
```bash
pip install -r requirements.txt
@ -28,9 +33,32 @@
```bash
flask db upgrade
```
⚠️ If you encounter problems with jieba, for example
```
> flask db upgrade
Error: While importing 'app', an ImportError was raised:
```
Please run the following command instead.
```
pip install -r requirements.txt --upgrade --force-reinstall
```
6. Start backend:
```bash
flask run --host 0.0.0.0 --port=5001 --debug
```
7. Set up your application by visiting http://localhost:5001/console/api/setup or the other APIs...
8. If you need to debug local async processing, you can run `celery -A app.celery worker`, celery can do dataset importing and other async tasks.
8. If you need to debug local async processing, you can run `celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail`; celery handles dataset importing and other async tasks.
9. Start frontend
You can start the frontend by running `npm install && npm run dev` in the web/ folder, or you can use docker to start the frontend, for example:
```
docker run -it -d --platform linux/amd64 -p 3000:3000 -e EDITION=SELF_HOSTED -e CONSOLE_URL=http://127.0.0.1:5001 --name web-self-hosted langgenius/dify-web:latest
```
This will start the Dify frontend. Now you are all set; happy coding!
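
If you prefer to sanity-check the backend from a script rather than the browser, here is a minimal sketch (assuming the default local address used in the steps above and the `requests` package):

```python
# Smoke test for a local Dify API server; /console/api/setup is the
# endpoint referenced in step 7 above.
import requests

resp = requests.get("http://localhost:5001/console/api/setup")
print(resp.status_code, resp.text[:200])
```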


@ -1,5 +1,8 @@
# -*- coding:utf-8 -*-
import os
from werkzeug.exceptions import Unauthorized
if not os.environ.get("DEBUG") or os.environ.get("DEBUG").lower() != 'true':
from gevent import monkey
monkey.patch_all()
@ -8,25 +11,24 @@ import logging
import json
import threading
from flask import Flask, request, Response, session
import flask_login
from flask import Flask, request, Response
from flask_cors import CORS
from extensions import ext_session, ext_celery, ext_sentry, ext_redis, ext_login, ext_vector_store, ext_migrate, \
ext_database, ext_storage
from core.model_providers.providers import hosted
from extensions import ext_celery, ext_sentry, ext_redis, ext_login, ext_migrate, \
ext_database, ext_storage, ext_mail, ext_stripe
from extensions.ext_database import db
from extensions.ext_login import login_manager
# DO NOT REMOVE BELOW
from models import model, account, dataset, web, task
from models import model, account, dataset, web, task, source, tool
from events import event_handlers
# DO NOT REMOVE ABOVE
import core
from config import Config, CloudEditionConfig
from commands import register_commands
from models.account import TenantAccountJoin
from models.model import Account, EndUser, App
from services.account_service import AccountService
from libs.passport import PassportService
import warnings
warnings.simplefilter("ignore", ResourceWarning)
@ -66,7 +68,7 @@ def create_app(test_config=None) -> Flask:
register_blueprints(app)
register_commands(app)
core.init_app(app)
hosted.init_app(app)
return app
@ -77,59 +79,35 @@ def initialize_extensions(app):
ext_database.init_app(app)
ext_migrate.init(app, db)
ext_redis.init_app(app)
ext_vector_store.init_app(app)
ext_storage.init_app(app)
ext_celery.init_app(app)
ext_session.init_app(app)
ext_login.init_app(app)
ext_mail.init_app(app)
ext_sentry.init_app(app)
ext_stripe.init_app(app)
# Flask-Login configuration
@login_manager.user_loader
def load_user(user_id):
"""Load user based on the user_id."""
@login_manager.request_loader
def load_user_from_request(request_from_flask_login):
"""Load user based on the request."""
if request.blueprint == 'console':
# Check if the user_id contains a dot, indicating the old format
if '.' in user_id:
tenant_id, account_id = user_id.split('.')
else:
account_id = user_id
auth_header = request.headers.get('Authorization', '')
if ' ' not in auth_header:
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
auth_scheme, auth_token = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
if auth_scheme != 'bearer':
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
decoded = PassportService().verify(auth_token)
user_id = decoded.get('user_id')
account = db.session.query(Account).filter(Account.id == account_id).first()
if account:
workspace_id = session.get('workspace_id')
if workspace_id:
tenant_account_join = db.session.query(TenantAccountJoin).filter(
TenantAccountJoin.account_id == account.id,
TenantAccountJoin.tenant_id == workspace_id
).first()
if not tenant_account_join:
tenant_account_join = db.session.query(TenantAccountJoin).filter(
TenantAccountJoin.account_id == account.id).first()
if tenant_account_join:
account.current_tenant_id = tenant_account_join.tenant_id
session['workspace_id'] = account.current_tenant_id
else:
account.current_tenant_id = workspace_id
else:
tenant_account_join = db.session.query(TenantAccountJoin).filter(
TenantAccountJoin.account_id == account.id).first()
if tenant_account_join:
account.current_tenant_id = tenant_account_join.tenant_id
session['workspace_id'] = account.current_tenant_id
# Log in the user with the updated user_id
flask_login.login_user(account, remember=True)
return account
return AccountService.load_user(user_id)
else:
return None
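
For illustration, here is a minimal client-side sketch of what this request loader expects; the base URL and endpoint path are placeholders, not confirmed parts of the API:

```python
# Hypothetical console-API call matching the loader above: it requires an
# "Authorization: Bearer <token>" header whose token PassportService can verify.
import requests

API_BASE = "http://127.0.0.1:5001"   # assumed local dev address
TOKEN = "<console-jwt-from-login>"   # placeholder; issued when you log in

resp = requests.get(
    f"{API_BASE}/console/api/apps",  # hypothetical endpoint, for illustration only
    headers={"Authorization": f"Bearer {TOKEN}"},
)
print(resp.status_code)              # 401 Unauthorized if the token is invalid
```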
@login_manager.unauthorized_handler
def unauthorized_handler():
"""Handle unauthorized requests."""
@ -145,13 +123,17 @@ def register_blueprints(app):
from controllers.web import bp as web_bp
from controllers.console import bp as console_app_bp
CORS(service_api_bp,
allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH']
)
app.register_blueprint(service_api_bp)
CORS(web_bp,
resources={
r"/*": {"origins": app.config['WEB_API_CORS_ALLOW_ORIGINS']}},
supports_credentials=True,
allow_headers=['Content-Type', 'Authorization'],
allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'],
expose_headers=['X-Version', 'X-Env']
)
@ -182,6 +164,7 @@ if app.config['TESTING']:
@app.after_request
def after_request(response):
"""Add Version headers to the response."""
response.set_cookie('remember_token', '', expires=0)
response.headers.add('X-Version', app.config['CURRENT_VERSION'])
response.headers.add('X-Env', app.config['DEPLOY_ENV'])
return response
@ -218,5 +201,18 @@ def threads():
}
@app.route('/db-pool-stat')
def pool_stat():
engine = db.engine
return {
'pool_size': engine.pool.size(),
'checked_in_connections': engine.pool.checkedin(),
'checked_out_connections': engine.pool.checkedout(),
'overflow_connections': engine.pool.overflow(),
'connection_timeout': engine.pool.timeout(),
'recycle_time': db.engine.pool._recycle
}
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
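
A quick way to read the new pool-stat endpoint (local address assumed):

```python
# Fetch SQLAlchemy pool statistics from the /db-pool-stat route defined above.
import requests

stats = requests.get("http://127.0.0.1:5001/db-pool-stat").json()
print(stats["checked_out_connections"], "connections currently in use")
```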


@ -1,17 +1,37 @@
import datetime
import json
import math
import random
import string
import threading
import time
import uuid
import click
from tqdm import tqdm
from flask import current_app, Flask
from langchain.embeddings import OpenAIEmbeddings
from werkzeug.exceptions import NotFound
from core.embedding.cached_embedding import CacheEmbedding
from core.index.index import IndexBuilder
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.embedding.openai_embedding import OpenAIEmbedding
from core.model_providers.models.entity.model_params import ModelType
from core.model_providers.providers.hosted import hosted_model_providers
from core.model_providers.providers.openai_provider import OpenAIProvider
from libs.password import password_pattern, valid_password, hash_password
from libs.helper import email as email_validate
from extensions.ext_database import db
from models.account import InvitationCode
from models.model import Account
from libs.rsa import generate_key_pair
from models.account import InvitationCode, Tenant, TenantAccountJoin
from models.dataset import Dataset, DatasetQuery, Document, DatasetCollectionBinding
from models.model import Account, AppModelConfig, App
import secrets
import base64
from models.provider import Provider, ProviderType, ProviderQuotaType, ProviderModel
@click.command('reset-password', help='Reset the account password.')
@click.option('--email', prompt=True, help='The email address of the account whose password you need to reset')
@ -73,6 +93,32 @@ def reset_email(email, new_email, email_confirm):
click.echo(click.style('Congratulations!, email has been reset.', fg='green'))
@click.command('reset-encrypt-key-pair', help='Reset the asymmetric key pair of workspace for encrypt LLM credentials. '
'After the reset, all LLM credentials will become invalid, '
'requiring re-entry.'
'Only support SELF_HOSTED mode.')
@click.confirmation_option(prompt=click.style('Are you sure you want to reset encrypt key pair?'
' this operation cannot be rolled back!', fg='red'))
def reset_encrypt_key_pair():
if current_app.config['EDITION'] != 'SELF_HOSTED':
click.echo(click.style('Sorry, only support SELF_HOSTED mode.', fg='red'))
return
tenant = db.session.query(Tenant).first()
if not tenant:
click.echo(click.style('Sorry, no workspace found. Please enter /install to initialize.', fg='red'))
return
tenant.encrypt_public_key = generate_key_pair(tenant.id)
db.session.query(Provider).filter(Provider.provider_type == 'custom').delete()
db.session.query(ProviderModel).delete()
db.session.commit()
click.echo(click.style('Congratulations! '
'the asymmetric key pair of workspace {} has been reset.'.format(tenant.id), fg='green'))
@click.command('generate-invitation-codes', help='Generate invitation codes.')
@click.option('--batch', help='The batch of invitation codes.')
@click.option('--count', prompt=True, help='Invitation codes count.')
@ -130,7 +176,558 @@ def generate_upper_string():
return result
@click.command('recreate-all-dataset-indexes', help='Recreate all dataset indexes.')
def recreate_all_dataset_indexes():
click.echo(click.style('Start recreate all dataset indexes.', fg='green'))
recreate_count = 0
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.indexing_technique == 'high_quality') \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for dataset in datasets:
try:
click.echo('Recreating dataset index: {}'.format(dataset.id))
index = IndexBuilder.get_index(dataset, 'high_quality')
if index and index._is_origin():
index.recreate_dataset(dataset)
recreate_count += 1
else:
click.echo('passed.')
except Exception as e:
click.echo(
click.style('Recreate dataset index error: {} {}'.format(e.__class__.__name__, str(e)), fg='red'))
continue
click.echo(click.style('Congratulations! Recreate {} dataset indexes.'.format(recreate_count), fg='green'))
@click.command('clean-unused-dataset-indexes', help='Clean unused dataset indexes.')
def clean_unused_dataset_indexes():
click.echo(click.style('Start clean unused dataset indexes.', fg='green'))
clean_days = int(current_app.config.get('CLEAN_DAY_SETTING'))
start_at = time.perf_counter()
thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.created_at < thirty_days_ago) \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for dataset in datasets:
dataset_query = db.session.query(DatasetQuery).filter(
DatasetQuery.created_at > thirty_days_ago,
DatasetQuery.dataset_id == dataset.id
).all()
if not dataset_query or len(dataset_query) == 0:
documents = db.session.query(Document).filter(
Document.dataset_id == dataset.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False,
Document.updated_at > thirty_days_ago
).all()
if not documents or len(documents) == 0:
try:
# remove index
vector_index = IndexBuilder.get_index(dataset, 'high_quality')
kw_index = IndexBuilder.get_index(dataset, 'economy')
# delete from vector index
if vector_index:
    if dataset.collection_binding_id:
        vector_index.delete_by_group_id(dataset.id)
    else:
        vector_index.delete()
kw_index.delete()
# update document
update_params = {
Document.enabled: False
}
Document.query.filter_by(dataset_id=dataset.id).update(update_params)
db.session.commit()
click.echo(click.style('Cleaned unused dataset {} from db success!'.format(dataset.id),
fg='green'))
except Exception as e:
click.echo(
click.style('clean dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
end_at = time.perf_counter()
click.echo(click.style('Cleaned unused dataset from db success latency: {}'.format(end_at - start_at), fg='green'))
@click.command('sync-anthropic-hosted-providers', help='Sync anthropic hosted providers.')
def sync_anthropic_hosted_providers():
if not hosted_model_providers.anthropic:
click.echo(click.style('Anthropic hosted provider is not configured.', fg='red'))
return
click.echo(click.style('Start sync anthropic hosted providers.', fg='green'))
count = 0
new_quota_limit = hosted_model_providers.anthropic.quota_limit
page = 1
while True:
try:
providers = db.session.query(Provider).filter(
Provider.provider_name == 'anthropic',
Provider.provider_type == ProviderType.SYSTEM.value,
Provider.quota_type == ProviderQuotaType.TRIAL.value,
Provider.quota_limit != new_quota_limit
).order_by(Provider.created_at.desc()).paginate(page=page, per_page=100)
except NotFound:
break
page += 1
for provider in providers:
try:
click.echo('Syncing tenant anthropic hosted provider: {}, origin: limit {}, used {}'
.format(provider.tenant_id, provider.quota_limit, provider.quota_used))
original_quota_limit = provider.quota_limit
division = math.ceil(new_quota_limit / 1000)
provider.quota_limit = new_quota_limit if original_quota_limit == 1000 \
else original_quota_limit * division
provider.quota_used = division * provider.quota_used
db.session.commit()
count += 1
except Exception as e:
click.echo(click.style(
'Sync tenant anthropic hosted provider error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
continue
click.echo(click.style('Congratulations! Synced {} anthropic hosted providers.'.format(count), fg='green'))
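
To make the quota scaling above concrete, here is a worked example with assumed values (a new hosted quota of 600,000 tokens and a tenant whose original trial limit was 1,000):

```python
# Worked example of the arithmetic in sync_anthropic_hosted_providers (values assumed).
import math

new_quota_limit = 600_000       # hosted Anthropic quota
original_quota_limit = 1_000    # tenant's old trial limit
quota_used = 200                # tokens already consumed

division = math.ceil(new_quota_limit / 1000)  # 600
quota_limit = new_quota_limit if original_quota_limit == 1000 \
    else original_quota_limit * division
quota_used = division * quota_used            # usage scaled by the same factor

print(quota_limit, quota_used)  # 600000 120000
```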
@click.command('create-qdrant-indexes', help='Create qdrant indexes.')
def create_qdrant_indexes():
click.echo(click.style('Start create qdrant indexes.', fg='green'))
create_count = 0
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.indexing_technique == 'high_quality') \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for dataset in datasets:
if dataset.index_struct_dict:
if dataset.index_struct_dict['type'] != 'qdrant':
try:
click.echo('Create dataset qdrant index: {}'.format(dataset.id))
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except Exception:
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id
)
dataset.embedding_model = embedding_model.name
dataset.embedding_model_provider = embedding_model.model_provider.provider_name
except Exception:
provider = Provider(
id='provider_id',
tenant_id=dataset.tenant_id,
provider_name='openai',
provider_type=ProviderType.SYSTEM.value,
encrypted_config=json.dumps({'openai_api_key': 'TEST'}),
is_valid=True,
)
model_provider = OpenAIProvider(provider=provider)
embedding_model = OpenAIEmbedding(name="text-embedding-ada-002",
model_provider=model_provider)
embeddings = CacheEmbedding(embedding_model)
from core.index.vector_index.qdrant_vector_index import QdrantVectorIndex, QdrantConfig
index = QdrantVectorIndex(
dataset=dataset,
config=QdrantConfig(
endpoint=current_app.config.get('QDRANT_URL'),
api_key=current_app.config.get('QDRANT_API_KEY'),
root_path=current_app.root_path
),
embeddings=embeddings
)
if index:
index.create_qdrant_dataset(dataset)
index_struct = {
"type": 'qdrant',
"vector_store": {
"class_prefix": dataset.index_struct_dict['vector_store']['class_prefix']}
}
dataset.index_struct = json.dumps(index_struct)
db.session.commit()
create_count += 1
else:
click.echo('passed.')
except Exception as e:
click.echo(
click.style('Create dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
continue
click.echo(click.style('Congratulations! Created {} dataset indexes.'.format(create_count), fg='green'))
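The try/except cascade inside this command encodes a three-step embedding-model fallback: first the model the dataset was indexed with, then the tenant's default embedding model, and finally a hard-coded OpenAI text-embedding-ada-002 stub with a placeholder key. Condensed into one helper (a sketch only; the names are the ones this file already uses):

def resolve_embedding_model(dataset):
    try:
        # 1. the model this dataset was originally embedded with
        return ModelFactory.get_embedding_model(
            tenant_id=dataset.tenant_id,
            model_provider_name=dataset.embedding_model_provider,
            model_name=dataset.embedding_model)
    except Exception:
        pass
    try:
        # 2. the tenant's default embedding model
        return ModelFactory.get_embedding_model(tenant_id=dataset.tenant_id)
    except Exception:
        # 3. last resort: a stub OpenAI provider with a placeholder key
        provider = Provider(id='provider_id', tenant_id=dataset.tenant_id,
                            provider_name='openai',
                            provider_type=ProviderType.SYSTEM.value,
                            encrypted_config=json.dumps({'openai_api_key': 'TEST'}),
                            is_valid=True)
        return OpenAIEmbedding(name="text-embedding-ada-002",
                               model_provider=OpenAIProvider(provider=provider))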
@click.command('update-qdrant-indexes', help='Update qdrant indexes.')
def update_qdrant_indexes():
click.echo(click.style('Start updating Qdrant indexes.', fg='green'))
update_count = 0
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.indexing_technique == 'high_quality') \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for dataset in datasets:
if dataset.index_struct_dict:
if dataset.index_struct_dict['type'] != 'qdrant':
try:
click.echo('Update dataset qdrant index: {}'.format(dataset.id))
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except Exception:
provider = Provider(
id='provider_id',
tenant_id=dataset.tenant_id,
provider_name='openai',
provider_type=ProviderType.CUSTOM.value,
encrypted_config=json.dumps({'openai_api_key': 'TEST'}),
is_valid=True,
)
model_provider = OpenAIProvider(provider=provider)
embedding_model = OpenAIEmbedding(name="text-embedding-ada-002",
model_provider=model_provider)
embeddings = CacheEmbedding(embedding_model)
from core.index.vector_index.qdrant_vector_index import QdrantVectorIndex, QdrantConfig
index = QdrantVectorIndex(
dataset=dataset,
config=QdrantConfig(
endpoint=current_app.config.get('QDRANT_URL'),
api_key=current_app.config.get('QDRANT_API_KEY'),
root_path=current_app.root_path
),
embeddings=embeddings
)
if index:
index.update_qdrant_dataset(dataset)
update_count += 1
else:
click.echo('passed.')
except Exception as e:
    click.echo(
        click.style('Update dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
                    fg='red'))
continue
click.echo(click.style('Congratulations! Updated {} dataset indexes.'.format(update_count), fg='green'))
@click.command('normalization-collections', help='Restore all collections into one.')
def normalization_collections():
click.echo(click.style('Start normalizing collections.', fg='green'))
normalization_count = []
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.indexing_technique == 'high_quality') \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=100)
except NotFound:
break
datasets_result = datasets.items
page += 1
for i in range(0, len(datasets_result), 5):
threads = []
sub_datasets = datasets_result[i:i + 5]
for dataset in sub_datasets:
document_format_thread = threading.Thread(target=deal_dataset_vector, kwargs={
'flask_app': current_app._get_current_object(),
'dataset': dataset,
'normalization_count': normalization_count
})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
click.echo(click.style('Congratulations! Restored {} dataset indexes.'.format(len(normalization_count)), fg='green'))
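normalization_collections processes datasets five at a time, one thread per dataset, and each worker re-enters a Flask application context because db.session is context-bound. The two details that matter are passing the real app object (current_app._get_current_object(), not the proxy) into the thread, and wrapping the worker body in app_context(). The pattern in isolation, with the dataset logic stripped out:

import threading
from flask import Flask, current_app

def worker(flask_app: Flask, item, results: list):
    # Each thread needs its own app context before touching db.session.
    with flask_app.app_context():
        results.append(item)

def run_in_batches(items, batch_size=5):
    app = current_app._get_current_object()  # unwrap the LocalProxy
    results = []
    for i in range(0, len(items), batch_size):
        threads = [threading.Thread(target=worker, args=(app, item, results))
                   for item in items[i:i + batch_size]]
        for t in threads:
            t.start()
        for t in threads:
            t.join()  # finish the batch before starting the next
    return results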
def deal_dataset_vector(flask_app: Flask, dataset: Dataset, normalization_count: list):
with flask_app.app_context():
try:
click.echo('Restoring dataset index: {}'.format(dataset.id))
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except Exception:
provider = Provider(
id='provider_id',
tenant_id=dataset.tenant_id,
provider_name='openai',
provider_type=ProviderType.CUSTOM.value,
encrypted_config=json.dumps({'openai_api_key': 'TEST'}),
is_valid=True,
)
model_provider = OpenAIProvider(provider=provider)
embedding_model = OpenAIEmbedding(name="text-embedding-ada-002",
model_provider=model_provider)
embeddings = CacheEmbedding(embedding_model)
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == embedding_model.model_provider.provider_name,
DatasetCollectionBinding.model_name == embedding_model.name). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=embedding_model.model_provider.provider_name,
model_name=embedding_model.name,
collection_name="Vector_index_" + str(uuid.uuid4()).replace("-", "_") + '_Node'
)
db.session.add(dataset_collection_binding)
db.session.commit()
from core.index.vector_index.qdrant_vector_index import QdrantVectorIndex, QdrantConfig
index = QdrantVectorIndex(
dataset=dataset,
config=QdrantConfig(
endpoint=current_app.config.get('QDRANT_URL'),
api_key=current_app.config.get('QDRANT_API_KEY'),
root_path=current_app.root_path
),
embeddings=embeddings
)
if index:
# index.delete_by_group_id(dataset.id)
index.restore_dataset_in_one(dataset, dataset_collection_binding)
else:
click.echo('passed.')
normalization_count.append(1)
except Exception as e:
click.echo(
click.style('Restore dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
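deal_dataset_vector also shows the get-or-create idiom used for the shared collection binding: look the binding up by provider and model name, and only when none exists create (and commit) a new row with a freshly generated collection name. Distilled into a helper, using the same model as above:

import uuid

def get_or_create_binding(provider_name: str, model_name: str) -> DatasetCollectionBinding:
    binding = db.session.query(DatasetCollectionBinding) \
        .filter(DatasetCollectionBinding.provider_name == provider_name,
                DatasetCollectionBinding.model_name == model_name) \
        .order_by(DatasetCollectionBinding.created_at).first()
    if not binding:
        binding = DatasetCollectionBinding(
            provider_name=provider_name,
            model_name=model_name,
            collection_name="Vector_index_" + str(uuid.uuid4()).replace("-", "_") + '_Node')
        db.session.add(binding)
        db.session.commit()  # persist so later lookups can reuse it
    return binding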
@click.command('update_app_model_configs', help='Migrate data to support paragraph variable.')
@click.option("--batch-size", default=500, help="Number of records to migrate in each batch.")
def update_app_model_configs(batch_size):
pre_prompt_template = '{{default_input}}'
user_input_form_template = {
"en-US": [
{
"paragraph": {
"label": "Query",
"variable": "default_input",
"required": False,
"default": ""
}
}
],
"zh-Hans": [
{
"paragraph": {
"label": "查询内容",
"variable": "default_input",
"required": False,
"default": ""
}
}
]
}
click.secho("Start migrate old data that the text generator can support paragraph variable.", fg='green')
total_records = db.session.query(AppModelConfig) \
.join(App, App.app_model_config_id == AppModelConfig.id) \
.filter(App.mode == 'completion') \
.count()
if total_records == 0:
click.secho("No data to migrate.", fg='green')
return
num_batches = (total_records + batch_size - 1) // batch_size
with tqdm(total=total_records, desc="Migrating Data") as pbar:
for i in range(num_batches):
offset = i * batch_size
limit = min(batch_size, total_records - offset)
click.secho(f"Fetching batch {i + 1}/{num_batches} from source database...", fg='green')
data_batch = db.session.query(AppModelConfig) \
.join(App, App.app_model_config_id == AppModelConfig.id) \
.filter(App.mode == 'completion') \
.order_by(App.created_at) \
.offset(offset).limit(limit).all()
if not data_batch:
click.secho("No more data to migrate.", fg='green')
break
try:
click.secho(f"Migrating {len(data_batch)} records...", fg='green')
for data in data_batch:
# click.secho(f"Migrating data {data.id}, pre_prompt: {data.pre_prompt}, user_input_form: {data.user_input_form}", fg='green')
if data.pre_prompt is None:
data.pre_prompt = pre_prompt_template
else:
if pre_prompt_template in data.pre_prompt:
continue
data.pre_prompt += pre_prompt_template
app_data = db.session.query(App) \
.filter(App.id == data.app_id) \
.one()
account_data = db.session.query(Account) \
.join(TenantAccountJoin, Account.id == TenantAccountJoin.account_id) \
.filter(TenantAccountJoin.role == 'owner') \
.filter(TenantAccountJoin.tenant_id == app_data.tenant_id) \
.one_or_none()
if not account_data:
continue
if data.user_input_form is None or data.user_input_form == 'null':
data.user_input_form = json.dumps(user_input_form_template[account_data.interface_language])
else:
raw_json_data = json.loads(data.user_input_form)
raw_json_data.append(user_input_form_template[account_data.interface_language][0])
data.user_input_form = json.dumps(raw_json_data)
# click.secho(f"Updated data {data.id}, pre_prompt: {data.pre_prompt}, user_input_form: {data.user_input_form}", fg='green')
db.session.commit()
except Exception as e:
click.secho(f"Error while migrating data: {e}, app_id: {data.app_id}, app_model_config_id: {data.id}",
fg='red')
continue
click.secho(f"Successfully migrated batch {i + 1}/{num_batches}.", fg='green')
pbar.update(len(data_batch))
@click.command('migrate_default_input_to_dataset_query_variable')
@click.option("--batch-size", default=500, help="Number of records to migrate in each batch.")
def migrate_default_input_to_dataset_query_variable(batch_size):
click.secho("Starting...", fg='green')
total_records = db.session.query(AppModelConfig) \
.join(App, App.app_model_config_id == AppModelConfig.id) \
.filter(App.mode == 'completion') \
.filter(AppModelConfig.dataset_query_variable == None) \
.count()
if total_records == 0:
click.secho("No data to migrate.", fg='green')
return
num_batches = (total_records + batch_size - 1) // batch_size
with tqdm(total=total_records, desc="Migrating Data") as pbar:
for i in range(num_batches):
offset = i * batch_size
limit = min(batch_size, total_records - offset)
click.secho(f"Fetching batch {i + 1}/{num_batches} from source database...", fg='green')
data_batch = db.session.query(AppModelConfig) \
.join(App, App.app_model_config_id == AppModelConfig.id) \
.filter(App.mode == 'completion') \
.filter(AppModelConfig.dataset_query_variable == None) \
.order_by(App.created_at) \
.offset(offset).limit(limit).all()
if not data_batch:
click.secho("No more data to migrate.", fg='green')
break
try:
click.secho(f"Migrating {len(data_batch)} records...", fg='green')
for data in data_batch:
config = AppModelConfig.to_dict(data)
tools = config["agent_mode"]["tools"]
dataset_exists = "dataset" in str(tools)
if not dataset_exists:
continue
user_input_form = config.get("user_input_form", [])
for form in user_input_form:
paragraph = form.get('paragraph')
if paragraph \
and paragraph.get('variable') == 'query':
data.dataset_query_variable = 'query'
break
if paragraph \
and paragraph.get('variable') == 'default_input':
data.dataset_query_variable = 'default_input'
break
db.session.commit()
except Exception as e:
click.secho(f"Error while migrating data: {e}, app_id: {data.app_id}, app_model_config_id: {data.id}",
fg='red')
continue
click.secho(f"Successfully migrated batch {i + 1}/{num_batches}.", fg='green')
pbar.update(len(data_batch))
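Both migration commands above follow the same batched skeleton: count the matching rows, page through them with offset/limit in stable created_at order, commit once per batch, and drive a tqdm progress bar. Stripped of the per-record logic, the skeleton looks like this (a sketch; db and tqdm as imported above):

from tqdm import tqdm

def migrate_in_batches(query, migrate_one, batch_size=500):
    total = query.count()
    if total == 0:
        return
    num_batches = (total + batch_size - 1) // batch_size  # ceiling division
    with tqdm(total=total, desc="Migrating Data") as pbar:
        for i in range(num_batches):
            batch = query.offset(i * batch_size).limit(batch_size).all()
            if not batch:
                break
            for record in batch:
                migrate_one(record)
            db.session.commit()  # one commit per batch
            pbar.update(len(batch))

One caveat worth noting: when the per-record update changes a column the query filters on (as in migrate_default_input_to_dataset_query_variable, where dataset_query_variable stops being NULL), advancing the offset over a shrinking result set can skip rows; re-fetching the first page after each commit, or filtering on an immutable column, avoids that.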
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
app.cli.add_command(generate_invitation_codes)
app.cli.add_command(reset_encrypt_key_pair)
app.cli.add_command(recreate_all_dataset_indexes)
app.cli.add_command(sync_anthropic_hosted_providers)
app.cli.add_command(clean_unused_dataset_indexes)
app.cli.add_command(create_qdrant_indexes)
app.cli.add_command(update_qdrant_indexes)
app.cli.add_command(update_app_model_configs)
app.cli.add_command(normalization_collections)
app.cli.add_command(migrate_default_input_to_dataset_query_variable)
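register_commands attaches each command to Flask's CLI group, so they run as, e.g., flask create-qdrant-indexes. A quick way to exercise one without a shell is Flask's built-in test CLI runner; create_app is assumed here to be the project's application factory:

from app import create_app  # assumed application factory

app = create_app()
runner = app.test_cli_runner()
result = runner.invoke(args=['create-qdrant-indexes'])
print(result.output)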

View File

@ -10,9 +10,6 @@ from extensions.ext_redis import redis_client
dotenv.load_dotenv()
DEFAULTS = {
'COOKIE_HTTPONLY': 'True',
'COOKIE_SECURE': 'True',
'COOKIE_SAMESITE': 'None',
'DB_USERNAME': 'postgres',
'DB_PASSWORD': '',
'DB_HOST': 'localhost',
@ -22,32 +19,44 @@ DEFAULTS = {
'REDIS_PORT': '6379',
'REDIS_DB': '0',
'REDIS_USE_SSL': 'False',
'SESSION_REDIS_HOST': 'localhost',
'SESSION_REDIS_PORT': '6379',
'SESSION_REDIS_DB': '2',
'SESSION_REDIS_USE_SSL': 'False',
'OAUTH_REDIRECT_PATH': '/console/api/oauth/authorize',
'OAUTH_REDIRECT_INDEX_PATH': '/',
'CONSOLE_URL': 'https://cloud.dify.ai',
'API_URL': 'https://api.dify.ai',
'APP_URL': 'https://udify.app',
'CONSOLE_WEB_URL': 'https://cloud.dify.ai',
'CONSOLE_API_URL': 'https://cloud.dify.ai',
'SERVICE_API_URL': 'https://api.dify.ai',
'APP_WEB_URL': 'https://udify.app',
'APP_API_URL': 'https://udify.app',
'STORAGE_TYPE': 'local',
'STORAGE_LOCAL_PATH': 'storage',
'CHECK_UPDATE_URL': 'https://updates.dify.ai',
'SESSION_TYPE': 'sqlalchemy',
'SESSION_PERMANENT': 'True',
'SESSION_USE_SIGNER': 'True',
'DEPLOY_ENV': 'PRODUCTION',
'SQLALCHEMY_POOL_SIZE': 30,
'SQLALCHEMY_POOL_RECYCLE': 3600,
'SQLALCHEMY_ECHO': 'False',
'SENTRY_TRACES_SAMPLE_RATE': 1.0,
'SENTRY_PROFILES_SAMPLE_RATE': 1.0,
'WEAVIATE_GRPC_ENABLED': 'True',
'WEAVIATE_BATCH_SIZE': 100,
'CELERY_BACKEND': 'database',
'PDF_PREVIEW': 'True',
'LOG_LEVEL': 'INFO',
'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
'DEFAULT_LLM_PROVIDER': 'openai',
'HOSTED_OPENAI_QUOTA_LIMIT': 200,
'HOSTED_OPENAI_ENABLED': 'False',
'HOSTED_OPENAI_PAID_ENABLED': 'False',
'HOSTED_OPENAI_PAID_INCREASE_QUOTA': 1,
'HOSTED_AZURE_OPENAI_ENABLED': 'False',
'HOSTED_AZURE_OPENAI_QUOTA_LIMIT': 200,
'HOSTED_ANTHROPIC_QUOTA_LIMIT': 600000,
'HOSTED_ANTHROPIC_ENABLED': 'False',
'HOSTED_ANTHROPIC_PAID_ENABLED': 'False',
'HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA': 1000000,
'HOSTED_ANTHROPIC_PAID_MIN_QUANTITY': 20,
'HOSTED_ANTHROPIC_PAID_MAX_QUANTITY': 100,
'HOSTED_MODERATION_ENABLED': 'False',
'HOSTED_MODERATION_PROVIDERS': '',
'TENANT_DOCUMENT_COUNT': 100,
'CLEAN_DAY_SETTING': 30,
'UPLOAD_FILE_SIZE_LIMIT': 15,
'UPLOAD_FILE_BATCH_LIMIT': 5,
}
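The DEFAULTS dict only takes effect through the module's env helpers: each setting is read from the process environment first and falls back to DEFAULTS, and boolean settings are stored as the strings 'True'/'False'. A minimal sketch of those helpers (the real ones live alongside this dict in config.py):

import os

def get_env(key):
    # Environment variable wins; otherwise fall back to DEFAULTS.
    return os.environ.get(key, DEFAULTS.get(key))

def get_bool_env(key):
    value = get_env(key)
    # Boolean settings are stored as the strings 'True' / 'False'.
    return value.lower() == 'true' if value is not None else False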
@ -75,16 +84,20 @@ class Config:
def __init__(self):
# app settings
self.CONSOLE_API_URL = get_env('CONSOLE_URL') if get_env('CONSOLE_URL') else get_env('CONSOLE_API_URL')
self.CONSOLE_WEB_URL = get_env('CONSOLE_URL') if get_env('CONSOLE_URL') else get_env('CONSOLE_WEB_URL')
self.SERVICE_API_URL = get_env('API_URL') if get_env('API_URL') else get_env('SERVICE_API_URL')
self.APP_WEB_URL = get_env('APP_URL') if get_env('APP_URL') else get_env('APP_WEB_URL')
self.APP_API_URL = get_env('APP_URL') if get_env('APP_URL') else get_env('APP_API_URL')
self.CONSOLE_URL = get_env('CONSOLE_URL')
self.API_URL = get_env('API_URL')
self.APP_URL = get_env('APP_URL')
self.CURRENT_VERSION = "0.3.0"
self.CURRENT_VERSION = "0.3.25"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
self.TESTING = False
self.LOG_LEVEL = get_env('LOG_LEVEL')
self.PDF_PREVIEW = get_bool_env('PDF_PREVIEW')
# Your App secret key will be used for securely signing the session cookie
# Make sure you are changing this key for your deployment with a strong key.
@ -92,20 +105,6 @@ class Config:
# Alternatively you can set it with `SECRET_KEY` environment variable.
self.SECRET_KEY = get_env('SECRET_KEY')
# cookie settings
self.REMEMBER_COOKIE_HTTPONLY = get_bool_env('COOKIE_HTTPONLY')
self.SESSION_COOKIE_HTTPONLY = get_bool_env('COOKIE_HTTPONLY')
self.REMEMBER_COOKIE_SAMESITE = get_env('COOKIE_SAMESITE')
self.SESSION_COOKIE_SAMESITE = get_env('COOKIE_SAMESITE')
self.REMEMBER_COOKIE_SECURE = get_bool_env('COOKIE_SECURE')
self.SESSION_COOKIE_SECURE = get_bool_env('COOKIE_SECURE')
self.PERMANENT_SESSION_LIFETIME = timedelta(days=7)
# session settings, only support sqlalchemy, redis
self.SESSION_TYPE = get_env('SESSION_TYPE')
self.SESSION_PERMANENT = get_bool_env('SESSION_PERMANENT')
self.SESSION_USE_SIGNER = get_bool_env('SESSION_USE_SIGNER')
# redis settings
self.REDIS_HOST = get_env('REDIS_HOST')
self.REDIS_PORT = get_env('REDIS_PORT')
@ -114,14 +113,6 @@ class Config:
self.REDIS_DB = get_env('REDIS_DB')
self.REDIS_USE_SSL = get_bool_env('REDIS_USE_SSL')
# session redis settings
self.SESSION_REDIS_HOST = get_env('SESSION_REDIS_HOST')
self.SESSION_REDIS_PORT = get_env('SESSION_REDIS_PORT')
self.SESSION_REDIS_USERNAME = get_env('SESSION_REDIS_USERNAME')
self.SESSION_REDIS_PASSWORD = get_env('SESSION_REDIS_PASSWORD')
self.SESSION_REDIS_DB = get_env('SESSION_REDIS_DB')
self.SESSION_REDIS_USE_SSL = get_bool_env('SESSION_REDIS_USE_SSL')
# storage settings
self.STORAGE_TYPE = get_env('STORAGE_TYPE')
self.STORAGE_LOCAL_PATH = get_env('STORAGE_LOCAL_PATH')
@ -138,6 +129,7 @@ class Config:
self.WEAVIATE_ENDPOINT = get_env('WEAVIATE_ENDPOINT')
self.WEAVIATE_API_KEY = get_env('WEAVIATE_API_KEY')
self.WEAVIATE_GRPC_ENABLED = get_bool_env('WEAVIATE_GRPC_ENABLED')
self.WEAVIATE_BATCH_SIZE = int(get_env('WEAVIATE_BATCH_SIZE'))
# qdrant settings
self.QDRANT_URL = get_env('QDRANT_URL')
@ -145,10 +137,15 @@ class Config:
# cors settings
self.CONSOLE_CORS_ALLOW_ORIGINS = get_cors_allow_origins(
'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_URL)
'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_WEB_URL)
self.WEB_API_CORS_ALLOW_ORIGINS = get_cors_allow_origins(
'WEB_API_CORS_ALLOW_ORIGINS', '*')
# mail settings
self.MAIL_TYPE = get_env('MAIL_TYPE')
self.MAIL_DEFAULT_SEND_FROM = get_env('MAIL_DEFAULT_SEND_FROM')
self.RESEND_API_KEY = get_env('RESEND_API_KEY')
# sentry settings
self.SENTRY_DSN = get_env('SENTRY_DSN')
self.SENTRY_TRACES_SAMPLE_RATE = float(get_env('SENTRY_TRACES_SAMPLE_RATE'))
@ -164,7 +161,10 @@ class Config:
}
self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}"
self.SQLALCHEMY_ENGINE_OPTIONS = {'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE'))}
self.SQLALCHEMY_ENGINE_OPTIONS = {
'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')),
'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE'))
}
self.SQLALCHEMY_ECHO = get_bool_env('SQLALCHEMY_ECHO')
@ -176,15 +176,50 @@ class Config:
self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://')
# hosted provider credentials
self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')
self.HOSTED_OPENAI_ENABLED = get_bool_env('HOSTED_OPENAI_ENABLED')
self.HOSTED_OPENAI_API_KEY = get_env('HOSTED_OPENAI_API_KEY')
self.HOSTED_OPENAI_API_BASE = get_env('HOSTED_OPENAI_API_BASE')
self.HOSTED_OPENAI_API_ORGANIZATION = get_env('HOSTED_OPENAI_API_ORGANIZATION')
self.HOSTED_OPENAI_QUOTA_LIMIT = int(get_env('HOSTED_OPENAI_QUOTA_LIMIT'))
self.HOSTED_OPENAI_PAID_ENABLED = get_bool_env('HOSTED_OPENAI_PAID_ENABLED')
self.HOSTED_OPENAI_PAID_STRIPE_PRICE_ID = get_env('HOSTED_OPENAI_PAID_STRIPE_PRICE_ID')
self.HOSTED_OPENAI_PAID_INCREASE_QUOTA = int(get_env('HOSTED_OPENAI_PAID_INCREASE_QUOTA'))
# By default it is False
# You could disable it for compatibility with certain OpenAPI providers
self.DISABLE_PROVIDER_CONFIG_VALIDATION = get_bool_env('DISABLE_PROVIDER_CONFIG_VALIDATION')
self.HOSTED_AZURE_OPENAI_ENABLED = get_bool_env('HOSTED_AZURE_OPENAI_ENABLED')
self.HOSTED_AZURE_OPENAI_API_KEY = get_env('HOSTED_AZURE_OPENAI_API_KEY')
self.HOSTED_AZURE_OPENAI_API_BASE = get_env('HOSTED_AZURE_OPENAI_API_BASE')
self.HOSTED_AZURE_OPENAI_QUOTA_LIMIT = int(get_env('HOSTED_AZURE_OPENAI_QUOTA_LIMIT'))
self.HOSTED_ANTHROPIC_ENABLED = get_bool_env('HOSTED_ANTHROPIC_ENABLED')
self.HOSTED_ANTHROPIC_API_BASE = get_env('HOSTED_ANTHROPIC_API_BASE')
self.HOSTED_ANTHROPIC_API_KEY = get_env('HOSTED_ANTHROPIC_API_KEY')
self.HOSTED_ANTHROPIC_QUOTA_LIMIT = int(get_env('HOSTED_ANTHROPIC_QUOTA_LIMIT'))
self.HOSTED_ANTHROPIC_PAID_ENABLED = get_bool_env('HOSTED_ANTHROPIC_PAID_ENABLED')
self.HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID = get_env('HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID')
self.HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA = int(get_env('HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA'))
self.HOSTED_ANTHROPIC_PAID_MIN_QUANTITY = int(get_env('HOSTED_ANTHROPIC_PAID_MIN_QUANTITY'))
self.HOSTED_ANTHROPIC_PAID_MAX_QUANTITY = int(get_env('HOSTED_ANTHROPIC_PAID_MAX_QUANTITY'))
self.HOSTED_MODERATION_ENABLED = get_bool_env('HOSTED_MODERATION_ENABLED')
self.HOSTED_MODERATION_PROVIDERS = get_env('HOSTED_MODERATION_PROVIDERS')
self.STRIPE_API_KEY = get_env('STRIPE_API_KEY')
self.STRIPE_WEBHOOK_SECRET = get_env('STRIPE_WEBHOOK_SECRET')
# notion import setting
self.NOTION_CLIENT_ID = get_env('NOTION_CLIENT_ID')
self.NOTION_CLIENT_SECRET = get_env('NOTION_CLIENT_SECRET')
self.NOTION_INTEGRATION_TYPE = get_env('NOTION_INTEGRATION_TYPE')
self.NOTION_INTERNAL_SECRET = get_env('NOTION_INTERNAL_SECRET')
self.NOTION_INTEGRATION_TOKEN = get_env('NOTION_INTEGRATION_TOKEN')
self.TENANT_DOCUMENT_COUNT = get_env('TENANT_DOCUMENT_COUNT')
self.CLEAN_DAY_SETTING = get_env('CLEAN_DAY_SETTING')
# uploading settings
self.UPLOAD_FILE_SIZE_LIMIT = int(get_env('UPLOAD_FILE_SIZE_LIMIT'))
self.UPLOAD_FILE_BATCH_LIMIT = int(get_env('UPLOAD_FILE_BATCH_LIMIT'))
# For temp use only
# set default LLM provider, default is 'openai', support `azure_openai`
self.DEFAULT_LLM_PROVIDER = get_env('DEFAULT_LLM_PROVIDER')
class CloudEditionConfig(Config):

View File

@ -16,7 +16,7 @@ model_templates = {
},
'model_config': {
'provider': 'openai',
'model_id': 'text-davinci-003',
'model_id': 'gpt-3.5-turbo-instruct',
'configs': {
'prompt_template': '',
'prompt_variables': [],
@ -30,7 +30,7 @@ model_templates = {
},
'model': json.dumps({
"provider": "openai",
"name": "text-davinci-003",
"name": "gpt-3.5-turbo-instruct",
"completion_params": {
"max_tokens": 512,
"temperature": 1,
@ -38,7 +38,18 @@ model_templates = {
"presence_penalty": 0,
"frequency_penalty": 0
}
})
}),
'user_input_form': json.dumps([
{
"paragraph": {
"label": "Query",
"variable": "query",
"required": True,
"default": ""
}
}
]),
'pre_prompt': '{{query}}'
}
},
@ -93,7 +104,7 @@ demo_model_templates = {
'mode': 'completion',
'model_config': AppModelConfig(
provider='openai',
model_id='text-davinci-003',
model_id='gpt-3.5-turbo-instruct',
configs={
'prompt_template': "Please translate the following text into {{target_language}}:\n",
'prompt_variables': [
@ -129,7 +140,7 @@ demo_model_templates = {
pre_prompt="Please translate the following text into {{target_language}}:\n",
model=json.dumps({
"provider": "openai",
"name": "text-davinci-003",
"name": "gpt-3.5-turbo-instruct",
"completion_params": {
"max_tokens": 1000,
"temperature": 0,
@ -211,7 +222,7 @@ demo_model_templates = {
'mode': 'completion',
'model_config': AppModelConfig(
provider='openai',
model_id='text-davinci-003',
model_id='gpt-3.5-turbo-instruct',
configs={
'prompt_template': "请将以下文本翻译为{{target_language}}:\n",
'prompt_variables': [
@ -247,7 +258,7 @@ demo_model_templates = {
pre_prompt="请将以下文本翻译为{{target_language}}:\n",
model=json.dumps({
"provider": "openai",
"name": "text-davinci-003",
"name": "gpt-3.5-turbo-instruct",
"completion_params": {
"max_tokens": 1000,
"temperature": 0,

View File

@ -9,16 +9,22 @@ api = ExternalApi(bp)
from . import setup, version, apikey, admin
# Import app controllers
from .app import app, site, completion, model_config, statistic, conversation, message
from .app import app, site, completion, model_config, statistic, conversation, message, generator, audio
# Import auth controllers
from .auth import login, oauth
from .auth import login, oauth, data_source_oauth, activate
# Import datasets controllers
from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing
from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing, data_source
# Import workspace controllers
from .workspace import workspace, members, providers, account
from .workspace import workspace, members, providers, model_providers, account, tool_providers, models
# Import explore controllers
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message, audio
# Import universal chat controllers
from .universal_chat import chat, conversation, message, parameter, audio
# Import webhook controllers
from .webhook import stripe

View File

@ -8,6 +8,7 @@ from werkzeug.exceptions import NotFound, Unauthorized
from controllers.console import api
from controllers.console.wraps import only_edition_cloud
from extensions.ext_database import db
from libs.helper import supported_language
from models.model import RecommendedApp, App, InstalledApp
@ -47,15 +48,14 @@ class InsertExploreAppListApi(Resource):
parser.add_argument('desc', type=str, location='json')
parser.add_argument('copyright', type=str, location='json')
parser.add_argument('privacy_policy', type=str, location='json')
parser.add_argument('language', type=str, required=True, nullable=False, choices=['en-US', 'zh-Hans'],
location='json')
parser.add_argument('language', type=supported_language, required=True, nullable=False, location='json')
parser.add_argument('category', type=str, required=True, nullable=False, location='json')
parser.add_argument('position', type=int, required=True, nullable=False, location='json')
args = parser.parse_args()
app = App.query.filter(App.id == args['app_id']).first()
if not app:
raise NotFound('App not found')
raise NotFound(f'App \'{args["app_id"]}\' is not found')
site = app.site
if not site:
@ -63,10 +63,12 @@ class InsertExploreAppListApi(Resource):
copy_right = args['copyright'] if args['copyright'] else ''
privacy_policy = args['privacy_policy'] if args['privacy_policy'] else ''
else:
desc = site.description if (site.description if not args['desc'] else args['desc']) else ''
copy_right = site.copyright if (site.copyright if not args['copyright'] else args['copyright']) else ''
privacy_policy = site.privacy_policy \
if (site.privacy_policy if not args['privacy_policy'] else args['privacy_policy']) else ''
desc = site.description if site.description else \
args['desc'] if args['desc'] else ''
copy_right = site.copyright if site.copyright else \
args['copyright'] if args['copyright'] else ''
privacy_policy = site.privacy_policy if site.privacy_policy else \
args['privacy_policy'] if args['privacy_policy'] else ''
recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
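The rewritten fallbacks now read as a plain precedence chain: keep the value already stored on the site if present, otherwise take the request argument, otherwise an empty string (the one-liners they replace inverted that precedence in a hard-to-read way). The same chain as a tiny illustrative helper (hypothetical, not part of the codebase):

def first_filled(*values, default=''):
    # Return the first truthy value in precedence order.
    for value in values:
        if value:
            return value
    return default

# desc = first_filled(site.description, args['desc'])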

View File

@ -1,4 +1,5 @@
from flask_login import login_required, current_user
from flask_login import current_user
from libs.login import login_required
import flask_restful
from flask_restful import Resource, fields, marshal_with
from werkzeug.exceptions import Forbidden
@ -80,6 +81,7 @@ class BaseApiKeyListResource(Resource):
key = ApiToken.generate_api_key(self.token_prefix, 24)
api_token = ApiToken()
setattr(api_token, self.resource_id_field, resource_id)
api_token.tenant_id = current_user.current_tenant_id
api_token.token = key
api_token.type = self.resource_type
db.session.add(api_token)

View File

@ -1,54 +1,28 @@
# -*- coding:utf-8 -*-
import json
import logging
from datetime import datetime
import flask
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, abort, inputs
from werkzeug.exceptions import Unauthorized, Forbidden
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal_with, abort, inputs
from werkzeug.exceptions import Forbidden
from constants.model_template import model_templates, demo_model_templates
from controllers.console import api
from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError, ProviderQuotaExceededError, \
CompletionRequestError, ProviderModelCurrentlyNotSupportError
from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.generator.llm_generator import LLMGenerator
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError
from core.model_providers.error import ProviderTokenNotInitError, LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from core.model_providers.model_provider_factory import ModelProviderFactory
from events.app_event import app_was_created, app_was_deleted
from libs.helper import TimestampField
from fields.app_fields import app_pagination_fields, app_detail_fields, template_list_fields, \
app_detail_fields_with_site
from extensions.ext_database import db
from models.model import App, AppModelConfig, Site, InstalledApp
from services.account_service import TenantService
from models.model import App, AppModelConfig, Site
from services.app_model_config_service import AppModelConfigService
model_config_fields = {
'opening_statement': fields.String,
'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
'more_like_this': fields.Raw(attribute='more_like_this_dict'),
'model': fields.Raw(attribute='model_dict'),
'user_input_form': fields.Raw(attribute='user_input_form_list'),
'pre_prompt': fields.String,
'agent_mode': fields.Raw(attribute='agent_mode_dict'),
}
app_detail_fields = {
'id': fields.String,
'name': fields.String,
'mode': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'enable_site': fields.Boolean,
'enable_api': fields.Boolean,
'api_rpm': fields.Integer,
'api_rph': fields.Integer,
'is_demo': fields.Boolean,
'model_config': fields.Nested(model_config_fields, attribute='app_model_config'),
'created_at': TimestampField
}
def _get_app(app_id, tenant_id):
app = db.session.query(App).filter(App.id == app_id, App.tenant_id == tenant_id).first()
@ -58,35 +32,6 @@ def _get_app(app_id, tenant_id):
class AppListApi(Resource):
prompt_config_fields = {
'prompt_template': fields.String,
}
model_config_partial_fields = {
'model': fields.Raw(attribute='model_dict'),
'pre_prompt': fields.String,
}
app_partial_fields = {
'id': fields.String,
'name': fields.String,
'mode': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'enable_site': fields.Boolean,
'enable_api': fields.Boolean,
'is_demo': fields.Boolean,
'model_config': fields.Nested(model_config_partial_fields, attribute='app_model_config'),
'created_at': TimestampField
}
app_pagination_fields = {
'page': fields.Integer,
'limit': fields.Integer(attribute='per_page'),
'total': fields.Integer,
'has_more': fields.Boolean(attribute='has_next'),
'data': fields.List(fields.Nested(app_partial_fields), attribute='items')
}
@setup_required
@login_required
@ -100,7 +45,8 @@ class AppListApi(Resource):
args = parser.parse_args()
app_models = db.paginate(
db.select(App).where(App.tenant_id == current_user.current_tenant_id).order_by(App.created_at.desc()),
db.select(App).where(App.tenant_id == current_user.current_tenant_id,
App.is_universal == False).order_by(App.created_at.desc()),
page=args['page'],
per_page=args['limit'],
error_out=False)
@ -125,11 +71,39 @@ class AppListApi(Resource):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
try:
default_model = ModelFactory.get_text_generation_model(
tenant_id=current_user.current_tenant_id
)
except (ProviderTokenNotInitError, LLMBadRequestError):
default_model = None
except Exception as e:
logging.exception(e)
default_model = None
if args['model_config'] is not None:
# validate config
model_config_dict = args['model_config']
# get model provider
model_provider = ModelProviderFactory.get_preferred_model_provider(
current_user.current_tenant_id,
model_config_dict["model"]["provider"]
)
if not model_provider:
if not default_model:
raise ProviderNotInitializeError(
f"No Default System Reasoning Model available. Please configure "
f"in the Settings -> Model Provider.")
else:
model_config_dict["model"]["provider"] = default_model.model_provider.provider_name
model_config_dict["model"]["name"] = default_model.name
model_configuration = AppModelConfigService.validate_configuration(
tenant_id=current_user.current_tenant_id,
account=current_user,
config=args['model_config'],
config=model_config_dict,
mode=args['mode']
)
@ -142,19 +116,8 @@ class AppListApi(Resource):
status='normal'
)
app_model_config = AppModelConfig(
provider="",
model_id="",
configs={},
opening_statement=model_configuration['opening_statement'],
suggested_questions=json.dumps(model_configuration['suggested_questions']),
suggested_questions_after_answer=json.dumps(model_configuration['suggested_questions_after_answer']),
more_like_this=json.dumps(model_configuration['more_like_this']),
model=json.dumps(model_configuration['model']),
user_input_form=json.dumps(model_configuration['user_input_form']),
pre_prompt=model_configuration['pre_prompt'],
agent_mode=json.dumps(model_configuration['agent_mode']),
)
app_model_config = AppModelConfig()
app_model_config = app_model_config.from_model_config_dict(model_configuration)
else:
if 'mode' not in args or args['mode'] is None:
abort(400, message="mode is required")
@ -164,6 +127,23 @@ class AppListApi(Resource):
app = App(**model_config_template['app'])
app_model_config = AppModelConfig(**model_config_template['model_config'])
# get model provider
model_provider = ModelProviderFactory.get_preferred_model_provider(
current_user.current_tenant_id,
app_model_config.model_dict["provider"]
)
if not model_provider:
if not default_model:
raise ProviderNotInitializeError(
f"No Default System Reasoning Model available. Please configure "
f"in the Settings -> Model Provider.")
else:
model_dict = app_model_config.model_dict
model_dict['provider'] = default_model.model_provider.provider_name
model_dict['name'] = default_model.name
app_model_config.model = json.dumps(model_dict)
app.name = args['name']
app.mode = args['mode']
app.icon = args['icon']
@ -198,18 +178,6 @@ class AppListApi(Resource):
class AppTemplateApi(Resource):
template_fields = {
'name': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'description': fields.String,
'mode': fields.String,
'model_config': fields.Nested(model_config_fields),
}
template_list_fields = {
'data': fields.List(fields.Nested(template_fields)),
}
@setup_required
@login_required
@ -220,42 +188,14 @@ class AppTemplateApi(Resource):
account = current_user
interface_language = account.interface_language
return {'data': demo_model_templates.get(interface_language)}
templates = demo_model_templates.get(interface_language)
if not templates:
templates = demo_model_templates.get('en-US')
return {'data': templates}
class AppApi(Resource):
site_fields = {
'access_token': fields.String(attribute='code'),
'code': fields.String,
'title': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'description': fields.String,
'default_language': fields.String,
'customize_domain': fields.String,
'copyright': fields.String,
'privacy_policy': fields.String,
'customize_token_strategy': fields.String,
'prompt_public': fields.Boolean,
'app_base_url': fields.String,
}
app_detail_fields_with_site = {
'id': fields.String,
'name': fields.String,
'mode': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'enable_site': fields.Boolean,
'enable_api': fields.Boolean,
'api_rpm': fields.Integer,
'api_rph': fields.Integer,
'is_demo': fields.Boolean,
'model_config': fields.Nested(model_config_fields, attribute='app_model_config'),
'site': fields.Nested(site_fields),
'api_base_url': fields.String,
'created_at': TimestampField
}
@setup_required
@login_required
@ -274,6 +214,10 @@ class AppApi(Resource):
def delete(self, app_id):
"""Delete app"""
app_id = str(app_id)
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
app = _get_app(app_id, current_user.current_tenant_id)
db.session.delete(app)
@ -293,19 +237,13 @@ class AppNameApi(Resource):
@account_initialization_required
@marshal_with(app_detail_fields)
def post(self, app_id):
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
args = parser.parse_args()
app = db.get_or_404(App, str(app_id))
if app.tenant_id != flask.session.get('tenant_id'):
raise Unauthorized()
app.name = args.get('name')
app.updated_at = datetime.utcnow()
db.session.commit()
@ -318,20 +256,14 @@ class AppIconApi(Resource):
@account_initialization_required
@marshal_with(app_detail_fields)
def post(self, app_id):
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
parser = reqparse.RequestParser()
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
app = db.get_or_404(App, str(app_id))
if app.tenant_id != flask.session.get('tenant_id'):
raise Unauthorized()
app.icon = args.get('icon')
app.icon_background = args.get('icon_background')
app.updated_at = datetime.utcnow()
@ -385,29 +317,6 @@ class AppApiStatus(Resource):
return app
class AppRateLimit(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(app_detail_fields)
def post(self, app_id):
parser = reqparse.RequestParser()
parser.add_argument('api_rpm', type=inputs.natural, required=False, location='json')
parser.add_argument('api_rph', type=inputs.natural, required=False, location='json')
args = parser.parse_args()
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
if args.get('api_rpm'):
app.api_rpm = args.get('api_rpm')
if args.get('api_rph'):
app.api_rph = args.get('api_rph')
app.updated_at = datetime.utcnow()
db.session.commit()
return app
class AppCopy(Resource):
@staticmethod
def create_app_copy(app):
@ -427,20 +336,9 @@ class AppCopy(Resource):
@staticmethod
def create_app_model_config_copy(app_config, copy_app_id):
copy_app_model_config = AppModelConfig(
app_id=copy_app_id,
provider=app_config.provider,
model_id=app_config.model_id,
configs=app_config.configs,
opening_statement=app_config.opening_statement,
suggested_questions=app_config.suggested_questions,
suggested_questions_after_answer=app_config.suggested_questions_after_answer,
more_like_this=app_config.more_like_this,
model=app_config.model,
user_input_form=app_config.user_input_form,
pre_prompt=app_config.pre_prompt,
agent_mode=app_config.agent_mode
)
copy_app_model_config = app_config.copy()
copy_app_model_config.app_id = copy_app_id
return copy_app_model_config
@setup_required
@ -468,51 +366,11 @@ class AppCopy(Resource):
return copy_app, 201
class AppExport(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
# todo
pass
class IntroductionGenerateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('prompt_template', type=str, required=True, location='json')
args = parser.parse_args()
account = current_user
try:
answer = LLMGenerator.generate_introduction(
account.current_tenant_id,
args['prompt_template']
)
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
return {'introduction': answer}
api.add_resource(AppListApi, '/apps')
api.add_resource(AppTemplateApi, '/app-templates')
api.add_resource(AppApi, '/apps/<uuid:app_id>')
api.add_resource(AppCopy, '/apps/<uuid:app_id>/copy')
api.add_resource(AppNameApi, '/apps/<uuid:app_id>/name')
api.add_resource(AppIconApi, '/apps/<uuid:app_id>/icon')
api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable')
api.add_resource(AppApiStatus, '/apps/<uuid:app_id>/api-enable')
api.add_resource(AppRateLimit, '/apps/<uuid:app_id>/rate-limit')
api.add_resource(IntroductionGenerateApi, '/introduction-generate')

View File

@ -0,0 +1,69 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
from libs.login import login_required
from werkzeug.exceptions import InternalServerError
import services
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import AppUnavailableError, \
ProviderNotInitializeError, CompletionRequestError, ProviderQuotaExceededError, \
ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from flask_restful import Resource
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
class ChatMessageAudioApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
app_model = _get_app(app_id, 'chat')
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(ChatMessageAudioApi, '/apps/<uuid:app_id>/audio-to-text')

View File

@ -5,7 +5,7 @@ from typing import Generator, Union
import flask_login
from flask import Response, stream_with_context
from flask_login import login_required
from libs.login import login_required
from werkzeug.exceptions import InternalServerError, NotFound
import services
@ -17,7 +17,7 @@ from controllers.console.app.error import ConversationCompletedError, AppUnavail
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from flask_restful import Resource, reqparse
@ -39,10 +39,14 @@ class CompletionMessageApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, location='json')
parser.add_argument('query', type=str, location='json', default='')
parser.add_argument('model_config', type=dict, required=True, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
args = parser.parse_args()
streaming = args['response_mode'] != 'blocking'
account = flask_login.current_user
try:
@ -51,7 +55,7 @@ class CompletionMessageApi(Resource):
user=account,
args=args,
from_source='console',
streaming=True,
streaming=streaming,
is_model_config_override=True
)
@ -63,8 +67,8 @@ class CompletionMessageApi(Resource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -111,8 +115,12 @@ class ChatMessageApi(Resource):
parser.add_argument('query', type=str, required=True, location='json')
parser.add_argument('model_config', type=dict, required=True, location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
args = parser.parse_args()
streaming = args['response_mode'] != 'blocking'
account = flask_login.current_user
try:
@ -121,7 +129,7 @@ class ChatMessageApi(Resource):
user=account,
args=args,
from_source='console',
streaming=True,
streaming=streaming,
is_model_config_override=True
)
@ -133,8 +141,8 @@ class ChatMessageApi(Resource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -164,8 +172,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:

View File

@ -1,8 +1,9 @@
from datetime import datetime
import pytz
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal_with
from flask_restful.inputs import int_range
from sqlalchemy import or_, func
from sqlalchemy.orm import joinedload
@ -12,106 +13,14 @@ from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from libs.helper import TimestampField, datetime_string, uuid_value
from fields.conversation_fields import conversation_pagination_fields, conversation_detail_fields, \
conversation_message_detail_fields, conversation_with_summary_pagination_fields
from libs.helper import datetime_string
from extensions.ext_database import db
from models.model import Message, MessageAnnotation, Conversation
account_fields = {
'id': fields.String,
'name': fields.String,
'email': fields.String
}
feedback_fields = {
'rating': fields.String,
'content': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account': fields.Nested(account_fields, allow_null=True),
}
annotation_fields = {
'content': fields.String,
'account': fields.Nested(account_fields, allow_null=True),
'created_at': TimestampField
}
message_detail_fields = {
'id': fields.String,
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'message': fields.Raw,
'message_tokens': fields.Integer,
'answer': fields.String,
'answer_tokens': fields.Integer,
'provider_response_latency': fields.Float,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'feedbacks': fields.List(fields.Nested(feedback_fields)),
'annotation': fields.Nested(annotation_fields, allow_null=True),
'created_at': TimestampField
}
feedback_stat_fields = {
'like': fields.Integer,
'dislike': fields.Integer
}
model_config_fields = {
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'model': fields.Raw,
'user_input_form': fields.Raw,
'pre_prompt': fields.String,
'agent_mode': fields.Raw,
}
class CompletionConversationApi(Resource):
class MessageTextField(fields.Raw):
def format(self, value):
return value[0]['text'] if value else ''
simple_configs_fields = {
'prompt_template': fields.String,
}
simple_model_config_fields = {
'model': fields.Raw(attribute='model_dict'),
'pre_prompt': fields.String,
}
simple_message_detail_fields = {
'inputs': fields.Raw,
'query': fields.String,
'message': MessageTextField,
'answer': fields.String,
}
conversation_fields = {
'id': fields.String,
'status': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'read_at': TimestampField,
'created_at': TimestampField,
'annotation': fields.Nested(annotation_fields, allow_null=True),
'model_config': fields.Nested(simple_model_config_fields),
'user_feedback_stats': fields.Nested(feedback_stat_fields),
'admin_feedback_stats': fields.Nested(feedback_stat_fields),
'message': fields.Nested(simple_message_detail_fields, attribute='first_message')
}
conversation_pagination_fields = {
'page': fields.Integer,
'limit': fields.Integer(attribute='per_page'),
'total': fields.Integer,
'has_more': fields.Boolean(attribute='has_next'),
'data': fields.List(fields.Nested(conversation_fields), attribute='items')
}
@setup_required
@login_required
@ -160,7 +69,7 @@ class CompletionConversationApi(Resource):
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime = end_datetime.replace(second=59)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
@ -189,66 +98,44 @@ class CompletionConversationApi(Resource):
class CompletionConversationDetailApi(Resource):
conversation_detail_fields = {
'id': fields.String,
'status': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'created_at': TimestampField,
'model_config': fields.Nested(model_config_fields),
'message': fields.Nested(message_detail_fields, attribute='first_message'),
}
@setup_required
@login_required
@account_initialization_required
@marshal_with(conversation_detail_fields)
@marshal_with(conversation_message_detail_fields)
def get(self, app_id, conversation_id):
app_id = str(app_id)
conversation_id = str(conversation_id)
return _get_conversation(app_id, conversation_id, 'completion')
@setup_required
@login_required
@account_initialization_required
def delete(self, app_id, conversation_id):
app_id = str(app_id)
conversation_id = str(conversation_id)
app = _get_app(app_id, 'chat')
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
conversation.is_deleted = True
db.session.commit()
return {'result': 'success'}, 204
class ChatConversationApi(Resource):
simple_configs_fields = {
'prompt_template': fields.String,
}
simple_model_config_fields = {
'model': fields.Raw(attribute='model_dict'),
'pre_prompt': fields.String,
}
conversation_fields = {
'id': fields.String,
'status': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'summary': fields.String(attribute='summary_or_query'),
'read_at': TimestampField,
'created_at': TimestampField,
'annotated': fields.Boolean,
'model_config': fields.Nested(simple_model_config_fields),
'message_count': fields.Integer,
'user_feedback_stats': fields.Nested(feedback_stat_fields),
'admin_feedback_stats': fields.Nested(feedback_stat_fields)
}
conversation_pagination_fields = {
'page': fields.Integer,
'limit': fields.Integer(attribute='per_page'),
'total': fields.Integer,
'has_more': fields.Boolean(attribute='has_next'),
'data': fields.List(fields.Nested(conversation_fields), attribute='items')
}
@setup_required
@login_required
@account_initialization_required
@marshal_with(conversation_pagination_fields)
@marshal_with(conversation_with_summary_pagination_fields)
def get(self, app_id):
app_id = str(app_id)
@ -296,7 +183,7 @@ class ChatConversationApi(Resource):
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime = end_datetime.replace(second=59)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
@ -333,19 +220,6 @@ class ChatConversationApi(Resource):
class ChatConversationDetailApi(Resource):
conversation_detail_fields = {
'id': fields.String,
'status': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'created_at': TimestampField,
'annotated': fields.Boolean,
'model_config': fields.Nested(model_config_fields),
'message_count': fields.Integer,
'user_feedback_stats': fields.Nested(feedback_stat_fields),
'admin_feedback_stats': fields.Nested(feedback_stat_fields)
}
@setup_required
@login_required
@ -356,6 +230,27 @@ class ChatConversationDetailApi(Resource):
conversation_id = str(conversation_id)
return _get_conversation(app_id, conversation_id, 'chat')
@setup_required
@login_required
@account_initialization_required
def delete(self, app_id, conversation_id):
app_id = str(app_id)
conversation_id = str(conversation_id)
# get app info
app = _get_app(app_id, 'chat')
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
conversation.is_deleted = True
db.session.commit()
return {'result': 'success'}, 204

View File

@ -16,7 +16,7 @@ class ProviderNotInitializeError(BaseHTTPException):
class ProviderQuotaExceededError(BaseHTTPException):
error_code = 'provider_quota_exceeded'
description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
description = "Your quota for Dify Hosted Model Provider has been exhausted. " \
"Please go to Settings -> Model Provider to complete your own provider credentials."
code = 400
@ -49,3 +49,27 @@ class AppMoreLikeThisDisabledError(BaseHTTPException):
error_code = 'app_more_like_this_disabled'
description = "The 'More like this' feature is disabled. Please refresh your page."
code = 403
class NoAudioUploadedError(BaseHTTPException):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeError(BaseHTTPException):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeError(BaseHTTPException):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextError(BaseHTTPException):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text."
code = 400

View File

@ -0,0 +1,76 @@
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
CompletionRequestError, ProviderModelCurrentlyNotSupportError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.generator.llm_generator import LLMGenerator
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError
class IntroductionGenerateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('prompt_template', type=str, required=True, location='json')
args = parser.parse_args()
account = current_user
try:
answer = LLMGenerator.generate_introduction(
account.current_tenant_id,
args['prompt_template']
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
return {'introduction': answer}
class RuleGenerateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('audiences', type=str, required=True, nullable=False, location='json')
parser.add_argument('hoping_to_solve', type=str, required=True, nullable=False, location='json')
args = parser.parse_args()
account = current_user
try:
rules = LLMGenerator.generate_rule_config(
account.current_tenant_id,
args['audiences'],
args['hoping_to_solve']
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
return rules
api.add_resource(IntroductionGenerateApi, '/introduction-generate')
api.add_resource(RuleGenerateApi, '/rule-generate')
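IntroductionGenerateApi and RuleGenerateApi repeat the identical ladder that translates provider and LLM exceptions into console HTTP errors. A sketch of how that mapping could be hoisted into a decorator; this helper is hypothetical, not part of the diff, and reuses the imports already at the top of this file:

from functools import wraps

def translate_provider_errors(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
    return wrapper

With this in place, each post() body shrinks to the single LLMGenerator call.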

View File

@@ -3,7 +3,7 @@ import logging
from typing import Union, Generator
from flask import Response, stream_with_context
from flask_login import current_user, login_required
from flask_login import current_user
from flask_restful import Resource, reqparse, marshal_with, fields
from flask_restful.inputs import int_range
from werkzeug.exceptions import InternalServerError, NotFound
@@ -14,9 +14,11 @@ from controllers.console.app.error import CompletionRequestError, ProviderNotIni
AppMoreLikeThisDisabledError, ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value, TimestampField
from libs.login import login_required
from fields.conversation_fields import message_detail_fields
from libs.helper import uuid_value
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from extensions.ext_database import db
from models.model import MessageAnnotation, Conversation, Message, MessageFeedback
@@ -26,44 +28,6 @@ from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
from services.message_service import MessageService
account_fields = {
'id': fields.String,
'name': fields.String,
'email': fields.String
}
feedback_fields = {
'rating': fields.String,
'content': fields.String,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account': fields.Nested(account_fields, allow_null=True),
}
annotation_fields = {
'content': fields.String,
'account': fields.Nested(account_fields, allow_null=True),
'created_at': TimestampField
}
message_detail_fields = {
'id': fields.String,
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'message': fields.Raw,
'message_tokens': fields.Integer,
'answer': fields.String,
'answer_tokens': fields.Integer,
'provider_response_latency': fields.Float,
'from_source': fields.String,
'from_end_user_id': fields.String,
'from_account_id': fields.String,
'feedbacks': fields.List(fields.Nested(feedback_fields)),
'annotation': fields.Nested(annotation_fields, allow_null=True),
'created_at': TimestampField
}
class ChatMessageListApi(Resource):
message_infinite_scroll_pagination_fields = {
@@ -269,8 +233,8 @@ class MessageMoreLikeThisApi(Resource):
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -297,8 +261,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
except MoreLikeThisDisabledError:
yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
@@ -339,8 +303,8 @@ class MessageSuggestedQuestionApi(Resource):
raise NotFound("Message not found")
except ConversationNotExistsError:
raise NotFound("Conversation not found")
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
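The error branches above yield "data: <json>\n\n" frames, i.e. server-sent events, so failures can be delivered in the middle of a stream. A sketch of the compact_response wrapper they feed into, reconstructed from its visible signature and the stream_with_context import; the exact body in the repo may differ:

import json
from typing import Generator, Union
from flask import Response, stream_with_context

def compact_response(response: Union[dict, Generator]) -> Response:
    if isinstance(response, dict):
        # Blocking mode: one JSON payload.
        return Response(response=json.dumps(response), status=200,
                        mimetype='application/json')
    # Streaming mode: each yielded chunk is already an SSE frame
    # ("data: ...\n\n"), including the error frames above.
    return Response(stream_with_context(response), status=200,
                    mimetype='text/event-stream')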

View File

@@ -1,14 +1,14 @@
# -*- coding:utf-8 -*-
import json
from flask import request
from flask_restful import Resource
from flask_login import login_required, current_user
from flask_login import current_user
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required
from events.app_event import app_model_config_was_updated
from extensions.ext_database import db
from models.model import AppModelConfig
@@ -28,6 +28,7 @@ class ModelConfigResource(Resource):
# validate config
model_configuration = AppModelConfigService.validate_configuration(
tenant_id=current_user.current_tenant_id,
account=current_user,
config=request.json,
mode=app_model.mode
@@ -35,18 +36,8 @@
new_app_model_config = AppModelConfig(
app_id=app_model.id,
provider="",
model_id="",
configs={},
opening_statement=model_configuration['opening_statement'],
suggested_questions=json.dumps(model_configuration['suggested_questions']),
suggested_questions_after_answer=json.dumps(model_configuration['suggested_questions_after_answer']),
more_like_this=json.dumps(model_configuration['more_like_this']),
model=json.dumps(model_configuration['model']),
user_input_form=json.dumps(model_configuration['user_input_form']),
pre_prompt=model_configuration['pre_prompt'],
agent_mode=json.dumps(model_configuration['agent_mode']),
)
new_app_model_config = new_app_model_config.from_model_config_dict(model_configuration)
db.session.add(new_app_model_config)
db.session.flush()
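The refactor above swaps a field-by-field constructor for a single from_model_config_dict call. A plausible sketch of that helper, inferred from the removed keyword arguments; the real method lives on the AppModelConfig model and may cover more fields:

import json

def from_model_config_dict(app_model_config, model_config: dict):
    # Mirror of the removed constructor: JSON-encode the structured
    # sections, copy the plain-text ones.
    app_model_config.opening_statement = model_config['opening_statement']
    app_model_config.suggested_questions = json.dumps(model_config['suggested_questions'])
    app_model_config.suggested_questions_after_answer = json.dumps(
        model_config['suggested_questions_after_answer'])
    app_model_config.more_like_this = json.dumps(model_config['more_like_this'])
    app_model_config.model = json.dumps(model_config['model'])
    app_model_config.user_input_form = json.dumps(model_config['user_input_form'])
    app_model_config.pre_prompt = model_config['pre_prompt']
    app_model_config.agent_mode = json.dumps(model_config['agent_mode'])
    return app_model_config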

View File

@@ -1,32 +1,18 @@
# -*- coding:utf-8 -*-
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal_with
from werkzeug.exceptions import NotFound, Forbidden
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.app_fields import app_site_fields
from libs.helper import supported_language
from extensions.ext_database import db
from models.model import Site
app_site_fields = {
'app_id': fields.String,
'access_token': fields.String(attribute='code'),
'code': fields.String,
'title': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'description': fields.String,
'default_language': fields.String,
'customize_domain': fields.String,
'copyright': fields.String,
'privacy_policy': fields.String,
'customize_token_strategy': fields.String,
'prompt_public': fields.Boolean
}
def parse_app_site_args():
parser = reqparse.RequestParser()
@@ -80,6 +66,13 @@ class AppSite(Resource):
if value is not None:
setattr(site, attr_name, value)
if attr_name == 'title':
app_model.name = value
elif attr_name == 'icon':
app_model.icon = value
elif attr_name == 'icon_background':
app_model.icon_background = value
db.session.commit()
return site

View File

@@ -1,9 +1,11 @@
# -*- coding:utf-8 -*-
from decimal import Decimal
from datetime import datetime
import pytz
from flask import jsonify
from flask_login import login_required, current_user
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
from controllers.console import api
@@ -59,18 +61,20 @@ class DailyConversationStatistic(Resource):
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
rs = db.session.execute(sql_query, arg_dict)
response_date = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_date.append({
response_data.append({
'date': str(i.date),
'conversation_count': i.conversation_count
})
return jsonify({
'data': response_date
'data': response_data
})
@@ -119,18 +123,20 @@ class DailyTerminalsStatistic(Resource):
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
rs = db.session.execute(sql_query, arg_dict)
response_date = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_date.append({
response_data.append({
'date': str(i.date),
'terminal_count': i.terminal_count
})
return jsonify({
'data': response_date
'data': response_data
})
@@ -180,12 +186,14 @@ class DailyTokenCostStatistic(Resource):
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
rs = db.session.execute(sql_query, arg_dict)
response_date = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_date.append({
response_data.append({
'date': str(i.date),
'token_count': i.token_count,
'total_price': i.total_price,
@@ -193,10 +201,272 @@
})
return jsonify({
'data': response_date
'data': response_data
})
class AverageSessionInteractionStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id, 'chat')
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(subquery.message_count) AS interactions
FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
FROM conversations c
JOIN messages m ON c.id = m.conversation_id
WHERE c.override_model_configs IS NULL AND c.app_id = :app_id"""
arg_dict = {'tz': account.timezone, 'app_id': app_model.id}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and c.created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and c.created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += """
GROUP BY m.conversation_id) subquery
LEFT JOIN conversations c on c.id=subquery.conversation_id
GROUP BY date
ORDER BY date"""
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_data.append({
'date': str(i.date),
'interactions': float(i.interactions.quantize(Decimal('0.01')))
})
return jsonify({
'data': response_data
})
class UserSatisfactionRateStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''
SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count
FROM messages m
LEFT JOIN message_feedbacks mf on mf.message_id=m.id
WHERE m.app_id = :app_id
'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and m.created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and m.created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_data.append({
'date': str(i.date),
'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
})
return jsonify({
'data': response_data
})
class AverageResponseTimeStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id, 'completion')
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(provider_response_latency) as latency
FROM messages
WHERE app_id = :app_id
'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_data.append({
'date': str(i.date),
'latency': round(i.latency * 1000, 4)
})
return jsonify({
'data': response_data
})
class TokensPerSecondStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
CASE
WHEN SUM(provider_response_latency) = 0 THEN 0
ELSE (SUM(answer_tokens) / SUM(provider_response_latency))
END as tokens_per_second
FROM messages
WHERE app_id = :app_id'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
response_data = []
for i in rs:
response_data.append({
'date': str(i.date),
'tps': round(i.tokens_per_second, 4)
})
return jsonify({
'data': response_data
})
api.add_resource(DailyConversationStatistic, '/apps/<uuid:app_id>/statistics/daily-conversations')
api.add_resource(DailyTerminalsStatistic, '/apps/<uuid:app_id>/statistics/daily-end-users')
api.add_resource(DailyTokenCostStatistic, '/apps/<uuid:app_id>/statistics/token-costs')
api.add_resource(AverageSessionInteractionStatistic, '/apps/<uuid:app_id>/statistics/average-session-interactions')
api.add_resource(UserSatisfactionRateStatistic, '/apps/<uuid:app_id>/statistics/user-satisfaction-rate')
api.add_resource(AverageResponseTimeStatistic, '/apps/<uuid:app_id>/statistics/average-response-time')
api.add_resource(TokensPerSecondStatistic, '/apps/<uuid:app_id>/statistics/tokens-per-second')
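Every statistic endpoint above repeats the same conversion: parse the 'start'/'end' argument, zero the seconds, localize to the account's timezone, then convert to UTC for the SQL bind parameters. A sketch of a shared helper that would collapse those copies (hypothetical, not part of the diff):

from datetime import datetime
import pytz

def local_str_to_utc(value: str, tz_name: str) -> datetime:
    """Convert a 'YYYY-MM-DD HH:MM' string in the account's timezone to UTC."""
    local = datetime.strptime(value, '%Y-%m-%d %H:%M').replace(second=0)
    return pytz.timezone(tz_name).localize(local).astimezone(pytz.utc)

# usage inside any of the endpoints above:
# if args['start']:
#     sql_query += ' and created_at >= :start'
#     arg_dict['start'] = local_str_to_utc(args['start'], account.timezone)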

View File

@@ -0,0 +1,75 @@
import base64
import secrets
from datetime import datetime
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.error import AlreadyActivateError
from extensions.ext_database import db
from libs.helper import email, str_len, supported_language, timezone
from libs.password import valid_password, hash_password
from models.account import AccountStatus, Tenant
from services.account_service import RegisterService
class ActivateCheckApi(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('workspace_id', type=str, required=False, nullable=True, location='args')
parser.add_argument('email', type=email, required=False, nullable=True, location='args')
parser.add_argument('token', type=str, required=True, nullable=False, location='args')
args = parser.parse_args()
workspace_id = args['workspace_id']
reg_email = args['email']
token = args['token']
invitation = RegisterService.get_invitation_if_token_valid(workspace_id, reg_email, token)
return {'is_valid': invitation is not None, 'workspace_name': invitation['tenant'].name if invitation else None}
class ActivateApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('workspace_id', type=str, required=False, nullable=True, location='json')
parser.add_argument('email', type=email, required=False, nullable=True, location='json')
parser.add_argument('token', type=str, required=True, nullable=False, location='json')
parser.add_argument('name', type=str_len(30), required=True, nullable=False, location='json')
parser.add_argument('password', type=valid_password, required=True, nullable=False, location='json')
parser.add_argument('interface_language', type=supported_language, required=True, nullable=False,
location='json')
parser.add_argument('timezone', type=timezone, required=True, nullable=False, location='json')
args = parser.parse_args()
invitation = RegisterService.get_invitation_if_token_valid(args['workspace_id'], args['email'], args['token'])
if invitation is None:
raise AlreadyActivateError()
RegisterService.revoke_token(args['workspace_id'], args['email'], args['token'])
account = invitation['account']
account.name = args['name']
# generate password salt
salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode()
# encrypt password with salt
password_hashed = hash_password(args['password'], salt)
base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed
account.password_salt = base64_salt
account.interface_language = args['interface_language']
account.timezone = args['timezone']
account.interface_theme = 'light'
account.status = AccountStatus.ACTIVE.value
account.initialized_at = datetime.utcnow()
db.session.commit()
return {'result': 'success'}
api.add_resource(ActivateCheckApi, '/activate/check')
api.add_resource(ActivateApi, '/activate')
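Activation stores a random 16-byte salt and the salted hash, both base64-encoded. For completeness, a sketch of the matching login-time check; it assumes hash_password(password, salt) from libs.password is deterministic for a given salt, and its definition is outside this diff:

import base64
import hmac
from libs.password import hash_password

def verify_password(candidate: str, password_hashed_b64: str, salt_b64: str) -> bool:
    salt = base64.b64decode(salt_b64)
    candidate_hashed = base64.b64encode(hash_password(candidate, salt)).decode()
    # compare_digest keeps the comparison constant-time.
    return hmac.compare_digest(candidate_hashed, password_hashed_b64)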

View File

@@ -0,0 +1,116 @@
import logging
import requests
from flask import request, redirect, current_app
from flask_login import current_user
from flask_restful import Resource
from werkzeug.exceptions import Forbidden
from libs.login import login_required
from libs.oauth_data_source import NotionOAuth
from controllers.console import api
from ..setup import setup_required
from ..wraps import account_initialization_required
def get_oauth_providers():
with current_app.app_context():
notion_oauth = NotionOAuth(client_id=current_app.config.get('NOTION_CLIENT_ID'),
client_secret=current_app.config.get(
'NOTION_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
'CONSOLE_API_URL') + '/console/api/oauth/data-source/callback/notion')
OAUTH_PROVIDERS = {
'notion': notion_oauth
}
return OAUTH_PROVIDERS
class OAuthDataSource(Resource):
def get(self, provider: str):
# The role of the current user in the table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
OAUTH_DATASOURCE_PROVIDERS = get_oauth_providers()
with current_app.app_context():
oauth_provider = OAUTH_DATASOURCE_PROVIDERS.get(provider)
if not oauth_provider:
return {'error': 'Invalid provider'}, 400
if current_app.config.get('NOTION_INTEGRATION_TYPE') == 'internal':
internal_secret = current_app.config.get('NOTION_INTERNAL_SECRET')
oauth_provider.save_internal_access_token(internal_secret)
return { 'data': '' }
else:
auth_url = oauth_provider.get_authorization_url()
return { 'data': auth_url }, 200
class OAuthDataSourceCallback(Resource):
def get(self, provider: str):
OAUTH_DATASOURCE_PROVIDERS = get_oauth_providers()
with current_app.app_context():
oauth_provider = OAUTH_DATASOURCE_PROVIDERS.get(provider)
if not oauth_provider:
return {'error': 'Invalid provider'}, 400
if 'code' in request.args:
code = request.args.get('code')
return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?type=notion&code={code}')
elif 'error' in request.args:
error = request.args.get('error')
return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?type=notion&error={error}')
else:
return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?type=notion&error=Access denied')
class OAuthDataSourceBinding(Resource):
def get(self, provider: str):
OAUTH_DATASOURCE_PROVIDERS = get_oauth_providers()
with current_app.app_context():
oauth_provider = OAUTH_DATASOURCE_PROVIDERS.get(provider)
if not oauth_provider:
return {'error': 'Invalid provider'}, 400
if 'code' in request.args:
code = request.args.get('code')
try:
oauth_provider.get_access_token(code)
except requests.exceptions.HTTPError as e:
logging.exception(
f"An error occurred during the OAuthCallback process with {provider}: {e.response.text}")
return {'error': 'OAuth data source process failed'}, 400
return {'result': 'success'}, 200
class OAuthDataSourceSync(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider, binding_id):
provider = str(provider)
binding_id = str(binding_id)
OAUTH_DATASOURCE_PROVIDERS = get_oauth_providers()
with current_app.app_context():
oauth_provider = OAUTH_DATASOURCE_PROVIDERS.get(provider)
if not oauth_provider:
return {'error': 'Invalid provider'}, 400
try:
oauth_provider.sync_data_source(binding_id)
except requests.exceptions.HTTPError as e:
logging.exception(
f"An error occurred during the OAuthCallback process with {provider}: {e.response.text}")
return {'error': 'OAuth data source process failed'}, 400
return {'result': 'success'}, 200
api.add_resource(OAuthDataSource, '/oauth/data-source/<string:provider>')
api.add_resource(OAuthDataSourceCallback, '/oauth/data-source/callback/<string:provider>')
api.add_resource(OAuthDataSourceBinding, '/oauth/data-source/binding/<string:provider>')
api.add_resource(OAuthDataSourceSync, '/oauth/data-source/<string:provider>/<uuid:binding_id>/sync')
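The resources above rely on four NotionOAuth methods: get_authorization_url, get_access_token, save_internal_access_token (the 'internal' integration type skips the browser round-trip and stores a fixed secret), and sync_data_source. A sketch of the authorization-URL half, assuming Notion's documented OAuth endpoint; the real class is in libs/oauth_data_source:

from urllib.parse import urlencode

# Notion's documented OAuth endpoint; the production class also
# implements token exchange and data-source syncing.
NOTION_AUTHORIZE_URL = 'https://api.notion.com/v1/oauth/authorize'

class NotionOAuthSketch:
    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_authorization_url(self) -> str:
        params = {
            'client_id': self.client_id,
            'response_type': 'code',
            'redirect_uri': self.redirect_uri,
            'owner': 'user',
        }
        return f'{NOTION_AUTHORIZE_URL}?{urlencode(params)}'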

View File

@@ -6,7 +6,6 @@ from flask_restful import Resource, reqparse
import services
from controllers.console import api
from controllers.console.error import AccountNotLinkTenantError
from controllers.console.setup import setup_required
from libs.helper import email
from libs.password import valid_password
@@ -35,14 +34,14 @@ class LoginApi(Resource):
try:
TenantService.switch_tenant(account)
except Exception:
raise AccountNotLinkTenantError("Account not link tenant")
pass
flask_login.login_user(account, remember=args['remember_me'])
AccountService.update_last_login(account, request)
# todo: return the user info
token = AccountService.get_account_jwt_token(account)
return {'result': 'success'}
return {'result': 'success', 'data': token}
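The notable change is the response body: alongside the session cookie, login now returns a JWT from get_account_jwt_token. A sketch of what that helper plausibly does, assuming PyJWT and the app's SECRET_KEY; the actual implementation lives in services/account_service.py, and the claims and lifetime here are assumptions:

from datetime import datetime, timedelta
import jwt
from flask import current_app

def get_account_jwt_token(account) -> str:
    payload = {
        'user_id': account.id,                          # subject of the token
        'exp': datetime.utcnow() + timedelta(days=30),  # hypothetical lifetime
    }
    return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256')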
class LogoutApi(Resource):

View File

@@ -2,9 +2,8 @@ import logging
from datetime import datetime
from typing import Optional
import flask_login
import requests
from flask import request, redirect, current_app, session
from flask import request, redirect, current_app
from flask_restful import Resource
from libs.oauth import OAuthUserInfo, GitHubOAuth, GoogleOAuth
@@ -20,13 +19,13 @@ def get_oauth_providers():
client_secret=current_app.config.get(
'GITHUB_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
'CONSOLE_URL') + '/console/api/oauth/authorize/github')
'CONSOLE_API_URL') + '/console/api/oauth/authorize/github')
google_oauth = GoogleOAuth(client_id=current_app.config.get('GOOGLE_CLIENT_ID'),
client_secret=current_app.config.get(
'GOOGLE_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
'CONSOLE_URL') + '/console/api/oauth/authorize/google')
'CONSOLE_API_URL') + '/console/api/oauth/authorize/google')
OAUTH_PROVIDERS = {
'github': github_oauth,
@@ -75,12 +74,11 @@ class OAuthCallback(Resource):
account.initialized_at = datetime.utcnow()
db.session.commit()
# login user
session.clear()
flask_login.login_user(account, remember=True)
AccountService.update_last_login(account, request)
return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_login=success')
token = AccountService.get_account_jwt_token(account)
return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?console_token={token}')
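The OAuth callback now hands the console a JWT through a console_token query parameter instead of a server-side session (note the session.clear() removal above). The f-string interpolates the token directly; a defensive variant, not what the diff does, would build the query string with urlencode:

from urllib.parse import urlencode
from flask import current_app, redirect

def redirect_with_token(token: str):
    query = urlencode({'console_token': token})
    return redirect(f"{current_app.config.get('CONSOLE_WEB_URL')}?{query}")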
def _get_account_by_openid_or_email(provider: str, user_info: OAuthUserInfo) -> Optional[Account]:

View File

@@ -0,0 +1,248 @@
import datetime
import json
from cachetools import TTLCache
from flask import request
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.data_loader.loader.notion import NotionLoader
from core.indexing_runner import IndexingRunner
from extensions.ext_database import db
from fields.data_source_fields import integrate_notion_info_list_fields, integrate_list_fields
from models.dataset import Document
from models.source import DataSourceBinding
from services.dataset_service import DatasetService, DocumentService
from tasks.document_indexing_sync_task import document_indexing_sync_task
cache = TTLCache(maxsize=None, ttl=30)
class DataSourceApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(integrate_list_fields)
def get(self):
# get workspace data source integrates
data_source_integrates = db.session.query(DataSourceBinding).filter(
DataSourceBinding.tenant_id == current_user.current_tenant_id,
DataSourceBinding.disabled == False
).all()
base_url = request.url_root.rstrip('/')
data_source_oauth_base_path = "/console/api/oauth/data-source"
providers = ["notion"]
integrate_data = []
for provider in providers:
# existing_integrate = next((ai for ai in data_source_integrates if ai.provider == provider), None)
existing_integrates = [item for item in data_source_integrates if item.provider == provider]
if existing_integrates:
for existing_integrate in existing_integrates:
integrate_data.append({
'id': existing_integrate.id,
'provider': provider,
'created_at': existing_integrate.created_at,
'is_bound': True,
'disabled': existing_integrate.disabled,
'source_info': existing_integrate.source_info,
'link': f'{base_url}{data_source_oauth_base_path}/{provider}'
})
else:
integrate_data.append({
'id': None,
'provider': provider,
'created_at': None,
'source_info': None,
'is_bound': False,
'disabled': None,
'link': f'{base_url}{data_source_oauth_base_path}/{provider}'
})
return {'data': integrate_data}, 200
@setup_required
@login_required
@account_initialization_required
def patch(self, binding_id, action):
binding_id = str(binding_id)
action = str(action)
data_source_binding = DataSourceBinding.query.filter_by(
id=binding_id
).first()
if data_source_binding is None:
raise NotFound('Data source binding not found.')
# enable binding
if action == 'enable':
if data_source_binding.disabled:
data_source_binding.disabled = False
data_source_binding.updated_at = datetime.datetime.utcnow()
db.session.add(data_source_binding)
db.session.commit()
else:
raise ValueError('Data source is not disabled.')
# disable binding
if action == 'disable':
if not data_source_binding.disabled:
data_source_binding.disabled = True
data_source_binding.updated_at = datetime.datetime.utcnow()
db.session.add(data_source_binding)
db.session.commit()
else:
raise ValueError('Data source is disabled.')
return {'result': 'success'}, 200
class DataSourceNotionListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(integrate_notion_info_list_fields)
def get(self):
dataset_id = request.args.get('dataset_id', default=None, type=str)
exist_page_ids = []
# import notion in the exist dataset
if dataset_id:
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
if dataset.data_source_type != 'notion_import':
raise ValueError('Dataset is not notion type.')
documents = Document.query.filter_by(
dataset_id=dataset_id,
tenant_id=current_user.current_tenant_id,
data_source_type='notion_import',
enabled=True
).all()
if documents:
for document in documents:
data_source_info = json.loads(document.data_source_info)
exist_page_ids.append(data_source_info['notion_page_id'])
# get all authorized pages
data_source_bindings = DataSourceBinding.query.filter_by(
tenant_id=current_user.current_tenant_id,
provider='notion',
disabled=False
).all()
if not data_source_bindings:
return {
'notion_info': []
}, 200
pre_import_info_list = []
for data_source_binding in data_source_bindings:
source_info = data_source_binding.source_info
pages = source_info['pages']
# Filter out already bound pages
for page in pages:
if page['page_id'] in exist_page_ids:
page['is_bound'] = True
else:
page['is_bound'] = False
pre_import_info = {
'workspace_name': source_info['workspace_name'],
'workspace_icon': source_info['workspace_icon'],
'workspace_id': source_info['workspace_id'],
'pages': pages,
}
pre_import_info_list.append(pre_import_info)
return {
'notion_info': pre_import_info_list
}, 200
class DataSourceNotionApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, workspace_id, page_id, page_type):
workspace_id = str(workspace_id)
page_id = str(page_id)
data_source_binding = DataSourceBinding.query.filter(
db.and_(
DataSourceBinding.tenant_id == current_user.current_tenant_id,
DataSourceBinding.provider == 'notion',
DataSourceBinding.disabled == False,
DataSourceBinding.source_info['workspace_id'] == f'"{workspace_id}"'
)
).first()
if not data_source_binding:
raise NotFound('Data source binding not found.')
loader = NotionLoader(
notion_access_token=data_source_binding.access_token,
notion_workspace_id=workspace_id,
notion_obj_id=page_id,
notion_page_type=page_type
)
text_docs = loader.load()
return {
'content': "\n".join([doc.page_content for doc in text_docs])
}, 200
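The workspace filter above compares the JSON column against a quoted literal (f'"{workspace_id}"') because source_info['workspace_id'] yields a JSON value, and JSON serializes strings with surrounding quotes. On PostgreSQL the same filter can be written with astext so the comparison happens on plain text; an equivalent sketch, with behavior assumed identical for string values:

binding = DataSourceBinding.query.filter(
    db.and_(
        DataSourceBinding.tenant_id == current_user.current_tenant_id,
        DataSourceBinding.provider == 'notion',
        DataSourceBinding.disabled == False,
        # ->> extracts text, so no manual quoting is needed
        DataSourceBinding.source_info['workspace_id'].astext == workspace_id
    )
).first()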
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('notion_info_list', type=list, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
args = parser.parse_args()
# validate args
DocumentService.estimate_args_validate(args)
indexing_runner = IndexingRunner()
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id, args['notion_info_list'], args['process_rule'])
return response, 200
class DataSourceNotionDatasetSyncApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
documents = DocumentService.get_document_by_dataset_id(dataset_id_str)
for document in documents:
document_indexing_sync_task.delay(dataset_id_str, document.id)
return {'result': 'success'}, 200
class DataSourceNotionDocumentSyncApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id, document_id):
dataset_id_str = str(dataset_id)
document_id_str = str(document_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
document = DocumentService.get_document(dataset_id_str, document_id_str)
if document is None:
raise NotFound("Document not found.")
document_indexing_sync_task.delay(dataset_id_str, document_id_str)
return {'result': 'success'}, 200
api.add_resource(DataSourceApi, '/data-source/integrates', '/data-source/integrates/<uuid:binding_id>/<string:action>')
api.add_resource(DataSourceNotionListApi, '/notion/pre-import/pages')
api.add_resource(DataSourceNotionApi,
'/notion/workspaces/<uuid:workspace_id>/pages/<uuid:page_id>/<string:page_type>/preview',
'/datasets/notion-indexing-estimate')
api.add_resource(DataSourceNotionDatasetSyncApi, '/datasets/<uuid:dataset_id>/notion/sync')
api.add_resource(DataSourceNotionDocumentSyncApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/notion/sync')
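Both sync endpoints fan work out through document_indexing_sync_task.delay(...), one Celery task per document. A sketch of that task's shape; the decorator and signature are assumptions inferred from the .delay() call sites, and the real body (re-reading the Notion page and re-indexing its segments) lives in tasks/document_indexing_sync_task.py:

from celery import shared_task

@shared_task
def document_indexing_sync_task(dataset_id: str, document_id: str):
    # Placeholder body: re-load the bound Notion page, diff it against the
    # stored segments, and re-run indexing for this document.
    ...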

View File

@@ -1,46 +1,29 @@
# -*- coding:utf-8 -*-
from flask import request
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal, marshal_with
from werkzeug.exceptions import NotFound, Forbidden
import flask_restful
from flask import request, current_app
from flask_login import current_user
from controllers.console.apikey import api_key_list, api_key_fields
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal, marshal_with
from werkzeug.exceptions import NotFound, Forbidden
import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from libs.helper import TimestampField
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.models.entity.model_params import ModelType
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from extensions.ext_database import db
from models.model import UploadFile
from services.dataset_service import DatasetService
dataset_detail_fields = {
'id': fields.String,
'name': fields.String,
'description': fields.String,
'provider': fields.String,
'permission': fields.String,
'data_source_type': fields.String,
'indexing_technique': fields.String,
'app_count': fields.Integer,
'document_count': fields.Integer,
'word_count': fields.Integer,
'created_by': fields.String,
'created_at': TimestampField,
'updated_by': fields.String,
'updated_at': TimestampField,
}
dataset_query_detail_fields = {
"id": fields.String,
"content": fields.String,
"source": fields.String,
"source_app_id": fields.String,
"created_by_role": fields.String,
"created_by": fields.String,
"created_at": TimestampField
}
from models.dataset import DocumentSegment, Document
from models.model import UploadFile, ApiToken
from services.dataset_service import DatasetService, DocumentService
from services.provider_service import ProviderService
def _validate_name(name):
@@ -50,8 +33,8 @@ def _validate_name(name):
def _validate_description_length(description):
if len(description) > 200:
raise ValueError('Description cannot exceed 200 characters.')
if len(description) > 400:
raise ValueError('Description cannot exceed 400 characters.')
return description
@@ -71,8 +54,29 @@ class DatasetListApi(Resource):
datasets, total = DatasetService.get_datasets(page, limit, provider,
current_user.current_tenant_id, current_user)
# check embedding setting
provider_service = ProviderService()
valid_model_list = provider_service.get_valid_model_list(current_user.current_tenant_id,
ModelType.EMBEDDINGS.value)
# if len(valid_model_list) == 0:
# raise ProviderNotInitializeError(
# f"No Embedding Model available. Please configure a valid provider "
# f"in the Settings -> Model Provider.")
model_names = []
for valid_model in valid_model_list:
model_names.append(f"{valid_model['model_name']}:{valid_model['model_provider']['provider_name']}")
data = marshal(datasets, dataset_detail_fields)
for item in data:
if item['indexing_technique'] == 'high_quality':
item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
if item_model in model_names:
item['embedding_available'] = True
else:
item['embedding_available'] = False
else:
item['embedding_available'] = True
response = {
'data': marshal(datasets, dataset_detail_fields),
'data': data,
'has_more': len(datasets) == limit,
'limit': limit,
'total': total,
@@ -119,20 +123,40 @@ class DatasetApi(Resource):
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
try:
DatasetService.check_dataset_permission(
dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
return marshal(dataset, dataset_detail_fields), 200
data = marshal(dataset, dataset_detail_fields)
# check embedding setting
provider_service = ProviderService()
# get valid model list
valid_model_list = provider_service.get_valid_model_list(current_user.current_tenant_id,
ModelType.EMBEDDINGS.value)
model_names = []
for valid_model in valid_model_list:
model_names.append(f"{valid_model['model_name']}:{valid_model['model_provider']['provider_name']}")
if data['indexing_technique'] == 'high_quality':
item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
if item_model in model_names:
data['embedding_available'] = True
else:
data['embedding_available'] = False
else:
data['embedding_available'] = True
return data, 200
@setup_required
@login_required
@account_initialization_required
def patch(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False,
@@ -217,33 +241,62 @@ class DatasetIndexingEstimateApi(Resource):
@login_required
@account_initialization_required
def post(self):
segment_rule = request.get_json()
file_detail = db.session.query(UploadFile).filter(
UploadFile.tenant_id == current_user.current_tenant_id,
UploadFile.id == segment_rule["file_id"]
).first()
parser = reqparse.RequestParser()
parser.add_argument('info_list', type=dict, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
parser.add_argument('indexing_technique', type=str, required=True, nullable=True, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('dataset_id', type=str, required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
args = parser.parse_args()
# validate args
DocumentService.estimate_args_validate(args)
if args['info_list']['data_source_type'] == 'upload_file':
file_ids = args['info_list']['file_info_list']['file_ids']
file_details = db.session.query(UploadFile).filter(
UploadFile.tenant_id == current_user.current_tenant_id,
UploadFile.id.in_(file_ids)
).all()
if file_detail is None:
raise NotFound("File not found.")
if file_details is None:
raise NotFound("File not found.")
indexing_runner = IndexingRunner()
response = indexing_runner.indexing_estimate(file_detail, segment_rule['process_rule'])
indexing_runner = IndexingRunner()
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
args['process_rule'], args['doc_form'],
args['doc_language'], args['dataset_id'],
args['indexing_technique'])
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
elif args['info_list']['data_source_type'] == 'notion_import':
indexing_runner = IndexingRunner()
try:
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
args['info_list']['notion_info_list'],
args['process_rule'], args['doc_form'],
args['doc_language'], args['dataset_id'],
args['indexing_technique'])
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
else:
raise ValueError('Data source type not supported.')
return response, 200
class DatasetRelatedAppListApi(Resource):
app_detail_kernel_fields = {
'id': fields.String,
'name': fields.String,
'mode': fields.String,
'icon': fields.String,
'icon_background': fields.String,
}
related_app_list = {
'data': fields.List(fields.Nested(app_detail_kernel_fields)),
'total': fields.Integer,
}
@setup_required
@login_required
@@ -274,8 +327,121 @@
}, 200
class DatasetIndexingStatusApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id):
dataset_id = str(dataset_id)
documents = db.session.query(Document).filter(
Document.dataset_id == dataset_id,
Document.tenant_id == current_user.current_tenant_id
).all()
documents_status = []
for document in documents:
completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
document.completed_segments = completed_segments
document.total_segments = total_segments
documents_status.append(marshal(document, document_status_fields))
data = {
'data': documents_status
}
return data
class DatasetApiKeyApi(Resource):
max_keys = 10
token_prefix = 'dataset-'
resource_type = 'dataset'
@setup_required
@login_required
@account_initialization_required
@marshal_with(api_key_list)
def get(self):
keys = db.session.query(ApiToken). \
filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id). \
all()
return {"items": keys}
@setup_required
@login_required
@account_initialization_required
@marshal_with(api_key_fields)
def post(self):
# The current user's role in the tenant-account join table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
current_key_count = db.session.query(ApiToken). \
filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id). \
count()
if current_key_count >= self.max_keys:
flask_restful.abort(
400,
message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
code='max_keys_exceeded'
)
key = ApiToken.generate_api_key(self.token_prefix, 24)
api_token = ApiToken()
api_token.tenant_id = current_user.current_tenant_id
api_token.token = key
api_token.type = self.resource_type
db.session.add(api_token)
db.session.commit()
return api_token, 200
class DatasetApiDeleteApi(Resource):
resource_type = 'dataset'
@setup_required
@login_required
@account_initialization_required
def delete(self, api_key_id):
api_key_id = str(api_key_id)
# The current user's role in the tenant-account join table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
key = db.session.query(ApiToken). \
filter(ApiToken.tenant_id == current_user.current_tenant_id, ApiToken.type == self.resource_type,
ApiToken.id == api_key_id). \
first()
if key is None:
flask_restful.abort(404, message='API key not found')
db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
db.session.commit()
return {'result': 'success'}, 204
class DatasetApiBaseUrlApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
return {
'api_base_url': (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
}
api.add_resource(DatasetListApi, '/datasets')
api.add_resource(DatasetApi, '/datasets/<uuid:dataset_id>')
api.add_resource(DatasetQueryApi, '/datasets/<uuid:dataset_id>/queries')
api.add_resource(DatasetIndexingEstimateApi, '/datasets/file-indexing-estimate')
api.add_resource(DatasetIndexingEstimateApi, '/datasets/indexing-estimate')
api.add_resource(DatasetRelatedAppListApi, '/datasets/<uuid:dataset_id>/related-apps')
api.add_resource(DatasetIndexingStatusApi, '/datasets/<uuid:dataset_id>/indexing-status')
api.add_resource(DatasetApiKeyApi, '/datasets/api-keys')
api.add_resource(DatasetApiDeleteApi, '/datasets/api-keys/<uuid:api_key_id>')
api.add_resource(DatasetApiBaseUrlApi, '/datasets/api-base-info')
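DatasetListApi.get and DatasetApi.get now duplicate the embedding-availability check. A sketch of a shared helper built from the same ProviderService call; a hypothetical refactor, not part of the diff:

from core.model_providers.models.entity.model_params import ModelType
from services.provider_service import ProviderService

def annotate_embedding_available(items, tenant_id: str) -> None:
    valid_model_list = ProviderService().get_valid_model_list(
        tenant_id, ModelType.EMBEDDINGS.value)
    available = {
        f"{m['model_name']}:{m['model_provider']['provider_name']}"
        for m in valid_model_list
    }
    for item in items:
        if item['indexing_technique'] == 'high_quality':
            key = f"{item['embedding_model']}:{item['embedding_model_provider']}"
            item['embedding_available'] = key in available
        else:
            item['embedding_available'] = True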

View File

@@ -1,9 +1,10 @@
# -*- coding:utf-8 -*-
import random
from datetime import datetime
from typing import List
from flask import request
from flask_login import login_required, current_user
from flask import request, current_app
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import desc, asc
from werkzeug.exceptions import NotFound, Forbidden
@@ -17,9 +18,12 @@ from controllers.console.datasets.error import DocumentAlreadyFinishedError, Inv
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from fields.document_fields import document_with_segments_fields, document_fields, \
dataset_and_document_fields, document_status_fields
from extensions.ext_database import db
from models.dataset import DatasetProcessRule, Dataset
from models.dataset import Document, DocumentSegment
@@ -28,39 +32,6 @@ from services.dataset_service import DocumentService, DatasetService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
dataset_fields = {
'id': fields.String,
'name': fields.String,
'description': fields.String,
'permission': fields.String,
'data_source_type': fields.String,
'indexing_technique': fields.String,
'created_by': fields.String,
'created_at': TimestampField,
}
document_fields = {
'id': fields.String,
'position': fields.Integer,
'data_source_type': fields.String,
'data_source_info': fields.Raw(attribute='data_source_info_dict'),
'dataset_process_rule_id': fields.String,
'name': fields.String,
'created_from': fields.String,
'created_by': fields.String,
'created_at': TimestampField,
'tokens': fields.Integer,
'indexing_status': fields.String,
'error': fields.String,
'enabled': fields.Boolean,
'disabled_at': TimestampField,
'disabled_by': fields.String,
'archived': fields.Boolean,
'display_status': fields.String,
'word_count': fields.Integer,
'hit_count': fields.Integer,
}
class DocumentResource(Resource):
def get_document(self, dataset_id: str, document_id: str) -> Document:
@@ -83,6 +54,23 @@
return document
def get_batch_documents(self, dataset_id: str, batch: str) -> List[Document]:
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
try:
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
documents = DocumentService.get_batch_documents(dataset_id, batch)
if not documents:
raise NotFound('Documents not found.')
return documents
class GetProcessRuleApi(Resource):
@setup_required
@@ -92,6 +80,10 @@
req_data = request.args
document_id = req_data.get('document_id')
# get default rules
mode = DocumentService.DEFAULT_RULES['mode']
rules = DocumentService.DEFAULT_RULES['rules']
if document_id:
# get the latest process rule
document = Document.query.get_or_404(document_id)
@@ -112,11 +104,9 @@
order_by(DatasetProcessRule.created_at.desc()). \
limit(1). \
one_or_none()
mode = dataset_process_rule.mode
rules = dataset_process_rule.rules_dict
else:
mode = DocumentService.DEFAULT_RULES['mode']
rules = DocumentService.DEFAULT_RULES['rules']
if dataset_process_rule:
mode = dataset_process_rule.mode
rules = dataset_process_rule.rules_dict
return {
'mode': mode,
@@ -132,9 +122,9 @@ class DatasetDocumentListApi(Resource):
dataset_id = str(dataset_id)
page = request.args.get('page', default=1, type=int)
limit = request.args.get('limit', default=20, type=int)
search = request.args.get('search', default=None, type=str)
search = request.args.get('keyword', default=None, type=str)
sort = request.args.get('sort', default='-created_at', type=str)
fetch = request.args.get('fetch', default=False, type=bool)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
@@ -173,9 +163,20 @@ class DatasetDocumentListApi(Resource):
paginated_documents = query.paginate(
page=page, per_page=limit, max_per_page=100, error_out=False)
documents = paginated_documents.items
if fetch:
for document in documents:
completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
document.completed_segments = completed_segments
document.total_segments = total_segments
data = marshal(documents, document_with_segments_fields)
else:
data = marshal(documents, document_fields)
response = {
'data': marshal(documents, document_fields),
'data': data,
'has_more': len(documents) == limit,
'limit': limit,
'total': paginated_documents.total,
@@ -184,10 +185,15 @@
return response
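The fetch branch above issues two COUNT queries per document on the page. A single grouped query can collect both counts at once, using the fact that SQL's COUNT(column) skips NULL values; an optimization sketch, not part of the diff:

from sqlalchemy import func

def segment_counts(document_ids):
    rows = db.session.query(
        DocumentSegment.document_id,
        func.count(DocumentSegment.id).label('total_segments'),
        # COUNT(completed_at) skips NULLs, i.e. counts completed segments only
        func.count(DocumentSegment.completed_at).label('completed_segments'),
    ).filter(
        DocumentSegment.document_id.in_(document_ids),
        DocumentSegment.status != 're_segment'
    ).group_by(DocumentSegment.document_id).all()
    return {row.document_id: (row.completed_segments, row.total_segments) for row in rows}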
documents_and_batch_fields = {
'documents': fields.List(fields.Nested(document_fields)),
'batch': fields.String
}
@setup_required
@login_required
@account_initialization_required
@marshal_with(document_fields)
@marshal_with(documents_and_batch_fields)
def post(self, dataset_id):
dataset_id = str(dataset_id)
@@ -208,9 +214,13 @@
parser = reqparse.RequestParser()
parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
location='json')
parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
parser.add_argument('data_source', type=dict, required=False, location='json')
parser.add_argument('process_rule', type=dict, required=False, location='json')
parser.add_argument('duplicate', type=bool, nullable=False, location='json')
parser.add_argument('original_document_id', type=str, required=False, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
args = parser.parse_args()
if not dataset.indexing_technique and not args['indexing_technique']:
@@ -220,22 +230,21 @@
DocumentService.document_create_args_validate(args)
try:
document = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
return document
return {
'documents': documents,
'batch': batch
}
class DatasetInitApi(Resource):
dataset_and_document_fields = {
'dataset': fields.Nested(dataset_fields),
'document': fields.Nested(document_fields)
}
@setup_required
@login_required
@@ -251,19 +260,33 @@
nullable=False, location='json')
parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
args = parser.parse_args()
if args['indexing_technique'] == 'high_quality':
try:
ModelFactory.get_embedding_model(
tenant_id=current_user.current_tenant_id
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# validate args
DocumentService.document_create_args_validate(args)
try:
dataset, document = DocumentService.save_document_without_dataset_id(
dataset, documents, batch = DocumentService.save_document_without_dataset_id(
tenant_id=current_user.current_tenant_id,
document_data=args,
account=current_user
)
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -271,7 +294,8 @@ class DatasetInitApi(Resource):
response = {
'dataset': dataset,
'document': document
'documents': documents,
'batch': batch
}
return response
@ -316,26 +340,134 @@ class DocumentIndexingEstimateApi(DocumentResource):
raise NotFound('File not found.')
indexing_runner = IndexingRunner()
response = indexing_runner.indexing_estimate(file, data_process_rule_dict)
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
data_process_rule_dict, None,
'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
return response
class DocumentBatchIndexingEstimateApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id, batch):
dataset_id = str(dataset_id)
batch = str(batch)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
documents = self.get_batch_documents(dataset_id, batch)
response = {
"tokens": 0,
"total_price": 0,
"currency": "USD",
"total_segments": 0,
"preview": []
}
if not documents:
return response
data_process_rule = documents[0].dataset_process_rule
data_process_rule_dict = data_process_rule.to_dict()
info_list = []
for document in documents:
if document.indexing_status in ['completed', 'error']:
raise DocumentAlreadyFinishedError()
data_source_info = document.data_source_info_dict
# format document files info
if data_source_info and 'upload_file_id' in data_source_info:
file_id = data_source_info['upload_file_id']
info_list.append(file_id)
# format document notion info
elif data_source_info and 'notion_workspace_id' in data_source_info and 'notion_page_id' in data_source_info:
pages = []
page = {
'page_id': data_source_info['notion_page_id'],
'type': data_source_info['type']
}
pages.append(page)
notion_info = {
'workspace_id': data_source_info['notion_workspace_id'],
'pages': pages
}
info_list.append(notion_info)
if dataset.data_source_type == 'upload_file':
file_details = db.session.query(UploadFile).filter(
UploadFile.tenant_id == current_user.current_tenant_id,
UploadFile.id.in_(info_list)
).all()
if not file_details:
raise NotFound("File not found.")
indexing_runner = IndexingRunner()
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
data_process_rule_dict, None,
'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
elif dataset.data_source_type == 'notion_import':
indexing_runner = IndexingRunner()
try:
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
info_list,
data_process_rule_dict,
None, 'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
else:
raise ValueError('Data source type is not supported.')
return response
class DocumentBatchIndexingStatusApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def get(self, dataset_id, batch):
dataset_id = str(dataset_id)
batch = str(batch)
documents = self.get_batch_documents(dataset_id, batch)
documents_status = []
for document in documents:
completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
document.completed_segments = completed_segments
document.total_segments = total_segments
if document.is_paused:
document.indexing_status = 'paused'
documents_status.append(marshal(document, document_status_fields))
data = {
'data': documents_status
}
return data
class DocumentIndexingStatusApi(DocumentResource):
document_status_fields = {
'id': fields.String,
'indexing_status': fields.String,
'processing_started_at': TimestampField,
'parsing_completed_at': TimestampField,
'cleaning_completed_at': TimestampField,
'splitting_completed_at': TimestampField,
'completed_at': TimestampField,
'paused_at': TimestampField,
'error': fields.String,
'stopped_at': TimestampField,
'completed_segments': fields.Integer,
'total_segments': fields.Integer,
}
@setup_required
@login_required
@ -347,16 +479,19 @@ class DocumentIndexingStatusApi(DocumentResource):
completed_segments = DocumentSegment.query \
.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.document_id == str(document_id)) \
DocumentSegment.document_id == str(document_id),
DocumentSegment.status != 're_segment') \
.count()
total_segments = DocumentSegment.query \
.filter_by(document_id=str(document_id)) \
.filter(DocumentSegment.document_id == str(document_id),
DocumentSegment.status != 're_segment') \
.count()
document.completed_segments = completed_segments
document.total_segments = total_segments
return marshal(document, self.document_status_fields)
if document.is_paused:
document.indexing_status = 'paused'
return marshal(document, document_status_fields)
class DocumentDetailApi(DocumentResource):
@ -405,9 +540,10 @@ class DocumentDetailApi(DocumentResource):
'disabled_by': document.disabled_by,
'archived': document.archived,
'segment_count': document.segment_count,
'average_segment_length': document.average_segment_length,
'hit_count': document.hit_count,
'display_status': document.display_status
'display_status': document.display_status,
'doc_form': document.doc_form
}
else:
process_rules = DatasetService.get_process_rules(dataset_id)
@ -425,7 +561,7 @@ class DocumentDetailApi(DocumentResource):
'created_at': document.created_at.timestamp(),
'tokens': document.tokens,
'indexing_status': document.indexing_status,
'completed_at': int(document.completed_at.timestamp())if document.completed_at else None,
'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
'indexing_latency': document.indexing_latency,
'error': document.error,
@ -438,7 +574,8 @@ class DocumentDetailApi(DocumentResource):
'segment_count': document.segment_count,
'average_segment_length': document.average_segment_length,
'hit_count': document.hit_count,
'display_status': document.display_status
'display_status': document.display_status,
'doc_form': document.doc_form
}
return response, 200
@ -487,6 +624,12 @@ class DocumentDeleteApi(DocumentResource):
def delete(self, dataset_id, document_id):
dataset_id = str(dataset_id)
document_id = str(document_id)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
document = self.get_document(dataset_id, document_id)
try:
@ -527,11 +670,13 @@ class DocumentMetadataApi(DocumentResource):
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]
document.doc_metadata = {}
for key, value_type in metadata_schema.items():
value = doc_metadata.get(key)
if value is not None and isinstance(value, value_type):
document.doc_metadata[key] = value
if doc_type == 'others':
document.doc_metadata = doc_metadata
else:
for key, value_type in metadata_schema.items():
value = doc_metadata.get(key)
if value is not None and isinstance(value, value_type):
document.doc_metadata[key] = value
document.doc_type = doc_type
document.updated_at = datetime.utcnow()
@ -547,6 +692,12 @@ class DocumentStatusApi(DocumentResource):
def patch(self, dataset_id, document_id, action):
dataset_id = str(dataset_id)
document_id = str(document_id)
dataset = DatasetService.get_dataset(dataset_id)
if dataset is None:
raise NotFound("Dataset not found.")
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
document = self.get_document(dataset_id, document_id)
# The role of the current user in the ta table must be admin or owner
@ -576,6 +727,8 @@ class DocumentStatusApi(DocumentResource):
return {'result': 'success'}, 200
elif action == "disable":
if not document.completed_at or document.indexing_status != 'completed':
raise InvalidActionError('Document is not completed.')
if not document.enabled:
raise InvalidActionError('Document already disabled.')
@ -608,12 +761,40 @@ class DocumentStatusApi(DocumentResource):
remove_document_from_index_task.delay(document_id)
return {'result': 'success'}, 200
elif action == "un_archive":
if not document.archived:
raise InvalidActionError('Document is not archived.')
# check document limit
if current_app.config['EDITION'] == 'CLOUD':
documents_count = DocumentService.get_tenant_documents_count()
total_count = documents_count + 1
tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
if total_count > tenant_document_count:
raise ValueError(f"All your documents have overed limit {tenant_document_count}.")
document.archived = False
document.archived_at = None
document.archived_by = None
document.updated_at = datetime.utcnow()
db.session.commit()
# Set cache to prevent indexing the same document multiple times
redis_client.setex(indexing_cache_key, 600, 1)
add_document_to_index_task.delay(document_id)
return {'result': 'success'}, 200
else:
raise InvalidActionError()
class DocumentPauseApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def patch(self, dataset_id, document_id):
"""pause document."""
dataset_id = str(dataset_id)
@ -643,6 +824,9 @@ class DocumentPauseApi(DocumentResource):
class DocumentRecoverApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def patch(self, dataset_id, document_id):
"""recover document."""
dataset_id = str(dataset_id)
@ -668,6 +852,21 @@ class DocumentRecoverApi(DocumentResource):
return {'result': 'success'}, 204
class DocumentLimitApi(DocumentResource):
@setup_required
@login_required
@account_initialization_required
def get(self):
"""get document limit"""
documents_count = DocumentService.get_tenant_documents_count()
tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
return {
'documents_count': documents_count,
'documents_limit': tenant_document_count
}, 200
api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
'/datasets/<uuid:dataset_id>/documents')
@ -675,6 +874,10 @@ api.add_resource(DatasetInitApi,
'/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
'/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
'/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
@ -689,3 +892,4 @@ api.add_resource(DocumentStatusApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
api.add_resource(DocumentLimitApi, '/datasets/limit')
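Taken together, the new routes support a create-then-poll flow: the POST returns the created documents plus a batch id, and the batch routes report aggregate progress. A hedged sketch with the same placeholder host and token; the payload shapes below are assumptions, not part of this diff:

import time
import requests

BASE = 'http://localhost:5001/console/api'  # assumed host and prefix
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # placeholder

payload = {
    'indexing_technique': 'high_quality',
    'process_rule': {'mode': 'automatic'},   # assumed minimal rule
    'data_source': {'type': 'upload_file'},  # full shape elided; validated server-side
}
created = requests.post(f'{BASE}/datasets/<dataset_id>/documents',
                        json=payload, headers=HEADERS).json()
batch = created['batch']

# Poll until every document in the batch has finished (or failed) indexing.
while True:
    status = requests.get(
        f'{BASE}/datasets/<dataset_id>/batch/{batch}/indexing-status',
        headers=HEADERS).json()
    if all(d['indexing_status'] in ('completed', 'error') for d in status['data']):
        break
    time.sleep(2)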

View File

@ -1,52 +1,30 @@
# -*- coding:utf-8 -*-
import uuid
from datetime import datetime
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal
from flask import request
from flask_login import current_user
from flask_restful import Resource, reqparse, marshal
from werkzeug.exceptions import NotFound, Forbidden
import services
from controllers.console import api
from controllers.console.datasets.error import InvalidActionError
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import InvalidActionError, NoFileUploadedError, TooManyFilesError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from libs.login import login_required
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.segment_fields import segment_fields
from models.dataset import DocumentSegment
from libs.helper import TimestampField
from services.dataset_service import DatasetService, DocumentService
from tasks.add_segment_to_index_task import add_segment_to_index_task
from tasks.remove_segment_from_index_task import remove_segment_from_index_task
segment_fields = {
'id': fields.String,
'position': fields.Integer,
'document_id': fields.String,
'content': fields.String,
'word_count': fields.Integer,
'tokens': fields.Integer,
'keywords': fields.List(fields.String),
'index_node_id': fields.String,
'index_node_hash': fields.String,
'hit_count': fields.Integer,
'enabled': fields.Boolean,
'disabled_at': TimestampField,
'disabled_by': fields.String,
'status': fields.String,
'created_by': fields.String,
'created_at': TimestampField,
'indexing_at': TimestampField,
'completed_at': TimestampField,
'error': fields.String,
'stopped_at': TimestampField
}
segment_list_response = {
'data': fields.List(fields.Nested(segment_fields)),
'has_more': fields.Boolean,
'limit': fields.Integer
}
from services.dataset_service import DatasetService, DocumentService, SegmentService
from tasks.enable_segment_to_index_task import enable_segment_to_index_task
from tasks.disable_segment_from_index_task import disable_segment_from_index_task
from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task
import pandas as pd
class DatasetDocumentSegmentListApi(Resource):
@ -78,12 +56,14 @@ class DatasetDocumentSegmentListApi(Resource):
parser.add_argument('hit_count_gte', type=int,
default=None, location='args')
parser.add_argument('enabled', type=str, default='all', location='args')
parser.add_argument('keyword', type=str, default=None, location='args')
args = parser.parse_args()
last_id = args['last_id']
limit = min(args['limit'], 100)
status_list = args['status']
hit_count_gte = args['hit_count_gte']
keyword = args['keyword']
query = DocumentSegment.query.filter(
DocumentSegment.document_id == str(document_id),
@ -104,6 +84,9 @@ class DatasetDocumentSegmentListApi(Resource):
if hit_count_gte is not None:
query = query.filter(DocumentSegment.hit_count >= hit_count_gte)
if keyword:
query = query.where(DocumentSegment.content.ilike(f'%{keyword}%'))
if args['enabled'].lower() != 'all':
if args['enabled'].lower() == 'true':
query = query.filter(DocumentSegment.enabled == True)
@ -120,6 +103,7 @@ class DatasetDocumentSegmentListApi(Resource):
return {
'data': marshal(segments, segment_fields),
'doc_form': document.doc_form,
'has_more': has_more,
'limit': limit,
'total': total
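The added keyword argument turns the segment list into a simple substring search (ILIKE on segment content). A usage sketch under the same placeholder assumptions:

import requests

BASE = 'http://localhost:5001/console/api'  # assumed host and prefix
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # placeholder

resp = requests.get(
    f'{BASE}/datasets/<dataset_id>/documents/<document_id>/segments',
    params={'limit': 20, 'enabled': 'true', 'keyword': 'refund'},
    headers=HEADERS)
body = resp.json()
print(body['doc_form'], len(body['data']))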
@ -135,7 +119,8 @@ class DatasetDocumentSegmentApi(Resource):
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
@ -144,6 +129,20 @@ class DatasetDocumentSegmentApi(Resource):
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
if dataset.indexing_technique == 'high_quality':
# check embedding model setting
try:
ModelFactory.get_embedding_model(
tenant_id=current_user.current_tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
segment = DocumentSegment.query.filter(
DocumentSegment.id == str(segment_id),
@ -175,7 +174,7 @@ class DatasetDocumentSegmentApi(Resource):
# Set cache to prevent indexing the same segment multiple times
redis_client.setex(indexing_cache_key, 600, 1)
add_segment_to_index_task.delay(segment.id)
enable_segment_to_index_task.delay(segment.id)
return {'result': 'success'}, 200
elif action == "disable":
@ -190,14 +189,234 @@ class DatasetDocumentSegmentApi(Resource):
# Set cache to prevent indexing the same segment multiple times
redis_client.setex(indexing_cache_key, 600, 1)
remove_segment_from_index_task.delay(segment.id)
disable_segment_from_index_task.delay(segment.id)
return {'result': 'success'}, 200
else:
raise InvalidActionError()
class DatasetDocumentSegmentAddApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, dataset_id, document_id):
# check dataset
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound('Document not found.')
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
# check embedding model setting
if dataset.indexing_technique == 'high_quality':
try:
ModelFactory.get_embedding_model(
tenant_id=current_user.current_tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
try:
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
# validate args
parser = reqparse.RequestParser()
parser.add_argument('content', type=str, required=True, nullable=False, location='json')
parser.add_argument('answer', type=str, required=False, nullable=True, location='json')
parser.add_argument('keywords', type=list, required=False, nullable=True, location='json')
args = parser.parse_args()
SegmentService.segment_create_args_validate(args, document)
segment = SegmentService.create_segment(args, document, dataset)
return {
'data': marshal(segment, segment_fields),
'doc_form': document.doc_form
}, 200
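Creating a single segment through the new endpoint looks roughly like this (path from the registration at the end of this file; answer only applies to qa_model documents, and the example values are invented):

import requests

BASE = 'http://localhost:5001/console/api'  # assumed host and prefix
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # placeholder

resp = requests.post(
    f'{BASE}/datasets/<dataset_id>/documents/<document_id>/segment',
    json={'content': 'Refunds are issued within 14 days.',  # invented example
          'answer': None,                  # set only for qa_model documents
          'keywords': ['refund', 'policy']},
    headers=HEADERS)
print(resp.json()['data']['id'])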
class DatasetDocumentSegmentUpdateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def patch(self, dataset_id, document_id, segment_id):
# check dataset
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound('Document not found.')
if dataset.indexing_technique == 'high_quality':
# check embedding model setting
try:
ModelFactory.get_embedding_model(
tenant_id=current_user.current_tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# check segment
segment_id = str(segment_id)
segment = DocumentSegment.query.filter(
DocumentSegment.id == str(segment_id),
DocumentSegment.tenant_id == current_user.current_tenant_id
).first()
if not segment:
raise NotFound('Segment not found.')
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
# validate args
parser = reqparse.RequestParser()
parser.add_argument('content', type=str, required=True, nullable=False, location='json')
parser.add_argument('answer', type=str, required=False, nullable=True, location='json')
parser.add_argument('keywords', type=list, required=False, nullable=True, location='json')
args = parser.parse_args()
SegmentService.segment_create_args_validate(args, document)
segment = SegmentService.update_segment(args, segment, document, dataset)
return {
'data': marshal(segment, segment_fields),
'doc_form': document.doc_form
}, 200
@setup_required
@login_required
@account_initialization_required
def delete(self, dataset_id, document_id, segment_id):
# check dataset
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound('Document not found.')
# check segment
segment_id = str(segment_id)
segment = DocumentSegment.query.filter(
DocumentSegment.id == str(segment_id),
DocumentSegment.tenant_id == current_user.current_tenant_id
).first()
if not segment:
raise NotFound('Segment not found.')
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
SegmentService.delete_segment(segment, document, dataset)
return {'result': 'success'}, 200
class DatasetDocumentSegmentBatchImportApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, dataset_id, document_id):
# check dataset
dataset_id = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound('Document not found.')
# check file
if 'file' not in request.files:
raise NoFileUploadedError()
if len(request.files) > 1:
raise TooManyFilesError()
# get file from request (safe now that presence has been checked)
file = request.files['file']
# check file type
if not file.filename.endswith('.csv'):
raise ValueError("Invalid file type. Only CSV files are allowed")
try:
# pandas consumes the first row as the header, so data rows start below it
df = pd.read_csv(file)
result = []
for index, row in df.iterrows():
if document.doc_form == 'qa_model':
data = {'content': row[0], 'answer': row[1]}
else:
data = {'content': row[0]}
result.append(data)
if len(result) == 0:
raise ValueError("The CSV file is empty.")
# async job
job_id = str(uuid.uuid4())
indexing_cache_key = 'segment_batch_import_{}'.format(str(job_id))
# send batch add segments task
redis_client.setnx(indexing_cache_key, 'waiting')
batch_create_segment_to_index_task.delay(str(job_id), result, dataset_id, document_id,
current_user.current_tenant_id, current_user.id)
except Exception as e:
return {'error': str(e)}, 500
return {
'job_id': job_id,
'job_status': 'waiting'
}, 200
@setup_required
@login_required
@account_initialization_required
def get(self, job_id):
job_id = str(job_id)
indexing_cache_key = 'segment_batch_import_{}'.format(job_id)
cache_result = redis_client.get(indexing_cache_key)
if cache_result is None:
raise ValueError("The job is not exist.")
return {
'job_id': job_id,
'job_status': cache_result.decode()
}, 200
api.add_resource(DatasetDocumentSegmentListApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments')
api.add_resource(DatasetDocumentSegmentApi,
'/datasets/<uuid:dataset_id>/segments/<uuid:segment_id>/<string:action>')
api.add_resource(DatasetDocumentSegmentAddApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segment')
api.add_resource(DatasetDocumentSegmentUpdateApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/<uuid:segment_id>')
api.add_resource(DatasetDocumentSegmentBatchImportApi,
'/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/batch_import',
'/datasets/batch_import_status/<uuid:job_id>')
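The batch import pairs a CSV upload with an async job keyed in Redis; a sketch of the round trip (placeholder host and token, invented CSV content; the first row is treated as the header):

import io
import time
import requests

BASE = 'http://localhost:5001/console/api'  # assumed host and prefix
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # placeholder

# Column 0 is the segment content; column 1 is the answer for qa_model docs.
csv_body = 'content,answer\nWhat is the refund window?,14 days\n'
resp = requests.post(
    f'{BASE}/datasets/<dataset_id>/documents/<document_id>/segments/batch_import',
    files={'file': ('segments.csv', io.BytesIO(csv_body.encode()), 'text/csv')},
    headers=HEADERS)
job_id = resp.json()['job_id']

# The status route reads the Redis key until the task moves it past 'waiting'.
while True:
    job = requests.get(f'{BASE}/datasets/batch_import_status/{job_id}',
                       headers=HEADERS).json()
    if job['job_status'] != 'waiting':
        break
    time.sleep(2)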

View File

@ -1,45 +1,39 @@
import datetime
import hashlib
import tempfile
import time
import uuid
from pathlib import Path
from cachetools import TTLCache
from flask import request, current_app
from flask_login import login_required, current_user
from flask_restful import Resource, marshal_with, fields
from werkzeug.exceptions import NotFound
import services
from libs.login import login_required
from flask_restful import Resource, marshal_with
from controllers.console import api
from controllers.console.datasets.error import NoFileUploadedError, TooManyFilesError, FileTooLargeError, \
UnsupportedFileTypeError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.index.readers.html_parser import HTMLParser
from core.index.readers.pdf_parser import PDFParser
from extensions.ext_storage import storage
from libs.helper import TimestampField
from extensions.ext_database import db
from models.model import UploadFile
from fields.file_fields import upload_config_fields, file_fields
from services.file_service import FileService
cache = TTLCache(maxsize=None, ttl=30)
FILE_SIZE_LIMIT = 15 * 1024 * 1024 # 15MB
ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm']
ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm', 'xlsx', 'docx', 'csv']
PREVIEW_WORDS_LIMIT = 3000
class FileApi(Resource):
file_fields = {
'id': fields.String,
'name': fields.String,
'size': fields.Integer,
'extension': fields.String,
'mime_type': fields.String,
'created_by': fields.String,
'created_at': TimestampField,
}
@setup_required
@login_required
@account_initialization_required
@marshal_with(upload_config_fields)
def get(self):
file_size_limit = current_app.config.get("UPLOAD_FILE_SIZE_LIMIT")
batch_count_limit = current_app.config.get("UPLOAD_FILE_BATCH_LIMIT")
return {
'file_size_limit': file_size_limit,
'batch_count_limit': batch_count_limit
}, 200
@setup_required
@login_required
@ -56,44 +50,13 @@ class FileApi(Resource):
if len(request.files) > 1:
raise TooManyFilesError()
file_content = file.read()
file_size = len(file_content)
if file_size > FILE_SIZE_LIMIT:
message = "({file_size} > {FILE_SIZE_LIMIT})"
raise FileTooLargeError(message)
extension = file.filename.split('.')[-1]
if extension not in ALLOWED_EXTENSIONS:
try:
upload_file = FileService.upload_file(file)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
# user uuid as file name
file_uuid = str(uuid.uuid4())
file_key = 'upload_files/' + current_user.current_tenant_id + '/' + file_uuid + '.' + extension
# save file to storage
storage.save(file_key, file_content)
# save file to db
config = current_app.config
upload_file = UploadFile(
tenant_id=current_user.current_tenant_id,
storage_type=config['STORAGE_TYPE'],
key=file_key,
name=file.filename,
size=file_size,
extension=extension,
mime_type=file.mimetype,
created_by=current_user.id,
created_at=datetime.datetime.utcnow(),
used=False,
hash=hashlib.sha3_256(file_content).hexdigest()
)
db.session.add(upload_file)
db.session.commit()
return upload_file, 201
@ -103,43 +66,7 @@ class FilePreviewApi(Resource):
@account_initialization_required
def get(self, file_id):
file_id = str(file_id)
key = file_id + request.path
cached_response = cache.get(key)
if cached_response and time.time() - cached_response['timestamp'] < cache.ttl:
return cached_response['response']
upload_file = db.session.query(UploadFile) \
.filter(UploadFile.id == file_id) \
.first()
if not upload_file:
raise NotFound("File not found")
# extract text from file
extension = upload_file.extension
if extension not in ALLOWED_EXTENSIONS:
raise UnsupportedFileTypeError()
with tempfile.TemporaryDirectory() as temp_dir:
suffix = Path(upload_file.key).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
storage.download(upload_file.key, filepath)
if extension == 'pdf':
parser = PDFParser({'upload_file': upload_file})
text = parser.parse_file(Path(filepath))
elif extension in ['html', 'htm']:
# Use BeautifulSoup to extract text
parser = HTMLParser()
text = parser.parse_file(Path(filepath))
else:
# ['txt', 'markdown', 'md']
with open(filepath, "rb") as fp:
data = fp.read()
text = data.decode(encoding='utf-8').strip() if data else ''
text = text[0:PREVIEW_WORDS_LIMIT] if text else ''
text = FileService.get_file_preview(file_id)
return {'content': text}

View File

@ -1,7 +1,8 @@
import logging
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, marshal, fields
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal
from werkzeug.exceptions import InternalServerError, NotFound, Forbidden
import services
@ -11,48 +12,12 @@ from controllers.console.app.error import ProviderNotInitializeError, ProviderQu
from controllers.console.datasets.error import HighQualityDatasetOnlyError, DatasetNotInitializedError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import TimestampField
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
LLMBadRequestError
from fields.hit_testing_fields import hit_testing_record_fields
from services.dataset_service import DatasetService
from services.hit_testing_service import HitTestingService
document_fields = {
'id': fields.String,
'data_source_type': fields.String,
'name': fields.String,
'doc_type': fields.String,
}
segment_fields = {
'id': fields.String,
'position': fields.Integer,
'document_id': fields.String,
'content': fields.String,
'word_count': fields.Integer,
'tokens': fields.Integer,
'keywords': fields.List(fields.String),
'index_node_id': fields.String,
'index_node_hash': fields.String,
'hit_count': fields.Integer,
'enabled': fields.Boolean,
'disabled_at': TimestampField,
'disabled_by': fields.String,
'status': fields.String,
'created_by': fields.String,
'created_at': TimestampField,
'indexing_at': TimestampField,
'completed_at': TimestampField,
'error': fields.String,
'stopped_at': TimestampField,
'document': fields.Nested(document_fields),
}
hit_testing_record_fields = {
'segment': fields.Nested(segment_fields),
'score': fields.Float,
'tsne_position': fields.Raw
}
class HitTestingApi(Resource):
@ -95,12 +60,18 @@ class HitTestingApi(Resource):
return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
except services.errors.index.IndexNotInitializedError:
raise DatasetNotInitializedError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ValueError as e:
raise ValueError(str(e))
except Exception as e:
logging.exception("Hit testing failed.")
raise InternalServerError(str(e))

View File

@ -18,3 +18,9 @@ class AccountNotLinkTenantError(BaseHTTPException):
error_code = 'account_not_link_tenant'
description = "Account not link tenant."
code = 403
class AlreadyActivateError(BaseHTTPException):
error_code = 'already_activate'
description = "Auth Token is invalid or account already activated, please check again."
code = 403

View File

@ -0,0 +1,66 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.console import api
from controllers.console.app.error import AppUnavailableError, ProviderNotInitializeError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError, \
NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.console.explore.wraps import InstalledAppResource
from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
from models.model import AppModelConfig
class ChatAudioApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(ChatAudioApi, '/installed-apps/<uuid:installed_app_id>/audio-to-text', endpoint='installed_app_audio')
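Calling the new speech-to-text route is a plain multipart upload; a sketch with a placeholder host, token, and audio file (accepted formats depend on the configured provider):

import requests

BASE = 'http://localhost:5001/console/api'  # assumed host and prefix
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # placeholder

with open('question.wav', 'rb') as f:  # placeholder audio file
    resp = requests.post(
        f'{BASE}/installed-apps/<installed_app_id>/audio-to-text',
        files={'file': f},
        headers=HEADERS)
print(resp.json())  # transcription payload on success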

View File

@ -15,7 +15,7 @@ from controllers.console.app.error import ConversationCompletedError, AppUnavail
from controllers.console.explore.error import NotCompletionAppError, NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from services.completion_service import CompletionService
@ -31,8 +31,9 @@ class CompletionApi(InstalledAppResource):
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, location='json')
parser.add_argument('query', type=str, location='json', default='')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('retriever_from', type=str, required=False, default='explore_app', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
@ -54,8 +55,8 @@ class CompletionApi(InstalledAppResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -92,6 +93,7 @@ class ChatApi(InstalledAppResource):
parser.add_argument('query', type=str, required=True, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='explore_app', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
@ -113,8 +115,8 @@ class ChatApi(InstalledAppResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -155,8 +157,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:

View File

@ -7,26 +7,12 @@ from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.explore.error import NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import TimestampField, uuid_value
from services.conversation_service import ConversationService
from services.errors.conversation import LastConversationNotExistsError, ConversationNotExistsError
from services.web_conversation_service import WebConversationService
conversation_fields = {
'id': fields.String,
'name': fields.String,
'inputs': fields.Raw,
'status': fields.String,
'introduction': fields.String,
'created_at': TimestampField
}
conversation_infinite_scroll_pagination_fields = {
'limit': fields.Integer,
'has_more': fields.Boolean,
'data': fields.List(fields.Nested(conversation_fields))
}
class ConversationListApi(InstalledAppResource):
@ -65,7 +51,10 @@ class ConversationApi(InstalledAppResource):
raise NotChatAppError()
conversation_id = str(c_id)
ConversationService.delete(app_model, conversation_id, current_user)
try:
ConversationService.delete(app_model, conversation_id, current_user)
except ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
WebConversationService.unpin(app_model, conversation_id, current_user)
return {"result": "success"}, 204
@ -73,7 +62,7 @@ class ConversationApi(InstalledAppResource):
class ConversationRenameApi(InstalledAppResource):
@marshal_with(conversation_fields)
@marshal_with(simple_conversation_fields)
def post(self, installed_app, c_id):
app_model = installed_app.app
if app_model.mode != 'chat':

View File

@ -1,8 +1,9 @@
# -*- coding:utf-8 -*-
from datetime import datetime
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, inputs
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal_with, inputs
from sqlalchemy import and_
from werkzeug.exceptions import NotFound, Forbidden, BadRequest
@ -10,32 +11,10 @@ from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import TimestampField
from fields.installed_app_fields import installed_app_list_fields
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService
app_fields = {
'id': fields.String,
'name': fields.String,
'mode': fields.String,
'icon': fields.String,
'icon_background': fields.String
}
installed_app_fields = {
'id': fields.String,
'app': fields.Nested(app_fields),
'app_owner_tenant_id': fields.String,
'is_pinned': fields.Boolean,
'last_used_at': TimestampField,
'editable': fields.Boolean,
'uninstallable': fields.Boolean,
}
installed_app_list_fields = {
'installed_apps': fields.List(fields.Nested(installed_app_fields))
}
class InstalledAppsListApi(Resource):
@login_required

View File

@ -15,8 +15,9 @@ from controllers.console.app.error import AppMoreLikeThisDisabledError, Provider
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.explore.error import NotCompletionAppError, AppSuggestedQuestionsAfterAnswerDisabledError
from controllers.console.explore.wraps import InstalledAppResource
from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
from fields.message_fields import message_infinite_scroll_pagination_fields
from libs.helper import uuid_value, TimestampField
from services.completion_service import CompletionService
from services.errors.app import MoreLikeThisDisabledError
@ -26,25 +27,6 @@ from services.message_service import MessageService
class MessageListApi(InstalledAppResource):
feedback_fields = {
'rating': fields.String
}
message_fields = {
'id': fields.String,
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'answer': fields.String,
'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
'created_at': TimestampField
}
message_infinite_scroll_pagination_fields = {
'limit': fields.Integer,
'has_more': fields.Boolean,
'data': fields.List(fields.Nested(message_fields))
}
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, installed_app):
@ -107,8 +89,8 @@ class MessageMoreLikeThisApi(InstalledAppResource):
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@ -135,8 +117,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
except MoreLikeThisDisabledError:
yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
@ -174,8 +156,8 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
raise NotFound("Conversation not found")
except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:

View File

@ -4,6 +4,8 @@ from flask_restful import marshal_with, fields
from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource
from models.model import InstalledApp
class AppParameterApi(InstalledAppResource):
"""Resource for app variables."""
@ -21,12 +23,14 @@ class AppParameterApi(InstalledAppResource):
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'retriever_resource': fields.Raw,
'more_like_this': fields.Raw,
'user_input_form': fields.Raw,
}
@marshal_with(parameters_fields)
def get(self, installed_app):
def get(self, installed_app: InstalledApp):
"""Retrieve app parameters."""
app_model = installed_app.app
app_model_config = app_model.app_model_config
@ -35,6 +39,8 @@ class AppParameterApi(InstalledAppResource):
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list
}

View File

@ -1,5 +1,6 @@
# -*- coding:utf-8 -*-
from flask_login import login_required, current_user
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, fields, marshal_with
from sqlalchemy import and_

View File

@ -1,4 +1,5 @@
from flask_login import login_required, current_user
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource
from functools import wraps

View File

@ -1,7 +1,6 @@
# -*- coding:utf-8 -*-
from functools import wraps
import flask_login
from flask import request, current_app
from flask_restful import Resource, reqparse
@ -19,15 +18,16 @@ from .wraps import only_edition_self_hosted
class SetupApi(Resource):
@only_edition_self_hosted
def get(self):
setup_status = get_setup_status()
if setup_status:
return {
'step': 'finished',
'setup_at': setup_status.setup_at.isoformat()
}
return {'step': 'not_start'}
if current_app.config['EDITION'] == 'SELF_HOSTED':
setup_status = get_setup_status()
if setup_status:
return {
'step': 'finished',
'setup_at': setup_status.setup_at.isoformat()
}
return {'step': 'not_start'}
return {'step': 'finished'}
@only_edition_self_hosted
def post(self):
@ -57,9 +57,6 @@ class SetupApi(Resource):
)
setup()
# Login
flask_login.login_user(account)
AccountService.update_last_login(account, request)
return {'result': 'success'}, 201

View File

@ -0,0 +1,66 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.console import api
from controllers.console.app.error import AppUnavailableError, ProviderNotInitializeError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError, \
NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.console.universal_chat.wraps import UniversalChatResource
from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
from models.model import AppModelConfig
class UniversalChatAudioApi(UniversalChatResource):
def post(self, universal_app):
app_model = universal_app
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(UniversalChatAudioApi, '/universal-chat/audio-to-text')

View File

@ -0,0 +1,139 @@
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.universal_chat.wraps import UniversalChatResource
from core.conversation_message_task import PubHandler
from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError
from libs.helper import uuid_value
from services.completion_service import CompletionService
class UniversalChatApi(UniversalChatResource):
def post(self, universal_app):
app_model = universal_app
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, required=True, location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('provider', type=str, required=True, location='json')
parser.add_argument('model', type=str, required=True, location='json')
parser.add_argument('tools', type=list, required=True, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='universal_app', location='json')
args = parser.parse_args()
app_model_config = app_model.app_model_config
# update app model config
args['model_config'] = app_model_config.to_dict()
args['model_config']['model']['name'] = args['model']
args['model_config']['model']['provider'] = args['provider']
args['model_config']['agent_mode']['tools'] = args['tools']
if not args['model_config']['agent_mode']['tools']:
args['model_config']['agent_mode']['tools'] = [
{
"current_datetime": {
"enabled": True
}
}
]
else:
args['model_config']['agent_mode']['tools'].append({
"current_datetime": {
"enabled": True
}
})
args['inputs'] = {}
del args['model']
del args['tools']
try:
response = CompletionService.completion(
app_model=app_model,
user=current_user,
args=args,
from_source='console',
streaming=True,
is_model_config_override=True,
)
return compact_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class UniversalChatStopApi(UniversalChatResource):
def post(self, universal_app, task_id):
PubHandler.stop(current_user, task_id)
return {'result': 'success'}, 200
def compact_response(response: Union[dict | Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
try:
for chunk in response:
yield chunk
except services.errors.conversation.ConversationNotExistsError:
yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
except services.errors.conversation.ConversationCompletedError:
yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
except ValueError as e:
yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
except Exception:
logging.exception("internal server error.")
yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
api.add_resource(UniversalChatApi, '/universal-chat/messages')
api.add_resource(UniversalChatStopApi, '/universal-chat/messages/<string:task_id>/stop')
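The two routes above drive the whole universal-chat loop: POST /universal-chat/messages starts a streamed completion with the model-config override assembled in-line, and POST /universal-chat/messages/<task_id>/stop cancels a running task through PubHandler. A minimal client sketch for consuming the SSE stream, assuming the console blueprint is mounted at /console/api, an authenticated session cookie, and a 'query' field parsed above the fragment shown here (the base URL, cookie name, and query field are assumptions, not taken from this diff):

import json
import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

def stream_universal_chat(query: str):
    payload = {
        'query': query,                  # assumed; parsed above the shown fragment
        'provider': 'openai',
        'model': 'gpt-3.5-turbo-16k',
        'tools': [],                     # current_datetime is appended server-side
        'retriever_from': 'universal_app',
    }
    with requests.post(f'{BASE}/universal-chat/messages', json=payload,
                       cookies=COOKIES, stream=True) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines(decode_unicode=True):
            # compact_response emits SSE lines of the form "data: {...}".
            if line.startswith('data: '):
                yield json.loads(line[len('data: '):])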


@@ -0,0 +1,104 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import fields, reqparse, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.universal_chat.wraps import UniversalChatResource
from fields.conversation_fields import conversation_with_model_config_infinite_scroll_pagination_fields, \
conversation_with_model_config_fields
from libs.helper import TimestampField, uuid_value
from services.conversation_service import ConversationService
from services.errors.conversation import LastConversationNotExistsError, ConversationNotExistsError
from services.web_conversation_service import WebConversationService
class UniversalChatConversationListApi(UniversalChatResource):
@marshal_with(conversation_with_model_config_infinite_scroll_pagination_fields)
def get(self, universal_app):
app_model = universal_app
parser = reqparse.RequestParser()
parser.add_argument('last_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
parser.add_argument('pinned', type=str, choices=['true', 'false', None], location='args')
args = parser.parse_args()
pinned = None
if args['pinned'] is not None:
pinned = args['pinned'] == 'true'
try:
return WebConversationService.pagination_by_last_id(
app_model=app_model,
user=current_user,
last_id=args['last_id'],
limit=args['limit'],
pinned=pinned
)
except LastConversationNotExistsError:
raise NotFound("Last Conversation Not Exists.")
class UniversalChatConversationApi(UniversalChatResource):
def delete(self, universal_app, c_id):
app_model = universal_app
conversation_id = str(c_id)
try:
ConversationService.delete(app_model, conversation_id, current_user)
except ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
WebConversationService.unpin(app_model, conversation_id, current_user)
return {"result": "success"}, 204
class UniversalChatConversationRenameApi(UniversalChatResource):
@marshal_with(conversation_with_model_config_fields)
def post(self, universal_app, c_id):
app_model = universal_app
conversation_id = str(c_id)
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
args = parser.parse_args()
try:
return ConversationService.rename(app_model, conversation_id, current_user, args['name'])
except ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
class UniversalChatConversationPinApi(UniversalChatResource):
def patch(self, universal_app, c_id):
app_model = universal_app
conversation_id = str(c_id)
try:
WebConversationService.pin(app_model, conversation_id, current_user)
except ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
return {"result": "success"}
class UniversalChatConversationUnPinApi(UniversalChatResource):
def patch(self, universal_app, c_id):
app_model = universal_app
conversation_id = str(c_id)
WebConversationService.unpin(app_model, conversation_id, current_user)
return {"result": "success"}
api.add_resource(UniversalChatConversationRenameApi, '/universal-chat/conversations/<uuid:c_id>/name')
api.add_resource(UniversalChatConversationListApi, '/universal-chat/conversations')
api.add_resource(UniversalChatConversationApi, '/universal-chat/conversations/<uuid:c_id>')
api.add_resource(UniversalChatConversationPinApi, '/universal-chat/conversations/<uuid:c_id>/pin')
api.add_resource(UniversalChatConversationUnPinApi, '/universal-chat/conversations/<uuid:c_id>/unpin')
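The list endpoint uses cursor pagination: limit is capped at 100 by int_range(1, 100), and the client walks onward by resending the last id it saw as last_id. A sketch, assuming the infinite-scroll envelope matches the limit/has_more/data shape used for messages below (base URL and cookie are assumptions):

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

def iter_conversations(pinned=None):
    params = {'limit': 20}
    if pinned is not None:
        params['pinned'] = 'true' if pinned else 'false'
    while True:
        resp = requests.get(f'{BASE}/universal-chat/conversations',
                            params=params, cookies=COOKIES)
        resp.raise_for_status()
        page = resp.json()
        yield from page['data']
        if not page['has_more']:
            break
        params['last_id'] = page['data'][-1]['id']  # cursor for the next page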


@@ -0,0 +1,147 @@
# -*- coding:utf-8 -*-
import logging
from flask_login import current_user
from flask_restful import reqparse, fields, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound, InternalServerError
import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
from controllers.console.universal_chat.wraps import UniversalChatResource
from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value, TimestampField
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService
class UniversalChatMessageListApi(UniversalChatResource):
feedback_fields = {
'rating': fields.String
}
agent_thought_fields = {
'id': fields.String,
'chain_id': fields.String,
'message_id': fields.String,
'position': fields.Integer,
'thought': fields.String,
'tool': fields.String,
'tool_input': fields.String,
'created_at': TimestampField
}
retriever_resource_fields = {
'id': fields.String,
'message_id': fields.String,
'position': fields.Integer,
'dataset_id': fields.String,
'dataset_name': fields.String,
'document_id': fields.String,
'document_name': fields.String,
'data_source_type': fields.String,
'segment_id': fields.String,
'score': fields.Float,
'hit_count': fields.Integer,
'word_count': fields.Integer,
'segment_position': fields.Integer,
'index_node_hash': fields.String,
'content': fields.String,
'created_at': TimestampField
}
message_fields = {
'id': fields.String,
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'answer': fields.String,
'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
'created_at': TimestampField,
'agent_thoughts': fields.List(fields.Nested(agent_thought_fields))
}
message_infinite_scroll_pagination_fields = {
'limit': fields.Integer,
'has_more': fields.Boolean,
'data': fields.List(fields.Nested(message_fields))
}
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, universal_app):
app_model = universal_app
parser = reqparse.RequestParser()
parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
parser.add_argument('first_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
try:
return MessageService.pagination_by_first_id(app_model, current_user,
args['conversation_id'], args['first_id'], args['limit'])
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.message.FirstMessageNotExistsError:
raise NotFound("First Message Not Exists.")
class UniversalChatMessageFeedbackApi(UniversalChatResource):
def post(self, universal_app, message_id):
app_model = universal_app
message_id = str(message_id)
parser = reqparse.RequestParser()
parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
args = parser.parse_args()
try:
MessageService.create_feedback(app_model, message_id, current_user, args['rating'])
except services.errors.message.MessageNotExistsError:
raise NotFound("Message Not Exists.")
return {'result': 'success'}
class UniversalChatMessageSuggestedQuestionApi(UniversalChatResource):
def get(self, universal_app, message_id):
app_model = universal_app
message_id = str(message_id)
try:
questions = MessageService.get_suggested_questions_after_answer(
app_model=app_model,
user=current_user,
message_id=message_id
)
except MessageNotExistsError:
raise NotFound("Message not found")
except ConversationNotExistsError:
raise NotFound("Conversation not found")
except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except Exception:
logging.exception("internal server error.")
raise InternalServerError()
return {'data': questions}
api.add_resource(UniversalChatMessageListApi, '/universal-chat/messages')
api.add_resource(UniversalChatMessageFeedbackApi, '/universal-chat/messages/<uuid:message_id>/feedbacks')
api.add_resource(UniversalChatMessageSuggestedQuestionApi, '/universal-chat/messages/<uuid:message_id>/suggested-questions')
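Messages page in the opposite direction (first_id walks toward older messages), and feedback accepts 'like', 'dislike', or null to clear a rating. A short sketch under the same assumed base URL and session cookie:

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

def latest_messages(conversation_id, limit=20):
    resp = requests.get(f'{BASE}/universal-chat/messages',
                        params={'conversation_id': conversation_id, 'limit': limit},
                        cookies=COOKIES)
    resp.raise_for_status()
    return resp.json()['data']

def rate_message(message_id, good):
    # good=True -> 'like', good=False -> 'dislike', good=None clears the feedback
    rating = None if good is None else ('like' if good else 'dislike')
    requests.post(f'{BASE}/universal-chat/messages/{message_id}/feedbacks',
                  json={'rating': rating}, cookies=COOKIES).raise_for_status()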


@@ -0,0 +1,38 @@
# -*- coding:utf-8 -*-
import json
from flask_restful import marshal_with, fields
from controllers.console import api
from controllers.console.universal_chat.wraps import UniversalChatResource
from models.model import App
class UniversalChatParameterApi(UniversalChatResource):
"""Resource for app variables."""
parameters_fields = {
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'retriever_resource': fields.Raw,
}
@marshal_with(parameters_fields)
def get(self, universal_app: App):
"""Retrieve app parameters."""
app_model = universal_app
app_model_config = app_model.app_model_config
app_model_config.retriever_resource = json.dumps({'enabled': True})
return {
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
}
api.add_resource(UniversalChatParameterApi, '/universal-chat/parameters')


@@ -0,0 +1,86 @@
import json
from functools import wraps
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from models.model import App, AppModelConfig
def universal_chat_app_required(view=None):
def decorator(view):
@wraps(view)
def decorated(*args, **kwargs):
# get universal chat app
universal_app = db.session.query(App).filter(
App.tenant_id == current_user.current_tenant_id,
App.is_universal == True
).first()
if universal_app is None:
# create universal app if not exists
universal_app = App(
tenant_id=current_user.current_tenant_id,
name='Universal Chat',
mode='chat',
is_universal=True,
icon='',
icon_background='',
api_rpm=0,
api_rph=0,
enable_site=False,
enable_api=False,
status='normal'
)
db.session.add(universal_app)
db.session.flush()
app_model_config = AppModelConfig(
provider="",
model_id="",
configs={},
opening_statement='',
suggested_questions=json.dumps([]),
suggested_questions_after_answer=json.dumps({'enabled': True}),
speech_to_text=json.dumps({'enabled': True}),
retriever_resource=json.dumps({'enabled': True}),
more_like_this=None,
sensitive_word_avoidance=None,
model=json.dumps({
"provider": "openai",
"name": "gpt-3.5-turbo-16k",
"completion_params": {
"max_tokens": 800,
"temperature": 0.8,
"top_p": 1,
"presence_penalty": 0,
"frequency_penalty": 0
}
}),
user_input_form=json.dumps([]),
pre_prompt='',
agent_mode=json.dumps({"enabled": True, "strategy": "function_call", "tools": []}),
)
app_model_config.app_id = universal_app.id
db.session.add(app_model_config)
db.session.flush()
universal_app.app_model_config_id = app_model_config.id
db.session.commit()
return view(universal_app, *args, **kwargs)
return decorated
if view:
return decorator(view)
return decorator
class UniversalChatResource(Resource):
# method_decorators are applied in list order when wrapping, so the last entry runs first; list them in reverse of the desired execution order
method_decorators = [universal_chat_app_required, account_initialization_required, login_required, setup_required]
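Flask-RESTful wraps the view with each entry of method_decorators in list order, so the last entry becomes the outermost wrapper and runs first at request time; that is why the list above reads in reverse of the intended execution order (setup, then login, then account initialization, then the universal-chat lookup). A self-contained sketch of that mechanic, with illustrative names:

from functools import wraps

def tracing_decorator(tag, trace):
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            trace.append(tag)
            return fn(*args, **kwargs)
        return wrapper
    return decorator

trace = []
method_decorators = [tracing_decorator(tag, trace)
                     for tag in ('app_required', 'account_init', 'login', 'setup')]

def view():
    trace.append('view')

# Equivalent to what Flask-RESTful does when dispatching a request:
for decorator in method_decorators:
    view = decorator(view)

view()
print(trace)  # ['setup', 'login', 'account_init', 'app_required', 'view']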


@@ -32,8 +32,13 @@ class VersionApi(Resource):
'current_version': args.get('current_version')
})
except Exception as error:
logging.exception("Check update error.")
raise InternalServerError()
logging.warning("Check update version error: {}.".format(str(error)))
return {
'version': args.get('current_version'),
'release_date': '',
'release_notes': '',
'can_auto_update': False
}
content = json.loads(response.content)
return {


@@ -0,0 +1,61 @@
import logging
import stripe
from flask import request, current_app
from flask_restful import Resource
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import only_edition_cloud
from services.provider_checkout_service import ProviderCheckoutService
class StripeWebhookApi(Resource):
@setup_required
@only_edition_cloud
def post(self):
payload = request.data
sig_header = request.headers.get('STRIPE_SIGNATURE')
webhook_secret = current_app.config.get('STRIPE_WEBHOOK_SECRET')
try:
event = stripe.Webhook.construct_event(
payload, sig_header, webhook_secret
)
except ValueError as e:
# Invalid payload
return 'Invalid payload', 400
except stripe.error.SignatureVerificationError as e:
# Invalid signature
return 'Invalid signature', 400
# Handle the checkout.session.completed event
if event['type'] == 'checkout.session.completed':
logging.debug(event['data']['object']['id'])
logging.debug(event['data']['object']['amount_subtotal'])
logging.debug(event['data']['object']['currency'])
logging.debug(event['data']['object']['payment_intent'])
logging.debug(event['data']['object']['payment_status'])
logging.debug(event['data']['object']['metadata'])
session = stripe.checkout.Session.retrieve(
event['data']['object']['id'],
expand=['line_items'],
)
logging.debug(session.line_items['data'][0]['quantity'])
# Fulfill the purchase...
provider_checkout_service = ProviderCheckoutService()
try:
provider_checkout_service.fulfill_provider_order(event, session.line_items)
except Exception as e:
logging.debug(str(e))
return 'success', 200
return 'success', 200
api.add_resource(StripeWebhookApi, '/webhook/stripe')
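stripe.Webhook.construct_event rejects the payload unless the Stripe-Signature header checks out, so exercising this route locally needs a correctly signed request. A sketch that signs a fake event the way Stripe documents it (HMAC-SHA256 over "{timestamp}.{payload}", sent as t=...,v1=...); the endpoint URL and console prefix are assumptions:

import hashlib
import hmac
import json
import time

import requests

WEBHOOK_SECRET = 'whsec_test'  # must match STRIPE_WEBHOOK_SECRET on the server
payload = json.dumps({'type': 'checkout.session.completed',
                      'data': {'object': {'id': 'cs_test_123'}}})

ts = int(time.time())
signed = f'{ts}.{payload}'.encode()
v1 = hmac.new(WEBHOOK_SECRET.encode(), signed, hashlib.sha256).hexdigest()

resp = requests.post('http://localhost:5001/console/api/webhook/stripe',  # assumed URL
                     data=payload,
                     headers={'Stripe-Signature': f't={ts},v1={v1}',
                              'Content-Type': 'application/json'})
print(resp.status_code, resp.text)

Note that a fabricated checkout.session.completed event will still fail at stripe.checkout.Session.retrieve unless a Stripe test key is configured; send a different event type to exercise only the signature-verification path.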


@@ -3,25 +3,27 @@ from datetime import datetime
import pytz
from flask import current_app, request
from flask_login import login_required, current_user
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, fields, marshal_with
from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.workspace.error import AccountAlreadyInitedError, InvalidInvitationCodeError, \
RepeatPasswordNotMatchError
RepeatPasswordNotMatchError, CurrentPasswordIncorrectError
from controllers.console.wraps import account_initialization_required
from libs.helper import TimestampField, supported_language, timezone
from extensions.ext_database import db
from models.account import InvitationCode, AccountIntegrate
from services.account_service import AccountService
account_fields = {
'id': fields.String,
'name': fields.String,
'avatar': fields.String,
'email': fields.String,
'is_password_set': fields.Boolean,
'interface_language': fields.String,
'interface_theme': fields.String,
'timezone': fields.String,
@@ -194,8 +196,11 @@ class AccountPasswordApi(Resource):
if args['new_password'] != args['repeat_new_password']:
raise RepeatPasswordNotMatchError()
AccountService.update_account_password(
current_user, args['password'], args['new_password'])
try:
AccountService.update_account_password(
current_user, args['password'], args['new_password'])
except ServiceCurrentPasswordIncorrectError:
raise CurrentPasswordIncorrectError()
return {"result": "success"}


@@ -7,6 +7,12 @@ class RepeatPasswordNotMatchError(BaseHTTPException):
code = 400
class CurrentPasswordIncorrectError(BaseHTTPException):
error_code = 'current_password_incorrect'
description = "Current password is incorrect."
code = 400
class ProviderRequestFailedError(BaseHTTPException):
error_code = 'provider_request_failed'
description = None


@@ -1,6 +1,7 @@
# -*- coding:utf-8 -*-
from flask_login import login_required, current_user
from flask import current_app
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse, marshal_with, abort, fields, marshal
import services
@@ -48,37 +49,44 @@ class MemberInviteEmailApi(Resource):
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('email', type=str, required=True, location='json')
parser.add_argument('emails', type=str, required=True, location='json', action='append')
parser.add_argument('role', type=str, required=True, default='admin', location='json')
args = parser.parse_args()
invitee_email = args['email']
invitee_emails = args['emails']
invitee_role = args['role']
if invitee_role not in ['admin', 'normal']:
return {'code': 'invalid-role', 'message': 'Invalid role'}, 400
inviter = current_user
invitation_results = []
console_web_url = current_app.config.get("CONSOLE_WEB_URL")
for invitee_email in invitee_emails:
try:
token = RegisterService.invite_new_member(inviter.current_tenant, invitee_email, role=invitee_role,
inviter=inviter)
account = db.session.query(Account, TenantAccountJoin.role).join(
TenantAccountJoin, Account.id == TenantAccountJoin.account_id
).filter(Account.email == invitee_email).first()
account, role = account
invitation_results.append({
'status': 'success',
'email': invitee_email,
'url': f'{console_web_url}/activate?email={invitee_email}&token={token}'
})
account = marshal(account, account_fields)
account['role'] = role
except Exception as e:
invitation_results.append({
'status': 'failed',
'email': invitee_email,
'message': str(e)
})
try:
RegisterService.invite_new_member(inviter.current_tenant, invitee_email, role=invitee_role, inviter=inviter)
account = db.session.query(Account, TenantAccountJoin.role).join(
TenantAccountJoin, Account.id == TenantAccountJoin.account_id
).filter(Account.email == args['email']).first()
account, role = account
account = marshal(account, account_fields)
account['role'] = role
except services.errors.account.CannotOperateSelfError as e:
return {'code': 'cannot-operate-self', 'message': str(e)}, 400
except services.errors.account.NoPermissionError as e:
return {'code': 'forbidden', 'message': str(e)}, 403
except services.errors.account.AccountAlreadyInTenantError as e:
return {'code': 'email-taken', 'message': str(e)}, 409
except Exception as e:
return {'code': 'unexpected-error', 'message': str(e)}, 500
# todo:413
return {'result': 'success', 'account': account}, 201
return {
'result': 'success',
'invitation_results': invitation_results,
}, 201
class MemberCancelInviteApi(Resource):
@@ -88,7 +96,7 @@ class MemberCancelInviteApi(Resource):
@login_required
@account_initialization_required
def delete(self, member_id):
member = Account.query.get(str(member_id))
member = db.session.query(Account).filter(Account.id == str(member_id)).first()
if not member:
abort(404)
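The invite endpoint now takes a JSON array under emails (action='append') and reports per-address results instead of failing the whole batch. A request sketch; the route path is an assumption, since the add_resource call for this resource falls outside the hunks shown here:

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

resp = requests.post(f'{BASE}/workspaces/current/members/invite-email',  # assumed path
                     json={'emails': ['a@example.com', 'b@example.com'],
                           'role': 'normal'},
                     cookies=COOKIES)
for item in resp.json()['invitation_results']:
    # Successful entries carry an activation URL; failures carry a message.
    print(item['email'], item['status'], item.get('url') or item.get('message'))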


@@ -0,0 +1,324 @@
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import CredentialsValidateFailedError
from services.provider_checkout_service import ProviderCheckoutService
from services.provider_service import ProviderService
class ModelProviderListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
tenant_id = current_user.current_tenant_id
provider_service = ProviderService()
provider_list = provider_service.get_provider_list(tenant_id)
return provider_list
class ModelProviderValidateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
parser = reqparse.RequestParser()
parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
result = True
error = None
try:
provider_service.custom_provider_config_validate(
provider_name=provider_name,
config=args['config']
)
except CredentialsValidateFailedError as ex:
result = False
error = str(ex)
response = {'result': 'success' if result else 'error'}
if not result:
response['error'] = error
return response
class ModelProviderUpdateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
try:
provider_service.save_custom_provider_config(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
config=args['config']
)
except CredentialsValidateFailedError as ex:
raise ValueError(str(ex))
return {'result': 'success'}, 201
@setup_required
@login_required
@account_initialization_required
def delete(self, provider_name: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
provider_service = ProviderService()
provider_service.delete_custom_provider(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name
)
return {'result': 'success'}, 204
class ModelProviderModelValidateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
parser = reqparse.RequestParser()
parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=['text-generation', 'embeddings', 'speech2text'], location='json')
parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
result = True
error = None
try:
provider_service.custom_provider_model_config_validate(
provider_name=provider_name,
model_name=args['model_name'],
model_type=args['model_type'],
config=args['config']
)
except CredentialsValidateFailedError as ex:
result = False
error = str(ex)
response = {'result': 'success' if result else 'error'}
if not result:
response['error'] = error
return response
class ModelProviderModelUpdateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=['text-generation', 'embeddings', 'speech2text'], location='json')
parser.add_argument('config', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
try:
provider_service.add_or_save_custom_provider_model_config(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
model_name=args['model_name'],
model_type=args['model_type'],
config=args['config']
)
except CredentialsValidateFailedError as ex:
raise ValueError(str(ex))
return {'result': 'success'}, 200
@setup_required
@login_required
@account_initialization_required
def delete(self, provider_name: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('model_name', type=str, required=True, nullable=False, location='args')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=['text-generation', 'embeddings', 'speech2text'], location='args')
args = parser.parse_args()
provider_service = ProviderService()
provider_service.delete_custom_provider_model(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
model_name=args['model_name'],
model_type=args['model_type']
)
return {'result': 'success'}, 204
class PreferredProviderTypeUpdateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('preferred_provider_type', type=str, required=True, nullable=False,
choices=['system', 'custom'], location='json')
args = parser.parse_args()
provider_service = ProviderService()
provider_service.switch_preferred_provider(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
preferred_provider_type=args['preferred_provider_type']
)
return {'result': 'success'}
class ModelProviderModelParameterRuleApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider_name: str):
parser = reqparse.RequestParser()
parser.add_argument('model_name', type=str, required=True, nullable=False, location='args')
args = parser.parse_args()
provider_service = ProviderService()
try:
parameter_rules = provider_service.get_model_parameter_rules(
tenant_id=current_user.current_tenant_id,
model_provider_name=provider_name,
model_name=args['model_name'],
model_type='text-generation'
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"Current Text Generation Model is invalid. Please switch to the available model.")
rules = {
k: {
'enabled': v.enabled,
'min': v.min,
'max': v.max,
'default': v.default,
'precision': v.precision
}
for k, v in vars(parameter_rules).items()
}
return rules
class ModelProviderPaymentCheckoutUrlApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider_name: str):
provider_service = ProviderCheckoutService()
provider_checkout = provider_service.create_checkout(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
account=current_user
)
return {
'url': provider_checkout.get_checkout_url()
}
class ModelProviderFreeQuotaSubmitApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider_name: str):
provider_service = ProviderService()
result = provider_service.free_quota_submit(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name
)
return result
class ModelProviderFreeQuotaQualificationVerifyApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider_name: str):
parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=False, nullable=True, location='args')
args = parser.parse_args()
provider_service = ProviderService()
result = provider_service.free_quota_qualification_verify(
tenant_id=current_user.current_tenant_id,
provider_name=provider_name,
token=args['token']
)
return result
api.add_resource(ModelProviderListApi, '/workspaces/current/model-providers')
api.add_resource(ModelProviderValidateApi, '/workspaces/current/model-providers/<string:provider_name>/validate')
api.add_resource(ModelProviderUpdateApi, '/workspaces/current/model-providers/<string:provider_name>')
api.add_resource(ModelProviderModelValidateApi,
'/workspaces/current/model-providers/<string:provider_name>/models/validate')
api.add_resource(ModelProviderModelUpdateApi,
'/workspaces/current/model-providers/<string:provider_name>/models')
api.add_resource(PreferredProviderTypeUpdateApi,
'/workspaces/current/model-providers/<string:provider_name>/preferred-provider-type')
api.add_resource(ModelProviderModelParameterRuleApi,
'/workspaces/current/model-providers/<string:provider_name>/models/parameter-rules')
api.add_resource(ModelProviderPaymentCheckoutUrlApi,
'/workspaces/current/model-providers/<string:provider_name>/checkout-url')
api.add_resource(ModelProviderFreeQuotaSubmitApi,
'/workspaces/current/model-providers/<string:provider_name>/free-quota-submit')
api.add_resource(ModelProviderFreeQuotaQualificationVerifyApi,
'/workspaces/current/model-providers/<string:provider_name>/free-quota-qualification-verify')
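The intended flow for custom providers is validate first, then persist: the validate route is a dry run that never writes, while the update route re-validates and saves (and is restricted to admin/owner). A sketch under the same assumed base URL and cookie; the credential shape is provider-specific and assumed here:

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

config = {'openai_api_key': 'sk-...'}  # assumed credential shape for openai

check = requests.post(f'{BASE}/workspaces/current/model-providers/openai/validate',
                      json={'config': config}, cookies=COOKIES).json()
# -> {'result': 'success'} or {'result': 'error', 'error': '...'}

if check['result'] == 'success':
    requests.post(f'{BASE}/workspaces/current/model-providers/openai',
                  json={'config': config}, cookies=COOKIES)  # 201 on success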


@@ -0,0 +1,109 @@
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_providers.model_provider_factory import ModelProviderFactory
from core.model_providers.models.entity.model_params import ModelType
from models.provider import ProviderType
from services.provider_service import ProviderService
class DefaultModelApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=['text-generation', 'embeddings', 'speech2text'], location='args')
args = parser.parse_args()
tenant_id = current_user.current_tenant_id
provider_service = ProviderService()
default_model = provider_service.get_default_model_of_model_type(
tenant_id=tenant_id,
model_type=args['model_type']
)
if not default_model:
return None
model_provider = ModelProviderFactory.get_preferred_model_provider(
tenant_id,
default_model.provider_name
)
if not model_provider:
return {
'model_name': default_model.model_name,
'model_type': default_model.model_type,
'model_provider': {
'provider_name': default_model.provider_name
}
}
provider = model_provider.provider
rst = {
'model_name': default_model.model_name,
'model_type': default_model.model_type,
'model_provider': {
'provider_name': provider.provider_name,
'provider_type': provider.provider_type
}
}
model_provider_rules = ModelProviderFactory.get_provider_rule(default_model.provider_name)
if provider.provider_type == ProviderType.SYSTEM.value:
rst['model_provider']['quota_type'] = provider.quota_type
rst['model_provider']['quota_unit'] = model_provider_rules['system_config']['quota_unit']
rst['model_provider']['quota_limit'] = provider.quota_limit
rst['model_provider']['quota_used'] = provider.quota_used
return rst
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('model_name', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_type', type=str, required=True, nullable=False,
choices=['text-generation', 'embeddings', 'speech2text'], location='json')
parser.add_argument('provider_name', type=str, required=True, nullable=False, location='json')
args = parser.parse_args()
provider_service = ProviderService()
provider_service.update_default_model_of_model_type(
tenant_id=current_user.current_tenant_id,
model_type=args['model_type'],
provider_name=args['provider_name'],
model_name=args['model_name']
)
return {'result': 'success'}
class ValidModelApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, model_type):
ModelType.value_of(model_type)
provider_service = ProviderService()
valid_models = provider_service.get_valid_model_list(
tenant_id=current_user.current_tenant_id,
model_type=model_type
)
return valid_models
api.add_resource(DefaultModelApi, '/workspaces/current/default-model')
api.add_resource(ValidModelApi, '/workspaces/current/models/model-type/<string:model_type>')
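DefaultModelApi is a straightforward get/set pair keyed by model_type, and the GET response only carries quota fields when the preferred provider is system-type. A sketch under the same assumed base URL and cookie:

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

# model_type must be one of 'text-generation', 'embeddings', 'speech2text'.
requests.post(f'{BASE}/workspaces/current/default-model',
              json={'model_type': 'text-generation',
                    'provider_name': 'openai',
                    'model_name': 'gpt-3.5-turbo-16k'},
              cookies=COOKIES)

current = requests.get(f'{BASE}/workspaces/current/default-model',
                       params={'model_type': 'text-generation'},
                       cookies=COOKIES).json()
print(current['model_provider'])  # quota_* keys only for system-type providers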


@@ -1,19 +1,14 @@
# -*- coding:utf-8 -*-
import base64
import json
import logging
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, abort
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.provider.errors import ValidateFailedError
from extensions.ext_database import db
from libs import rsa
from models.provider import Provider, ProviderType, ProviderName
from core.model_providers.providers.base import CredentialsValidateFailedError
from models.provider import ProviderType
from services.provider_service import ProviderService
@@ -34,25 +29,26 @@ class ProviderListApi(Resource):
plaintext, the rest is replaced by * and the last two bits are displayed in plaintext
"""
ProviderService.init_supported_provider(current_user.current_tenant, "cloud")
providers = Provider.query.filter_by(tenant_id=tenant_id).all()
provider_service = ProviderService()
provider_info_list = provider_service.get_provider_list(tenant_id)
provider_list = [
{
'provider_name': p.provider_name,
'provider_type': p.provider_type,
'is_valid': p.is_valid,
'last_used': p.last_used,
'is_enabled': p.is_enabled,
'provider_name': p['provider_name'],
'provider_type': p['provider_type'],
'is_valid': p['is_valid'],
'last_used': p['last_used'],
'is_enabled': p['is_valid'],
**({
'quota_type': p.quota_type,
'quota_limit': p.quota_limit,
'quota_used': p.quota_used
} if p.provider_type == ProviderType.SYSTEM.value else {}),
'token': ProviderService.get_obfuscated_api_key(current_user.current_tenant,
ProviderName(p.provider_name))
'quota_type': p['quota_type'],
'quota_limit': p['quota_limit'],
'quota_used': p['quota_used']
} if p['provider_type'] == ProviderType.SYSTEM.value else {}),
'token': (p['config'] if p['provider_name'] != 'openai' else p['config']['openai_api_key'])
if p['config'] else None
}
for p in providers
for name, provider_info in provider_info_list.items()
for p in provider_info['providers']
]
return provider_list
@@ -64,78 +60,28 @@ class ProviderTokenApi(Resource):
@login_required
@account_initialization_required
def post(self, provider):
if provider not in [p.value for p in ProviderName]:
abort(404)
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
logging.log(logging.ERROR,
f'User {current_user.id} is not authorized to update provider token, current_role is {current_user.current_tenant.current_role}')
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('token', type=ProviderService.get_token_type(
tenant=current_user.current_tenant,
provider_name=ProviderName(provider)
), required=True, nullable=False, location='json')
parser.add_argument('token', required=True, nullable=False, location='json')
args = parser.parse_args()
if args['token']:
try:
ProviderService.validate_provider_configs(
tenant=current_user.current_tenant,
provider_name=ProviderName(provider),
configs=args['token']
)
token_is_valid = True
except ValidateFailedError as ex:
raise ValueError(str(ex))
if provider == 'openai':
args['token'] = {
'openai_api_key': args['token']
}
base64_encrypted_token = ProviderService.get_encrypted_token(
tenant=current_user.current_tenant,
provider_name=ProviderName(provider),
configs=args['token']
provider_service = ProviderService()
try:
provider_service.save_custom_provider_config(
tenant_id=current_user.current_tenant_id,
provider_name=provider,
config=args['token']
)
else:
base64_encrypted_token = None
token_is_valid = False
tenant = current_user.current_tenant
provider_model = db.session.query(Provider).filter(
Provider.tenant_id == tenant.id,
Provider.provider_name == provider,
Provider.provider_type == ProviderType.CUSTOM.value
).first()
# Only allow updating token for CUSTOM provider type
if provider_model:
provider_model.encrypted_config = base64_encrypted_token
provider_model.is_valid = token_is_valid
else:
provider_model = Provider(tenant_id=tenant.id, provider_name=provider,
provider_type=ProviderType.CUSTOM.value,
encrypted_config=base64_encrypted_token,
is_valid=token_is_valid)
db.session.add(provider_model)
if provider_model.is_valid:
other_providers = db.session.query(Provider).filter(
Provider.tenant_id == tenant.id,
Provider.provider_name != provider,
Provider.provider_type == ProviderType.CUSTOM.value
).all()
for other_provider in other_providers:
other_provider.is_valid = False
db.session.commit()
if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
ProviderName.HUGGINGFACEHUB.value]:
return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}, 201
except CredentialsValidateFailedError as ex:
raise ValueError(str(ex))
return {'result': 'success'}, 201
@@ -146,33 +92,28 @@ class ProviderTokenValidateApi(Resource):
@login_required
@account_initialization_required
def post(self, provider):
if provider not in [p.value for p in ProviderName]:
abort(404)
parser = reqparse.RequestParser()
parser.add_argument('token', type=ProviderService.get_token_type(
tenant=current_user.current_tenant,
provider_name=ProviderName(provider)
), required=True, nullable=False, location='json')
parser.add_argument('token', required=True, nullable=False, location='json')
args = parser.parse_args()
# todo: remove this when the provider is supported
if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value,
ProviderName.HUGGINGFACEHUB.value]:
return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}
provider_service = ProviderService()
if provider == 'openai':
args['token'] = {
'openai_api_key': args['token']
}
result = True
error = None
try:
ProviderService.validate_provider_configs(
tenant=current_user.current_tenant,
provider_name=ProviderName(provider),
configs=args['token']
provider_service.custom_provider_config_validate(
provider_name=provider,
config=args['token']
)
except ValidateFailedError as e:
except CredentialsValidateFailedError as ex:
result = False
error = str(e)
error = str(ex)
response = {'result': 'success' if result else 'error'}
@@ -182,79 +123,9 @@ class ProviderTokenValidateApi(Resource):
return response
class ProviderSystemApi(Resource):
@setup_required
@login_required
@account_initialization_required
def put(self, provider):
if provider not in [p.value for p in ProviderName]:
abort(404)
parser = reqparse.RequestParser()
parser.add_argument('is_enabled', type=bool, required=True, location='json')
args = parser.parse_args()
tenant = current_user.current_tenant_id
provider_model = Provider.query.filter_by(tenant_id=tenant.id, provider_name=provider).first()
if provider_model and provider_model.provider_type == ProviderType.SYSTEM.value:
provider_model.is_valid = args['is_enabled']
db.session.commit()
elif not provider_model:
ProviderService.create_system_provider(tenant, provider, args['is_enabled'])
else:
abort(403)
return {'result': 'success'}
@setup_required
@login_required
@account_initialization_required
def get(self, provider):
if provider not in [p.value for p in ProviderName]:
abort(404)
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden()
provider_model = db.session.query(Provider).filter(Provider.tenant_id == current_user.current_tenant_id,
Provider.provider_name == provider,
Provider.provider_type == ProviderType.SYSTEM.value).first()
system_model = None
if provider_model:
system_model = {
'result': 'success',
'provider': {
'provider_name': provider_model.provider_name,
'provider_type': provider_model.provider_type,
'is_valid': provider_model.is_valid,
'last_used': provider_model.last_used,
'is_enabled': provider_model.is_enabled,
'quota_type': provider_model.quota_type,
'quota_limit': provider_model.quota_limit,
'quota_used': provider_model.quota_used
}
}
else:
abort(404)
return system_model
api.add_resource(ProviderTokenApi, '/providers/<provider>/token',
endpoint='current_providers_token') # Deprecated
api.add_resource(ProviderTokenValidateApi, '/providers/<provider>/token-validate',
endpoint='current_providers_token_validate') # Deprecated
api.add_resource(ProviderTokenApi, '/workspaces/current/providers/<provider>/token',
endpoint='workspaces_current_providers_token') # PUT for updating provider token
api.add_resource(ProviderTokenValidateApi, '/workspaces/current/providers/<provider>/token-validate',
endpoint='workspaces_current_providers_token_validate') # POST for validating provider token
api.add_resource(ProviderListApi, '/workspaces/current/providers') # GET for getting providers list
api.add_resource(ProviderSystemApi, '/workspaces/current/providers/<provider>/system',
endpoint='workspaces_current_providers_system') # GET for getting provider quota, PUT for updating provider status


@@ -0,0 +1,137 @@
import json
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, abort, reqparse
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.tool.provider.errors import ToolValidateFailedError
from core.tool.provider.tool_provider_service import ToolProviderService
from extensions.ext_database import db
from models.tool import ToolProvider, ToolProviderName
class ToolProviderListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
tenant_id = current_user.current_tenant_id
tool_credential_dict = {}
for tool_name in ToolProviderName:
tool_credential_dict[tool_name.value] = {
'tool_name': tool_name.value,
'is_enabled': False,
'credentials': None
}
tool_providers = db.session.query(ToolProvider).filter(ToolProvider.tenant_id == tenant_id).all()
for p in tool_providers:
if p.is_enabled:
tool_credential_dict[p.tool_name] = {
'tool_name': p.tool_name,
'is_enabled': p.is_enabled,
'credentials': ToolProviderService(tenant_id, p.tool_name).get_credentials(obfuscated=True)
}
return list(tool_credential_dict.values())
class ToolProviderCredentialsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider):
if provider not in [p.value for p in ToolProviderName]:
abort(404)
# The role of the current user in the ta table must be admin or owner
if current_user.current_tenant.current_role not in ['admin', 'owner']:
raise Forbidden(f'User {current_user.id} is not authorized to update provider token, '
f'current_role is {current_user.current_tenant.current_role}')
parser = reqparse.RequestParser()
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
tenant_id = current_user.current_tenant_id
tool_provider_service = ToolProviderService(tenant_id, provider)
try:
tool_provider_service.credentials_validate(args['credentials'])
except ToolValidateFailedError as ex:
raise ValueError(str(ex))
encrypted_credentials = json.dumps(tool_provider_service.encrypt_credentials(args['credentials']))
tenant = current_user.current_tenant
tool_provider_model = db.session.query(ToolProvider).filter(
ToolProvider.tenant_id == tenant.id,
ToolProvider.tool_name == provider,
).first()
# Only allow updating token for CUSTOM provider type
if tool_provider_model:
tool_provider_model.encrypted_credentials = encrypted_credentials
tool_provider_model.is_enabled = True
else:
tool_provider_model = ToolProvider(
tenant_id=tenant.id,
tool_name=provider,
encrypted_credentials=encrypted_credentials,
is_enabled=True
)
db.session.add(tool_provider_model)
db.session.commit()
return {'result': 'success'}, 201
class ToolProviderCredentialsValidateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider):
if provider not in [p.value for p in ToolProviderName]:
abort(404)
parser = reqparse.RequestParser()
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
result = True
error = None
tenant_id = current_user.current_tenant_id
tool_provider_service = ToolProviderService(tenant_id, provider)
try:
tool_provider_service.credentials_validate(args['credentials'])
except ToolValidateFailedError as ex:
result = False
error = str(ex)
response = {'result': 'success' if result else 'error'}
if not result:
response['error'] = error
return response
api.add_resource(ToolProviderListApi, '/workspaces/current/tool-providers')
api.add_resource(ToolProviderCredentialsApi, '/workspaces/current/tool-providers/<provider>/credentials')
api.add_resource(ToolProviderCredentialsValidateApi,
'/workspaces/current/tool-providers/<provider>/credentials-validate')
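Tool providers follow the same validate-then-save pattern as model providers. A sketch; 'serpapi' and the credential key are illustrative guesses, since ToolProviderName's members are not shown in this diff:

import requests

BASE = 'http://localhost:5001/console/api'         # assumed mount point
COOKIES = {'session': '<console-session-cookie>'}  # assumed auth

credentials = {'serpapi_api_key': '...'}  # assumed credential shape

check = requests.post(f'{BASE}/workspaces/current/tool-providers/serpapi/credentials-validate',
                      json={'credentials': credentials}, cookies=COOKIES).json()
if check['result'] == 'success':
    requests.post(f'{BASE}/workspaces/current/tool-providers/serpapi/credentials',
                  json={'credentials': credentials}, cookies=COOKIES)  # 201, stored encrypted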


@@ -2,10 +2,12 @@
import logging
from flask import request
from flask_login import login_required, current_user
from flask_restful import Resource, fields, marshal_with, reqparse, marshal
from flask_login import current_user
from libs.login import login_required
from flask_restful import Resource, fields, marshal_with, reqparse, marshal, inputs
from controllers.console import api
from controllers.console.admin import admin_required
from controllers.console.setup import setup_required
from controllers.console.error import AccountNotLinkTenantError
from controllers.console.wraps import account_initialization_required
@@ -30,7 +32,7 @@ tenant_fields = {
'created_at': TimestampField,
'role': fields.String,
'providers': fields.List(fields.Nested(provider_fields)),
'in_trail': fields.Boolean,
'in_trial': fields.Boolean,
'trial_end_reason': fields.String,
}
@@ -43,6 +45,13 @@ tenants_fields = {
'current': fields.Boolean
}
workspace_fields = {
'id': fields.String,
'name': fields.String,
'status': fields.String,
'created_at': TimestampField
}
class TenantListApi(Resource):
@setup_required
@@ -57,6 +66,38 @@
return {'workspaces': marshal(tenants, tenants_fields)}, 200
class WorkspaceListApi(Resource):
@setup_required
@admin_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('page', type=inputs.int_range(1, 99999), required=False, default=1, location='args')
parser.add_argument('limit', type=inputs.int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
tenants = db.session.query(Tenant).order_by(Tenant.created_at.desc())\
.paginate(page=args['page'], per_page=args['limit'])
has_more = False
if len(tenants.items) == args['limit']:
current_page_first_tenant = tenants.items[-1]  # Pagination objects are not indexable; take the last item of the page
rest_count = db.session.query(Tenant).filter(
Tenant.created_at < current_page_first_tenant.created_at,
Tenant.id != current_page_first_tenant.id
).count()
if rest_count > 0:
has_more = True
total = db.session.query(Tenant).count()
return {
'data': marshal(tenants.items, workspace_fields),
'has_more': has_more,
'limit': args['limit'],
'page': args['page'],
'total': total
}, 200
class TenantApi(Resource):
@setup_required
@login_required
@@ -92,6 +133,7 @@ class SwitchWorkspaceApi(Resource):
api.add_resource(TenantListApi, '/workspaces') # GET for getting all tenants
api.add_resource(WorkspaceListApi, '/all-workspaces') # GET for getting all tenants
api.add_resource(TenantApi, '/workspaces/current', endpoint='workspaces_current') # GET for getting current tenant info
api.add_resource(TenantApi, '/info', endpoint='info') # Deprecated
api.add_resource(SwitchWorkspaceApi, '/workspaces/switch') # POST for switching tenant


@@ -7,6 +7,6 @@ bp = Blueprint('service_api', __name__, url_prefix='/v1')
api = ExternalApi(bp)
from .app import completion, app, conversation, message
from .app import completion, app, conversation, message, audio
from .dataset import document
from .dataset import document, segment, dataset


@@ -4,6 +4,8 @@ from flask_restful import fields, marshal_with
from controllers.service_api import api
from controllers.service_api.wraps import AppApiResource
from models.model import App
class AppParameterApi(AppApiResource):
"""Resource for app variables."""
@@ -22,12 +24,14 @@ class AppParameterApi(AppApiResource):
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'retriever_resource': fields.Raw,
'more_like_this': fields.Raw,
'user_input_form': fields.Raw,
}
@marshal_with(parameters_fields)
def get(self, app_model, end_user):
def get(self, app_model: App, end_user):
"""Retrieve app parameters."""
app_model_config = app_model.app_model_config
@@ -35,6 +39,8 @@ class AppParameterApi(AppApiResource):
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list
}


@@ -0,0 +1,61 @@
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.service_api import api
from controllers.service_api.app.error import AppUnavailableError, ProviderNotInitializeError, CompletionRequestError, ProviderQuotaExceededError, \
ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, UnsupportedAudioTypeError, \
ProviderNotSupportSpeechToTextError
from controllers.service_api.wraps import AppApiResource
from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from models.model import App, AppModelConfig
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
class AudioApi(AppApiResource):
def post(self, app_model: App, end_user):
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(AudioApi, '/audio-to-text')
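The service API blueprint is mounted at /v1 (see the Blueprint hunk above), and AudioApi reads the upload from request.files['file']. A multipart sketch; the Bearer app-token header and the response shape are assumptions:

import requests

with open('voice.mp3', 'rb') as f:
    resp = requests.post('http://localhost:5001/v1/audio-to-text',
                         headers={'Authorization': 'Bearer <app-api-key>'},  # assumed auth
                         files={'file': ('voice.mp3', f, 'audio/mpeg')})
print(resp.json())  # transcription payload; exact shape depends on AudioService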


@@ -14,7 +14,7 @@ from controllers.service_api.app.error import AppUnavailableError, ProviderNotIn
ProviderModelCurrentlyNotSupportError
from controllers.service_api.wraps import AppApiResource
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \
from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from services.completion_service import CompletionService
@@ -27,9 +27,11 @@ class CompletionApi(AppApiResource):
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, location='json')
parser.add_argument('query', type=str, location='json', default='')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('user', type=str, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
@@ -54,8 +56,8 @@ class CompletionApi(AppApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -91,6 +93,8 @@ class ChatApi(AppApiResource):
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('user', type=str, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
@@ -115,8 +119,8 @@ class ChatApi(AppApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -156,8 +160,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
except ProviderTokenNotInitError:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:


@@ -1,4 +1,5 @@
# -*- coding:utf-8 -*-
from flask import request
from flask_restful import fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
@@ -7,25 +8,11 @@ from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import AppApiResource
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import TimestampField, uuid_value
import services
from services.conversation_service import ConversationService
conversation_fields = {
'id': fields.String,
'name': fields.String,
'inputs': fields.Raw,
'status': fields.String,
'introduction': fields.String,
'created_at': TimestampField
}
conversation_infinite_scroll_pagination_fields = {
'limit': fields.Integer,
'has_more': fields.Boolean,
'data': fields.List(fields.Nested(conversation_fields))
}
class ConversationApi(AppApiResource):
@@ -48,10 +35,28 @@ class ConversationApi(AppApiResource):
except services.errors.conversation.LastConversationNotExistsError:
raise NotFound("Last Conversation Not Exists.")
class ConversationDetailApi(AppApiResource):
@marshal_with(simple_conversation_fields)
def delete(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
raise NotChatAppError()
conversation_id = str(c_id)
user = (request.get_json(silent=True) or {}).get('user')
if end_user is None and user is not None:
end_user = create_or_update_end_user_for_user_id(app_model, user)
try:
ConversationService.delete(app_model, conversation_id, end_user)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
return {"result": "success"}, 204
class ConversationRenameApi(AppApiResource):
@marshal_with(conversation_fields)
@marshal_with(simple_conversation_fields)
def post(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
raise NotChatAppError()
@@ -74,3 +79,5 @@ class ConversationRenameApi(AppApiResource):
api.add_resource(ConversationRenameApi, '/conversations/<uuid:c_id>/name', endpoint='conversation_name')
api.add_resource(ConversationApi, '/conversations')
api.add_resource(ConversationApi, '/conversations/<uuid:c_id>', endpoint='conversation')
api.add_resource(ConversationDetailApi, '/conversations/<uuid:c_id>', endpoint='conversation_detail')
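A hypothetical client call for the conversation_detail route registered above; the host, the '/v1' prefix, and the API key are placeholders, and the 'user' field matches what ConversationDetailApi.delete reads from the JSON body:

import requests

resp = requests.delete(
    "https://api.example.com/v1/conversations/<conversation_id>",
    headers={"Authorization": "Bearer YOUR_APP_API_KEY"},
    json={"user": "end-user-123"},
)
assert resp.status_code == 204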

View File

@@ -51,3 +51,27 @@ class CompletionRequestError(BaseHTTPException):
description = "Completion request failed."
code = 400
class NoAudioUploadedError(BaseHTTPException):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeError(BaseHTTPException):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeError(BaseHTTPException):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextError(BaseHTTPException):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text."
code = 400
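A small sketch of how the '{message}' placeholder in AudioTooLargeError's description might be filled when raising; passing the text as a werkzeug-style description argument is an assumption about BaseHTTPException:

uploaded_mb = 50
limit_mb = 30
if uploaded_mb > limit_mb:
    # Assumed: BaseHTTPException accepts a description override, as werkzeug does.
    raise AudioTooLargeError(f"Audio size exceeded. Maximum allowed size is {limit_mb} MB.")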

View File

@@ -16,6 +16,24 @@ class MessageListApi(AppApiResource):
feedback_fields = {
'rating': fields.String
}
retriever_resource_fields = {
'id': fields.String,
'message_id': fields.String,
'position': fields.Integer,
'dataset_id': fields.String,
'dataset_name': fields.String,
'document_id': fields.String,
'document_name': fields.String,
'data_source_type': fields.String,
'segment_id': fields.String,
'score': fields.Float,
'hit_count': fields.Integer,
'word_count': fields.Integer,
'segment_position': fields.Integer,
'index_node_hash': fields.String,
'content': fields.String,
'created_at': TimestampField
}
message_fields = {
'id': fields.String,
@@ -24,6 +42,7 @@ class MessageListApi(AppApiResource):
'query': fields.String,
'answer': fields.String,
'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
'created_at': TimestampField
}
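For illustration, a self-contained example of how flask_restful's marshal renders a nested list like retriever_resources above; the field subset and sample values are invented:

from flask_restful import fields, marshal

resource_fields = {
    'dataset_name': fields.String,
    'document_name': fields.String,
    'score': fields.Float,
}
message = {'retriever_resources': [
    {'dataset_name': 'FAQ', 'document_name': 'handbook.txt', 'score': 0.82},
]}
# marshal walks the dict and emits only the declared fields.
print(marshal(message, {'retriever_resources': fields.List(fields.Nested(resource_fields))}))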

View File

@@ -0,0 +1,81 @@
from flask import request
from flask_restful import reqparse, marshal
import services.dataset_service
from controllers.service_api import api
from controllers.service_api.dataset.error import DatasetNameDuplicateError
from controllers.service_api.wraps import DatasetApiResource
from libs.login import current_user
from core.model_providers.models.entity.model_params import ModelType
from fields.dataset_fields import dataset_detail_fields
from services.dataset_service import DatasetService
from services.provider_service import ProviderService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 40:
raise ValueError('Name must be between 1 and 40 characters.')
return name
class DatasetApi(DatasetApiResource):
"""Resource for get datasets."""
def get(self, tenant_id):
page = request.args.get('page', default=1, type=int)
limit = request.args.get('limit', default=20, type=int)
provider = request.args.get('provider', default="vendor")
datasets, total = DatasetService.get_datasets(page, limit, provider,
tenant_id, current_user)
# check embedding setting
provider_service = ProviderService()
valid_model_list = provider_service.get_valid_model_list(current_user.current_tenant_id,
ModelType.EMBEDDINGS.value)
model_names = []
for valid_model in valid_model_list:
model_names.append(f"{valid_model['model_name']}:{valid_model['model_provider']['provider_name']}")
data = marshal(datasets, dataset_detail_fields)
for item in data:
if item['indexing_technique'] == 'high_quality':
item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
if item_model in model_names:
item['embedding_available'] = True
else:
item['embedding_available'] = False
else:
item['embedding_available'] = True
response = {
'data': data,
'has_more': len(datasets) == limit,
'limit': limit,
'total': total,
'page': page
}
return response, 200
"""Resource for datasets."""
def post(self, tenant_id):
parser = reqparse.RequestParser()
parser.add_argument('name', nullable=False, required=True,
help='Name is required. Name must be between 1 and 40 characters.',
type=_validate_name)
parser.add_argument('indexing_technique', type=str, location='json',
choices=('high_quality', 'economy'),
help='Invalid indexing technique.')
args = parser.parse_args()
try:
dataset = DatasetService.create_empty_dataset(
tenant_id=tenant_id,
name=args['name'],
indexing_technique=args['indexing_technique'],
account=current_user
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()
return marshal(dataset, dataset_detail_fields), 200
api.add_resource(DatasetApi, '/datasets')
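A hypothetical request against the dataset list endpoint registered above (host, '/v1' prefix, and key are placeholders); the data/has_more/limit/total/page keys mirror the response dict built in get():

import requests

resp = requests.get(
    "https://api.example.com/v1/datasets",
    params={"page": 1, "limit": 20},
    headers={"Authorization": "Bearer YOUR_DATASET_API_KEY"},
)
for item in resp.json()["data"]:
    print(item["name"], item["embedding_available"])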

View File

@@ -1,110 +1,287 @@
import datetime
import uuid
import json
from flask import current_app
from flask_restful import reqparse
from flask import request
from flask_restful import reqparse, marshal
from sqlalchemy import desc
from werkzeug.exceptions import NotFound
import services.dataset_service
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import ArchivedDocumentImmutableError, DocumentIndexingError, \
DatasetNotInitedError
NoFileUploadedError, TooManyFilesError
from controllers.service_api.wraps import DatasetApiResource
from core.llm.error import ProviderTokenNotInitError
from libs.login import current_user
from core.model_providers.error import ProviderTokenNotInitError
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.model import UploadFile
from fields.document_fields import document_fields, document_status_fields
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.file_service import FileService
class DocumentListApi(DatasetApiResource):
class DocumentAddByTextApi(DatasetApiResource):
"""Resource for documents."""
def post(self, dataset):
"""Create document."""
def post(self, tenant_id, dataset_id):
"""Create document by text."""
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, nullable=False, location='json')
parser.add_argument('text', type=str, required=True, nullable=False, location='json')
parser.add_argument('doc_type', type=str, location='json')
parser.add_argument('doc_metadata', type=dict, location='json')
parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
parser.add_argument('original_document_id', type=str, required=False, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
location='json')
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset.indexing_technique:
raise DatasetNotInitedError("Dataset indexing technique must be set.")
if not dataset:
raise ValueError('Dataset does not exist.')
doc_type = args.get('doc_type')
doc_metadata = args.get('doc_metadata')
if not dataset.indexing_technique and not args['indexing_technique']:
raise ValueError('indexing_technique is required.')
if doc_type and doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
raise ValueError('Invalid doc_type.')
# user uuid as file name
file_uuid = str(uuid.uuid4())
file_key = 'upload_files/' + dataset.tenant_id + '/' + file_uuid + '.txt'
# save file to storage
storage.save(file_key, args.get('text'))
# save file to db
config = current_app.config
upload_file = UploadFile(
tenant_id=dataset.tenant_id,
storage_type=config['STORAGE_TYPE'],
key=file_key,
name=args.get('name') + '.txt',
size=len(args.get('text')),
extension='txt',
mime_type='text/plain',
created_by=dataset.created_by,
created_at=datetime.datetime.utcnow(),
used=True,
used_by=dataset.created_by,
used_at=datetime.datetime.utcnow()
)
db.session.add(upload_file)
db.session.commit()
document_data = {
'data_source': {
'type': 'upload_file',
'info': upload_file.id
upload_file = FileService.upload_text(args.get('text'), args.get('name'))
data_source = {
'type': 'upload_file',
'info_list': {
'data_source_type': 'upload_file',
'file_info_list': {
'file_ids': [upload_file.id]
}
}
}
args['data_source'] = data_source
# validate args
DocumentService.document_create_args_validate(args)
try:
document = DocumentService.save_document_with_dataset_id(
documents, batch = DocumentService.save_document_with_dataset_id(
dataset=dataset,
document_data=document_data,
account=dataset.created_by_account,
dataset_process_rule=dataset.latest_process_rule,
document_data=args,
account=current_user,
dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
created_from='api'
)
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
document = documents[0]
if doc_type and doc_metadata:
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]
document.doc_metadata = {}
for key, value_type in metadata_schema.items():
value = doc_metadata.get(key)
if value is not None and isinstance(value, value_type):
document.doc_metadata[key] = value
document.doc_type = doc_type
document.updated_at = datetime.datetime.utcnow()
db.session.commit()
return {'id': document.id}
documents_and_batch_fields = {
'document': marshal(document, document_fields),
'batch': batch
}
return documents_and_batch_fields, 200
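A hypothetical call to the create_by_text route this class serves (registered at the bottom of this file); the host, '/v1' prefix, and key are placeholders:

import requests

resp = requests.post(
    "https://api.example.com/v1/datasets/<dataset_id>/document/create_by_text",
    headers={"Authorization": "Bearer YOUR_DATASET_API_KEY"},
    json={"name": "notes", "text": "Some plain text to index.",
          "indexing_technique": "high_quality"},
)
body = resp.json()
print(body["document"]["id"], body["batch"])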
class DocumentApi(DatasetApiResource):
def delete(self, dataset, document_id):
class DocumentUpdateByTextApi(DatasetApiResource):
"""Resource for update documents."""
def post(self, tenant_id, dataset_id, document_id):
"""Update document by text."""
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=False, nullable=True, location='json')
parser.add_argument('text', type=str, required=False, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
location='json')
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise ValueError('Dataset does not exist.')
if args['text']:
upload_file = FileService.upload_text(args.get('text'), args.get('name'))
data_source = {
'type': 'upload_file',
'info_list': {
'data_source_type': 'upload_file',
'file_info_list': {
'file_ids': [upload_file.id]
}
}
}
args['data_source'] = data_source
# validate args
args['original_document_id'] = str(document_id)
DocumentService.document_create_args_validate(args)
try:
documents, batch = DocumentService.save_document_with_dataset_id(
dataset=dataset,
document_data=args,
account=current_user,
dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
created_from='api'
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
document = documents[0]
documents_and_batch_fields = {
'document': marshal(document, document_fields),
'batch': batch
}
return documents_and_batch_fields, 200
class DocumentAddByFileApi(DatasetApiResource):
"""Resource for documents."""
def post(self, tenant_id, dataset_id):
"""Create document by upload file."""
args = {}
if 'data' in request.form:
args = json.loads(request.form['data'])
if 'doc_form' not in args:
args['doc_form'] = 'text_model'
if 'doc_language' not in args:
args['doc_language'] = 'English'
# get dataset info
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise ValueError('Dataset does not exist.')
if not dataset.indexing_technique and not args.get('indexing_technique'):
raise ValueError('indexing_technique is required.')
# check file
if 'file' not in request.files:
raise NoFileUploadedError()
if len(request.files) > 1:
raise TooManyFilesError()
# save file info
file = request.files['file']
upload_file = FileService.upload_file(file)
data_source = {
'type': 'upload_file',
'info_list': {
'file_info_list': {
'file_ids': [upload_file.id]
}
}
}
args['data_source'] = data_source
# validate args
DocumentService.document_create_args_validate(args)
try:
documents, batch = DocumentService.save_document_with_dataset_id(
dataset=dataset,
document_data=args,
account=dataset.created_by_account,
dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
created_from='api'
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
document = documents[0]
documents_and_batch_fields = {
'document': marshal(document, document_fields),
'batch': batch
}
return documents_and_batch_fields, 200
class DocumentUpdateByFileApi(DatasetApiResource):
"""Resource for update documents."""
def post(self, tenant_id, dataset_id, document_id):
"""Update document by upload file."""
args = {}
if 'data' in request.form:
args = json.loads(request.form['data'])
if 'doc_form' not in args:
args['doc_form'] = 'text_model'
if 'doc_language' not in args:
args['doc_language'] = 'English'
# get dataset info
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise ValueError('Dataset does not exist.')
if 'file' in request.files:
# save file info
file = request.files['file']
if len(request.files) > 1:
raise TooManyFilesError()
upload_file = FileService.upload_file(file)
data_source = {
'type': 'upload_file',
'info_list': {
'file_info_list': {
'file_ids': [upload_file.id]
}
}
}
args['data_source'] = data_source
# validate args
args['original_document_id'] = str(document_id)
DocumentService.document_create_args_validate(args)
try:
documents, batch = DocumentService.save_document_with_dataset_id(
dataset=dataset,
document_data=args,
account=dataset.created_by_account,
dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
created_from='api'
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
document = documents[0]
documents_and_batch_fields = {
'document': marshal(document, document_fields),
'batch': batch
}
return documents_and_batch_fields, 200
class DocumentDeleteApi(DatasetApiResource):
def delete(self, tenant_id, dataset_id, document_id):
"""Delete document."""
document_id = str(document_id)
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
# get dataset info
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise ValueError('Dataset does not exist.')
document = DocumentService.get_document(dataset.id, document_id)
@@ -122,8 +299,85 @@ class DocumentApi(DatasetApiResource):
except services.errors.document.DocumentIndexingError:
raise DocumentIndexingError('Cannot delete document during indexing.')
return {'result': 'success'}, 204
return {'result': 'success'}, 200
api.add_resource(DocumentListApi, '/documents')
api.add_resource(DocumentApi, '/documents/<uuid:document_id>')
class DocumentListApi(DatasetApiResource):
def get(self, tenant_id, dataset_id):
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
page = request.args.get('page', default=1, type=int)
limit = request.args.get('limit', default=20, type=int)
search = request.args.get('keyword', default=None, type=str)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise NotFound('Dataset not found.')
query = Document.query.filter_by(
dataset_id=str(dataset_id), tenant_id=tenant_id)
if search:
search = f'%{search}%'
query = query.filter(Document.name.like(search))
query = query.order_by(desc(Document.created_at))
paginated_documents = query.paginate(
page=page, per_page=limit, max_per_page=100, error_out=False)
documents = paginated_documents.items
response = {
'data': marshal(documents, document_fields),
'has_more': len(documents) == limit,
'limit': limit,
'total': paginated_documents.total,
'page': page
}
return response
class DocumentIndexingStatusApi(DatasetApiResource):
def get(self, tenant_id, dataset_id, batch):
dataset_id = str(dataset_id)
batch = str(batch)
tenant_id = str(tenant_id)
# get dataset
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise NotFound('Dataset not found.')
# get documents
documents = DocumentService.get_batch_documents(dataset_id, batch)
if not documents:
raise NotFound('Documents not found.')
documents_status = []
for document in documents:
completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
DocumentSegment.status != 're_segment').count()
document.completed_segments = completed_segments
document.total_segments = total_segments
if document.is_paused:
document.indexing_status = 'paused'
documents_status.append(marshal(document, document_status_fields))
data = {
'data': documents_status
}
return data
api.add_resource(DocumentAddByTextApi, '/datasets/<uuid:dataset_id>/document/create_by_text')
api.add_resource(DocumentAddByFileApi, '/datasets/<uuid:dataset_id>/document/create_by_file')
api.add_resource(DocumentUpdateByTextApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text')
api.add_resource(DocumentUpdateByFileApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file')
api.add_resource(DocumentDeleteApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentListApi, '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DocumentIndexingStatusApi, '/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status')
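A hypothetical multipart request for the create_by_file route above: the 'data' form field carries the JSON args that the handler json.loads(), and the returned batch can then be polled via the indexing-status route. The host, prefix, key, and the process_rule shape are assumptions:

import json
import requests

with open("notes.txt", "rb") as f:
    resp = requests.post(
        "https://api.example.com/v1/datasets/<dataset_id>/document/create_by_file",
        headers={"Authorization": "Bearer YOUR_DATASET_API_KEY"},
        files={"file": ("notes.txt", f, "text/plain")},
        data={"data": json.dumps({"indexing_technique": "economy",
                                  "process_rule": {"mode": "automatic"}})},
    )
body = resp.json()
# Poll .../documents/<batch>/indexing-status with this batch id.
print(body["document"]["id"], body["batch"])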

View File

@@ -1,20 +1,73 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException
class NoFileUploadedError(BaseHTTPException):
error_code = 'no_file_uploaded'
description = "Please upload your file."
code = 400
class TooManyFilesError(BaseHTTPException):
error_code = 'too_many_files'
description = "Only one file is allowed."
code = 400
class FileTooLargeError(BaseHTTPException):
error_code = 'file_too_large'
description = "File size exceeded. {message}"
code = 413
class UnsupportedFileTypeError(BaseHTTPException):
error_code = 'unsupported_file_type'
description = "File type not allowed."
code = 415
class HighQualityDatasetOnlyError(BaseHTTPException):
error_code = 'high_quality_dataset_only'
description = "Current operation only supports 'high-quality' datasets."
code = 400
class DatasetNotInitializedError(BaseHTTPException):
error_code = 'dataset_not_initialized'
description = "The dataset is still being initialized or indexing. Please wait a moment."
code = 400
class ArchivedDocumentImmutableError(BaseHTTPException):
error_code = 'archived_document_immutable'
description = "Cannot operate when document was archived."
description = "The archived document is not editable."
code = 403
class DatasetNameDuplicateError(BaseHTTPException):
error_code = 'dataset_name_duplicate'
description = "The dataset name already exists. Please modify your dataset name."
code = 409
class InvalidActionError(BaseHTTPException):
error_code = 'invalid_action'
description = "Invalid action."
code = 400
class DocumentAlreadyFinishedError(BaseHTTPException):
error_code = 'document_already_finished'
description = "The document has been processed. Please refresh the page or go to the document details."
code = 400
class DocumentIndexingError(BaseHTTPException):
error_code = 'document_indexing'
description = "Cannot operate document during indexing."
code = 403
description = "The document is being processed and cannot be edited."
code = 400
class DatasetNotInitedError(BaseHTTPException):
error_code = 'dataset_not_inited'
description = "The dataset is still being initialized or indexing. Please wait a moment."
code = 403
class InvalidMetadataError(BaseHTTPException):
error_code = 'invalid_metadata'
description = "The metadata content is incorrect. Please check and verify."
code = 400

View File

@@ -0,0 +1,59 @@
from flask_login import current_user
from flask_restful import reqparse, marshal
from werkzeug.exceptions import NotFound
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.wraps import DatasetApiResource
from core.model_providers.error import ProviderTokenNotInitError, LLMBadRequestError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from fields.segment_fields import segment_fields
from models.dataset import Dataset
from services.dataset_service import DocumentService, SegmentService
class SegmentApi(DatasetApiResource):
"""Resource for segments."""
def post(self, tenant_id, dataset_id, document_id):
"""Create single segment."""
# check dataset
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
raise NotFound('Dataset not found.')
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset.id, document_id)
if not document:
raise NotFound('Document not found.')
# check embedding model setting
if dataset.indexing_technique == 'high_quality':
try:
ModelFactory.get_embedding_model(
tenant_id=current_user.current_tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# validate args
parser = reqparse.RequestParser()
parser.add_argument('segments', type=list, required=False, nullable=True, location='json')
args = parser.parse_args()
for args_item in args['segments']:
SegmentService.segment_create_args_validate(args_item, document)
segments = SegmentService.multi_create_segment(args['segments'], document, dataset)
return {
'data': marshal(segments, segment_fields),
'doc_form': document.doc_form
}, 200
api.add_resource(SegmentApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments')
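A hypothetical request matching SegmentApi above; 'segments' is a list of dicts validated per item by SegmentService.segment_create_args_validate, and the item keys shown here are assumptions:

import requests

resp = requests.post(
    "https://api.example.com/v1/datasets/<dataset_id>/documents/<document_id>/segments",
    headers={"Authorization": "Bearer YOUR_DATASET_API_KEY"},
    json={"segments": [{"content": "Q: What is a segment? A: One indexed chunk.",
                        "keywords": ["segment"]}]},
)
body = resp.json()
print(body["doc_form"], len(body["data"]))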

View File

@@ -2,12 +2,14 @@
from datetime import datetime
from functools import wraps
from flask import request
from flask import request, current_app
from flask_login import user_logged_in
from flask_restful import Resource
from werkzeug.exceptions import NotFound, Unauthorized
from libs.login import _get_user
from extensions.ext_database import db
from models.dataset import Dataset
from models.account import Tenant, TenantAccountJoin, Account
from models.model import ApiToken, App
@@ -17,7 +19,7 @@ def validate_app_token(view=None):
def decorated(*args, **kwargs):
api_token = validate_and_get_api_token('app')
app_model = db.session.query(App).get(api_token.app_id)
app_model = db.session.query(App).filter(App.id == api_token.app_id).first()
if not app_model:
raise NotFound()
@@ -43,12 +45,24 @@ def validate_dataset_token(view=None):
@wraps(view)
def decorated(*args, **kwargs):
api_token = validate_and_get_api_token('dataset')
dataset = db.session.query(Dataset).get(api_token.dataset_id)
if not dataset:
raise NotFound()
return view(dataset, *args, **kwargs)
tenant_account_join = db.session.query(Tenant, TenantAccountJoin) \
.filter(Tenant.id == api_token.tenant_id) \
.filter(TenantAccountJoin.tenant_id == Tenant.id) \
.filter(TenantAccountJoin.role == 'owner') \
.one_or_none()
if tenant_account_join:
tenant, ta = tenant_account_join
account = Account.query.filter_by(id=ta.account_id).first()
# Login admin
if account:
account.current_tenant = tenant
current_app.login_manager._update_request_context_with_user(account)
user_logged_in.send(current_app._get_current_object(), user=_get_user())
else:
raise Unauthorized("Tenant owner account is not exist.")
else:
raise Unauthorized("Tenant is not exist.")
return view(api_token.tenant_id, *args, **kwargs)
return decorated
if view:
@@ -64,14 +78,14 @@ def validate_and_get_api_token(scope=None):
Validate and get API token.
"""
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise Unauthorized()
if auth_header is None or ' ' not in auth_header:
raise Unauthorized("Authorization header must be provided and start with 'Bearer'")
auth_scheme, auth_token = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
if auth_scheme != 'bearer':
raise Unauthorized()
raise Unauthorized("Authorization scheme must be 'Bearer'")
api_token = db.session.query(ApiToken).filter(
ApiToken.token == auth_token,
@@ -79,7 +93,7 @@
).first()
if not api_token:
raise Unauthorized()
raise Unauthorized("Access token is invalid")
api_token.last_used_at = datetime.utcnow()
db.session.commit()
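As a standalone sketch of the header rules enforced above (ValueError stands in here for werkzeug's Unauthorized):

from typing import Optional

def parse_bearer_token(auth_header: Optional[str]) -> str:
    # Mirrors validate_and_get_api_token: require "Bearer <token>".
    if auth_header is None or ' ' not in auth_header:
        raise ValueError("Authorization header must be provided and start with 'Bearer'")
    scheme, token = auth_header.split(None, 1)
    if scheme.lower() != 'bearer':
        raise ValueError("Authorization scheme must be 'Bearer'")
    return token

assert parse_bearer_token("Bearer app-xxxx") == "app-xxxx"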

View File

@@ -7,4 +7,4 @@ bp = Blueprint('web', __name__, url_prefix='/api')
api = ExternalApi(bp)
from . import completion, app, conversation, message, site, saved_message
from . import completion, app, conversation, message, site, saved_message, audio, passport

Some files were not shown because too many files have changed in this diff.