Compare commits

...

1562 Commits

Author SHA1 Message Date
85e568e114 compatible query is None 2024-04-03 18:39:29 +08:00
0026cb404f compatible query is None 2024-04-03 18:38:27 +08:00
62919a9ff5 app list mutation 2024-04-03 18:06:36 +08:00
e13f8da9d5 fix: prompt editor 2024-04-03 18:04:46 +08:00
53aca1a922 add sys variables for start node 2024-04-03 17:53:54 +08:00
464aee08a1 fix: prompt editor 2024-04-03 17:45:27 +08:00
ab56e6b6af fix: enable memory add sys query var input 2024-04-03 17:42:21 +08:00
2a2f4cd4d5 fix: label ui 2024-04-03 17:22:10 +08:00
b3a4a52a7a fix: valid tool valid empty error 2024-04-03 17:18:16 +08:00
379f9b56ad fix: start node not show sys files 2024-04-03 16:55:55 +08:00
b705041dda chore: remove uage output 2024-04-03 16:52:01 +08:00
fedcfe94ae update version to 0.6.0-preview-workflow.2 2024-04-03 16:50:11 +08:00
add7bdc877 fix: add input instance id 2024-04-03 16:46:47 +08:00
088842dcdb fix: prompt editor 2024-04-03 16:13:14 +08:00
c43eaeec06 fix: can not open 2024-04-03 15:48:57 +08:00
f06554a11e fix: sync draft 2024-04-03 15:37:19 +08:00
3ac37e802a fix: sandbox tips 2024-04-03 15:34:05 +08:00
aee669f67d fix max length for paragraph 2024-04-03 15:32:59 +08:00
c8db4d8a08 fix: editor choose context would blur 2024-04-03 15:15:35 +08:00
c3bb541a69 fix 2024-04-03 15:13:26 +08:00
ba3039d6c9 merge feat/workflow 2024-04-03 15:00:26 +08:00
bd3b400121 chore: support click to show choose add var 2024-04-03 14:57:16 +08:00
28e813f57f fix 2024-04-03 14:38:15 +08:00
3f11e11c2d chore: confirm ui 2024-04-03 14:08:36 +08:00
37a282cc1c fix: add default lora 2024-04-03 13:51:18 +08:00
88ef220d4d fix app list cache 2024-04-03 13:49:29 +08:00
ccb67bffc4 fix: tools 2024-04-03 13:46:29 +08:00
20394b3231 fix 2024-04-03 13:39:15 +08:00
52a1c4580c feat: update chat app publishing 2024-04-03 13:12:07 +08:00
aca395b97d fix test run 2024-04-03 12:44:32 +08:00
459b690416 fix: prompt-editor 2024-04-03 12:31:40 +08:00
d48bdf3e14 chore: generic 2024-04-03 12:02:51 +08:00
9fea2fd44b feat: change from creditical schema value from api 2024-04-03 11:56:54 +08:00
f291aec2cd chore: start input placeholder and bg 2024-04-03 11:41:44 +08:00
00d9c48461 fix migration version dependency 2024-04-02 22:46:38 +08:00
fef62d937d Merge branch 'main' into feat/workflow 2024-04-02 22:36:27 +08:00
6b06c5b957 optimize workflow inputs 2024-04-02 22:36:07 +08:00
5a4ea0932a add inputs for workflow_started event 2024-04-02 21:13:08 +08:00
f7e4f0a988 fix: run error 2024-04-02 21:05:04 +08:00
cf449b31a1 code node rename 2024-04-02 20:48:46 +08:00
5e66a60f1c add embedding cache and clean embedding cache job (#3087)
Co-authored-by: jyong <jyong@dify.ai>
2024-04-02 20:46:24 +08:00
f7184c0e36 fix: tool node check 2024-04-02 20:19:50 +08:00
5df66579a8 fix crash of advanced prompt app 2024-04-02 20:12:43 +08:00
d260e6b064 fix: prompt-editor 2024-04-02 19:55:58 +08:00
7f55ea0c53 Chore/move chrome ext (#3085) 2024-04-02 19:51:02 +08:00
01c6a35966 chore: encoder 2024-04-02 19:25:52 +08:00
0202469254 fix: checklist 2024-04-02 19:06:22 +08:00
fb2fa625b4 fix: use sys query instead user query 2024-04-02 18:46:44 +08:00
fbdf2ba839 fix: classify default two classifies and empty check 2024-04-02 18:41:31 +08:00
f7d1d9b8b1 fix(duckduckgo-search): invoke error (#3077) 2024-04-02 18:40:09 +08:00
6b4c8e76e6 feat (new llm): add support for openrouter (#3042) 2024-04-02 18:38:46 +08:00
716936e37a fix: remove key replicate 2024-04-02 18:30:57 +08:00
1c004e0df6 optimize: sd 2024-04-02 18:21:08 +08:00
59d279fbe0 fix: remove rename check 2024-04-02 18:12:44 +08:00
e12a0c154c add segment function billing check for SAAS env (#3082)
Co-authored-by: jyong <jyong@dify.ai>
2024-04-02 17:55:49 +08:00
2d7c43b60f feat: http panel 2024-04-02 17:45:12 +08:00
a9f7f88a9a feat: answer support render var input 2024-04-02 17:38:05 +08:00
56cb9ccec1 feat: update workflow app publishing 2024-04-02 17:33:35 +08:00
e0a152164b fix: prompt-editor 2024-04-02 17:20:32 +08:00
9c7e99e829 Update README.md (#3081) 2024-04-02 17:19:21 +08:00
d72524ceb0 hide result info in chatflow 2024-04-02 17:16:39 +08:00
74538fb3b2 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow 2024-04-02 16:59:04 +08:00
f832211e2e db migrate merge 2024-04-02 16:58:49 +08:00
e46c3a9235 optimize: tool 2024-04-02 16:58:24 +08:00
36c3774fac modify test run panel 2024-04-02 16:57:28 +08:00
5e201324d6 Merge branch 'main' into feat/workflow
# Conflicts:
#	api/.env.example
#	docker/docker-compose.yaml
2024-04-02 16:55:43 +08:00
5adbcacc52 fix: end node can not selector 2024-04-02 15:52:15 +08:00
8b01796f5d fix external data convert 2024-04-02 15:30:39 +08:00
34f4f76f67 fix: handle debug run valid 2024-04-02 15:27:03 +08:00
01e832a587 fix: linter 2024-04-02 15:25:15 +08:00
1af2d06d29 feat: add tool benchmark 2024-04-02 15:23:54 +08:00
426abe2134 fix: variable type add missing key 2024-04-02 15:14:00 +08:00
f62775bcad fix: prompt-editor 2024-04-02 14:12:52 +08:00
7a2083a6b7 fix: num suuport var insert 2024-04-02 14:12:34 +08:00
09650b9d47 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow 2024-04-02 14:03:02 +08:00
f19219ab8d fix knowledge retrival 2024-04-02 14:02:49 +08:00
8be04b57f9 fix: http attr key rerender 2024-04-02 13:55:30 +08:00
ef39fa3fb2 node connect 2024-04-02 13:23:36 +08:00
fe569559ac fix app type label 2024-04-02 12:56:11 +08:00
d14ea2ecaa version to 0.5.11-fix1 (#3073) 2024-04-02 12:51:29 +08:00
8125d8fc9f modify params of app switch 2024-04-02 12:45:33 +08:00
fd8ed95209 fix prompt log 2024-04-02 12:34:40 +08:00
cf22842554 support app creation in nav 2024-04-02 12:15:28 +08:00
0fcb746c08 add created_at for app model config 2024-04-02 12:07:30 +08:00
9d7ab0400d chat error 2024-04-02 12:01:32 +08:00
396a3e0456 feat: add tool parameter type converter 2024-04-02 11:58:50 +08:00
56a1d5330a chore: types 2024-04-02 11:50:03 +08:00
c5e58c713c remove not necessary error reporting 2024-04-02 11:15:50 +08:00
6e0f13f269 feat: tool new struct 2024-04-02 11:14:33 +08:00
00728c2b1d support type fitlering for app template 2024-04-01 22:55:53 +08:00
9fb7100b3f modify style of app type tag 2024-04-01 22:55:53 +08:00
4e31d7b64f chat 2024-04-01 21:17:39 +08:00
1ab3b73c14 add app info for workflow convert 2024-04-01 21:00:08 +08:00
b5fa68fdfe node selected 2024-04-01 20:47:59 +08:00
31f24e1a14 enhance: enable configurate limitation of code 2024-04-01 20:47:26 +08:00
e800109c02 node selected 2024-04-01 20:33:20 +08:00
a94d86da6d add keyword table s3 storage support (#3065)
Co-authored-by: jyong <jyong@dify.ai>
2024-04-01 20:19:30 +08:00
5c3162cc33 fix: http delete btn hide 2024-04-01 19:58:51 +08:00
04b4be27b7 refresh history 2024-04-01 19:41:37 +08:00
5793855115 fix http single run 2024-04-01 19:40:49 +08:00
41cce464ca fix: http var inputs 2024-04-01 19:36:09 +08:00
8c55ff392d fix bugs 2024-04-01 19:33:53 +08:00
45d5d259a4 fix prompt editor 2024-04-01 19:03:34 +08:00
e08d871837 fix: http other params check 2024-04-01 18:48:36 +08:00
ab2c112059 feat: reuse get vars inputs and http request url 2024-04-01 18:33:17 +08:00
5e591fc1b7 feat: add Feishu(飞书) tool for sending message to chat group bot via webhook (#3059)
Co-authored-by: crazywoola <427733928@qq.com>
2024-04-01 18:03:45 +08:00
a42f26d857 fix: object label not pass the right value 2024-04-01 17:52:11 +08:00
6fea18b4d0 feat: insert var key ui 2024-04-01 17:40:31 +08:00
7e259600bf fix: debugger form struct and textare line-height 2024-04-01 17:40:31 +08:00
75e95e09d3 fix: test 2024-04-01 17:07:09 +08:00
51f225e567 fix 2024-04-01 16:57:42 +08:00
53c988718b fix: no var caused bugs 2024-04-01 16:57:03 +08:00
74ead43ae1 fix: query selector set sys value problem 2024-04-01 16:50:09 +08:00
d0509213d1 prompt editor 2024-04-01 16:47:41 +08:00
5b81234db8 fix: tool entities 2024-04-01 16:43:10 +08:00
df9e2e478f workflow template 2024-04-01 16:38:37 +08:00
7c64f2cfe0 feat: use en-US recommended apps as fallback if using unmaintained language 2024-04-01 16:24:59 +08:00
3b3d19dab7 Merge branch 'main' into feat/workflow
# Conflicts:
#	api/controllers/console/explore/recommended_app.py
2024-04-01 16:22:49 +08:00
32e83e00e4 feat: use en-US as fallback recommend app if using unmaintained language (#3063) 2024-04-01 16:15:59 +08:00
806f27c370 revert automatic prompt 2024-04-01 16:00:34 +08:00
86a32517e5 fix: tool variable selectors 2024-04-01 15:42:00 +08:00
072967a1d3 fix node single step run of answer & http request & llm 2024-04-01 15:24:35 +08:00
132269618d FEAT: Add Brave Search and Trello(12 Tools) Included (#3040) 2024-04-01 14:53:56 +08:00
9147e0046f node connect 2024-04-01 14:10:05 +08:00
f967203012 remove automatic 2024-04-01 13:31:30 +08:00
e2d0ff4784 chore: run text font size 2024-04-01 13:19:40 +08:00
85ce2e8df8 merge main 2024-04-01 13:09:40 +08:00
c330f89c77 feat: support llm single run 2024-04-01 12:55:20 +08:00
ffb698922a fix: edge 2024-04-01 12:54:15 +08:00
50a7c2c92c fix bug 2024-04-01 12:51:01 +08:00
0843af2996 fix: sys var not show 2024-04-01 12:39:54 +08:00
e9985f0696 chore: enchance debug show name 2024-04-01 12:29:51 +08:00
dfad42075c sys variable 2024-04-01 12:06:22 +08:00
705d765a71 feat: llm show vars 2024-04-01 11:48:42 +08:00
e03367a188 feat: get input vars 2024-04-01 10:38:12 +08:00
84d118de07 add redis lock on create collection in multiple thread mode (#3054)
Co-authored-by: jyong <jyong@dify.ai>
2024-04-01 02:10:41 +08:00
1716ac562c add clean_unused_datasets_task (#3057)
Co-authored-by: jyong <jyong@dify.ai>
2024-04-01 01:34:21 +08:00
e215aae39a feat:xinference audio model support (#3045) 2024-03-31 12:44:11 +08:00
12782cad4d Fix typo (#3041) 2024-03-31 12:41:16 +08:00
fc5ed17fe9 provide a bit more info in logs when parsing api schema error (#3026) 2024-03-30 14:44:50 +08:00
c20685e669 fix 2024-03-29 22:26:26 +08:00
429dd11dd7 add icon for tool node execution 2024-03-29 22:22:58 +08:00
94d04934b3 fix: agent tool label (#3039) 2024-03-29 22:15:16 +08:00
b394dd6fb0 fix convert bug 2024-03-29 21:43:44 +08:00
a30a6dda63 Merge branch 'main' into feat/workflow
# Conflicts:
#	docker/docker-compose.yaml
2024-03-29 21:18:16 +08:00
1387f9b23e version to 0.5.11 (#3038) 2024-03-29 21:09:21 +08:00
6817eab5f1 fix: api / moderation extension import error (#3037) 2024-03-29 21:07:34 +08:00
218f591a5d fix: prompt editor linebreak (#3036) 2024-03-29 21:01:04 +08:00
de3b7e8815 http request node support template variable 2024-03-29 20:54:17 +08:00
142d1be4f8 refactor 2024-03-29 20:53:48 +08:00
17af0de7b6 Add New Tool: StackExchange (#3034)
Co-authored-by: crazywoola <427733928@qq.com>
2024-03-29 20:28:21 +08:00
fb364d44d1 refactor 2024-03-29 20:12:26 +08:00
a647698c32 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow 2024-03-29 19:44:35 +08:00
75ffdc9d3f fixed single retrival 2024-03-29 19:44:26 +08:00
0d12e5c795 run history 2024-03-29 19:32:38 +08:00
bab88efda9 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow 2024-03-29 19:29:42 +08:00
ca6acf2650 fixed single retrival 2024-03-29 19:29:27 +08:00
11b428a73f feat: agent log 2024-03-29 19:23:48 +08:00
f43faa125b feat: add condition placeholder to if-else node 2024-03-29 19:08:35 +08:00
6b3bc789b5 fix: http text pass vars 2024-03-29 19:04:53 +08:00
586488c6a9 feat: llm output and raw text 2024-03-29 19:04:53 +08:00
704cb42869 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow
# Conflicts:
#	api/core/workflow/nodes/question_classifier/question_classifier_node.py
2024-03-29 19:00:21 +08:00
2d26c4745b add history message 2024-03-29 18:58:59 +08:00
971436d935 llm and answer node support inner variable template 2024-03-29 18:44:30 +08:00
8a2d04b305 chore: llm editor bg and not flash 2024-03-29 18:32:59 +08:00
7c45f369d1 checklist 2024-03-29 18:27:41 +08:00
6444d94f41 fix style of app card 2024-03-29 18:24:46 +08:00
a8236a270a feat: body to json editor 2024-03-29 18:19:45 +08:00
760ada399f checklist 2024-03-29 18:08:27 +08:00
815262b9a6 chore: remove input vars 2024-03-29 18:02:06 +08:00
83651a038f feat: http attr support selct keys 2024-03-29 17:55:59 +08:00
589ac9b22c feat: http key value inputs 2024-03-29 17:24:36 +08:00
d673b4c219 fix: prompt editor 2024-03-29 17:20:48 +08:00
4e548fff5e feat: add insert var tooltip 2024-03-29 16:58:07 +08:00
636603d5af chore: type picker 2024-03-29 16:32:36 +08:00
950a52f4fc feat: input var ui 2024-03-29 16:19:09 +08:00
b50e897aa0 fix: prompt editor 2024-03-29 16:08:10 +08:00
d7be9c0afc prompt editor 2024-03-29 14:59:13 +08:00
06a6d398cd checklist 2024-03-29 14:56:47 +08:00
12ed31be4d feat: api support var logic 2024-03-29 14:56:32 +08:00
9d962053a2 Fix claude request errors in bedrock (#3015)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-03-29 13:57:45 +08:00
8d2ac8ff8f feat: ignore invalid vars keys 2024-03-29 13:56:13 +08:00
59909b5ca7 update the discord Invalid invite (#3028) 2024-03-29 13:16:52 +08:00
a6cd0f0e73 fix add segment when dataset and document is empty (#3021)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-29 13:06:00 +08:00
91b84d8f1e chore: http node check 2024-03-29 13:01:36 +08:00
46cc635e05 fix: error status code 2024-03-29 12:35:04 +08:00
1ea8504cf1 chore: code output var empty check 2024-03-29 11:48:45 +08:00
a32465eeb8 chore: handle key exists check 2024-03-29 11:37:16 +08:00
2c43393bf1 Add New Tool: DevDocs (#2993) 2024-03-29 11:21:02 +08:00
f930521d64 chore: start var name check 2024-03-29 11:19:24 +08:00
42ad622a6c fix tool icon 2024-03-28 21:49:56 +08:00
4eb9027510 add icon for tool node in web app 2024-03-28 21:47:10 +08:00
05bb65bd94 add icon for tool node 2024-03-28 21:37:23 +08:00
85285931e2 feat: add agent tool invoke meta 2024-03-28 20:04:31 +08:00
d7c4032917 fix style of app creation 2024-03-28 19:48:45 +08:00
c1466a7a4d Merge branch 'feat/merge-tool-engine' into feat/workflow 2024-03-28 18:44:12 +08:00
51404f9035 refactor: tool engine 2024-03-28 18:36:58 +08:00
c1bf4c6405 chore: var picker ui 2024-03-28 18:23:05 +08:00
b8818c90b0 feat: answer use selector vars 2024-03-28 17:41:41 +08:00
ead55ce931 chore: support hide editor var search 2024-03-28 17:37:11 +08:00
0a0d9565ac add icon return for tool node in workflow event stream 2024-03-28 17:26:09 +08:00
4235baf493 editor 2024-03-28 17:11:39 +08:00
669c8c3cca some optimization for admin api key, create tenant and reset-encrypt-key-pair command (#3013)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-28 17:02:52 +08:00
b0b0cc045f add mutil-thread document embedding (#3016)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-28 17:02:35 +08:00
82a82fff35 chore: llm remove var inputs 2024-03-28 16:54:14 +08:00
4934a655dd chore: xxx 2024-03-28 16:48:54 +08:00
615178dafa feat: get var list hooks 2024-03-28 16:44:11 +08:00
08650339d7 feat: split var reference 2024-03-28 15:27:11 +08:00
12ea3af242 fix: sync draft 2024-03-28 14:47:09 +08:00
858ab8c8c4 merge main 2024-03-28 14:38:21 +08:00
63a4ddc251 add text/plain support for draft sync api 2024-03-28 14:36:24 +08:00
20d16d7b31 doc: update helm charts (#3012) 2024-03-28 13:02:41 +08:00
aa4d734244 fix 2024-03-28 12:39:24 +08:00
714722bb2d fix: 'next' button unresponsive when uploading additional documents before previous batch completes (#2991) 2024-03-28 12:28:15 +08:00
830495a607 bump celery from 5.2 to 5.3 (#2478)
Co-authored-by: takatost <takatost@users.noreply.github.com>
2024-03-28 11:53:48 +08:00
41a4593b6d bump redis client to 5.0 and enable hiredis support (#2518) 2024-03-28 11:40:21 +08:00
08b727833e generalize helper for loading module from source (#2862) 2024-03-28 11:37:26 +08:00
85b45a7cd0 fix: http json value would changed 2024-03-28 11:34:31 +08:00
de3fd0f382 fix 2024-03-27 21:00:53 +08:00
db3f38bc2b workflow webapp result modification 2024-03-27 18:42:07 +08:00
d239e6bf0f fix 2024-03-27 17:36:16 +08:00
9e4b39e19f fix question classifier node type 2024-03-27 17:03:49 +08:00
078b10a9f0 fix: linter 2024-03-27 16:01:09 +08:00
c70d0546ae sign single step files 2024-03-27 16:00:54 +08:00
c3d926e2ed templates filtering 2024-03-27 15:49:33 +08:00
78a851d240 fix: hide readonly tooltip 2024-03-27 14:47:23 +08:00
6256a3fadb fix: missing datasets 2024-03-27 14:44:04 +08:00
c8b82b9d08 fix: missing comma in JSON for /completion-messages request (#2999) 2024-03-27 14:31:06 +08:00
8def0f8cf2 Merge remote-tracking branch 'origin/feat/workflow' into feat/workflow 2024-03-27 14:19:38 +08:00
a29d3f2400 fix question classifier issue when llm is completion mode 2024-03-27 14:19:23 +08:00
794c57b938 fix: code editor readonly can get focus 2024-03-27 13:49:22 +08:00
9eff8715fb fix: model params in question classify 2024-03-27 13:38:26 +08:00
e952e01dfe fix: sql 2024-03-27 12:28:07 +08:00
a20d305842 fix: missing agent 2024-03-27 12:26:00 +08:00
17d1e2e5b7 fix: template transform node output length 2024-03-27 11:51:12 +08:00
5becb4c43a update wenxin llm (#2929) 2024-03-27 11:36:21 +08:00
1c05d2ef7f fix: questioin classify memory limit 2024-03-27 11:06:51 +08:00
f9caa09cac fix: empty chat 2024-03-27 10:39:41 +08:00
829a7b0d16 Merge branch 'main' into feat/workflow 2024-03-27 10:33:28 +08:00
13694293e3 fix: resolve 'header.uid' length must be less or equal than 32 on Spark V1.5 (#2983) 2024-03-27 09:58:41 +08:00
815beac356 Fix the time in the annotation from 12-hour clock to 24-hour clock. (#2990) 2024-03-27 09:08:38 +08:00
83aaacd71d app creation update 2024-03-26 18:53:02 +08:00
a56115a664 chore: message type i18n 2024-03-26 18:07:35 +08:00
5e60204832 fix: progress bar issue (#2957) 2024-03-26 17:26:58 +08:00
d2624b13a0 fix: the issue of text overflow in the NavSelector component (#2976) 2024-03-26 17:22:01 +08:00
b5578c754f fix: var too long 2024-03-26 17:18:23 +08:00
de00245af0 fix: not highlight query block 2024-03-26 17:02:41 +08:00
2c9d4c8dca feat: value support hights 2024-03-26 16:47:38 +08:00
1ac96564a0 feat: http node hightlight node 2024-03-26 16:35:53 +08:00
61f5de9662 fix: chat scroll (#2981) 2024-03-26 16:19:41 +08:00
c15677634f merge main 2024-03-26 15:25:02 +08:00
2dd2c8c358 feat: support url highlight 2024-03-26 15:02:34 +08:00
8e3be982eb feat: chat memory placeholder 2024-03-26 14:19:50 +08:00
46f4e61edc fix: tools value too long ui 2024-03-26 13:52:35 +08:00
40dbf30784 feat: support new reranker [jina-colbert-v1-en] (#2975) 2024-03-26 11:34:40 +08:00
afd77c4745 fix: the batch annotaion btn should also be loading when progress status is waiting (#2974) 2024-03-26 11:05:29 +08:00
d70bd4aaa4 fix tool_inputs parse error in message that in CoT(ReAct) agent mode (#2949) 2024-03-26 11:05:10 +08:00
8e05261588 Fix handling of missing required parameters in ApiTool (#2965) 2024-03-26 10:53:39 +08:00
a676d4387c fix: Correct image parameter passing in GLM-4v model API calls (#2948) 2024-03-26 10:43:20 +08:00
08a5afcf9f feat: update nginx and docker-compose files to support HTTPS. (#2940)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-26 10:37:43 +08:00
eeaa3c1643 Fix/2969 add model provider ollama not work (#2973) 2024-03-26 10:26:34 +08:00
7c8c233cf4 Add S3_ADDRESS_STYLE configuration option (#2934) 2024-03-26 10:18:26 +08:00
129a9850eb fix: correct response hint for generated image to avoid illusion of regernerated image link (#2962) 2024-03-26 10:13:35 +08:00
1f98a4fff3 improve: cache tool icons by setting max-age HTTP header and enable gzip compression SVG icons from backend (#2971) 2024-03-26 10:11:43 +08:00
58e4702b14 fix: white screen when editing annotaion in log panel (#2968) 2024-03-26 10:10:14 +08:00
c60749678b When disabling the "Annotation Reply" button, the backend reports an error. #2904 (#2933)
Co-authored-by: colvin <colvin.zhang@boaocloud.com>
2024-03-25 22:20:40 +08:00
a35b5e4fff chat restart 2024-03-25 18:42:51 +08:00
07091c9d33 node panel resize 2024-03-25 18:02:09 +08:00
b3db119146 feat: memory example 2024-03-25 16:36:33 +08:00
28206cac72 feat: move start output var to vars 2024-03-25 15:51:31 +08:00
47f2fe591d feat: default fold output 2024-03-25 15:24:33 +08:00
d5214e4644 reuse layout (#2956) 2024-03-25 15:13:50 +08:00
52804ca6d1 fix: adjust popup panel's z-index value (#2952) 2024-03-25 15:09:01 +08:00
acd0e22b9e feat: handle limit tool var type 2024-03-25 15:00:36 +08:00
2ebd8d9fdc feat: support var search 2024-03-25 14:30:43 +08:00
b5fe1f7c46 chore: var reference support portal 2024-03-25 11:34:56 +08:00
4fb9606361 fix: max_token default help info improved (#2951) 2024-03-25 10:07:32 +08:00
c534d95972 fix: yi model price correction (#2946) 2024-03-24 12:10:57 +08:00
46ccfda493 fix: invalid i18 link in README (#2947) 2024-03-24 12:10:13 +08:00
6dc62334d6 doc: model schema document fix and wording about the model price parameter (#2944) 2024-03-24 12:06:20 +08:00
d7b2fe1e8b update docker compose images verison 2024-03-24 00:14:53 +09:00
fac9459402 add workflow image build 2024-03-23 23:19:33 +09:00
6cf0e0c242 Merge branch 'main' into feat/workflow 2024-03-23 23:09:36 +09:00
656bd9257d Merge branch 'feat/workflow-backend' into feat/workflow 2024-03-23 23:09:20 +09:00
38441c930c fix: tool sort 2024-03-23 17:54:40 +08:00
dafdbfa0fd fix: next step tool icon 2024-03-23 12:15:38 +08:00
c7d003d551 fix: Upgrade duckduckgo-search to version 5.1.0 & update document segment api parameter error (#2938) 2024-03-22 19:18:01 +08:00
cc754122fc Authentication is only applied when both the username and password have values. (#2937) 2024-03-22 17:58:21 +08:00
a264973366 hide log panel in web app 2024-03-22 17:23:40 +08:00
5843b30a13 feat: support var remove in code node 2024-03-22 15:20:09 +08:00
240a94182e Feat/add triton inference server (#2928) 2024-03-22 15:15:48 +08:00
340ae3c52f feat: remove var check in start node 2024-03-22 15:10:01 +08:00
817e16493f checklist 2024-03-22 14:58:11 +08:00
66cf787755 fix: can remove struct 2024-03-22 14:35:22 +08:00
4f3872277c all tools 2024-03-22 13:08:28 +08:00
9b84086bac fix: tool provider icon 2024-03-22 12:43:56 +08:00
ce2b2755af add description for workflow 2024-03-22 10:30:20 +08:00
a91bec033d fix bug 2024-03-21 22:04:43 +08:00
096cc74373 hide node info in chat 2024-03-21 19:56:43 +08:00
34db42ecea fix bug 2024-03-21 18:37:37 +08:00
34e8d2f6bb add message error record 2024-03-21 18:30:23 +08:00
a771d59b1e fix: model param inner ui 2024-03-21 18:14:12 +08:00
3a00941125 fix style of dataset 2024-03-21 18:08:10 +08:00
8e9ade14df fix: style 2024-03-21 17:49:12 +08:00
a1ec45fdd1 fix style of message log operation 2024-03-21 17:47:23 +08:00
9295739dc0 fix: model trigger ui 2024-03-21 17:45:45 +08:00
4afb16844c chat stop 2024-03-21 17:30:59 +08:00
c4e6ed1aa2 optimize codes 2024-03-21 17:12:52 +08:00
95c5848d05 update workflow app bind datasets 2024-03-21 17:06:45 +08:00
8e56096f83 fix style of nav 2024-03-21 16:58:00 +08:00
c8f51dd6db chore: edit code support line wrap 2024-03-21 16:56:49 +08:00
ebbb30de44 fix: wrong infernece 2024-03-21 16:48:53 +08:00
fe1168d15a feat: code var sync 2024-03-21 16:43:36 +08:00
fd7fded6e5 fix: style 2024-03-21 16:34:56 +08:00
8cffbc6b2a chore: more info to workflows 2024-03-21 16:23:47 +08:00
fa673f9b4c fix: raw text 2024-03-21 16:21:59 +08:00
178f1fc5d6 fix: tools var renmae problem 2024-03-21 16:15:50 +08:00
0c409e2b9e enhance: increase code timeout 2024-03-21 16:14:16 +08:00
e366e12be0 fix: running line 2024-03-21 16:12:55 +08:00
8e0d8fdb3f feat: other nodes support rename and fix knonw set var bug 2024-03-21 16:06:45 +08:00
524b19bb3a node style 2024-03-21 15:59:20 +08:00
fb2e351c08 fix icons 2024-03-21 15:56:20 +08:00
260fef40c4 enhance: full tools 2024-03-21 15:40:08 +08:00
72818e946d fix llm memory 2024-03-21 15:36:25 +08:00
e673c64534 feat: llm rename 2024-03-21 15:23:38 +08:00
93bbb2694f temp publish not check valid 2024-03-21 15:18:10 +08:00
16af509c46 Update docker-compose files version (#2920) 2024-03-21 15:16:30 +08:00
b038b7aa33 fix: can not choose var type 2024-03-21 15:14:54 +08:00
02a059bdc6 feat: var name rename struct 2024-03-21 15:07:44 +08:00
267d9568c6 fix: running status 2024-03-21 15:03:49 +08:00
d71eae8f93 fix qc 2024-03-21 15:02:55 +08:00
8bdaab96b1 fix 2024-03-21 14:25:11 +08:00
a05fcedd61 fix stop 2024-03-21 14:04:22 +08:00
0db67a2fd3 fix features not publish 2024-03-21 13:47:10 +08:00
6e56a504fd fix docs for advanced-chat 2024-03-21 12:39:46 +08:00
69fa8c9794 add docs for advanced-chat 2024-03-21 12:36:28 +08:00
8dc8650ecb fix type of workflow process 2024-03-21 09:24:09 +08:00
40775e27ce correct api doc of workflow 2024-03-20 23:46:53 +08:00
6fb294202d modify workflow web app output 2024-03-20 23:07:43 +08:00
bd409a3caf enhance: code node validator 2024-03-20 23:01:24 +08:00
0d0da9a892 fix variable assigner multi route 2024-03-20 22:49:24 +08:00
a7e2f9caf0 fix variable assigner 2024-03-20 22:27:59 +08:00
3d4d60a353 feat: llm intput only number and str 2024-03-20 22:00:56 +08:00
75e876b14e chore: reduce more var limit 2024-03-20 21:56:18 +08:00
66fd60bc6f fix: var objects sorts change 2024-03-20 21:56:18 +08:00
c3e7299494 fix service api blocking mode 2024-03-20 21:55:06 +08:00
8fc576870d fix 2024-03-20 20:52:19 +08:00
d4f362164f fix: memory support switch 2024-03-20 20:50:16 +08:00
86e474fff1 Add azure blob storage support (#2919)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-20 20:49:58 +08:00
94ca0edb68 run history 2024-03-20 20:27:52 +08:00
9a3d5729bb fix: suggest service api missed user in query (#2918) 2024-03-20 20:08:26 +08:00
a0dde6e4da fix bug 2024-03-20 20:02:51 +08:00
17f572f23f feat: can not add context 2024-03-20 19:35:42 +08:00
137746387d fix style 2024-03-20 19:24:47 +08:00
9b5deaf80a add proccess of workflow in web app 2024-03-20 18:56:03 +08:00
1201bef879 chore: picker width set 2024-03-20 18:18:34 +08:00
30a9b8b917 fix bug 2024-03-20 17:52:47 +08:00
77bdc6ffb1 fix bug 2024-03-20 17:36:56 +08:00
a65c99496b add extra info for workflow stream output 2024-03-20 17:34:07 +08:00
a8c86b759d fix: var02 icon show 2024-03-20 17:31:46 +08:00
76081db6e4 fix: style 2024-03-20 17:27:45 +08:00
0606b6f922 fix: remove datasets problem 2024-03-20 17:23:18 +08:00
9ed2a99abf feat: var reference support readonly 2024-03-20 17:01:53 +08:00
8f311b020a feat: tools readonly 2024-03-20 16:56:09 +08:00
de6cbc36bb enhance: code return tyoe 2024-03-20 16:54:32 +08:00
0aa984219f feat: telmplate transform support readonly 2024-03-20 16:52:01 +08:00
c9168c19cd feat: question classify support readonly 2024-03-20 16:48:52 +08:00
beca05848c feat: llm support readonly 2024-03-20 16:43:00 +08:00
2e5acef1b6 fix 2024-03-20 16:42:13 +08:00
c4811f921f feat: retrieval support readonly 2024-03-20 16:36:49 +08:00
7569346943 feat: if support readonly 2024-03-20 16:27:41 +08:00
2919cc9adf fix 2024-03-20 16:25:02 +08:00
18883d9faa fix agent config 2024-03-20 16:20:19 +08:00
e462ddb805 feat: http support readonly 2024-03-20 16:17:04 +08:00
5a1c29fd8c chore: change Yi model SDK to OpenAI (#2910) 2024-03-20 16:02:13 +08:00
df274416f9 feat: end support readonly 2024-03-20 16:00:41 +08:00
b35d9f6c36 fix style of running workflow 2024-03-20 15:54:21 +08:00
4c5737fc7f feat: code support readonly 2024-03-20 15:53:14 +08:00
7bb1decaf8 feat: support prompt readonly 2024-03-20 15:46:21 +08:00
4bef2eed25 chat add workflow process 2024-03-20 15:44:51 +08:00
0d2a90adf3 fix knowledge retriever return 2024-03-20 15:43:22 +08:00
38a1ea139a feat: answer support readonly 2024-03-20 15:32:06 +08:00
698eb9671f feat: start support readonly 2024-03-20 15:09:12 +08:00
180775a0ec fix: init qdrant vector max recursion (#2909) 2024-03-20 14:57:13 +08:00
1b857eba29 chore: remove useless 2024-03-20 14:49:39 +08:00
b060b773ef fix: set default logic error 2024-03-20 14:42:35 +08:00
ae197fb2ba fix: switch provider call infinate 2024-03-20 14:42:35 +08:00
2697454a8e fix 2024-03-20 14:10:43 +08:00
2a75258836 feat: not show var 2024-03-20 13:55:26 +08:00
b50f221327 fix bug 2024-03-20 12:47:36 +08:00
d984eb3648 chore: workflow editor not choose outtool in var 2024-03-20 12:05:30 +08:00
4df8fa0afb feat: if change to defalut operator 2024-03-20 11:42:57 +08:00
67b3ee3776 feat: ifelse check and item choose var first 2024-03-20 11:35:31 +08:00
8337e3c6ba fix lint 2024-03-20 11:23:33 +08:00
a9b8917e22 fix bug 2024-03-20 11:23:25 +08:00
70698b553e fix prompt log in completion debug 2024-03-20 11:09:03 +08:00
b131c5dc73 fix: code defalut may not switch if not load config 2024-03-20 10:50:43 +08:00
15e2ab9203 feat: ifelse not set var not change selector 2024-03-20 10:50:43 +08:00
d5c79e0489 fix user-inputs generation 2024-03-20 10:45:30 +08:00
884eeebe83 fix react response 2024-03-20 04:00:50 +08:00
9042db301d fix page content is empty 2024-03-20 03:50:28 +08:00
f4f8d6c652 Merge branch 'main' into feat/workflow-backend
# Conflicts:
#	api/core/model_runtime/model_providers/anthropic/llm/llm.py
2024-03-20 00:06:33 +08:00
20cd3e52d0 fix qc bug 2024-03-19 23:55:06 +08:00
d018e279f8 fix: typo $ mark in logs of vdb migrate command (#2901) 2024-03-19 22:21:58 +08:00
53fa4ffe73 fix bug 2024-03-19 21:53:24 +08:00
11636bc7c7 bump version to 0.5.10 (#2902) 2024-03-19 21:35:58 +08:00
8acd6f2531 fix bug 2024-03-19 21:10:19 +08:00
518c1ceb94 Feat/add-NVIDIA-as-a-new-model-provider (#2900) 2024-03-19 21:08:17 +08:00
8d8bbc586e fix bug 2024-03-19 20:57:07 +08:00
696efe494e fix: Ignore some emtpy page_content when append to split_documents (#2898) 2024-03-19 20:55:15 +08:00
4419d357c4 chore: update Yi models params (#2895) 2024-03-19 20:54:31 +08:00
df4e1339da fix convert bug 2024-03-19 20:51:06 +08:00
0183651cd5 fix stream output 2024-03-19 20:34:43 +08:00
9f024835aa chat 2024-03-19 20:34:10 +08:00
45017f3f35 fix knowledge single retrieve when function call response is none 2024-03-19 20:08:16 +08:00
089072432e chat log 2024-03-19 19:46:14 +08:00
6e600bc0dc Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-19 19:41:33 +08:00
25995eb735 fix knowledge single retrieve when function call response is none 2024-03-19 19:41:18 +08:00
28dc089540 fix style of node tracing 2024-03-19 19:24:36 +08:00
8967c4c8f6 fix 2024-03-19 19:19:47 +08:00
3969ed6f69 enhance: check valid JSON 2024-03-19 19:01:09 +08:00
56b025ebdd fix import by DSL 2024-03-19 18:57:57 +08:00
aab5566d98 fix app switch 2024-03-19 18:49:51 +08:00
a9e44b1fd2 fix: missing head 2024-03-19 18:38:06 +08:00
bae1bc2e4b fix 2024-03-19 18:37:27 +08:00
b9f58d3c1d Merge branch 'main' into feat/workflow 2024-03-19 18:37:09 +08:00
7c7f3958ff feat: optimize ollama model default parameters (#2894) 2024-03-19 18:36:30 +08:00
85da94aac4 fix incorrect exception raised by api tool which leads to incorrect L… (#2886)
Co-authored-by: OSS-MAOLONGDONG\kaihong <maolongdong@kaihong.com>
2024-03-19 18:36:30 +08:00
5350753905 chore: update Qwen model params (#2892) 2024-03-19 18:36:30 +08:00
e7895cdc53 chore: update pr template (#2893) 2024-03-19 18:36:30 +08:00
b84d4bdb85 chore: Update TongYi models prices (#2890) 2024-03-19 18:36:30 +08:00
66538d8cbd feat:support azure openai llm 0125 version (#2889) 2024-03-19 18:36:30 +08:00
4e24e116aa chore: use API Key instead of APIKey (#2888) 2024-03-19 18:36:29 +08:00
3f13c47b9b Bump tiktoken to 0.6.0 to support text-embedding-3-* in encoding_for_model (#2891) 2024-03-19 18:36:29 +08:00
10237c99e4 fix: anthropic system prompt not working (#2885) 2024-03-19 18:36:28 +08:00
faf936416f fix: Fix the problem of system not working (#2884) 2024-03-19 18:36:14 +08:00
779f77ccd6 feat: add icons for 01.ai (#2883) 2024-03-19 18:36:14 +08:00
758b8bf812 i18n: update bedrock label (#2879) 2024-03-19 18:36:14 +08:00
c61f51dc5d feat: AWS Bedrock Claude3 (#2864)
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
2024-03-19 18:36:14 +08:00
fbbba6db92 feat: optimize ollama model default parameters (#2894) 2024-03-19 18:34:23 +08:00
b17e30b1c2 fix: form-data 2024-03-19 18:30:13 +08:00
0756b09cf5 chore: var assigner output 2024-03-19 18:19:07 +08:00
53d428907b fix incorrect exception raised by api tool which leads to incorrect L… (#2886)
Co-authored-by: OSS-MAOLONGDONG\kaihong <maolongdong@kaihong.com>
2024-03-19 18:17:12 +08:00
8133ba16b1 chore: update Qwen model params (#2892) 2024-03-19 18:13:32 +08:00
2f16b3600c fix: avoid space in http key 2024-03-19 18:13:30 +08:00
55d2417906 fix: invalid http header 2024-03-19 18:12:50 +08:00
49dd5b76f1 chore: http remove blank to value 2024-03-19 18:08:09 +08:00
43429108f5 chore: http files 2024-03-19 18:05:32 +08:00
18159b1a4b feat: valid assignes 2024-03-19 18:03:54 +08:00
b31da3b195 initial node position 2024-03-19 17:59:35 +08:00
17b7426cc6 fix external_data_tools bug 2024-03-19 17:58:33 +08:00
ba7b9a595b fix: tool invaild 2024-03-19 17:52:43 +08:00
7778901630 fix tool image render 2024-03-19 17:49:26 +08:00
8f7356cc12 fix completion log item 2024-03-19 17:37:17 +08:00
d49834ee56 feat: if node valid 2024-03-19 17:33:49 +08:00
e4fdf1730e chore: change output 2024-03-19 17:29:40 +08:00
e9aa0e89d3 chore: update pr template (#2893) 2024-03-19 17:24:57 +08:00
f41a619490 check before publish 2024-03-19 17:24:35 +08:00
1607fcfaa7 fix knowledge single retrieve when function call response is none 2024-03-19 17:18:29 +08:00
8386abaed1 fix: file 2024-03-19 17:07:44 +08:00
16a1562900 fix style of tip modal 2024-03-19 17:06:33 +08:00
9b1869f521 feat: template transform code valid 2024-03-19 16:58:13 +08:00
3dfcd9ca67 feat: valid question classify 2024-03-19 16:58:13 +08:00
74408c4ced fix app convert 2024-03-19 16:44:28 +08:00
653917649d add beta tag and fix some style 2024-03-19 16:43:24 +08:00
7e3c59e53e chore: Update TongYi models prices (#2890) 2024-03-19 16:32:42 +08:00
f6314f8e73 feat:support azure openai llm 0125 version (#2889) 2024-03-19 16:32:26 +08:00
3bcfd84fba chore: use API Key instead of APIKey (#2888) 2024-03-19 16:32:06 +08:00
7c0ae76cd0 Bump tiktoken to 0.6.0 to support text-embedding-3-* in encoding_for_model (#2891) 2024-03-19 16:31:46 +08:00
00f51749a3 add switch operation in app list 2024-03-19 16:18:22 +08:00
8d3158a6d5 feat: tool valid 2024-03-19 16:14:25 +08:00
dbaf54c93d chat 2024-03-19 16:04:34 +08:00
2dee8a25d5 fix: anthropic system prompt not working (#2885) 2024-03-19 15:50:02 +08:00
7762737796 optimize app list desc 2024-03-19 15:40:03 +08:00
133d52deb9 fix bug 2024-03-19 15:32:10 +08:00
0ede136d67 fix: single run sync draft 2024-03-19 15:30:13 +08:00
8d82d9f7ef fix: overwrite defalut value in tool 2024-03-19 15:27:33 +08:00
1532564601 modify operations in app list 2024-03-19 15:27:14 +08:00
24ac4996c0 fix bug 2024-03-19 15:20:03 +08:00
112593119a fix suggested_questions_after_answer 2024-03-19 15:12:29 +08:00
0c100ac0b1 fix node 2024-03-19 15:07:13 +08:00
45168d0e00 remove log 2024-03-19 14:55:11 +08:00
dc91b2e3df fix: retieveal output error and var ref error 2024-03-19 14:54:23 +08:00
ced6a5c18b answer node 2024-03-19 14:50:33 +08:00
0b7cdd1e5d node collapse 2024-03-19 14:47:03 +08:00
67de047122 fix: http not pass headers and so on 2024-03-19 14:42:54 +08:00
4ec14d8d91 fix knowledge single retrieve when function call response is none 2024-03-19 14:17:22 +08:00
09516726e9 fix: overwrite template 2024-03-19 13:58:51 +08:00
6bfd61a887 feat: retrieval check valid 2024-03-19 13:58:51 +08:00
507aa6d949 fix: Fix the problem of system not working (#2884) 2024-03-19 13:56:22 +08:00
59f173f2e6 feat: add icons for 01.ai (#2883) 2024-03-19 13:53:21 +08:00
a436550dff workflow info 2024-03-19 13:50:10 +08:00
f5a3069913 sync draft 2024-03-19 13:18:14 +08:00
cf0c96e0d1 fix workflow outputs 2024-03-19 12:34:03 +08:00
978ee93df7 fix: not show sys var type 2024-03-19 11:54:47 +08:00
f3bf4c7730 feat: code default value 2024-03-19 11:36:29 +08:00
d2de16fba2 fix: var list defalut 2024-03-19 10:57:05 +08:00
90543c458c feat: valid before run struct 2024-03-19 10:46:28 +08:00
d6e655eaae fix restart button 2024-03-19 09:43:39 +08:00
9884466ef0 fix judgement of app configure 2024-03-19 09:33:18 +08:00
3b4676d8e9 fix agent configuration 2024-03-19 08:55:47 +08:00
3ee9f74cf8 fix style of completion creation 2024-03-19 08:02:56 +08:00
1c7573a686 add logging callback for workflow 2024-03-19 04:37:29 +08:00
2da7cc6928 fix file bugs 2024-03-19 03:56:47 +08:00
c3790c239c i18n: update bedrock label (#2879) 2024-03-19 00:57:19 +08:00
fda802e796 chore: remove comments 2024-03-18 22:46:19 +08:00
5a5beb5b59 Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-18 22:38:27 +08:00
a0b16e541c question classifier 2024-03-18 22:38:12 +08:00
a67777b8e2 app overview 2024-03-18 22:32:21 +08:00
eae4c80679 fix: input text error 2024-03-18 22:12:46 +08:00
ac63b5385a fix: set code execution timeout 2024-03-18 22:12:21 +08:00
f61ceadec5 fix 2024-03-18 22:03:26 +08:00
5ff2fbed59 fix: linter 2024-03-18 22:00:35 +08:00
d24cf9e56a limit http response 2024-03-18 22:00:34 +08:00
7b9fbccf60 feat: support add files and vision 2024-03-18 21:59:51 +08:00
0b07c6914a fix bugs 2024-03-18 21:52:39 +08:00
f803fb5855 Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-18 21:51:32 +08:00
cd3c2f6b00 knowledge fix 2024-03-18 21:51:23 +08:00
587ba27f8c fix bugs 2024-03-18 21:42:45 +08:00
1b0acdbe63 fix message resign url 2024-03-18 21:22:58 +08:00
3e810bc490 knowledge fix 2024-03-18 21:22:16 +08:00
cc86850ad9 pure: rm file transformer 2024-03-18 21:17:13 +08:00
fed19db938 feat: http download file 2024-03-18 21:16:21 +08:00
9175eb455f fix context 2024-03-18 21:11:27 +08:00
a89287bf20 block icon 2024-03-18 21:03:02 +08:00
977020f580 lint fix 2024-03-18 20:59:22 +08:00
a2195c813c fix file render 2024-03-18 20:59:11 +08:00
d5a404236a knowledge fix 2024-03-18 20:54:50 +08:00
202492e5ac message log style modified 2024-03-18 20:54:18 +08:00
601e888fde feat: handle sys var to run 2024-03-18 20:40:38 +08:00
4a483a8754 Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-18 20:35:23 +08:00
a4f367b8ff knowledge fix 2024-03-18 20:35:10 +08:00
e225a3d33c linter 2024-03-18 20:22:25 +08:00
31b6383697 fix: to new sys vars 2024-03-18 20:12:43 +08:00
e7d6def1e8 fix: trim file extension 2024-03-18 19:59:54 +08:00
197c0bb1a3 fix: jsonable_encoder 2024-03-18 19:56:38 +08:00
d6953f28d3 chore: remove not necessary config 2024-03-18 19:52:40 +08:00
249f013ca3 fix 2024-03-18 19:50:55 +08:00
387a6cfee4 remove answer as end 2024-03-18 19:25:18 +08:00
81cbf2e713 node prev available nodes 2024-03-18 19:22:58 +08:00
e66c55ba9e fix enable annotation reply when collection is None 2024-03-18 19:21:36 +08:00
56044a104c remove logs 2024-03-18 19:14:21 +08:00
c409ab4c3c feat: knowledge support one sigle 2024-03-18 18:49:01 +08:00
487efcb206 fix: support deprecated tools 2024-03-18 18:45:29 +08:00
4eb7546177 workflow publish 2024-03-18 18:45:24 +08:00
4b561aec93 feat: workflow statistics 2024-03-18 18:44:27 +08:00
34695f02fb add model config for conversation 2024-03-18 18:25:46 +08:00
45e51e7730 feat: AWS Bedrock Claude3 (#2864)
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
2024-03-18 18:16:36 +08:00
aa421269c4 deduct llm quota use llm node func 2024-03-18 18:01:57 +08:00
09cfbe117e fix annotation bugs 2024-03-18 17:57:10 +08:00
0ea233edbe Merge branch 'main' into feat/workflow-backend 2024-03-18 17:20:25 +08:00
4834eae887 fix enable annotation reply when collection is None (#2877)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-18 17:18:52 +08:00
01108e6172 fix/Add isModel flag to AgentTools component (#2876) 2024-03-18 17:01:25 +08:00
95b74c211d Feat/support tool credentials bool schema (#2875) 2024-03-18 16:55:26 +08:00
d69e0a79d4 fix file upload config internal err 2024-03-18 16:55:15 +08:00
7320ac41af feat: support context and other var reset 2024-03-18 16:50:52 +08:00
08b1f5d7c3 fix web app bugs 2024-03-18 16:48:31 +08:00
61b41ca04b fix retriever resource 2024-03-18 16:38:39 +08:00
8d4f40bc7c fix style of chat message log 2024-03-18 16:37:50 +08:00
3e9c7dccc0 feat: prompt editor set context status setter 2024-03-18 16:25:20 +08:00
cb79a90031 feat: Add tools for open weather search and image generation using the Spark API. (#2845) 2024-03-18 16:22:48 +08:00
672b8f14f2 chat 2024-03-18 16:14:05 +08:00
513d075ebc chat 2024-03-18 16:03:57 +08:00
8d34082246 feat: llm default 2024-03-18 15:56:37 +08:00
5ed181dd42 knowledge entities fix 2024-03-18 15:54:59 +08:00
9d8f9f6f63 fix app template list filtering 2024-03-18 15:52:02 +08:00
8e8c39a88c feat: sys var remove nodeid 2024-03-18 15:46:36 +08:00
41d9fdee50 Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-18 15:40:26 +08:00
9e37021387 knowledge entities fix 2024-03-18 15:40:11 +08:00
bf06be0c75 fix migration order 2024-03-18 15:37:23 +08:00
a93a2e2e0c Merge branch 'main' into feat/workflow-backend 2024-03-18 15:35:04 +08:00
4502436c47 feat:Embedding models Support for the Aliyun dashscope text-embedding-v1 and text-embedding-v2 (#2874) 2024-03-18 15:21:26 +08:00
02337cbb09 fix answer message save 2024-03-18 15:07:56 +08:00
c35c0fc6f4 chat upload file 2024-03-18 15:04:32 +08:00
1482eb0348 feat: generation support vars 2024-03-18 14:57:28 +08:00
cbe7116bb7 fix data fetching of app list 2024-03-18 14:54:01 +08:00
788550affa chat upload file 2024-03-18 14:49:40 +08:00
25949338cb feat: editor history and query 2024-03-18 14:49:15 +08:00
c3d0cf940c add tenant id index for document and document_segment table (#2873)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-18 14:34:32 +08:00
958da42f74 fix advanced chat answer 2024-03-18 14:28:07 +08:00
6e8ea528c2 fix loading of run 2024-03-18 14:19:47 +08:00
d537efe97a refactor run data fetching 2024-03-18 14:15:29 +08:00
0439276866 add tracing panel 2024-03-18 13:24:27 +08:00
e7343cc67c add max_tokens parameter rule for zhipuai glm4 and glm4v (#2861) 2024-03-18 13:19:36 +08:00
83145486b0 fix: fix unstable function call response arguments missing (#2872) 2024-03-18 13:17:16 +08:00
69c8e4ddd1 fix source handle 2024-03-18 13:11:58 +08:00
711f7107b4 fix merge 2024-03-18 13:11:17 +08:00
ea4476ac6e init edges 2024-03-18 12:49:55 +08:00
13dbc7f0ce feat: handle questioin classify 2024-03-18 11:26:26 +08:00
b8ecfd859b feat: fill single run form variable with constant value first time 2024-03-18 11:03:23 +08:00
4daf93ef4f feat: form input var type 2024-03-18 10:52:45 +08:00
90b7ca1df1 chore: merge main 2024-03-18 10:34:57 +08:00
6fd1795d25 feat: Allow users to specify AWS Bedrock validation models (#2857) 2024-03-18 00:44:09 +08:00
96f38b2d15 fix bug 2024-03-18 00:13:34 +08:00
8a27e51658 add Bad Request when generating 2024-03-17 21:40:59 +08:00
8ecec84dcf Merge branch 'main' into feat/workflow-backend
# Conflicts:
#	api/core/application_manager.py
2024-03-17 21:38:33 +08:00
a2b3096159 add text chunk subscribe for advanced chat blocking mode 2024-03-17 21:36:22 +08:00
80f1fbba56 add image file as markdown stream outupt 2024-03-17 21:27:08 +08:00
f770232b63 feat: add model for 01.ai, yi-chat-34b series (#2865) 2024-03-17 21:24:01 +08:00
d8ab611480 fix: code 2024-03-17 21:08:41 +08:00
722ff7795d insert node 2024-03-17 20:19:58 +08:00
73c2b35dfe add completion app creation back 2024-03-17 16:30:04 +08:00
b99eadecf6 fix: code template 2024-03-17 16:18:15 +08:00
843db3dbdf fix typo 2024-03-17 15:06:53 +08:00
3b660f1698 chat list api 2024-03-17 15:03:37 +08:00
a8e694c235 fix: print exception logs for ValueError and InvokeError (#2823) 2024-03-17 14:34:32 +08:00
15a6d94953 Refactor: Streamline the build-push and deploy-dev workflow (#2852) 2024-03-17 14:20:34 +08:00
a2e30e6aa9 message log 2024-03-17 14:05:56 +08:00
9638885a67 fix prompt log 2024-03-17 12:41:23 +08:00
cd01c890e1 chat record 2024-03-17 09:53:16 +08:00
552ccb058b stop & restart 2024-03-17 09:25:19 +08:00
36180b1001 add model support for kr node single_retrieval_config 2024-03-16 22:22:08 +08:00
65ed4dc91f refactor recommend app api 2024-03-16 22:13:06 +08:00
c709e339b1 fix route 2024-03-16 18:48:16 +08:00
3cf8416484 add workflow api for installed app & web api & service api 2024-03-16 16:27:39 +08:00
d2d47d0e0e fix bug 2024-03-16 15:09:47 +08:00
05f97f6e06 fix chat 2024-03-16 15:00:30 +08:00
11dfdb236d lint fix 2024-03-16 14:45:39 +08:00
6df520ebc6 add skip ran node 2024-03-16 14:45:16 +08:00
a047a98462 advanced chat support 2024-03-16 14:30:53 +08:00
1df68a546e variable assigner node 2024-03-16 01:15:40 +08:00
5013ea09d5 variable assigner node 2024-03-16 00:54:29 +08:00
d92d952e76 Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-16 00:37:15 +08:00
4af304e6ae question classifier 2024-03-16 00:36:58 +08:00
5c4d1c52ee add conversation_id & message_id to advanced-chat workflow-runs API 2024-03-15 22:24:00 +08:00
5ee7fc4fde feat: tools vars limit 2024-03-15 22:15:36 +08:00
b0cf8c00db add created_at return in publish workflow 2024-03-15 22:08:25 +08:00
338dd1c714 feat: http var limit 2024-03-15 22:04:07 +08:00
af9ae91934 feat: template transform default 2024-03-15 21:58:56 +08:00
d122daca87 fix conversation filter 2024-03-15 21:56:17 +08:00
ec49da073e feat: code default value 2024-03-15 21:54:16 +08:00
62846be275 refactor app generate pipeline 2024-03-15 21:42:22 +08:00
6b9cc927c0 feat: llm default value 2024-03-15 21:41:27 +08:00
a577db9ddd stop run 2024-03-15 21:33:51 +08:00
e5c8743712 fix elpased time 2024-03-15 21:15:44 +08:00
777cca1a09 feat: question classify init 2024-03-15 20:50:55 +08:00
e3c65c072c node value init 2024-03-15 20:26:00 +08:00
9b069bd3d4 node value form 2024-03-15 20:18:19 +08:00
56c53d1f07 node value init 2024-03-15 20:18:19 +08:00
6146f24932 fix tip of workflow 2024-03-15 20:15:57 +08:00
9908a8bf1f prompt log 2024-03-15 19:55:33 +08:00
e33260d2e2 node value init 2024-03-15 19:51:46 +08:00
5713ee5fce Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend
# Conflicts:
#	api/constants/languages.py
#	api/controllers/console/app/app.py
#	api/controllers/console/app/model_config.py
2024-03-15 19:50:50 +08:00
129a68bb06 auto layout 2024-03-15 19:34:50 +08:00
aff5ab933b feat: knowledge node var init value and limit 2024-03-15 19:22:35 +08:00
4835358f24 modify prompt log 2024-03-15 18:26:56 +08:00
9b57b4c6c8 dataset retrival 2024-03-15 18:22:48 +08:00
785dfc5c00 dataset retrival 2024-03-15 18:22:48 +08:00
12eb236364 answer stream output support 2024-03-15 18:22:48 +08:00
1cfeb989f7 fix: code default output 2024-03-15 18:22:47 +08:00
ede65eca4d fix: tool 2024-03-15 18:22:47 +08:00
dc53362506 fix: conversation_id equals to none 2024-03-15 18:22:47 +08:00
74e644be1c fix: linter 2024-03-15 18:22:47 +08:00
6e51ce123c fix: null conversation id 2024-03-15 18:22:47 +08:00
737321da75 add advanced chat apis support 2024-03-15 18:22:47 +08:00
72d2f76d24 fix default configs 2024-03-15 18:22:47 +08:00
87a36a1fc8 fix: linter 2024-03-15 18:22:47 +08:00
c2ded79cb2 fix: node type 2024-03-15 18:22:47 +08:00
fb6e5bf4d5 fix publish route 2024-03-15 18:22:47 +08:00
6633a92e1a fix: http 2024-03-15 18:22:47 +08:00
44c4d5be72 add answer output parse 2024-03-15 18:22:47 +08:00
5a67c09b48 use answer node instead of end in advanced chatbot 2024-03-15 18:22:47 +08:00
0614ddde7d fix: allow None AuthorizationConfig 2024-03-15 18:22:47 +08:00
e5ff06bcb7 fix err typo 2024-03-15 18:22:47 +08:00
6b19ba3bb2 enhance: sandbox-docker-compose 2024-03-15 18:22:47 +08:00
735b55e61b add if-else node 2024-03-15 18:22:47 +08:00
7e53625eae fix value type 2024-03-15 18:22:47 +08:00
5213b0aade add sequence_number for workflow_started event 2024-03-15 18:22:47 +08:00
2b4b6817a3 record inputs and process data when node failed 2024-03-15 18:22:47 +08:00
da3e1e9d14 add deduct quota for llm node 2024-03-15 18:22:47 +08:00
e4794e309a add llm node test 2024-03-15 18:22:47 +08:00
e6572ef2d7 fix: linter 2024-03-15 18:22:47 +08:00
2182533af8 feat: javascript code 2024-03-15 18:22:47 +08:00
d88ac6c238 add llm node 2024-03-15 18:22:47 +08:00
e8751bebfa fix single step run error 2024-03-15 18:22:47 +08:00
92c1da8dbe fix: remove answer 2024-03-15 18:22:47 +08:00
951aaf5161 feat: sandbox 2024-03-15 18:22:47 +08:00
a420953385 feat: docker-compose 2024-03-15 18:22:47 +08:00
b102562614 fix: forward-ref 2024-03-15 18:22:47 +08:00
2c2b9e7389 test: template transform 2024-03-15 18:22:47 +08:00
513a8655b1 test: tool 2024-03-15 18:22:47 +08:00
d3385a2715 feat 2024-03-15 18:22:47 +08:00
ebf9c41adb feat: http 2024-03-15 18:22:47 +08:00
7372776992 knowledge node 2024-03-15 18:22:47 +08:00
7f7269d261 remove unused params in workflow_run_for_list_fields 2024-03-15 18:22:47 +08:00
f2bb0012fd add debug code 2024-03-15 18:22:47 +08:00
33113034ea add single step run 2024-03-15 18:22:47 +08:00
88c29f613f fix: typing 2024-03-15 18:22:47 +08:00
f318fa058c feat: add variable selector mapping 2024-03-15 18:22:47 +08:00
407bfb8182 feat: add user uid 2024-03-15 18:22:47 +08:00
91845fc9f6 fix: linter 2024-03-15 18:22:47 +08:00
f911b1c488 feat: support empty code output children 2024-03-15 18:22:47 +08:00
7a6fa3655f add user for node 2024-03-15 18:22:47 +08:00
5eb7b4d56a feat: tool entity 2024-03-15 18:22:47 +08:00
5e4bd9fc38 feat: tool node 2024-03-15 18:22:47 +08:00
f8cba2679e fix: linter 2024-03-15 18:22:47 +08:00
e0883302d2 feat: jinja2 2024-03-15 18:22:47 +08:00
a0a1618869 add tenant_id / app_id / workflow_id for nodes 2024-03-15 18:22:47 +08:00
be68369983 add workflow_app_log codes 2024-03-15 18:22:47 +08:00
9d0a832e40 refactor: github actions 2024-03-15 18:22:47 +08:00
8031262006 feat: workflow mock test 2024-03-15 18:22:47 +08:00
751489fa54 modify readme 2024-03-15 18:22:47 +08:00
1e6feadc7e fix: code node dose not work as expected 2024-03-15 18:22:47 +08:00
2d8497f79b add readme for db connection management in App Runner and Task Pipeline 2024-03-15 18:22:47 +08:00
61a1aadf9c optimize workflow db connections 2024-03-15 18:22:47 +08:00
8b832097de optimize db connections 2024-03-15 18:22:45 +08:00
056331981e fix: api doc duplicate symbols (#2853) 2024-03-15 18:17:43 +08:00
7e4daf131e optimize db connections 2024-03-15 18:17:05 +08:00
de3978fdbb optimize db connections 2024-03-15 18:17:05 +08:00
51f6ab49cf fix: linter 2024-03-15 18:17:05 +08:00
2895c3bc8c feat: template transform 2024-03-15 18:17:05 +08:00
3d5f9b5a1e fix: missing _extract_variable_selector_to_variable_mapping 2024-03-15 18:17:05 +08:00
614bc2e075 feat: http reqeust 2024-03-15 18:17:05 +08:00
193bcce236 feat: http request 2024-03-15 18:17:05 +08:00
a0fd731170 feat: mapping variables 2024-03-15 18:17:05 +08:00
2f57d090a1 refactor pipeline and remove node run run_args 2024-03-15 18:17:05 +08:00
4c5822fb6e fix: transform 2024-03-15 18:17:05 +08:00
e90637f67a fix generate bug 2024-03-15 18:17:05 +08:00
9b0f83f807 fix: add max number array length 2024-03-15 18:17:05 +08:00
fc573564b4 refactor workflow runner 2024-03-15 18:17:05 +08:00
5596b3b00b fix: linter 2024-03-15 18:17:05 +08:00
cb02b1e12e feat: code 2024-03-15 18:17:05 +08:00
736e386f15 fix: bugs 2024-03-15 18:17:05 +08:00
c152d55f68 fix workflow app bugs 2024-03-15 18:17:05 +08:00
1a0b6adc2c fix stream bugs 2024-03-15 18:17:05 +08:00
1914dfea77 fix bugs 2024-03-15 18:17:05 +08:00
1f986a3abb fix bugs 2024-03-15 18:17:05 +08:00
b174f85237 fix bug 2024-03-15 18:17:05 +08:00
2ad9c76093 modify migrations 2024-03-15 18:17:05 +08:00
8684b172d2 add start, end, direct answer node 2024-03-15 18:17:05 +08:00
3e54cb26be move funcs 2024-03-15 18:17:05 +08:00
079cc082a3 use callback to filter workflow stream output 2024-03-15 18:17:05 +08:00
a1bc6b50c5 refactor workflow generate pipeline 2024-03-15 18:17:05 +08:00
7d28fe8ea5 completed workflow engine main logic 2024-03-15 18:17:05 +08:00
dd50deaa43 fix audio voice arg 2024-03-15 18:17:04 +08:00
79a10e9729 add updated_at to sync workflow api 2024-03-15 18:17:04 +08:00
a5de7b10f3 update ruff check 2024-03-15 18:17:04 +08:00
bc4edbfc2b lint fix 2024-03-15 18:17:04 +08:00
75f1355d4c add few workflow run codes 2024-03-15 18:17:04 +08:00
1a86e79d4a lint fix 2024-03-15 18:17:04 +08:00
c8a1f923f5 lint fix 2024-03-15 18:17:04 +08:00
df753e84a3 fix workflow api return 2024-03-15 18:17:04 +08:00
3086893ee7 fix typo 2024-03-15 18:17:04 +08:00
242fcf0145 fix typo 2024-03-15 18:17:04 +08:00
de40422205 lint fix 2024-03-15 18:17:04 +08:00
df809ff435 add get default node config 2024-03-15 18:17:04 +08:00
75559bcbf9 replace block type to node type 2024-03-15 18:17:04 +08:00
d9b8a938c6 use enum instead 2024-03-15 18:17:04 +08:00
e9004a06a5 lint fix 2024-03-15 18:17:04 +08:00
be709d4b84 add AdvancedChatAppGenerateTaskPipeline 2024-03-15 18:17:04 +08:00
602bc67495 lint fix 2024-03-15 18:17:04 +08:00
e498efce2d refactor app generate 2024-03-15 18:17:04 +08:00
09dfe80718 add app copy api 2024-03-15 18:17:04 +08:00
06b05163f6 update app import response 2024-03-15 18:17:04 +08:00
b80092ea12 lint fix 2024-03-15 18:17:04 +08:00
2eaae6742a lint fix 2024-03-15 18:17:04 +08:00
3f5d1a79c6 refactor apps 2024-03-15 18:17:04 +08:00
15c7e0ec2f lint fix 2024-03-15 18:17:04 +08:00
43b0440358 support workflow features 2024-03-15 18:17:03 +08:00
9651a208a9 lint fix 2024-03-15 18:15:54 +08:00
7bff65304f add features structure validate 2024-03-15 18:15:54 +08:00
8a8882ed8d move workflow_id to app 2024-03-15 18:15:54 +08:00
9467fe9aa9 lint fix 2024-03-15 18:15:54 +08:00
799db69e4f refactor app 2024-03-15 18:15:48 +08:00
896c200211 fix import problem 2024-03-15 18:15:17 +08:00
3badc4423a fix: wrong default model parameters when creating app 2024-03-15 18:15:17 +08:00
d741527ae4 lint 2024-03-15 18:15:17 +08:00
77618823a5 add features update api
refactor app model config validation
2024-03-15 18:15:17 +08:00
dd70aeff24 lint fix 2024-03-15 18:15:17 +08:00
022b7d5dd4 optimize default model exceptions 2024-03-15 18:15:17 +08:00
11337e51c5 lint fix 2024-03-15 18:15:17 +08:00
7724d010b6 add app description
add update app api
2024-03-15 18:15:16 +08:00
124aa9db08 lint fix 2024-03-15 18:15:16 +08:00
20cf075b2d add workflow runs & workflow node executions api 2024-03-15 18:15:16 +08:00
bf4a5f6b33 lint fix 2024-03-15 18:15:16 +08:00
03749917f0 add workflow app log api 2024-03-15 18:15:16 +08:00
7d51d6030b remove publish workflow when app import 2024-03-15 18:15:16 +08:00
742b87df5e lint fix 2024-03-15 18:15:16 +08:00
a457faa2bf trigger app_model_config_was_updated when app import 2024-03-15 18:15:16 +08:00
4f50f113dd lint fix 2024-03-15 18:15:16 +08:00
8b529a3ec7 refactor app api 2024-03-15 18:15:16 +08:00
84c3ec0ea7 site init move to event handler 2024-03-15 18:15:16 +08:00
c13e8077ba fix agent app converter command 2024-03-15 18:15:16 +08:00
9f42892b42 lint fix 2024-03-15 18:15:16 +08:00
27ba5a0bce refactor app mode
add app import and export
2024-03-15 18:15:13 +08:00
78afba49bf lint fix 2024-03-15 18:13:55 +08:00
a9192bc1c6 make recommended app list api public 2024-03-15 18:13:55 +08:00
77f04603b3 fix bugs 2024-03-15 18:13:55 +08:00
34ed5e428c fix bugs 2024-03-15 18:13:55 +08:00
98cb17e79e lint fix 2024-03-15 18:13:55 +08:00
fce20e483c restore completion app 2024-03-15 18:13:55 +08:00
97c4733e79 lint fix 2024-03-15 18:13:55 +08:00
748aa22ee2 add manual convert logic 2024-03-15 18:13:55 +08:00
2ba7ac8bc1 add expert mode of chatapp convert command 2024-03-15 18:13:55 +08:00
7458fde5a5 add agent app convert command 2024-03-15 18:13:55 +08:00
f11bf9153d add more tests 2024-03-15 18:13:55 +08:00
0806b3163a add to http request node convert tests 2024-03-15 18:13:55 +08:00
45621ba4d7 add api extension to http request node convert 2024-03-15 18:13:55 +08:00
6aecf42b6e fix prompt transform bugs 2024-03-15 18:13:55 +08:00
3b234febf5 fix bugs and add unit tests 2024-03-15 18:13:55 +08:00
8642354a2a lint 2024-03-15 18:13:55 +08:00
c028e5f889 add app convert codes 2024-03-15 18:13:55 +08:00
3642dd3a73 add workflow logics 2024-03-15 18:13:55 +08:00
603b1e9ed4 lint 2024-03-15 18:13:55 +08:00
b7c6cba23f add workflow models 2024-03-15 18:13:55 +08:00
d430136f65 lint 2024-03-15 18:13:55 +08:00
381b3d5016 optimize get app model to wraps 2024-03-15 18:13:55 +08:00
e3f1e143e5 feat: llm context type limit 2024-03-15 18:03:01 +08:00
9f1cbb2ee7 feat: answer node input limit 2024-03-15 18:03:01 +08:00
d0ef9e672f llm mode 2024-03-15 17:57:46 +08:00
b5c212f575 feat: parse to right datatype and show parse json error 2024-03-15 17:42:00 +08:00
ff5ab43f9c feat: check before run 2024-03-15 17:42:00 +08:00
68f947c7e0 stop workflow run 2024-03-15 17:24:40 +08:00
e98456b025 store 2024-03-15 16:58:48 +08:00
75b332695b feat: support string num seletor to single run debug 2024-03-15 16:37:58 +08:00
3e4bb695e4 dataset retrival 2024-03-15 16:14:32 +08:00
7ba1b37a5a feat: show assigner panel 2024-03-15 15:23:27 +08:00
2886255c8b fix: can not get first var type 2024-03-15 15:10:41 +08:00
f7a9564e11 feat: can noe choose selected nodes 2024-03-15 14:44:48 +08:00
c1b0f115d0 dataset retrival 2024-03-15 14:40:53 +08:00
2203d9a138 available nodes 2024-03-15 14:40:44 +08:00
cef16862da fix: charts encoding (#2848) 2024-03-15 14:02:52 +08:00
5adf94bd7d feat: support filter obj select type 2024-03-15 14:01:25 +08:00
5fbf8ee6c6 available nodes 2024-03-15 13:24:29 +08:00
8a4015722d prevent auto scrolling down to bottom when user already scrolled up (#2813) 2024-03-15 13:19:06 +08:00
156345cb4b fix: use supported languages only for install form (#2844) 2024-03-15 12:05:35 +08:00
8d9c86ac4c fix: advance setting error 2024-03-15 11:47:53 +08:00
f29280ba5c Fix/compatible to old tool config (#2839) 2024-03-15 11:44:24 +08:00
742be06ea9 Fix/localai (#2840) 2024-03-15 11:41:51 +08:00
946ef4c685 chore: remove useless lang 2024-03-15 11:31:48 +08:00
c3773bc2d1 chore: add language placeholder 2024-03-15 11:30:39 +08:00
1b8c8b0a43 feat: node before and after run 2024-03-15 11:26:19 +08:00
86d2c1184c Merge branch 'main' into feat/workflow 2024-03-15 11:17:18 +08:00
817aea9f05 fix 2024-03-15 11:04:34 +08:00
985c07b25b fix 2024-03-15 11:01:47 +08:00
05ac27dfa8 fix 2024-03-14 21:07:59 +08:00
bcce53a929 web app support workflow 2024-03-14 21:02:15 +08:00
58922ba40b add route for workflow app 2024-03-14 21:02:15 +08:00
e6b8b13f2e answer stream output support 2024-03-14 20:50:03 +08:00
ac675c4443 feat: add checkvalid empty fn 2024-03-14 20:44:51 +08:00
ae6a558662 feat: add prev and next nodes 2024-03-14 20:29:47 +08:00
64e44d1709 chore: direct answer to answer 2024-03-14 19:58:17 +08:00
f35ae2355f fix: code default output 2024-03-14 19:17:27 +08:00
3a857c83e6 chore: only show has value node in end 2024-03-14 18:46:30 +08:00
d9edcb2250 feat: change to new end node 2024-03-14 18:43:04 +08:00
d129d7951c fix 2024-03-14 18:27:58 +08:00
af98954fc1 Feat/add script to check i18n keys (#2835) 2024-03-14 18:03:59 +08:00
5c246285da feat: support node type filter 2024-03-14 17:58:26 +08:00
4d63770189 fix: The generate conversation name was not saved (#2836) 2024-03-14 17:53:55 +08:00
bbea3a6b84 fix: compatible to old tool config (#2837) 2024-03-14 17:51:11 +08:00
19c1722032 node default value 2024-03-14 17:27:42 +08:00
19d3a56194 feat: add weekday calculator in time tool (#2822) 2024-03-14 17:01:48 +08:00
5cab2b711f fix: doc for datasets (#2831) 2024-03-14 16:41:40 +08:00
cd9a58231b fix: tool show 2024-03-14 16:40:21 +08:00
d85b5b9134 fix: tool 2024-03-14 16:38:22 +08:00
8bd74d5abf feat: var picker support choose type 2024-03-14 16:04:15 +08:00
1e5455e266 enhance: use override_settings for concurrent stable diffusion (#2818) 2024-03-14 15:26:07 +08:00
2af2e2be67 node add 2024-03-14 15:14:31 +08:00
43a3b827a3 chore: stringify output 2024-03-14 13:43:13 +08:00
3c6de0bf3e feat: tool default value 2024-03-14 13:43:13 +08:00
13a724864d fix: conversation_id equals to none 2024-03-14 13:24:48 +08:00
baf536eb2b fix: linter 2024-03-14 12:57:14 +08:00
5200668336 fix: null conversation id 2024-03-14 12:57:14 +08:00
7eeffb16e2 fix url of webapp 2024-03-14 12:35:12 +08:00
300909341e use app store in overview 2024-03-14 12:27:54 +08:00
277d21cccb fix webapp url 2024-03-14 12:22:08 +08:00
de184051f0 add advanced chat apis support 2024-03-14 12:17:15 +08:00
95ee72556f fix default configs 2024-03-14 12:12:38 +08:00
f48364914b fix: linter 2024-03-14 11:59:43 +08:00
19df70efad fix: node type 2024-03-14 11:59:43 +08:00
9813609645 publish 2024-03-14 11:48:58 +08:00
975d0a1651 fix publish route 2024-03-14 11:39:18 +08:00
3c3571713e fix: http 2024-03-14 11:35:51 +08:00
aa6254a3b4 add doc for workflow app 2024-03-14 11:31:31 +08:00
0b05d2939a add doc for workflow 2024-03-14 11:31:31 +08:00
6f33163f88 fix app sidebar hook 2024-03-14 11:31:31 +08:00
93101b4d9a add running state for step run 2024-03-14 11:31:31 +08:00
d8222a15ca remove useless comments 2024-03-14 11:31:31 +08:00
1728513634 fix sequence number and tokens in result panel 2024-03-14 11:31:31 +08:00
68fa81ec82 chore: change tool input types 2024-03-14 11:25:27 +08:00
c051c89176 chore: fix http i18n 2024-03-14 11:25:27 +08:00
8b2a63e545 fix 2024-03-14 11:19:50 +08:00
4fe585acc2 feat(llm/models): add claude-3-haiku-20240307 (#2825) 2024-03-14 10:08:24 +08:00
e52448b84b feat:add api-version selection for azure openai APIs (#2821) 2024-03-14 09:14:27 +08:00
fcd470fcac add answer output parse 2024-03-13 23:00:28 +08:00
1f92b55f58 fix: doc for completion-messages (#2820) 2024-03-13 22:25:18 +08:00
fd8fe15d28 use answer node instead of end in advanced chatbot 2024-03-13 20:54:23 +08:00
e80315f504 fix: allow None AuthorizationConfig 2024-03-13 20:40:37 +08:00
8b15b742ad generalize position helper for parsing _position.yaml and sorting objects by name (#2803) 2024-03-13 20:29:38 +08:00
ae9e7acd77 feat: other node run 2024-03-13 18:49:22 +08:00
149eb38e84 feat: tool single run 2024-03-13 18:43:43 +08:00
d777184fd5 feat: http run 2024-03-13 18:33:08 +08:00
849dc0560b feat: add French fr-FR (#2810)
Co-authored-by: Laurent Magnien <laurent.magnien@adsn.fr>
2024-03-13 18:20:55 +08:00
1653e5eebe feat: template transform 2024-03-13 18:05:17 +08:00
1f4826ca01 fix err typo 2024-03-13 18:02:19 +08:00
ef700b2688 enhance: sandbox-docker-compose 2024-03-13 17:46:42 +08:00
cedc1bada2 fix: template transform query selector can not choose var 2024-03-13 17:17:28 +08:00
0c709afe5c add if-else node 2024-03-13 17:10:51 +08:00
1c5d07871f hooks 2024-03-13 17:07:03 +08:00
e11fc8c131 init 2024-03-13 17:07:03 +08:00
2edef89a8d feat: handle system var 2024-03-13 16:38:24 +08:00
cbe7de58ab backup draft 2024-03-13 16:27:04 +08:00
801160c430 feat: tool output vars 2024-03-13 16:18:46 +08:00
cb2a814296 feat: assign output 2024-03-13 15:58:03 +08:00
db78b91ec2 feat: http output 2024-03-13 15:58:02 +08:00
25a11c5bb7 feat: question classify output 2024-03-13 15:58:02 +08:00
a026c5fd08 feat: add Vietnamese vi-VN (#2807) 2024-03-13 15:54:47 +08:00
1f41521c21 workflow run 2024-03-13 15:38:56 +08:00
fd7aade26b Fix tts api err (#2809)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-13 15:38:10 +08:00
e686d42262 feat: support template transform output var 2024-03-13 15:21:51 +08:00
9bca3f8fd7 feat: code support output var list 2024-03-13 15:17:03 +08:00
6ef3542c6c fix value type 2024-03-13 15:08:15 +08:00
db299a876e add sequence_number for workflow_started event 2024-03-13 15:01:02 +08:00
737d04361b record inputs and process data when node failed 2024-03-13 14:55:56 +08:00
0d2366b432 feat: knowledge output var 2024-03-13 14:52:32 +08:00
b13345ceb2 chore: remove useless and node filter 2024-03-13 14:41:11 +08:00
f15dce9ee3 feat: llm output and var type 2024-03-13 14:37:58 +08:00
510f8ede10 Improve automatic prompt generation (#2805) 2024-03-13 14:10:47 +08:00
b718e66b26 fix: drag stop & click 2024-03-13 13:59:57 +08:00
8f9125b08a fix:typo (#2808) 2024-03-13 13:00:46 +08:00
a55a7603dd split hooks 2024-03-13 11:53:49 +08:00
64fa343d16 chore: remove log 2024-03-13 11:24:20 +08:00
6b02eebe36 feat: support start node vars 2024-03-13 11:22:59 +08:00
d0f5318b75 feat: code node can run 2024-03-13 10:43:18 +08:00
5fe0d50cee add deduct quota for llm node 2024-03-13 00:08:13 +08:00
4d7caa3458 add llm node test 2024-03-12 23:08:23 +08:00
856466320d fix: linter 2024-03-12 22:42:28 +08:00
3bd53556ca feat: javascript code 2024-03-12 22:41:59 +08:00
c74854aec0 icon fix 2024-03-12 22:24:23 +08:00
294128d43a fix tracing 2024-03-12 22:21:45 +08:00
3f59a579d7 add llm node 2024-03-12 22:12:03 +08:00
e5e97c0a0a fix:change azure openai api_version default value to 2024-02-15-preview (#2797) 2024-03-12 22:07:06 +08:00
870ca713df Refactor Markdown component to include paragraph after image (#2798) 2024-03-12 22:06:54 +08:00
e5cf4ea60e fix result panel 2024-03-12 21:40:41 +08:00
768ca2d3f0 add panel of result 2024-03-12 21:05:15 +08:00
92e9b1bbb1 update style of app list 2024-03-12 21:05:15 +08:00
446932e076 update style of app creation 2024-03-12 21:05:15 +08:00
0469edcc0c fix 2024-03-12 21:03:45 +08:00
3823ae5890 chore: prompt to prompt template 2024-03-12 20:05:42 +08:00
14d71fb598 feat: var picker get vars 2024-03-12 20:03:35 +08:00
a031507443 feat: code show result 2024-03-12 19:45:45 +08:00
4f5c052dc8 fix single step run error 2024-03-12 19:15:11 +08:00
90e013554c fix 2024-03-12 18:47:24 +08:00
6854a3fd26 Update README.md (#2800) 2024-03-12 18:14:07 +08:00
30ea3cb702 feat: can run code node 2024-03-12 17:59:14 +08:00
a5147a382d fix 2024-03-12 17:44:45 +08:00
620360d41a Update README.md (#2799) 2024-03-12 17:02:46 +08:00
20bd49285b excel: get keys from every sheet (#2796) 2024-03-12 16:59:25 +08:00
6bd2730317 Fix/2770 suggestions for next steps (#2788) 2024-03-12 16:27:55 +08:00
74bf6cd186 feat: add single run api 2024-03-12 16:26:30 +08:00
15ddbb5e6f fix: remove answer 2024-03-12 16:25:07 +08:00
547df0b5fe fix 2024-03-12 15:04:52 +08:00
f734cca337 enhance: add stable diffusion user guide (#2795) 2024-03-12 14:45:48 +08:00
ce5b19d011 bump version to 0.5.9 (#2794) 2024-03-12 14:01:24 +08:00
f82a64d149 feat: add DingTalk(钉钉) tool for sending message to chat group bot via webhook (#2693) 2024-03-12 13:45:59 +08:00
8ae46a8a14 fix 2024-03-12 13:25:57 +08:00
9753077661 fix 2024-03-12 13:03:39 +08:00
f49b1afd6c feat:support azure tts (#2751) 2024-03-12 12:06:35 +08:00
22e7393b9d fix 2024-03-12 11:56:15 +08:00
796c5626a7 fix delete dataset when dataset has no document (#2789)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-11 23:57:38 +08:00
943c676768 feat: sandbox 2024-03-11 22:14:28 +08:00
4ecfe1fec5 feat: docker-compose 2024-03-11 22:12:13 +08:00
5fac4f8737 fix: forward-ref 2024-03-11 21:58:54 +08:00
a5394fa2ce test: template transform 2024-03-11 21:53:08 +08:00
8dc4d122b9 test: tool 2024-03-11 21:53:08 +08:00
0eb482f35b chat workflow run 2024-03-11 21:05:54 +08:00
bd52937c88 chat workflow run 2024-03-11 20:54:29 +08:00
d5b321af3f Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend 2024-03-11 20:06:49 +08:00
f3b46bf7e2 knowledge node 2024-03-11 20:06:38 +08:00
2008986f83 feat 2024-03-11 19:51:31 +08:00
1a57951d72 feat: http 2024-03-11 19:51:06 +08:00
e54c9cd401 Feat/open ai compatible functioncall (#2783)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-11 19:48:21 +08:00
7655d7f662 run 2024-03-11 19:35:06 +08:00
373857d0f2 remove unused params in workflow_run_for_list_fields 2024-03-11 19:04:48 +08:00
6719af9ba9 add debug code 2024-03-11 18:52:24 +08:00
19c9091d5b add single step run 2024-03-11 18:49:58 +08:00
f8951d7f57 fix: api tool provider not found (#2782) 2024-03-11 18:21:41 +08:00
84e2071a32 run 2024-03-11 18:11:01 +08:00
b3b9e1dabb feat: tools support run 2024-03-11 17:33:17 +08:00
91a35ded18 fix: typing 2024-03-11 16:51:27 +08:00
2d68594a86 feat: add variable selector mapping 2024-03-11 16:48:28 +08:00
f3d19f9691 feat: add user uid 2024-03-11 16:46:11 +08:00
94047de8b4 fix: linter 2024-03-11 16:44:36 +08:00
1c450e27d3 feat: support empty code output children 2024-03-11 16:44:22 +08:00
c0ccffa1c3 chore: no var not show var group 2024-03-11 16:43:13 +08:00
bbc76cb833 add user for node 2024-03-11 16:31:43 +08:00
94f3cf1a4c feat: tool entity 2024-03-11 16:13:52 +08:00
6454e1d644 chunk-overlap None check (#2781)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-11 15:36:56 +08:00
2aa8847b78 merge main 2024-03-11 14:54:29 +08:00
e184c8cb42 Update README.md (#2780) 2024-03-11 14:53:40 +08:00
049e858ef7 run 2024-03-11 14:43:50 +08:00
fdd211e399 debug/chat: increase notify error duration to 3000 (#2778) 2024-03-11 14:16:31 +08:00
7001e21e7d overview: fix filter today calc start & end (#2777) 2024-03-11 14:11:51 +08:00
82d0732c12 fix: aippt default styles (#2779) 2024-03-11 14:04:09 +08:00
8e491ace5c feat: tool node 2024-03-11 13:54:11 +08:00
53cd125780 fix: deep copy of model-tool label (#2775) 2024-03-11 10:27:00 +08:00
3c91f9b5ab fix: dataset segements api (#2766) 2024-03-11 09:26:15 +08:00
dcf9d85e8d fix: linter 2024-03-10 21:12:07 +08:00
460c0da176 feat: jinja2 2024-03-10 20:24:16 +08:00
295a248561 add tenant_id / app_id / workflow_id for nodes 2024-03-10 20:15:49 +08:00
4630f9c746 add workflow_app_log codes 2024-03-10 20:02:19 +08:00
ba66beb487 refactor: github actions 2024-03-10 18:41:49 +08:00
b5cb38641a feat: workflow mock test 2024-03-10 18:41:25 +08:00
4b37d30c0d modify readme 2024-03-10 18:02:05 +08:00
59ba7917c4 fix: code node does not work as expected 2024-03-10 17:55:24 +08:00
8d0ff01a59 add readme for db connection management in App Runner and Task Pipeline 2024-03-10 17:11:39 +08:00
100fb0c5d6 optimize workflow db connections 2024-03-10 16:59:17 +08:00
b75cd2514e optimize db connections 2024-03-10 16:29:55 +08:00
7693ba8797 optimize db connections 2024-03-10 15:55:14 +08:00
3d6b06696e optimize db connections 2024-03-10 15:55:14 +08:00
0386061fdf fix: linter 2024-03-10 15:55:14 +08:00
3407b4d8dd feat: template transform 2024-03-10 15:55:14 +08:00
71ff2a8356 fix: missing _extract_variable_selector_to_variable_mapping 2024-03-10 15:55:14 +08:00
8b809b8004 feat: http request 2024-03-10 15:55:14 +08:00
707a3a0a66 feat: http request 2024-03-10 15:55:14 +08:00
b798aa915c feat: mapping variables 2024-03-10 15:55:14 +08:00
2db67c4101 refactor pipeline and remove node run run_args 2024-03-10 15:55:14 +08:00
80b4db08dc fix: transform 2024-03-10 15:55:14 +08:00
37cdee5101 fix generate bug 2024-03-10 15:55:14 +08:00
b5366cba03 fix: add max number array length 2024-03-10 15:55:14 +08:00
6cfda369ef refactor workflow runner 2024-03-10 15:55:14 +08:00
5a57ed2536 fix: linter 2024-03-10 15:55:14 +08:00
13937fc103 feat: code 2024-03-10 15:55:14 +08:00
17cd512284 fix: bugs 2024-03-10 15:55:14 +08:00
97398ff209 fix workflow app bugs 2024-03-10 15:55:14 +08:00
2ffb63ff0c fix stream bugs 2024-03-10 15:55:14 +08:00
90bcb241cc fix bugs 2024-03-10 15:55:14 +08:00
f4f7cfd45a fix bugs 2024-03-10 15:55:14 +08:00
d214c047e9 fix bug 2024-03-10 15:55:14 +08:00
fee8a86880 modify migrations 2024-03-10 15:55:14 +08:00
ea883b5e48 add start, end, direct answer node 2024-03-10 15:55:14 +08:00
46296d777c move funcs 2024-03-10 15:55:14 +08:00
79f0e894e9 use callback to filter workflow stream output 2024-03-10 15:55:14 +08:00
6372183471 refactor workflow generate pipeline 2024-03-10 15:55:14 +08:00
5963e7d1c5 completed workflow engine main logic 2024-03-10 15:55:14 +08:00
c7618fc377 fix audio voice arg 2024-03-10 15:55:14 +08:00
3fc932b041 add updated_at to sync workflow api 2024-03-10 15:55:14 +08:00
97cdc96f7c update ruff check 2024-03-10 15:55:14 +08:00
892fe927c2 lint fix 2024-03-10 15:55:14 +08:00
d51d456d80 add few workflow run codes 2024-03-10 15:55:14 +08:00
836376c6c8 lint fix 2024-03-10 15:55:14 +08:00
fa29eadb7a lint fix 2024-03-10 15:55:14 +08:00
0cc0065f8c fix workflow api return 2024-03-10 15:55:14 +08:00
c3eac450ce fix typo 2024-03-10 15:55:14 +08:00
7b738e045e fix typo 2024-03-10 15:55:14 +08:00
3f6c17247f lint fix 2024-03-10 15:55:14 +08:00
0551a9bfcd add get default node config 2024-03-10 15:55:14 +08:00
7c149ebf4f replace block type to node type 2024-03-10 15:55:14 +08:00
37b70eb73e use enum instead 2024-03-10 15:55:14 +08:00
451ea5308f lint fix 2024-03-10 15:55:14 +08:00
a4d6954d4f add AdvancedChatAppGenerateTaskPipeline 2024-03-10 15:55:14 +08:00
c786533f22 lint fix 2024-03-10 15:55:13 +08:00
406a625c98 refactor app generate 2024-03-10 15:55:13 +08:00
171b2bdc20 add app copy api 2024-03-10 15:55:13 +08:00
4266ce73cb update app import response 2024-03-10 15:55:13 +08:00
afa920cc94 lint fix 2024-03-10 15:55:13 +08:00
701f116be3 lint fix 2024-03-10 15:55:13 +08:00
5c7ea08bdf refactor apps 2024-03-10 15:55:12 +08:00
5e38996222 lint fix 2024-03-10 15:54:10 +08:00
18febeabd1 support workflow features 2024-03-10 15:54:10 +08:00
be1500bf7d lint fix 2024-03-10 15:54:10 +08:00
fea549679a add features structure validate 2024-03-10 15:54:10 +08:00
11e1b569ea move workflow_id to app 2024-03-10 15:54:10 +08:00
2bbf96d762 lint fix 2024-03-10 15:54:10 +08:00
70394bae52 refactor app 2024-03-10 15:54:08 +08:00
0c9e112f41 fix import problem 2024-03-10 15:52:45 +08:00
607b84d929 fix: wrong default model parameters when creating app 2024-03-10 15:52:45 +08:00
7a13cd1530 lint 2024-03-10 15:52:45 +08:00
9b1afb68eb add features update api
refactor app model config validation
2024-03-10 15:52:45 +08:00
cf9d2965bf lint fix 2024-03-10 15:52:45 +08:00
b1328c193b optimize default model exceptions 2024-03-10 15:52:45 +08:00
3d222caaae lint fix 2024-03-10 15:52:45 +08:00
77ac6fa356 add app description
add update app api
2024-03-10 15:52:45 +08:00
a3b46006a8 lint fix 2024-03-10 15:52:45 +08:00
ea4716d039 add workflow runs & workflow node executions api 2024-03-10 15:52:45 +08:00
db9e7a53f8 lint fix 2024-03-10 15:52:45 +08:00
4432e055be add workflow app log api 2024-03-10 15:52:45 +08:00
403c2f436d remove publish workflow when app import 2024-03-10 15:52:45 +08:00
594de43dec lint fix 2024-03-10 15:52:45 +08:00
2e68c3fc11 trigger app_model_config_was_updated when app import 2024-03-10 15:52:45 +08:00
2187f6f62e lint fix 2024-03-10 15:52:45 +08:00
9249c38bf9 refactor app api 2024-03-10 15:52:44 +08:00
67e0ba5167 site init move to event handler 2024-03-10 15:52:10 +08:00
9004d8c3cd fix agent app converter command 2024-03-10 15:52:10 +08:00
4df424438d lint fix 2024-03-10 15:52:10 +08:00
6e3cd62e31 refactor app mode
add app import and export
2024-03-10 15:52:09 +08:00
61b4bedc16 lint fix 2024-03-10 15:51:36 +08:00
4e5de036c6 make recommended app list api public 2024-03-10 15:51:36 +08:00
8e54b2e3f2 fix bugs 2024-03-10 15:51:36 +08:00
d39a51c134 fix bugs 2024-03-10 15:51:36 +08:00
6efc3d4913 lint fix 2024-03-10 15:51:35 +08:00
55c31eec31 restore completion app 2024-03-10 15:51:35 +08:00
9820dcb201 lint fix 2024-03-10 15:51:35 +08:00
9f29ce9591 add manual convert logic 2024-03-10 15:51:35 +08:00
afb0ff37bd add expert mode of chatapp convert command 2024-03-10 15:51:35 +08:00
67b6f08d89 add agent app convert command 2024-03-10 15:51:35 +08:00
892036bd7d add more tests 2024-03-10 15:51:35 +08:00
d123ddedc8 add to http request node convert tests 2024-03-10 15:51:35 +08:00
fc243982e5 add api extension to http request node convert 2024-03-10 15:51:35 +08:00
df66cd2205 fix prompt transform bugs 2024-03-10 15:51:35 +08:00
a44d3c3eda fix bugs and add unit tests 2024-03-10 15:51:35 +08:00
297b33aa41 lint 2024-03-10 15:51:35 +08:00
0d858cc036 add app convert codes 2024-03-10 15:51:35 +08:00
f067947266 add workflow logics 2024-03-10 15:51:35 +08:00
9ad6bd78f5 lint 2024-03-10 15:51:35 +08:00
b1e220f2d2 add workflow models 2024-03-10 15:51:35 +08:00
200dc56c37 lint 2024-03-10 15:51:35 +08:00
49992925e2 optimize get app model to wraps 2024-03-10 15:51:33 +08:00
f073dca22a feat: optimize db connection when llm invoking (#2774) 2024-03-10 15:48:31 +08:00
8b1e35d7dc doc: add suggested questions back (#2771) 2024-03-10 15:40:17 +08:00
b75d8ca621 fix: auto closing when close local image uploading (#2767) 2024-03-10 13:11:41 +08:00
9beefd7d5a fix: auto prompt (#2768) 2024-03-09 18:36:58 +08:00
405e99d27f fix api url of workflow run 2024-03-09 13:03:22 +08:00
90ee7fe201 tracing 2024-03-09 12:48:14 +08:00
bc90fc885f tracing node style update 2024-03-09 11:06:25 +08:00
5afa5fb085 app switch 2024-03-09 10:30:26 +08:00
88145efa97 fix: app name can be empty in settings modal (#2761) 2024-03-09 09:13:12 +08:00
bdc13f9238 SMTP authentication is optional (#2765)
Co-authored-by: Laurent Magnien <laurent.magnien@adsn.fr>
2024-03-09 09:11:03 +08:00
93e2dc4f5f workflow log result 2024-03-08 21:10:11 +08:00
ce58f0607b Feat/tool secret parameter (#2760) 2024-03-08 20:31:13 +08:00
bbc0d330a9 chore: rename lastStep to previousStep (#2759) 2024-03-08 19:27:02 +08:00
60e7e17c86 feat: Add new Azure OpenAI Embedding models (#2758) 2024-03-08 19:04:20 +08:00
237bb8514e replace message content type list to string when file_objs is empty .. (#2745) 2024-03-08 18:46:31 +08:00
bd26c933d2 fix: valid password on reset-password page (#2753) 2024-03-08 18:44:49 +08:00
08d2a4279f cache toolsmap 2024-03-08 18:36:48 +08:00
d79b686992 block selector 2024-03-08 18:10:30 +08:00
1adec7ab51 feat: tool auth 2024-03-08 17:13:24 +08:00
3b029f2387 feat: tool auth 2024-03-08 17:13:24 +08:00
6d6afe8f52 fix app mode in logs 2024-03-08 17:11:33 +08:00
e307947dd8 node control 2024-03-08 17:02:02 +08:00
04ad1eef79 workflow logs 2024-03-08 16:43:41 +08:00
2b475b7916 help line 2024-03-08 16:02:28 +08:00
f51f4a5843 feat: tool inputs 2024-03-08 15:43:11 +08:00
b6b58da2d2 enhance: custom tool timeout (#2754) 2024-03-08 15:26:08 +08:00
40c646cf7a Feat/model as tool (#2744) 2024-03-08 15:22:55 +08:00
3231a8c51c fix: image tokenizer (#2752) 2024-03-08 14:50:51 +08:00
b5f3bbead2 update cache in appNav after app info updated 2024-03-08 14:48:10 +08:00
a192ae9314 feat: remove useless file 2024-03-08 14:37:29 +08:00
7a07d8c2bc feat: tool params 2024-03-08 14:32:33 +08:00
17a67e7922 remove annotation 2024-03-08 13:46:14 +08:00
328a3e2e6b node about author 2024-03-08 13:24:59 +08:00
597053c30e fix style of app info 2024-03-08 11:46:25 +08:00
29bef1e3ab app sidebar auto collapse 2024-03-08 11:46:25 +08:00
e36d62f08c hide switch modal 2024-03-08 11:46:25 +08:00
50b4c7fa18 switch app 2024-03-08 11:46:25 +08:00
d86ef15d9a add tip for switch 2024-03-08 11:46:25 +08:00
fa3eb11b6a old app do not support duplicate and export dsl 2024-03-08 11:46:25 +08:00
beff31b003 update style app config 2024-03-08 11:46:25 +08:00
2360fb293b update data 2024-03-08 11:27:05 +08:00
1c82e3870a feat: not choose model hide in node 2024-03-08 11:06:32 +08:00
49ce9d2200 feat: http support debug and remove mock init debug data 2024-03-08 10:59:49 +08:00
c20c9b53e1 feat: template transform support debug 2024-03-08 10:40:12 +08:00
89fc90ac80 chore: code support debug 2024-03-08 10:36:57 +08:00
4170d6a491 use SVG icons for built-in tools (#2748) 2024-03-08 10:21:26 +08:00
0b50c525cf feat: support error correction and border size in qrcode tool (#2731) 2024-03-07 20:54:14 +08:00
072f5caa06 init 2024-03-07 19:43:11 +08:00
783f7a9b13 feat: question classifier support run 2024-03-07 18:43:19 +08:00
425e162a91 feat: knowledge support single run 2024-03-07 18:36:45 +08:00
8ba38e8e74 fix overlap and splitter optimization (#2742)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-07 18:25:49 +08:00
b163545771 Use python-docx to extract docx files (#2654) 2024-03-07 18:24:55 +08:00
55b5d76e0b chore: move node run data to node hooks 2024-03-07 18:13:56 +08:00
c0b82f8e58 UPDATE: Twilio tool credential verification (#2741) 2024-03-07 18:08:52 +08:00
9693d014ba feat: add llm debug 2024-03-07 17:48:18 +08:00
b75ff5fa03 fix:missing import (#2739) 2024-03-07 17:31:30 +08:00
16abcf082c node control 2024-03-07 17:09:04 +08:00
9440d7fe88 fix: the behavior of save action in opening config panel (#2736) 2024-03-07 16:48:44 +08:00
24809fce07 fix: missing en_name of aippt (#2737) 2024-03-07 16:37:12 +08:00
9819ad347f feat:support azure whisper model and fix:rename text-embedidng-ada-002.yaml to text-embedding-ada-002.yaml (#2732) 2024-03-07 16:36:58 +08:00
8fe83750b7 Fix/jina tokenizer cache (#2735) 2024-03-07 16:32:37 +08:00
1809f05904 Feat/add groq (#2733) 2024-03-07 16:00:40 +08:00
173336f256 node handle 2024-03-07 15:58:46 +08:00
0ac250a035 fix: check webhook key of Wecom tool in valid UUID form and fix typo (#2719) 2024-03-07 15:51:06 +08:00
405a00bb2c fix:delete the slash at the end of xinference provider server_url (#2730) 2024-03-07 15:37:05 +08:00
3a3ca8e6a9 fix: max tokens can only up to 2048 (#2734) 2024-03-07 15:35:56 +08:00
f37316f2a0 feat: single run modal 2024-03-07 15:15:39 +08:00
27e678480e Feat: AIPPT & DynamicToolParamter (#2725) 2024-03-07 15:04:42 +08:00
e044e8efaa chat mode 2024-03-07 14:35:13 +08:00
af99a55552 chat mode 2024-03-07 14:24:27 +08:00
8f3d9d0149 panel 2024-03-07 13:54:02 +08:00
344e30bef4 node 2024-03-07 12:15:51 +08:00
45ef4059f0 block-icon 2024-03-07 11:48:42 +08:00
13174aac18 debug and preview 2024-03-07 11:21:59 +08:00
74f02363f4 record 2024-03-07 10:48:11 +08:00
10c421a94c Merge branch 'main' into feat/workflow 2024-03-07 10:35:05 +08:00
7052565380 fix typo: responsing -> responding (#2718)
Co-authored-by: OSS-MAOLONGDONG\kaihong <maolongdong@kaihong.com>
2024-03-07 10:20:35 +08:00
3162227b54 features 2024-03-06 19:43:47 +08:00
7e647cc6e7 fix page title update 2024-03-06 19:14:25 +08:00
ec710d7ffd Merge branch 'main' into feat/workflow 2024-03-06 19:05:21 +08:00
36718c39dc features 2024-03-06 19:04:06 +08:00
fca9753140 fix app detail update 2024-03-06 18:42:37 +08:00
0529c3d5d2 feat: add role tooltip and fix add prompt error 2024-03-06 18:36:35 +08:00
5a27a95f8d feat: llm support type select 2024-03-06 18:11:14 +08:00
5ec3a967b5 feat: all other code expand 2024-03-06 17:59:58 +08:00
a45ec15a56 features 2024-03-06 17:48:01 +08:00
0164dec438 features 2024-03-06 17:46:42 +08:00
4edaa95cbf app menu 2024-03-06 17:39:27 +08:00
067e6b5ae7 app detail redirection 2024-03-06 17:39:27 +08:00
6adb986167 feat: expand toggle 2024-03-06 16:56:18 +08:00
31070ffbca fix qa index processor tenant id is None error (#2713)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-06 16:46:08 +08:00
7f3dec7bee fix error msg format issue (#2715)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-06 16:45:40 +08:00
cc4ca942c9 feat: prompt editor blur and focus ui 2024-03-06 15:38:45 +08:00
3202f12cb8 feat: config prompt 2024-03-06 15:17:51 +08:00
6448d71ca6 draft updated at 2024-03-06 14:35:47 +08:00
e3a3e07eef tool 2024-03-06 14:04:15 +08:00
b1e0db4944 fix: chatbot service api auto generate name default value error (#2709) 2024-03-06 13:19:27 +08:00
c439952a41 fix(web): chat input auto resize by window (#2696) 2024-03-06 12:49:22 +08:00
2f28afebb6 FEAT: Add twilio tool for sending text and whatsapp messages (#2700)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-06 11:35:08 +08:00
8a906e2959 fix: http nodes update error and support json 2024-03-06 11:34:07 +08:00
fa7ba30ba3 Fix rebuild index&csv parsing (#2705)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-06 11:33:32 +08:00
1cf5f510ed feat: add qrcode tool for QR code generation (#2699) 2024-03-06 11:26:16 +08:00
9839b5cb53 fix: enhance code editor style 2024-03-06 11:23:42 +08:00
526c874caa fix mistralai icon (#2707) 2024-03-06 11:08:22 +08:00
430569d486 app detail 2024-03-05 17:38:32 +08:00
d3dfadbd9b feat: add code editor 2024-03-05 17:37:20 +08:00
f88f744097 make volume folders for milvus docker containers ignored by git (#2694) 2024-03-05 17:26:21 +08:00
95733796f0 fix: replace os.path.join with yarl (#2690) 2024-03-05 17:25:20 +08:00
e474e02a50 sync workflow draft 2024-03-05 17:11:54 +08:00
54d9cdaabf sync workflow draft 2024-03-05 17:11:54 +08:00
76fe3c1d76 fix: question classifier can not edit 2024-03-05 16:13:24 +08:00
261e56e61d single run 2024-03-05 15:57:10 +08:00
ede0bb5396 control run 2024-03-05 15:28:28 +08:00
186b85cd62 add store of app detail 2024-03-05 15:27:52 +08:00
eab405af5b chore: node add memo 2024-03-05 15:26:28 +08:00
93999cec56 chore: panel memo 2024-03-05 14:54:59 +08:00
552f319b9d feat: support HTTP response compression in api server (#2680) 2024-03-05 14:45:22 +08:00
acacc0a4cb service 2024-03-05 14:44:13 +08:00
b2ae7089dc fix: var assigner 2024-03-05 14:40:21 +08:00
d4ab6b294a fix: llm default 2024-03-05 14:35:12 +08:00
e6d89f6756 fix: start node add 2024-03-05 14:27:52 +08:00
7ec29bbee7 feat: node add default value 2024-03-05 14:27:52 +08:00
f1d44a4c87 zoom in out 2024-03-05 14:15:05 +08:00
38e5952417 Fix/agent react output parser (#2689) 2024-03-05 14:02:07 +08:00
466f16eb1d node name 2024-03-05 13:05:36 +08:00
04d54c0319 fix 2024-03-05 12:40:56 +08:00
0367a2148a bg 2024-03-05 12:36:59 +08:00
57e9e229de temp 2024-03-05 12:35:36 +08:00
90c8d9d27b service 2024-03-05 11:57:51 +08:00
a30b6acc52 fix: start node 2024-03-05 11:30:45 +08:00
0ee7f952ef fix: start node 2024-03-05 11:27:24 +08:00
86656de971 feat: classify data panel node sync 2024-03-05 11:06:35 +08:00
7f891939f1 FEAT: add tavily tool for searching... A search engine for LLM (#2681) 2024-03-05 10:23:44 +08:00
69a5ce1e31 Fix tts play logic (#2683)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-05 09:22:36 +08:00
2e649c3329 add icon of yaml 2024-03-05 09:03:52 +08:00
534802b761 bump version to 0.5.8 (#2685) 2024-03-05 01:37:53 +08:00
5c258e212c feat: add Anthropic claude-3 models support (#2684) 2024-03-05 01:37:42 +08:00
e868e44025 help line 2024-03-04 20:35:01 +08:00
4376813951 feat: get and set data use context 2024-03-04 20:14:18 +08:00
ccd3e519ea edges change 2024-03-04 19:01:38 +08:00
c4ca3bd34d rename data 2024-03-04 18:18:47 +08:00
6a6133c102 Fix voice selection (#2664)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-03-04 17:50:06 +08:00
081baae883 operator 2024-03-04 17:45:41 +08:00
a3d4befad4 service 2024-03-04 17:45:41 +08:00
2f13d2775f Merge branch 'main' into feat/workflow 2024-03-04 17:40:52 +08:00
3c1825187a fix: auto generate prompt result not show (#2678) 2024-03-04 17:36:11 +08:00
8523b34be7 add jina-reranker-v1-base-en (#2676) 2024-03-04 17:31:01 +08:00
65cfd4360a fix: typo in wecom tool (#2674) 2024-03-04 17:25:42 +08:00
bbf5f42c87 fix: CE edition limits upload file nums (#2677) 2024-03-04 17:25:31 +08:00
3631e53ff0 Feat/add annotation migrate (#2675)
Co-authored-by: jyong <jyong@dify.ai>
2024-03-04 17:22:06 +08:00
a36a2a1080 feat: handleupdate logic 2024-03-04 17:04:53 +08:00
f322d9bddb Fix vdb merge error (#2650) 2024-03-04 16:35:50 +08:00
474c7865d7 feat: get and set value from store 2024-03-04 15:51:05 +08:00
05ce7b9d5e fix: deep copy customColletion (#2673) 2024-03-04 15:20:20 +08:00
bd205f63cc fix: workflow route 2024-03-04 14:53:48 +08:00
72ddedfc5c fix: setup default filters while add credentials (#2669) 2024-03-04 14:17:00 +08:00
36686d7425 fix: test custom tool already exists without decrypting credentials (#2668) 2024-03-04 14:16:47 +08:00
34387ec0f1 fix typo recale to recalc (#2670) 2024-03-04 14:15:53 +08:00
83a6b0c626 Doc/update license (#2666) 2024-03-04 14:10:39 +08:00
76da66fb7e fix: fix import from explore apps err when OpenAI not inited (#2671) 2024-03-04 14:06:54 +08:00
607f9eda35 Fix/app runner typo (#2661) 2024-03-04 13:32:17 +08:00
ac40eb8d87 chore: add missing jp files 2024-03-04 10:44:34 +08:00
3ea06d286a merge main 2024-03-04 10:41:33 +08:00
f25cec265d feat: add Wecom(企业微信) tool for sending message to chat group bot via webhook (#2638) 2024-03-04 10:27:20 +08:00
8d6984e286 create app by import yaml 2024-03-03 15:37:47 +08:00
cd773b8cc9 app import supported 2024-03-03 14:25:50 +08:00
9bca69ebfb app DSL export supported 2024-03-03 14:10:11 +08:00
b33da6a09c duplicate app supported 2024-03-03 13:41:14 +08:00
7ae23d5567 add redirection 2024-03-03 13:24:58 +08:00
8e66b96221 Feat: Add documents limitation (#2662) 2024-03-03 12:45:06 +08:00
b5c1bb346c Add PubMed to tools (#2652) 2024-03-03 12:44:13 +08:00
569315ee3e add tip for chatbot orchestrate 2024-03-03 12:39:07 +08:00
e94b323e6c fix: use English as the default i18n language (#2663) 2024-03-03 12:35:28 +08:00
bc65ee10c0 bugfix: model str maybe empty (#2660) 2024-03-03 11:43:38 +08:00
2001483659 fix: default to allcategories when search params is not from recommended (#2653) 2024-03-02 17:11:25 +08:00
4c7941adef app creation 2024-03-02 16:47:43 +08:00
93d116a9d0 add tracing panel 2024-03-02 14:05:05 +08:00
444aba55dd Feat/jpn support (#2651) 2024-03-02 13:47:51 +08:00
7b2499c292 add meta data of run log 2024-03-02 11:16:23 +08:00
2be2bc5877 add run log status 2024-03-02 09:59:31 +08:00
cfb853efbf log detail panel 2024-03-01 20:32:22 +08:00
2691164fc4 workflow log list 2024-03-01 20:32:22 +08:00
b113711a86 hooks 2024-03-01 20:22:06 +08:00
68e9530507 run by single 2024-03-01 19:09:27 +08:00
3f640b1037 fix: click tool item in app debug page would show detail (#2644) 2024-03-01 18:47:12 +08:00
0ca23bb840 features 2024-03-01 18:20:49 +08:00
6e3d6c4269 features 2024-03-01 18:20:49 +08:00
b07084711c fix: missing description (#2643) 2024-03-01 18:19:04 +08:00
fa8ab2134f feat: displaying the tool description when clicking on a custom tool (#2642) 2024-03-01 17:58:38 +08:00
c2eaa32036 temp 2024-03-01 17:09:53 +08:00
604930db64 feat: support detect when to show vision config 2024-03-01 16:43:56 +08:00
1a677da792 fix: custom tool max tool (#2641) 2024-03-01 16:43:47 +08:00
c3f99779f2 feat: vision config 2024-03-01 16:30:24 +08:00
0518da1e49 feat: handle llm memory 2024-03-01 15:07:29 +08:00
6f6f032244 feat: choose context var 2024-03-01 14:35:37 +08:00
0acb2db9b6 layout 2024-03-01 14:29:02 +08:00
74d26764f8 feat: knowledge retrieval dataset setting 2024-03-01 13:56:52 +08:00
b6d61a818e fix: Replace path.join with urljoin. (#2631) 2024-03-01 13:07:15 +08:00
cf77a89123 feat: dataset item ui 2024-03-01 11:38:24 +08:00
1b73632f77 utils 2024-03-01 11:27:30 +08:00
8495ffaa45 fix: typo in gaode tool (#2636) 2024-03-01 10:12:48 +08:00
0a7cbf6fde feat: dataset list struct 2024-02-29 20:26:51 +08:00
e4701e26c8 feat: add datasets 2024-02-29 20:03:26 +08:00
dbd1d79770 FEAT: Add arxiv tool for searching scientific papers and articles fro… (#2632) 2024-02-29 19:46:10 +08:00
045156985a fix: not show rerank modal picker 2024-02-29 19:39:27 +08:00
257e795ca9 feat: retrieval config 2024-02-29 18:24:15 +08:00
bafdc510d6 record panel 2024-02-29 17:33:49 +08:00
1910178199 fix: default mail type invalid in .env.example (#2628) 2024-02-29 17:29:48 +08:00
1840d05a37 record panel 2024-02-29 17:27:08 +08:00
6d5618447e feat: knowledge retrieval output 2024-02-29 16:39:36 +08:00
839a6a2c8a add logs for vdb-migrate command (#2626) 2024-02-29 16:24:51 +08:00
b2de27b7be feat: knowledge query var 2024-02-29 15:48:48 +08:00
9c0d44fa09 feat: llm node support config memory 2024-02-29 15:36:25 +08:00
f95eb2df0d feat: field fold 2024-02-29 15:26:06 +08:00
cbb298ccb6 feat: config conversation role name 2024-02-29 15:17:20 +08:00
9e6940ed3e feat: memory size config 2024-02-29 14:50:36 +08:00
a769edbc89 Fix/custom tool any of (#2625) 2024-02-29 14:39:05 +08:00
57ffecd0e5 fix: remove unnecessary credentials of custom tool (#2621) 2024-02-29 12:58:12 +08:00
801d135390 generalize the generation of new collection name by dataset id (#2620) 2024-02-29 12:47:10 +08:00
0428f44113 chore: bump superlinter action from v5 to v6 (#2325) 2024-02-29 12:45:06 +08:00
7beff3fd5a fix: model parameter load presets config (#2622) 2024-02-29 12:43:46 +08:00
88a095e40e fix: wrong default model parameters when creating app (#2623) 2024-02-29 12:43:07 +08:00
fbcc769d4e feat: instructions 2024-02-29 11:50:46 +08:00
65f0378e43 feat: classlist crud 2024-02-29 11:37:53 +08:00
f7a90f2660 merge main 2024-02-29 10:59:21 +08:00
dd961985f0 refactor: remove unused codes, move core/agent module into dataset retrieval feature (#2614) 2024-02-28 23:32:47 +08:00
d44b05a9e5 feat: support auth type like basic bearer and custom (#2613) 2024-02-28 23:19:08 +08:00
3d825dcb3e add features 2024-02-28 20:58:00 +08:00
2094a554f6 multiple edge 2024-02-28 20:58:00 +08:00
4837ae4958 feat: question add class 2024-02-28 20:49:12 +08:00
6da9950b72 feat: workflow to auth 2024-02-28 20:19:22 +08:00
e8921787b3 hooks 2024-02-28 20:11:24 +08:00
d2d6904c9b panel-operator 2024-02-28 20:11:23 +08:00
510f0593e9 chore: question classify 2024-02-28 19:54:20 +08:00
7a438f8999 chore: assign var 2024-02-28 19:49:01 +08:00
113af85c3c chore: add required 2024-02-28 18:32:36 +08:00
916bacb60e chore: remove auto show modal 2024-02-28 18:17:39 +08:00
a98b5ca97e chore: auth i18n 2024-02-28 18:11:09 +08:00
5bd3b02be6 version to 0.5.7 (#2610) 2024-02-28 18:07:13 +08:00
076fe8ca3a feat: auth struct 2024-02-28 17:37:31 +08:00
3cf5c1853d Fix: default button behavior (#2609) 2024-02-28 17:34:20 +08:00
a4d86496e1 fix: notion extractor raise 'NoneType' object has no attribute 'curre… (#2608) 2024-02-28 17:08:27 +08:00
90bdc85f8c fix: AppParameterApi.get() got an unexpected keyword argument 'end_user' (#2607) 2024-02-28 16:46:50 +08:00
b08327cb4b feat: edit body 2024-02-28 16:25:09 +08:00
f1b868d5d9 next step 2024-02-28 16:19:57 +08:00
0828873b52 fix: missing default user for APP service api (#2606) 2024-02-28 16:09:56 +08:00
816b707a16 Fix: explore apps is not shown (#2604) 2024-02-28 15:43:42 +08:00
c9257ab4bf Fix/2559 upload powered by brand image not showing up (#2602) 2024-02-28 15:17:49 +08:00
69ce3b3d33 fix props.appDetail.api_base_url /v1 repeat error (#2601) 2024-02-28 15:13:38 +08:00
76ff004ea5 feat: bulk edit 2024-02-28 15:01:58 +08:00
df173764d2 chore: replace remove btn 2024-02-28 13:54:11 +08:00
c4caa7c401 doc: props.appDetail.api_base_url (#2597) 2024-02-28 13:40:57 +08:00
dc93a292c3 Feat/provider mistralai (#2598) 2024-02-28 13:39:55 +08:00
174ee1b646 fix: parameter user exceeded max length when invoking moonshot llm (#2596) 2024-02-28 12:23:34 +08:00
9b1c4f47fb feat:add mistral ai (#2594) 2024-02-28 12:22:57 +08:00
7fa25934af feat: key value input 2024-02-28 11:30:28 +08:00
582ba45c00 Fix 500 error when creating from the template and the provider is None (#2591) 2024-02-28 11:27:17 +08:00
f1cbd55007 enhancement: skip fetching to improve user experience when switching … (#2580) 2024-02-27 19:16:22 +08:00
3a34370422 fix: convert tool messages into user messages in react mode and fill … (#2584) 2024-02-27 19:15:07 +08:00
649c3d0732 feat: key value struct 2024-02-27 18:48:01 +08:00
35c56237a0 feat: url selector 2024-02-27 18:34:54 +08:00
236cc6f526 hooks 2024-02-27 18:20:32 +08:00
29ab244de6 fix: correct the parent class of CacheEmbedding (#2578) 2024-02-27 18:05:48 +08:00
a311f88c99 compute node position 2024-02-27 18:02:49 +08:00
920b2c2b40 Fix/hit test tsne issue (#2581)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-27 17:30:52 +08:00
e92bc25216 fix: if node condition operation i18n 2024-02-27 17:10:52 +08:00
0c06d84e22 chore: add spacing and hover 2024-02-27 16:22:47 +08:00
77c8261fca chore: if not align 2024-02-27 16:19:29 +08:00
ac96d192a6 fix: parameter type handling in API tool and parser (#2574) 2024-02-27 15:59:11 +08:00
07fbeb6cf0 enhancement: improve client-side code (#2568) 2024-02-27 15:58:57 +08:00
91a2e71fff feat: if comparison 2024-02-27 15:52:07 +08:00
0fb47fed9e feat: add and update condition 2024-02-27 15:20:04 +08:00
fc64cdee64 fix mivlus delete by ids error (#2573)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-27 12:23:13 +08:00
0c0e96c55f fix: notion binding (#2572) 2024-02-27 11:59:54 +08:00
4519c6ab29 feat: conditions struct 2024-02-27 11:50:56 +08:00
5b953c1ef2 Fix some RAG bugs (#2570)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-27 11:39:05 +08:00
562ca45e07 fix weaviate delete_by_ids (#2565) 2024-02-27 11:14:35 +08:00
32c6431dbc feat: assign node no var list tip 2024-02-27 10:58:46 +08:00
925964ac28 feat: add default and utils 2024-02-27 10:43:11 +08:00
6bbd53512e Add Dify Meetup Event on Mar 9 (#2566) 2024-02-27 10:40:26 +08:00
4d4d3bb965 feat: add default values and utils and fix ts 2024-02-27 10:34:13 +08:00
e352a8ed1b chore: remove redundant casting flask app config into dict (#2564) 2024-02-27 09:39:26 +08:00
e55225e2bc fix typo in error message of supported keyword store (#2560) 2024-02-27 00:47:36 +08:00
3e63abd335 Feat/json mode (#2563) 2024-02-26 23:34:40 +08:00
0620fa3094 Feat/vdb migrate command (#2562)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-26 19:47:29 +08:00
3d526b3a87 handleAddNextNode 2024-02-26 19:03:04 +08:00
f91582e060 publish button 2024-02-26 18:43:33 +08:00
dec60fdd4c feat: fin var assigner 2024-02-26 18:19:01 +08:00
31930159b8 feat: var assigner data logic 2024-02-26 18:19:01 +08:00
6e2611c86c node title desc 2024-02-26 17:10:18 +08:00
7574107d8c add run-history 2024-02-26 15:38:49 +08:00
58d8b0dd01 node handle connection line 2024-02-26 14:33:31 +08:00
d93288f711 Feat/use searchparams as state (#2554)
Co-authored-by: crazywoola <427733928@qq.com>
2024-02-26 12:52:59 +08:00
49f78bacef update icons of app menu 2024-02-25 14:06:07 +08:00
3b190467c1 update i18n and style of creation from app template 2024-02-24 14:58:00 +08:00
804a090457 app templates 2024-02-24 14:21:42 +08:00
14cfb310e3 app creation 2024-02-24 12:45:58 +08:00
f607a334ac create from DSL 2024-02-24 12:45:58 +08:00
117b84116e app list modification 2024-02-24 12:45:58 +08:00
ca69af7b97 feat: change max_question_num to 5 (#2520)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-24 09:28:27 +08:00
171dd5c737 node-handle 2024-02-23 19:26:56 +08:00
b5ed4af25a feat: var assigner node 2024-02-23 18:26:42 +08:00
952e13fef8 Update README_CN.md (#2550) 2024-02-23 17:38:03 +08:00
b6c683a1b8 next step 2024-02-23 17:20:06 +08:00
5200ec0b9a feat: end node panel 2024-02-23 17:01:48 +08:00
4be3087642 Fix/new RAG bugs (#2547)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-23 16:54:15 +08:00
49da8a23a8 feat: openai llm get trial or paid models from config. (#2546) 2024-02-23 16:48:58 +08:00
307cbf1d9f feat: input no var tip 2024-02-23 16:15:45 +08:00
3ad943a9eb Feat/openai llm trial paid config (#2545) 2024-02-23 16:12:43 +08:00
e7ecdb01a6 block-selector 2024-02-23 15:20:04 +08:00
383bfd7583 feat: merge i18n 2024-02-23 15:18:48 +08:00
508ea8bc0a feat: add number type var 2024-02-23 15:02:00 +08:00
3082093293 fix: webapp name (#2543) 2024-02-23 14:54:03 +08:00
b03bbab5ad fix dev/reformat (#2542)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-23 14:53:24 +08:00
9574730050 Feat/i18n restructure (#2529) 2024-02-23 14:31:06 +08:00
077de17cd5 feat: support config modal edit 2024-02-23 14:25:08 +08:00
91ea6fe4ee Fix/langchain document schema (#2539)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-23 14:16:44 +08:00
f6c07c996b workflow store 2024-02-23 14:16:19 +08:00
769be13189 chore: add api key and value placeholder (#2538) 2024-02-23 13:55:43 +08:00
e42175241e fix: tolerate exceptions in cleaning up index when vector db service unavailable (#2533) 2024-02-23 12:30:39 +08:00
12257b438b Fix/tool default value (#2536) 2024-02-23 12:02:29 +08:00
7ba0bfffa2 fix: debug set var value error 2024-02-23 11:58:28 +08:00
9b577fa32c chore 2024-02-23 11:31:46 +08:00
9ecc736c30 fix: update current tenant id of account when switching tenant (#2530) 2024-02-23 10:51:19 +08:00
6c4e6bf1d6 Feat/dify rag (#2528)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-22 23:31:57 +08:00
94cda3e837 chore 2024-02-22 20:10:58 +08:00
f09f91e25a hide debug 2024-02-22 18:51:25 +08:00
235bec6481 feat: new var input editor 2024-02-22 18:49:32 +08:00
97fe817186 Fix/upload limit (#2521)
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
2024-02-22 17:16:22 +08:00
ee616ee6dd header 2024-02-22 16:35:54 +08:00
5153068a64 feat: start var list 2024-02-22 16:17:38 +08:00
52b12ed7eb Voice audition (#2504)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-22 16:06:17 +08:00
701e441349 panel 2024-02-22 15:37:33 +08:00
5817a035f9 feat: start built-in vars show 2024-02-22 15:06:27 +08:00
ea76f46223 block-selector 2024-02-22 15:02:02 +08:00
0759b29ca2 feat: retrieval node 2024-02-22 14:40:16 +08:00
db6074e035 feat: tool node 2024-02-22 14:06:34 +08:00
d8ab4474b4 fix: bing search response filter (#2519) 2024-02-22 13:06:55 +08:00
6057ba0988 feat: var assigner node struct 2024-02-22 11:42:20 +08:00
2fdcf1756e feat: end node 2024-02-22 11:04:14 +08:00
1ecbd95adf Fix #2512 (#2515) 2024-02-22 09:22:57 +08:00
f489736e06 add debug-and-preview 2024-02-21 20:23:03 +08:00
cad6e6624f fix: config not exists (#2513) 2024-02-21 19:27:38 +08:00
15f13209cf node handle 2024-02-21 19:02:58 +08:00
dbf3b7ad6d feat: end node typs and mock 2024-02-21 18:38:52 +08:00
3341077587 feat: start node 2024-02-21 18:31:45 +08:00
e39d7021e0 feat: if else node 2024-02-21 17:45:52 +08:00
cffaf30760 feat: if types 2024-02-21 16:34:38 +08:00
bc60cf0a35 feat: http node content 2024-02-21 16:00:14 +08:00
9bb9807252 feat: http node struct 2024-02-21 15:50:36 +08:00
8b8fdb48bb feat: output var 2024-02-21 14:56:10 +08:00
b4437ccd2b chore: output lines 2024-02-21 14:11:51 +08:00
3505cbe05c update issue template (#2507) 2024-02-21 14:08:11 +08:00
65ac4dedcc feat: template transform code tooltip 2024-02-21 13:55:17 +08:00
671654da71 add node 2024-02-21 12:28:10 +08:00
31490417d1 Merge branch 'main' into feat/workflow 2024-02-21 12:05:22 +08:00
e15359e589 fix: api doc example error (#2505) 2024-02-21 12:03:48 +08:00
17e8c91267 feat: template transform panel content 2024-02-21 11:48:34 +08:00
db7dccf349 feat: type selector 2024-02-21 11:33:25 +08:00
71d3f71e22 feat: code editor base 2024-02-21 11:04:37 +08:00
edb86f5f5a Feat/stream react (#2498) 2024-02-21 10:45:59 +08:00
adf2651d1f FEAT: Add DuckDuckGo Search Tool for Enhanced Privacy-Focused Search Functionality (#2499) 2024-02-21 10:42:34 +08:00
5031d64e28 Chore/delete chunk decode error alert (#2500) 2024-02-21 03:17:33 +08:00
13a54c3f56 block-selector edit 2024-02-20 20:12:41 +08:00
ae3ad59b16 Refactor agent history organization and initialization of agent scrat… (#2495) 2024-02-20 19:03:43 +08:00
e6cd7b0467 feat: increase max tools (#2497) 2024-02-20 19:03:10 +08:00
d58a1b1359 feat: code support vars 2024-02-20 18:42:21 +08:00
bb87a350ac feat: question classify panel 2024-02-20 17:39:09 +08:00
c441a848e7 feat: question classify node 2024-02-20 17:29:06 +08:00
f14a5c7346 add node-control 2024-02-20 16:58:29 +08:00
92219b5aad feat: prompt ide and fin direct answer node 2024-02-20 16:52:22 +08:00
97e9f52331 doc: typo in chat (#2492) 2024-02-20 16:08:01 +08:00
25957d917a Add default values for optional parameters in API tool and parser (#2491) 2024-02-20 16:07:43 +08:00
20b932da97 del doc support (#2494)
Co-authored-by: jyong <jyong@dify.ai>
2024-02-20 16:05:09 +08:00
291201db1c chore: llm use config 2024-02-20 15:34:44 +08:00
c8ea6d7bfb feat: direct answer node 2024-02-20 15:21:35 +08:00
207080babc fix: audio to text (#2493) 2024-02-20 15:16:46 +08:00
9c70befaf6 chore: move node type to self struct 2024-02-20 14:59:03 +08:00
fcadb807f6 feat: llm node content 2024-02-20 14:51:19 +08:00
48bacd01cc fix: incorrect tool name (#2489) 2024-02-20 14:50:57 +08:00
297d0f1f30 fix: code-based extension (#2490) 2024-02-20 14:49:00 +08:00
4364775dcb feat: output vars 2024-02-20 14:27:40 +08:00
2a196e91a6 feat: default set var name 2024-02-20 14:04:24 +08:00
7a0358827a feat: finish choose var 2024-02-20 14:01:20 +08:00
eedbe1b770 fix: chat restart (#2488) 2024-02-20 11:24:27 +08:00
62e2deafca feat: infinite choose var 2024-02-20 11:23:23 +08:00
5ff6b1da07 Windows local deployment switch "tool" interface failed (#2483) 2024-02-19 20:03:20 +08:00
25b4e68fbb delete 2024-02-19 19:58:49 +08:00
c909319413 base-node base-panel 2024-02-19 19:44:48 +08:00
c7ee8ac1c7 add app-info-panel 2024-02-19 19:06:42 +08:00
2386eed703 chore: new block enum 2024-02-19 18:46:21 +08:00
ada558bedc feat: add picker shower 2024-02-19 18:33:12 +08:00
6caca3aaf7 base panel 2024-02-19 17:58:54 +08:00
3d3bc4c512 initial node data 2024-02-19 17:45:36 +08:00
8b49e0ee2a bump version to 0.5.6 (#2482) 2024-02-19 17:13:55 +08:00
e031ec9359 remove: parameters in seeds (#2481) 2024-02-19 17:00:46 +08:00
044ed624eb feat: var picker trigger 2024-02-19 16:36:53 +08:00
1bd1cd6938 fix: event handlers not registered globally (#2479) 2024-02-19 16:04:52 +08:00
4dff0c5dff feat: to not ignore var 2024-02-19 16:01:12 +08:00
59d8f926c8 block-selector 2024-02-19 15:54:41 +08:00
c6f1900a93 chore: merge main 2024-02-19 15:52:51 +08:00
d94a9cd864 feat: add node icons 2024-02-19 15:44:52 +08:00
81c5a21b8d FEAT: add image styling in markdown (#2441)
Co-authored-by: crazywoola <427733928@qq.com>
2024-02-19 15:07:45 +08:00
61e4bbabaf feat: added Ukrainian language support (#2473) 2024-02-19 13:11:23 +08:00
4cf475680d fix: credential verification of baichuan did not throw all errors (#2475) 2024-02-19 11:52:52 +08:00
21db8e3be4 feat: add var struct 2024-02-19 11:26:25 +08:00
ca4aa340f6 fix: Add model_uid validation for model_uid in Xinference models (#2468) 2024-02-19 10:43:25 +08:00
e05bbec879 chore: model and params select 2024-02-19 10:35:11 +08:00
240e0dfa6f next-step 2024-02-18 20:26:16 +08:00
ab6a01b476 chore: handle llm model type 2024-02-18 18:31:04 +08:00
dce01cf002 header 2024-02-18 17:56:00 +08:00
767d8a4b05 fix: hybrid search may pass rerank enable false (#2467) 2024-02-18 17:52:05 +08:00
da84ba06c7 add block-icon 2024-02-18 17:36:34 +08:00
45ba3ca07b feat: add model selector 2024-02-18 17:26:52 +08:00
56407a910d add block-selector 2024-02-18 16:14:21 +08:00
e624c33d51 node props 2024-02-18 16:14:21 +08:00
0b8dcaba8f Chore: Add type files and unit test ci for Node.js SDK (#2268)
Co-authored-by: xieweicheng <xieweicheng@bytedance.com>
2024-02-18 15:54:14 +08:00
af6a318aae fix: windows load provider file error (#2463) 2024-02-18 15:48:25 +08:00
c6e2900be7 Display selected tts voice name (#2459)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-18 15:39:25 +08:00
3666462076 feat: llm input struct 2024-02-18 14:33:44 +08:00
da0d9aab39 chore: remove node code to panel 2024-02-18 14:08:08 +08:00
ace04b3ef4 feat: field and var 2024-02-18 14:01:22 +08:00
963d9b6032 Feature/display selected info for tts (#2454) 2024-02-16 20:05:14 +08:00
b2ee738bb1 Ignore SSE comments to support openrouter streaming (#2432) 2024-02-16 10:00:10 +08:00
c8ca3ff404 Tts add voice choose (#2453)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-16 01:10:11 +08:00
5d8fa2c7af Tts add voice choose (#2452)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-16 00:15:22 +08:00
58df5e5376 fix: tts voice language to zh-Hans instead of zh-CN (#2450) 2024-02-16 00:05:29 +08:00
348ad1a624 Update pull_request_template.md (#2451) 2024-02-16 00:05:18 +08:00
73e17d5aa8 Create pull_request_template.md (#2449) 2024-02-15 23:35:59 +08:00
300d9892a5 tts add voice choose (#2391)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-02-15 22:41:18 +08:00
e47b5b43b8 fix: baichuan frequency_penalty (#2446) 2024-02-14 20:11:41 +08:00
21c9d9e200 feat: add introduction field in log detail response of chat app (#2445) 2024-02-14 12:38:13 +08:00
4f6916c4d8 Update SMTP environment variable name in docker-compose (#2444) 2024-02-14 12:29:27 +08:00
8633957726 version to 0.5.5 (#2440) 2024-02-13 12:31:49 +08:00
0850c953b3 fix: variable in opener (#2437) 2024-02-12 22:22:57 +08:00
23e95fd7ab Fix tool provider credential caching issue (#2433) 2024-02-12 18:17:43 +08:00
e1045f01c6 pref: optimize add hit count query performance when dataset hit (#2436) 2024-02-12 13:50:43 +08:00
e6d22fc3a0 fix: account has no owner workspace by member inviting (#2435) 2024-02-12 02:09:01 +08:00
9232244920 fix recreating users' default tenant relations when loading user (#2408) 2024-02-12 01:31:40 +08:00
476eb90a90 fix: List not found in account service (#2434) 2024-02-12 00:56:17 +08:00
063191889d chore: apply ruff's pyupgrade linter rules to modernize Python code with targeted version (#2419) 2024-02-09 15:21:33 +08:00
589099a005 fix: possible unsent function call in the last chunk of streaming response in OpenAI provider (#2422) 2024-02-09 14:43:38 +08:00
a0ec7de058 clean: remove no-use ecc_aes.py (#2426) 2024-02-08 20:47:54 +08:00
14a19a3da9 chore: apply ruff's pyflakes linter rules (#2420) 2024-02-08 14:11:10 +08:00
1b04382a9b fix: chat agent mode content copy (#2418) 2024-02-07 21:23:47 +08:00
71e5828d41 feat: add support for smtp when send email (#2409) 2024-02-07 18:08:41 +08:00
65a02f7d32 chore: apply F811 linter rule to eliminate redefined imports and methods (#2412) 2024-02-07 16:28:45 +08:00
acf9174bef fix: studio/api doc (#2415) 2024-02-07 16:28:09 +08:00
243ca5b1e2 fix: typo in package path of core.splitter (#2411) 2024-02-07 15:34:02 +08:00
f6059c377c fix: api based extension modal title (#2414) 2024-02-07 15:01:53 +08:00
1a4c2e77c4 feat: nodes placeholder 2024-02-06 17:49:07 +08:00
f3c78fe73d init 2024-02-06 17:17:29 +08:00
a17c0e5bf6 init 2024-02-06 17:05:26 +08:00
20d5fdea2c init 2024-02-06 12:41:34 +08:00
1927 changed files with 96648 additions and 199502 deletions


@@ -10,7 +10,9 @@ body:
options:
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
required: true
- type: input


@@ -10,7 +10,9 @@ body:
options:
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
- label: I confirm that I am using English to submit report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:


@@ -10,7 +10,9 @@ body:
options:
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:


@@ -10,7 +10,9 @@ body:
options:
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
required: true
- type: textarea
attributes:


@@ -10,7 +10,9 @@ body:
options:
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
- label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
- label: I confirm that I am using English to submit this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
required: true
- label: "Pleas do not modify this template :) and fill in all the required fields."
required: true
- type: input
attributes:

.github/pull_request_template.md (new file, 32 lines)

@@ -0,0 +1,32 @@
# Description
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
Fixes # (issue)
## Type of Change
Please delete options that are not relevant.
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs)
- [ ] Improvement, including but not limited to code refactoring, performance optimization, and UI/UX improvement
- [ ] Dependency upgrade
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] TODO
# Suggested Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] My changes generate no new warnings
- [ ] I ran `dev/reformat`(backend) and `cd web && npx lint-staged`(frontend) to appease the lint gods
- [ ] `optional` I have made corresponding changes to the documentation
- [ ] `optional` I have added tests that prove my fix is effective or that my feature works
- [ ] `optional` New and existing unit tests pass locally with my changes


@ -0,0 +1,31 @@
name: Run Pytest
on:
pull_request:
branches:
- main
- deploy/dev
jobs:
test:
runs-on: ubuntu-latest
env:
MOCK_SWITCH: true
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.10'
cache: 'pip'
cache-dependency-path: ./api/requirements.txt
- name: Install dependencies
run: pip install -r ./api/requirements.txt
- name: Run pytest
run: pytest api/tests/integration_tests/workflow
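The job above exports `MOCK_SWITCH: true` before running the workflow integration tests. As a hedged illustration (not Dify's actual test code; the helper names below are hypothetical), a test module could branch on that variable like this:
```python
# Self-contained sketch: honour the MOCK_SWITCH variable exported by the CI job.
# `call_llm` is a hypothetical stand-in, not a real Dify helper.
import os

import pytest


def call_llm(prompt: str) -> str:
    """Stand-in for a real provider call that CI cannot make."""
    raise RuntimeError("no real model provider configured in CI")


@pytest.fixture
def llm():
    # With MOCK_SWITCH=true (as in the workflow), return a canned responder
    # instead of reaching out to a real provider.
    if os.environ.get("MOCK_SWITCH", "false").lower() == "true":
        return lambda prompt: "mocked answer"
    return call_llm


def test_llm_answer(llm):
    if llm is call_llm:
        pytest.skip("MOCK_SWITCH disabled and no real provider available")
    assert llm("hello") == "mocked answer"
```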


@ -1,17 +1,33 @@
name: Build and Push API Image
name: Build and Push API & Web
on:
push:
branches:
- 'main'
- 'deploy/dev'
- "main"
- "deploy/dev"
- "feat/workflow"
release:
types: [ published ]
types: [published]
env:
DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
DIFY_WEB_IMAGE_NAME: ${{ vars.DIFY_WEB_IMAGE_NAME || 'langgenius/dify-web' }}
DIFY_API_IMAGE_NAME: ${{ vars.DIFY_API_IMAGE_NAME || 'langgenius/dify-api' }}
jobs:
build-and-push:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
strategy:
matrix:
include:
- service_name: "web"
image_name_env: "DIFY_WEB_IMAGE_NAME"
context: "web"
- service_name: "api"
image_name_env: "DIFY_API_IMAGE_NAME"
context: "api"
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@ -22,16 +38,16 @@ jobs:
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
username: ${{ env.DOCKERHUB_USER }}
password: ${{ env.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: langgenius/dify-api
images: ${{ env[matrix.image_name_env] }}
tags: |
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' && startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
@ -39,22 +55,11 @@ jobs:
- name: Build and push
uses: docker/build-push-action@v5
with:
context: "{{defaultContext}}:api"
context: "{{defaultContext}}:${{ matrix.context }}"
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
build-args: COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'
uses: appleboy/ssh-action@v0.1.8
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_PRIVATE_KEY }}
script: |
${{ secrets.SSH_SCRIPT }}


@ -1,60 +0,0 @@
name: Build and Push WEB Image
on:
push:
branches:
- 'main'
- 'deploy/dev'
release:
types: [ published ]
jobs:
build-and-push:
runs-on: ubuntu-latest
if: github.event.pull_request.draft == false
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: langgenius/dify-web
tags: |
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
type=ref,event=branch
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: "{{defaultContext}}:web"
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
build-args: |
COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Deploy to server
if: github.ref == 'refs/heads/deploy/dev'
uses: appleboy/ssh-action@v0.1.8
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_PRIVATE_KEY }}
script: |
${{ secrets.SSH_SCRIPT }}

.github/workflows/deploy-dev.yml

@ -0,0 +1,24 @@
name: Deploy Dev
on:
workflow_run:
workflows: ["Build and Push API & Web"]
branches:
- "deploy/dev"
types:
- completed
jobs:
deploy:
runs-on: ubuntu-latest
if: |
github.event.workflow_run.conclusion == 'success'
steps:
- name: Deploy to server
uses: appleboy/ssh-action@v0.1.8
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
key: ${{ secrets.SSH_PRIVATE_KEY }}
script: |
${{ vars.SSH_SCRIPT || secrets.SSH_SCRIPT }}


@ -41,6 +41,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup NodeJS
uses: actions/setup-node@v4
@ -60,11 +62,10 @@ jobs:
yarn run lint
- name: Super-linter
uses: super-linter/super-linter/slim@v5
uses: super-linter/super-linter/slim@v6
env:
BASH_SEVERITY: warning
DEFAULT_BRANCH: main
ERROR_ON_MISSING_EXEC_BIT: true
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
IGNORE_GENERATED_FILES: true
IGNORE_GITIGNORED_FILES: true

.github/workflows/tool-test-sdks.yaml

@ -0,0 +1,34 @@
name: Run Unit Test For SDKs
on:
pull_request:
branches:
- main
jobs:
build:
name: unit test for Node.js SDK
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [16, 18, 20]
defaults:
run:
working-directory: sdks/nodejs-client
steps:
- uses: actions/checkout@v4
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: ''
cache-dependency-path: 'yarn.lock'
- name: Install Dependencies
run: yarn install
- name: Test
run: yarn test

.gitignore

@ -145,10 +145,14 @@ docker/volumes/db/data/*
docker/volumes/redis/data/*
docker/volumes/weaviate/*
docker/volumes/qdrant/*
docker/volumes/etcd/*
docker/volumes/minio/*
docker/volumes/milvus/*
sdks/python-client/build
sdks/python-client/dist
sdks/python-client/dify_client.egg-info
.vscode/*
!.vscode/launch.json
!.vscode/launch.json
pyrightconfig.json


@ -155,4 +155,4 @@ And that's it! Once your PR is merged, you will be featured as a contributor in
## Getting Help
If you ever get stuck or got a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/AhzKf7dNgk) for a quick chat.
If you ever get stuck or got a burning question while contributing, simply shoot your queries our way via the related GitHub issue, or hop onto our [Discord](https://discord.gg/8Tpq4AcN9c) for a quick chat.


@ -152,4 +152,4 @@ Dify的后端使用Python编写使用[Flask](https://flask.palletsprojects.co
## 获取帮助
如果你在贡献过程中遇到困难或者有任何问题,可以通过相关的 GitHub 问题提出你的疑问,或者加入我们的 [Discord](https://discord.gg/AhzKf7dNgk) 进行快速交流。
如果你在贡献过程中遇到困难或者有任何问题,可以通过相关的 GitHub 问题提出你的疑问,或者加入我们的 [Discord](https://discord.gg/8Tpq4AcN9c) 进行快速交流。

LICENSE

@ -1,24 +1,26 @@
# Dify Open Source License
# Open Source License
The Dify project is licensed under the Apache License 2.0, with the following additional conditions:
Dify is licensed under the Apache License 2.0, with the following additional conditions:
1. Dify is permitted to be used for commercialization, such as using Dify as a "backend-as-a-service" for your other applications, or delivering it to enterprises as an application development platform. However, when the following conditions are met, you must contact the producer to obtain a commercial license:
1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer:
a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify.AI source code to operate a multi-tenant SaaS service that is similar to the Dify.AI service edition.
b. LOGO and copyright information: In the process of using Dify, you may not remove or modify the LOGO or copyright information in the Dify console.
a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
- Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend components, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend components.
Please contact business@dify.ai by email to inquire about licensing matters.
2. As a contributor, you should agree that your contributed code:
2. As a contributor, you should agree that:
a. The producer can adjust the open-source agreement to be more strict or relaxed.
b. Can be used for commercial purposes, such as Dify's cloud business.
a. The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary.
b. Your contributed code may be used for commercial purposes, including but not limited to its cloud business operations.
Apart from this, all other rights and restrictions follow the Apache License 2.0. If you need more detailed information, you can refer to the full version of Apache License 2.0.
Apart from the specific conditions mentioned above, all other rights and restrictions follow the Apache License 2.0. Detailed information about the Apache License 2.0 can be found at http://www.apache.org/licenses/LICENSE-2.0.
The interactive design of this product is protected by appearance patent.
© 2023 LangGenius, Inc.
© 2024 LangGenius, Inc.
----------

Makefile

@ -0,0 +1,43 @@
# Variables
DOCKER_REGISTRY=langgenius
WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web
API_IMAGE=$(DOCKER_REGISTRY)/dify-api
VERSION=latest
# Build Docker images
build-web:
@echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..."
docker build -t $(WEB_IMAGE):$(VERSION) ./web
@echo "Web Docker image built successfully: $(WEB_IMAGE):$(VERSION)"
build-api:
@echo "Building API Docker image: $(API_IMAGE):$(VERSION)..."
docker build -t $(API_IMAGE):$(VERSION) ./api
@echo "API Docker image built successfully: $(API_IMAGE):$(VERSION)"
# Push Docker images
push-web:
@echo "Pushing web Docker image: $(WEB_IMAGE):$(VERSION)..."
docker push $(WEB_IMAGE):$(VERSION)
@echo "Web Docker image pushed successfully: $(WEB_IMAGE):$(VERSION)"
push-api:
@echo "Pushing API Docker image: $(API_IMAGE):$(VERSION)..."
docker push $(API_IMAGE):$(VERSION)
@echo "API Docker image pushed successfully: $(API_IMAGE):$(VERSION)"
# Build all images
build-all: build-web build-api
# Push all images
push-all: push-web push-api
build-push-api: build-api push-api
build-push-web: build-web push-web
# Build and push all images
build-push-all: build-all push-all
@echo "All Docker images have been built and pushed."
# Phony targets
.PHONY: build-web build-api push-web push-api build-all push-all build-push-all


@ -22,12 +22,12 @@
</p>
<p align="center">
<a href="https://dify.ai/blog/dify-ai-unveils-ai-agent-creating-gpts-and-assistants-with-various-llms" target="_blank">
Dify.AI Unveils AI Agent: Creating GPTs and Assistants with Various LLMs
<a href="https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6" target="_blank">
📌 Check out Dify Premium on AWS and deploy it to your own AWS VPC with one-click.
</a>
</p>
**Dify** is an LLM application development platform that has helped built over **100,000** applications. It integrates BaaS and LLMOps, covering the essential tech stack for building generative AI-native applications, including a built-in RAG engine. Dify allows you to **deploy your own version of Assistants API and GPTs, based on any LLMs.**
**Dify** is an open-source LLM app development platform. Dify's intuitive interface combines a RAG pipeline, AI workflow orchestration, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.
![](./images/demo.png)
@ -37,6 +37,9 @@
You can try out [Dify.AI Cloud](https://dify.ai) now. It provides all the capabilities of the self-deployed version, and includes 200 free requests to OpenAI GPT-3.5.
### Looking to purchase via AWS?
Check out [Dify Premium on AWS](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) and deploy it to your own AWS VPC with one-click.
## Dify vs. LangChain vs. Assistants API
| Feature | Dify.AI | Assistants API | LangChain |
@ -97,10 +100,12 @@ docker compose up -d
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.
### Helm Chart
#### Deploy with Helm Chart
Big thanks to @BorisPolonsky for providing us with a [Helm Chart](https://helm.sh/) version, which allows Dify to be deployed on Kubernetes.
You can go to https://github.com/BorisPolonsky/dify-helm for deployment information.
[Helm Chart](https://helm.sh/) version, which allows Dify to be deployed on Kubernetes.
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
### Configuration
@ -117,6 +122,10 @@ For those who'd like to contribute code, see our [Contribution Guide](https://gi
At the same time, please consider supporting Dify by sharing it on social media and at events and conferences.
### Projects made by community
- [Chatbot Chrome Extension by @charli117](https://github.com/langgenius/chatbot-chrome-extension)
### Contributors
<a href="https://github.com/langgenius/dify/graphs/contributors">
@ -125,7 +134,7 @@ At the same time, please consider supporting Dify by sharing it on social media
### Translations
We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README_EN.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/AhzKf7dNgk).
We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).
## Community & Support


@ -94,10 +94,12 @@ docker compose up -d
运行后,可以在浏览器上访问 [http://localhost/install](http://localhost/install) 进入 Dify 控制台并开始初始化安装操作。
### Helm Chart
#### 使用 Helm Chart 部署
非常感谢 @BorisPolonsky 为我们提供了一个 [Helm Chart](https://helm.sh/) 版本,可以在 Kubernetes 上部署 Dify。
您可以前往 https://github.com/BorisPolonsky/dify-helm 来获取部署信息。
使用 [Helm Chart](https://helm.sh/) 版本,可以在 Kubernetes 上部署 Dify。
- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
- [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
### 配置


@ -39,7 +39,7 @@ DB_DATABASE=dify
# Storage configuration
# use for store upload files, private keys...
# storage type: local, s3
# storage type: local, s3, azure-blob
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
S3_ENDPOINT=https://your-bucket-name.storage.s3.clooudflare.com
@ -47,6 +47,11 @@ S3_BUCKET_NAME=your-bucket-name
S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key
S3_REGION=your-region
# Azure Blob Storage configuration
AZURE_BLOB_ACCOUNT_NAME=your-account-name
AZURE_BLOB_ACCOUNT_KEY=your-account-key
AZURE_BLOB_CONTAINER_NAME=yout-container-name
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
@ -81,11 +86,17 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
# Model Configuration
MULTIMODAL_SEND_IMAGE_FORMAT=base64
# Mail configuration, support: resend
# Mail configuration, support: resend, smtp
MAIL_TYPE=
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@dify.ai>
RESEND_API_KEY=
RESEND_API_URL=https://api.resend.com
# smtp configuration
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=587
SMTP_USERNAME=123
SMTP_PASSWORD=abc
SMTP_USE_TLS=false
# Sentry configuration
SENTRY_DSN=
@ -124,3 +135,17 @@ UNSTRUCTURED_API_URL=
SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL=
BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database
# CODE EXECUTION CONFIGURATION
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_STRING_LENGTH=80000
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
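The mail section now supports `MAIL_TYPE=smtp` through the five `SMTP_*` variables above. A minimal standard-library sketch of what those settings drive (this is not Dify's mail extension; the recipient and subject are placeholders):
```python
# Sketch only: exercise the SMTP_* variables with Python's standard library.
import os
import smtplib
from email.message import EmailMessage

msg = EmailMessage()
msg["From"] = os.environ.get("MAIL_DEFAULT_SEND_FROM", "no-reply <no-reply@dify.ai>")
msg["To"] = "user@example.com"          # placeholder recipient
msg["Subject"] = "SMTP configuration test"
msg.set_content("If you can read this, MAIL_TYPE=smtp is wired up correctly.")

with smtplib.SMTP(os.environ["SMTP_SERVER"], int(os.environ.get("SMTP_PORT", 587))) as smtp:
    if os.environ.get("SMTP_USE_TLS", "false").lower() == "true":
        smtp.starttls()                  # upgrade the connection before authenticating
    smtp.login(os.environ["SMTP_USERNAME"], os.environ["SMTP_PASSWORD"])
    smtp.send_message(msg)
```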


@ -5,7 +5,7 @@
1. Start the docker-compose stack
The backend require some middleware, including PostgreSQL, Redis, and Weaviate, which can be started together using `docker-compose`.
```bash
cd ../docker
docker-compose -f docker-compose.middleware.yaml -p dify up -d
@ -15,7 +15,7 @@
3. Generate a `SECRET_KEY` in the `.env` file.
```bash
openssl rand -base64 42
sed -i "/^SECRET_KEY=/c\SECRET_KEY=$(openssl rand -base64 42)" .env
```
3.5 If you use annaconda, create a new environment and activate it
```bash
@ -46,7 +46,7 @@
```
pip install -r requirements.txt --upgrade --force-reinstall
```
6. Start backend:
```bash
flask run --host 0.0.0.0 --port=5001 --debug


@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import os
from werkzeug.exceptions import Unauthorized
@ -27,6 +26,7 @@ from config import CloudEditionConfig, Config
from extensions import (
ext_celery,
ext_code_based_extension,
ext_compress,
ext_database,
ext_hosting_provider,
ext_login,
@ -39,10 +39,11 @@ from extensions import (
from extensions.ext_database import db
from extensions.ext_login import login_manager
from libs.passport import PassportService
# DO NOT REMOVE BELOW
from services.account_service import AccountService
# DO NOT REMOVE BELOW
from events import event_handlers
from models import account, dataset, model, source, task, tool, tools, web
# DO NOT REMOVE ABOVE
@ -96,6 +97,7 @@ def create_app(test_config=None) -> Flask:
def initialize_extensions(app):
# Since the application instance is now created, pass it to each Flask
# extension instance to bind it to the Flask application instance (app)
ext_compress.init_app(app)
ext_code_based_extension.init()
ext_database.init_app(app)
ext_migrate.init(app, db)
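`initialize_extensions` now calls `ext_compress.init_app(app)`. The extension's source is not part of this diff; a plausible minimal shape, assuming it wraps Flask-Compress and honours the `API_COMPRESSION_ENABLED` flag added to the config later in this changeset, would be:
```python
# Hypothetical sketch of extensions/ext_compress.py, not the actual file.
from flask import Flask
from flask_compress import Compress  # assumes the Flask-Compress package

compress = Compress()


def init_app(app: Flask):
    # Only enable compressed responses when the config flag is set.
    if app.config.get("API_COMPRESSION_ENABLED"):
        compress.init_app(app)
```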


@ -6,16 +6,16 @@ import click
from flask import current_app
from werkzeug.exceptions import NotFound
from core.embedding.cached_embedding import CacheEmbedding
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document
from extensions.ext_database import db
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
from models.account import Tenant
from models.dataset import Dataset
from models.model import Account
from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
from models.provider import Provider, ProviderModel
@ -109,29 +109,140 @@ def reset_encrypt_key_pair():
click.echo(click.style('Sorry, only support SELF_HOSTED mode.', fg='red'))
return
tenant = db.session.query(Tenant).first()
if not tenant:
click.echo(click.style('Sorry, no workspace found. Please enter /install to initialize.', fg='red'))
return
tenants = db.session.query(Tenant).all()
for tenant in tenants:
if not tenant:
click.echo(click.style('Sorry, no workspace found. Please enter /install to initialize.', fg='red'))
return
tenant.encrypt_public_key = generate_key_pair(tenant.id)
tenant.encrypt_public_key = generate_key_pair(tenant.id)
db.session.query(Provider).filter(Provider.provider_type == 'custom').delete()
db.session.query(ProviderModel).delete()
db.session.commit()
db.session.query(Provider).filter(Provider.provider_type == 'custom', Provider.tenant_id == tenant.id).delete()
db.session.query(ProviderModel).filter(ProviderModel.tenant_id == tenant.id).delete()
db.session.commit()
click.echo(click.style('Congratulations! '
'the asymmetric key pair of workspace {} has been reset.'.format(tenant.id), fg='green'))
click.echo(click.style('Congratulations! '
'the asymmetric key pair of workspace {} has been reset.'.format(tenant.id), fg='green'))
@click.command('create-qdrant-indexes', help='Create qdrant indexes.')
def create_qdrant_indexes():
@click.command('vdb-migrate', help='migrate vector db.')
@click.option('--scope', default='all', prompt=False, help='The scope of vector database to migrate, Default is All.')
def vdb_migrate(scope: str):
if scope in ['knowledge', 'all']:
migrate_knowledge_vector_database()
if scope in ['annotation', 'all']:
migrate_annotation_vector_database()
def migrate_annotation_vector_database():
"""
Migrate other vector database datas to Qdrant.
Migrate annotation datas to target vector database .
"""
click.echo(click.style('Start create qdrant indexes.', fg='green'))
click.echo(click.style('Start migrate annotation data.', fg='green'))
create_count = 0
skipped_count = 0
total_count = 0
page = 1
while True:
try:
# get apps info
apps = db.session.query(App).filter(
App.status == 'normal'
).order_by(App.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for app in apps:
total_count = total_count + 1
click.echo(f'Processing the {total_count} app {app.id}. '
+ f'{create_count} created, {skipped_count} skipped.')
try:
click.echo('Create app annotation index: {}'.format(app.id))
app_annotation_setting = db.session.query(AppAnnotationSetting).filter(
AppAnnotationSetting.app_id == app.id
).first()
if not app_annotation_setting:
skipped_count = skipped_count + 1
click.echo('App annotation setting is disabled: {}'.format(app.id))
continue
# get dataset_collection_binding info
dataset_collection_binding = db.session.query(DatasetCollectionBinding).filter(
DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id
).first()
if not dataset_collection_binding:
click.echo('App annotation collection binding is not exist: {}'.format(app.id))
continue
annotations = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app.id).all()
dataset = Dataset(
id=app.id,
tenant_id=app.tenant_id,
indexing_technique='high_quality',
embedding_model_provider=dataset_collection_binding.provider_name,
embedding_model=dataset_collection_binding.model_name,
collection_binding_id=dataset_collection_binding.id
)
documents = []
if annotations:
for annotation in annotations:
document = Document(
page_content=annotation.question,
metadata={
"annotation_id": annotation.id,
"app_id": app.id,
"doc_id": annotation.id
}
)
documents.append(document)
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
click.echo(f"Start to migrate annotation, app_id: {app.id}.")
try:
vector.delete()
click.echo(
click.style(f'Successfully delete vector index for app: {app.id}.',
fg='green'))
except Exception as e:
click.echo(
click.style(f'Failed to delete vector index for app {app.id}.',
fg='red'))
raise e
if documents:
try:
click.echo(click.style(
f'Start to created vector index with {len(documents)} annotations for app {app.id}.',
fg='green'))
vector.create(documents)
click.echo(
click.style(f'Successfully created vector index for app {app.id}.', fg='green'))
except Exception as e:
click.echo(click.style(f'Failed to created vector index for app {app.id}.', fg='red'))
raise e
click.echo(f'Successfully migrated app annotation {app.id}.')
create_count += 1
except Exception as e:
click.echo(
click.style('Create app annotation index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
continue
click.echo(
click.style(f'Congratulations! Create {create_count} app annotation indexes, and skipped {skipped_count} apps.',
fg='green'))
def migrate_knowledge_vector_database():
"""
Migrate vector database datas to target vector database .
"""
click.echo(click.style('Start migrate vector db.', fg='green'))
create_count = 0
skipped_count = 0
total_count = 0
config = current_app.config
vector_type = config.get('VECTOR_STORE')
page = 1
while True:
try:
@ -140,60 +251,190 @@ def create_qdrant_indexes():
except NotFound:
break
model_manager = ModelManager()
page += 1
for dataset in datasets:
if dataset.index_struct_dict:
if dataset.index_struct_dict['type'] != 'qdrant':
try:
click.echo('Create dataset qdrant index: {}'.format(dataset.id))
try:
embedding_model = model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
except Exception:
continue
embeddings = CacheEmbedding(embedding_model)
from core.index.vector_index.qdrant_vector_index import QdrantConfig, QdrantVectorIndex
index = QdrantVectorIndex(
dataset=dataset,
config=QdrantConfig(
endpoint=current_app.config.get('QDRANT_URL'),
api_key=current_app.config.get('QDRANT_API_KEY'),
root_path=current_app.root_path
),
embeddings=embeddings
)
if index:
index.create_qdrant_dataset(dataset)
index_struct = {
"type": 'qdrant',
"vector_store": {
"class_prefix": dataset.index_struct_dict['vector_store']['class_prefix']}
}
dataset.index_struct = json.dumps(index_struct)
db.session.commit()
create_count += 1
else:
click.echo('passed.')
except Exception as e:
click.echo(
click.style('Create dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
total_count = total_count + 1
click.echo(f'Processing the {total_count} dataset {dataset.id}. '
+ f'{create_count} created, {skipped_count} skipped.')
try:
click.echo('Create dataset vdb index: {}'.format(dataset.id))
if dataset.index_struct_dict:
if dataset.index_struct_dict['type'] == vector_type:
skipped_count = skipped_count + 1
continue
collection_name = ''
if vector_type == "weaviate":
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == "qdrant":
if dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset Collection Bindings is not exist!')
else:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
click.echo(click.style('Congratulations! Create {} dataset indexes.'.format(create_count), fg='green'))
elif vector_type == "milvus":
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
dataset.index_struct = json.dumps(index_struct_dict)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
vector = Vector(dataset)
click.echo(f"Start to migrate dataset {dataset.id}.")
try:
vector.delete()
click.echo(
click.style(f'Successfully delete vector index {collection_name} for dataset {dataset.id}.',
fg='green'))
except Exception as e:
click.echo(
click.style(f'Failed to delete vector index {collection_name} for dataset {dataset.id}.',
fg='red'))
raise e
dataset_documents = db.session.query(DatasetDocument).filter(
DatasetDocument.dataset_id == dataset.id,
DatasetDocument.indexing_status == 'completed',
DatasetDocument.enabled == True,
DatasetDocument.archived == False,
).all()
documents = []
segments_count = 0
for dataset_document in dataset_documents:
segments = db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).all()
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
segments_count = segments_count + 1
if documents:
try:
click.echo(click.style(
f'Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.',
fg='green'))
vector.create(documents)
click.echo(
click.style(f'Successfully created vector index for dataset {dataset.id}.', fg='green'))
except Exception as e:
click.echo(click.style(f'Failed to created vector index for dataset {dataset.id}.', fg='red'))
raise e
db.session.add(dataset)
db.session.commit()
click.echo(f'Successfully migrated dataset {dataset.id}.')
create_count += 1
except Exception as e:
db.session.rollback()
click.echo(
click.style('Create dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
continue
click.echo(
click.style(f'Congratulations! Create {create_count} dataset indexes, and skipped {skipped_count} datasets.',
fg='green'))
@click.command('convert-to-agent-apps', help='Convert Agent Assistant to Agent App.')
def convert_to_agent_apps():
"""
Convert Agent Assistant to Agent App.
"""
click.echo(click.style('Start convert to agent apps.', fg='green'))
proceeded_app_ids = []
while True:
# fetch first 1000 apps
sql_query = """SELECT a.id AS id FROM apps a
INNER JOIN app_model_configs am ON a.app_model_config_id=am.id
WHERE a.mode = 'chat'
AND am.agent_mode is not null
AND (
am.agent_mode like '%"strategy": "function_call"%'
OR am.agent_mode like '%"strategy": "react"%'
)
AND (
am.agent_mode like '{"enabled": true%'
OR am.agent_mode like '{"max_iteration": %'
) ORDER BY a.created_at DESC LIMIT 1000
"""
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query))
apps = []
for i in rs:
app_id = str(i.id)
if app_id not in proceeded_app_ids:
proceeded_app_ids.append(app_id)
app = db.session.query(App).filter(App.id == app_id).first()
apps.append(app)
if len(apps) == 0:
break
for app in apps:
click.echo('Converting app: {}'.format(app.id))
try:
app.mode = AppMode.AGENT_CHAT.value
db.session.commit()
# update conversation mode to agent
db.session.query(Conversation).filter(Conversation.app_id == app.id).update(
{Conversation.mode: AppMode.AGENT_CHAT.value}
)
db.session.commit()
click.echo(click.style('Converted app: {}'.format(app.id), fg='green'))
except Exception as e:
click.echo(
click.style('Convert app error: {} {}'.format(e.__class__.__name__,
str(e)), fg='red'))
click.echo(click.style('Congratulations! Converted {} agent apps.'.format(len(proceeded_app_ids)), fg='green'))
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
app.cli.add_command(reset_encrypt_key_pair)
app.cli.add_command(create_qdrant_indexes)
app.cli.add_command(vdb_migrate)
app.cli.add_command(convert_to_agent_apps)
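The old `create-qdrant-indexes` command is replaced by `vdb-migrate` (with `--scope knowledge|annotation|all`) and a new `convert-to-agent-apps` command. A hedged sketch of driving the new command programmatically, e.g. from a smoke test run inside the `api/` directory (imports, database, and vector-store configuration are assumed):
```python
# Sketch only: invoke the new CLI command through Flask's test CLI runner.
# Assumes create_app() from api/app.py and a configured database/vector store.
from app import create_app

app = create_app()
runner = app.test_cli_runner()

# Migrate only knowledge-base vectors; use --scope annotation or all for the rest.
result = runner.invoke(args=["vdb-migrate", "--scope", "knowledge"])
print(result.output)
```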


@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import os
import dotenv
@ -23,11 +22,13 @@ DEFAULTS = {
'SERVICE_API_URL': 'https://api.dify.ai',
'APP_WEB_URL': 'https://udify.app',
'FILES_URL': '',
'S3_ADDRESS_STYLE': 'auto',
'STORAGE_TYPE': 'local',
'STORAGE_LOCAL_PATH': 'storage',
'CHECK_UPDATE_URL': 'https://updates.dify.ai',
'DEPLOY_ENV': 'PRODUCTION',
'SQLALCHEMY_POOL_SIZE': 30,
'SQLALCHEMY_MAX_OVERFLOW': 10,
'SQLALCHEMY_POOL_RECYCLE': 3600,
'SQLALCHEMY_ECHO': 'False',
'SENTRY_TRACES_SAMPLE_RATE': 1.0,
@ -39,7 +40,9 @@ DEFAULTS = {
'LOG_LEVEL': 'INFO',
'HOSTED_OPENAI_QUOTA_LIMIT': 200,
'HOSTED_OPENAI_TRIAL_ENABLED': 'False',
'HOSTED_OPENAI_TRIAL_MODELS': 'gpt-3.5-turbo,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-16k,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-0125,text-davinci-003',
'HOSTED_OPENAI_PAID_ENABLED': 'False',
'HOSTED_OPENAI_PAID_MODELS': 'gpt-4,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-4-0125-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613,gpt-3.5-turbo-0125,gpt-3.5-turbo-instruct,text-davinci-003',
'HOSTED_AZURE_OPENAI_ENABLED': 'False',
'HOSTED_AZURE_OPENAI_QUOTA_LIMIT': 200,
'HOSTED_ANTHROPIC_QUOTA_LIMIT': 600000,
@ -47,6 +50,8 @@ DEFAULTS = {
'HOSTED_ANTHROPIC_PAID_ENABLED': 'False',
'HOSTED_MODERATION_ENABLED': 'False',
'HOSTED_MODERATION_PROVIDERS': '',
'HOSTED_FETCH_APP_TEMPLATES_MODE': 'remote',
'HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN': 'https://tmpl.dify.ai',
'CLEAN_DAY_SETTING': 30,
'UPLOAD_FILE_SIZE_LIMIT': 15,
'UPLOAD_FILE_BATCH_LIMIT': 5,
@ -57,6 +62,12 @@ DEFAULTS = {
'BILLING_ENABLED': 'False',
'CAN_REPLACE_LOGO': 'False',
'ETL_TYPE': 'dify',
'KEYWORD_STORE': 'jieba',
'BATCH_UPLOAD_LIMIT': 20,
'CODE_EXECUTION_ENDPOINT': '',
'CODE_EXECUTION_API_KEY': '',
'TOOL_ICON_CACHE_MAX_AGE': 3600,
'KEYWORD_DATA_SOURCE_TYPE': 'database',
}
@ -87,7 +98,7 @@ class Config:
# ------------------------
# General Configurations.
# ------------------------
self.CURRENT_VERSION = "0.5.4"
self.CURRENT_VERSION = "0.6.0-preview-workflow.2"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@ -143,6 +154,7 @@ class Config:
self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}{db_extras}"
self.SQLALCHEMY_ENGINE_OPTIONS = {
'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')),
'max_overflow': int(get_env('SQLALCHEMY_MAX_OVERFLOW')),
'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE'))
}
@ -177,13 +189,18 @@ class Config:
self.S3_ACCESS_KEY = get_env('S3_ACCESS_KEY')
self.S3_SECRET_KEY = get_env('S3_SECRET_KEY')
self.S3_REGION = get_env('S3_REGION')
self.S3_ADDRESS_STYLE = get_env('S3_ADDRESS_STYLE')
self.AZURE_BLOB_ACCOUNT_NAME = get_env('AZURE_BLOB_ACCOUNT_NAME')
self.AZURE_BLOB_ACCOUNT_KEY = get_env('AZURE_BLOB_ACCOUNT_KEY')
self.AZURE_BLOB_CONTAINER_NAME = get_env('AZURE_BLOB_CONTAINER_NAME')
self.AZURE_BLOB_ACCOUNT_URL = get_env('AZURE_BLOB_ACCOUNT_URL')
# ------------------------
# Vector Store Configurations.
# Currently, only support: qdrant, milvus, zilliz, weaviate
# ------------------------
self.VECTOR_STORE = get_env('VECTOR_STORE')
self.KEYWORD_STORE = get_env('KEYWORD_STORE')
# qdrant settings
self.QDRANT_URL = get_env('QDRANT_URL')
self.QDRANT_API_KEY = get_env('QDRANT_API_KEY')
@ -209,6 +226,12 @@ class Config:
self.MAIL_DEFAULT_SEND_FROM = get_env('MAIL_DEFAULT_SEND_FROM')
self.RESEND_API_KEY = get_env('RESEND_API_KEY')
self.RESEND_API_URL = get_env('RESEND_API_URL')
# SMTP settings
self.SMTP_SERVER = get_env('SMTP_SERVER')
self.SMTP_PORT = get_env('SMTP_PORT')
self.SMTP_USERNAME = get_env('SMTP_USERNAME')
self.SMTP_PASSWORD = get_env('SMTP_PASSWORD')
self.SMTP_USE_TLS = get_bool_env('SMTP_USE_TLS')
# ------------------------
# Workpace Configurations.
@ -254,8 +277,10 @@ class Config:
self.HOSTED_OPENAI_API_BASE = get_env('HOSTED_OPENAI_API_BASE')
self.HOSTED_OPENAI_API_ORGANIZATION = get_env('HOSTED_OPENAI_API_ORGANIZATION')
self.HOSTED_OPENAI_TRIAL_ENABLED = get_bool_env('HOSTED_OPENAI_TRIAL_ENABLED')
self.HOSTED_OPENAI_TRIAL_MODELS = get_env('HOSTED_OPENAI_TRIAL_MODELS')
self.HOSTED_OPENAI_QUOTA_LIMIT = int(get_env('HOSTED_OPENAI_QUOTA_LIMIT'))
self.HOSTED_OPENAI_PAID_ENABLED = get_bool_env('HOSTED_OPENAI_PAID_ENABLED')
self.HOSTED_OPENAI_PAID_MODELS = get_env('HOSTED_OPENAI_PAID_MODELS')
self.HOSTED_AZURE_OPENAI_ENABLED = get_bool_env('HOSTED_AZURE_OPENAI_ENABLED')
self.HOSTED_AZURE_OPENAI_API_KEY = get_env('HOSTED_AZURE_OPENAI_API_KEY')
@ -275,11 +300,24 @@ class Config:
self.HOSTED_MODERATION_ENABLED = get_bool_env('HOSTED_MODERATION_ENABLED')
self.HOSTED_MODERATION_PROVIDERS = get_env('HOSTED_MODERATION_PROVIDERS')
# fetch app templates mode, remote, builtin, db(only for dify SaaS), default: remote
self.HOSTED_FETCH_APP_TEMPLATES_MODE = get_env('HOSTED_FETCH_APP_TEMPLATES_MODE')
self.HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN = get_env('HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN')
self.ETL_TYPE = get_env('ETL_TYPE')
self.UNSTRUCTURED_API_URL = get_env('UNSTRUCTURED_API_URL')
self.BILLING_ENABLED = get_bool_env('BILLING_ENABLED')
self.CAN_REPLACE_LOGO = get_bool_env('CAN_REPLACE_LOGO')
self.BATCH_UPLOAD_LIMIT = get_env('BATCH_UPLOAD_LIMIT')
self.CODE_EXECUTION_ENDPOINT = get_env('CODE_EXECUTION_ENDPOINT')
self.CODE_EXECUTION_API_KEY = get_env('CODE_EXECUTION_API_KEY')
self.API_COMPRESSION_ENABLED = get_bool_env('API_COMPRESSION_ENABLED')
self.TOOL_ICON_CACHE_MAX_AGE = get_env('TOOL_ICON_CACHE_MAX_AGE')
self.KEYWORD_DATA_SOURCE_TYPE = get_env('KEYWORD_DATA_SOURCE_TYPE')
class CloudEditionConfig(Config):
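The Config diff leans on `get_env`/`get_bool_env` plus the `DEFAULTS` table for every new setting (`SQLALCHEMY_MAX_OVERFLOW`, `SMTP_*`, `CODE_EXECUTION_*`, and so on). Those helpers are defined earlier in config.py and are not shown here; a minimal equivalent, for orientation only, looks like:
```python
# Not the actual helpers from config.py; a minimal equivalent of the pattern.
import os

DEFAULTS = {
    "SQLALCHEMY_MAX_OVERFLOW": 10,
    "API_COMPRESSION_ENABLED": "False",
}


def get_env(key: str):
    # Fall back to the DEFAULTS table when the variable is unset.
    return os.environ.get(key, DEFAULTS.get(key))


def get_bool_env(key: str) -> bool:
    # Interpret the usual truthy spellings; everything else is False.
    return str(get_env(key)).lower() in ("true", "1", "yes")


max_overflow = int(get_env("SQLALCHEMY_MAX_OVERFLOW"))
compression_enabled = get_bool_env("API_COMPRESSION_ENABLED")
```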


@ -1,9 +1,6 @@
import json
from models.model import AppModelConfig
languages = ['en-US', 'zh-Hans', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT']
languages = ['en-US', 'zh-Hans', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN']
language_timezone_mapping = {
'en-US': 'America/New_York',
@ -16,8 +13,11 @@ language_timezone_mapping = {
'ko-KR': 'Asia/Seoul',
'ru-RU': 'Europe/Moscow',
'it-IT': 'Europe/Rome',
'uk-UA': 'Europe/Kyiv',
'vi-VN': 'Asia/Ho_Chi_Minh',
}
def supported_language(lang):
if lang in languages:
return lang
@ -25,303 +25,3 @@ def supported_language(lang):
error = ('{lang} is not a valid language.'
.format(lang=lang))
raise ValueError(error)
user_input_form_template = {
"en-US": [
{
"paragraph": {
"label": "Query",
"variable": "default_input",
"required": False,
"default": ""
}
}
],
"zh-Hans": [
{
"paragraph": {
"label": "查询内容",
"variable": "default_input",
"required": False,
"default": ""
}
}
],
"pt-BR": [
{
"paragraph": {
"label": "Consulta",
"variable": "default_input",
"required": False,
"default": ""
}
}
],
"es-ES": [
{
"paragraph": {
"label": "Consulta",
"variable": "default_input",
"required": False,
"default": ""
}
}
],
}
demo_model_templates = {
'en-US': [
{
'name': 'Translation Assistant',
'icon': '',
'icon_background': '',
'description': 'A multilingual translator that provides translation capabilities in multiple languages, translating user input into the language they need.',
'mode': 'completion',
'model_config': AppModelConfig(
provider='openai',
model_id='gpt-3.5-turbo-instruct',
configs={
'prompt_template': "Please translate the following text into {{target_language}}:\n",
'prompt_variables': [
{
"key": "target_language",
"name": "Target Language",
"description": "The language you want to translate into.",
"type": "select",
"default": "Chinese",
'options': [
'Chinese',
'English',
'Japanese',
'French',
'Russian',
'German',
'Spanish',
'Korean',
'Italian',
]
}
],
'completion_params': {
'max_token': 1000,
'temperature': 0,
'top_p': 0,
'presence_penalty': 0.1,
'frequency_penalty': 0.1,
}
},
opening_statement='',
suggested_questions=None,
pre_prompt="Please translate the following text into {{target_language}}:\n{{query}}\ntranslate:",
model=json.dumps({
"provider": "openai",
"name": "gpt-3.5-turbo-instruct",
"mode": "completion",
"completion_params": {
"max_tokens": 1000,
"temperature": 0,
"top_p": 0,
"presence_penalty": 0.1,
"frequency_penalty": 0.1
}
}),
user_input_form=json.dumps([
{
"select": {
"label": "Target Language",
"variable": "target_language",
"description": "The language you want to translate into.",
"default": "Chinese",
"required": True,
'options': [
'Chinese',
'English',
'Japanese',
'French',
'Russian',
'German',
'Spanish',
'Korean',
'Italian',
]
}
},{
"paragraph": {
"label": "Query",
"variable": "query",
"required": True,
"default": ""
}
}
])
)
},
{
'name': 'AI Front-end Interviewer',
'icon': '',
'icon_background': '',
'description': 'A simulated front-end interviewer that tests the skill level of front-end development through questioning.',
'mode': 'chat',
'model_config': AppModelConfig(
provider='openai',
model_id='gpt-3.5-turbo',
configs={
'introduction': 'Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
'prompt_template': "You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
'prompt_variables': [],
'completion_params': {
'max_token': 300,
'temperature': 0.8,
'top_p': 0.9,
'presence_penalty': 0.1,
'frequency_penalty': 0.1,
}
},
opening_statement='Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
suggested_questions=None,
pre_prompt="You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
model=json.dumps({
"provider": "openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {
"max_tokens": 300,
"temperature": 0.8,
"top_p": 0.9,
"presence_penalty": 0.1,
"frequency_penalty": 0.1
}
}),
user_input_form=None
)
}
],
'zh-Hans': [
{
'name': '翻译助手',
'icon': '',
'icon_background': '',
'description': '一个多语言翻译器,提供多种语言翻译能力,将用户输入的文本翻译成他们需要的语言。',
'mode': 'completion',
'model_config': AppModelConfig(
provider='openai',
model_id='gpt-3.5-turbo-instruct',
configs={
'prompt_template': "请将以下文本翻译为{{target_language}}:\n",
'prompt_variables': [
{
"key": "target_language",
"name": "目标语言",
"description": "翻译的目标语言",
"type": "select",
"default": "中文",
"options": [
"中文",
"英文",
"日语",
"法语",
"俄语",
"德语",
"西班牙语",
"韩语",
"意大利语",
]
}
],
'completion_params': {
'max_token': 1000,
'temperature': 0,
'top_p': 0,
'presence_penalty': 0.1,
'frequency_penalty': 0.1,
}
},
opening_statement='',
suggested_questions=None,
pre_prompt="请将以下文本翻译为{{target_language}}:\n{{query}}\n翻译:",
model=json.dumps({
"provider": "openai",
"name": "gpt-3.5-turbo-instruct",
"mode": "completion",
"completion_params": {
"max_tokens": 1000,
"temperature": 0,
"top_p": 0,
"presence_penalty": 0.1,
"frequency_penalty": 0.1
}
}),
user_input_form=json.dumps([
{
"select": {
"label": "目标语言",
"variable": "target_language",
"description": "翻译的目标语言",
"default": "中文",
"required": True,
'options': [
"中文",
"英文",
"日语",
"法语",
"俄语",
"德语",
"西班牙语",
"韩语",
"意大利语",
]
}
},{
"paragraph": {
"label": "文本内容",
"variable": "query",
"required": True,
"default": ""
}
}
])
)
},
{
'name': 'AI 前端面试官',
'icon': '',
'icon_background': '',
'description': '一个模拟的前端面试官,通过提问的方式对前端开发的技能水平进行检验。',
'mode': 'chat',
'model_config': AppModelConfig(
provider='openai',
model_id='gpt-3.5-turbo',
configs={
'introduction': '你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
'prompt_template': "你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
'prompt_variables': [],
'completion_params': {
'max_token': 300,
'temperature': 0.8,
'top_p': 0.9,
'presence_penalty': 0.1,
'frequency_penalty': 0.1,
}
},
opening_statement='你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
suggested_questions=None,
pre_prompt="你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
model=json.dumps({
"provider": "openai",
"name": "gpt-3.5-turbo",
"mode": "chat",
"completion_params": {
"max_tokens": 300,
"temperature": 0.8,
"top_p": 0.9,
"presence_penalty": 0.1,
"frequency_penalty": 0.1
}
}),
user_input_form=None
)
}
],
}
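With `uk-UA` and `vi-VN` added to `languages` and `language_timezone_mapping`, the existing `supported_language` helper accepts them unchanged. A quick usage check (the `constants.languages` import path is assumed from the contents shown above):
```python
# Assumes this module lives at api/constants/languages.py, as its contents suggest.
from constants.languages import languages, supported_language

assert "uk-UA" in languages and "vi-VN" in languages
print(supported_language("vi-VN"))      # -> 'vi-VN'

try:
    supported_language("xx-XX")
except ValueError as err:
    print(err)                          # -> 'xx-XX is not a valid language.'
```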


@ -1,43 +1,31 @@
import json
model_templates = {
# completion default mode
'completion_default': {
from models.model import AppMode
default_app_templates = {
# workflow default mode
AppMode.WORKFLOW: {
'app': {
'mode': 'completion',
'mode': AppMode.WORKFLOW.value,
'enable_site': True,
'enable_api': True,
'is_demo': False,
'api_rpm': 0,
'api_rph': 0,
'status': 'normal'
'enable_api': True
}
},
# completion default mode
AppMode.COMPLETION: {
'app': {
'mode': AppMode.COMPLETION.value,
'enable_site': True,
'enable_api': True
},
'model_config': {
'provider': 'openai',
'model_id': 'gpt-3.5-turbo-instruct',
'configs': {
'prompt_template': '',
'prompt_variables': [],
'completion_params': {
'max_token': 512,
'temperature': 1,
'top_p': 1,
'presence_penalty': 0,
'frequency_penalty': 0,
}
},
'model': json.dumps({
'model': {
"provider": "openai",
"name": "gpt-3.5-turbo-instruct",
"mode": "completion",
"completion_params": {
"max_tokens": 512,
"temperature": 1,
"top_p": 1,
"presence_penalty": 0,
"frequency_penalty": 0
}
}),
"name": "gpt-4",
"mode": "chat",
"completion_params": {}
},
'user_input_form': json.dumps([
{
"paragraph": {
@ -49,48 +37,50 @@ model_templates = {
}
]),
'pre_prompt': '{{query}}'
}
},
},
# chat default mode
'chat_default': {
AppMode.CHAT: {
'app': {
'mode': 'chat',
'mode': AppMode.CHAT.value,
'enable_site': True,
'enable_api': True,
'is_demo': False,
'api_rpm': 0,
'api_rph': 0,
'status': 'normal'
'enable_api': True
},
'model_config': {
'provider': 'openai',
'model_id': 'gpt-3.5-turbo',
'configs': {
'prompt_template': '',
'prompt_variables': [],
'completion_params': {
'max_token': 512,
'temperature': 1,
'top_p': 1,
'presence_penalty': 0,
'frequency_penalty': 0,
}
},
'model': json.dumps({
'model': {
"provider": "openai",
"name": "gpt-3.5-turbo",
"name": "gpt-4",
"mode": "chat",
"completion_params": {
"max_tokens": 512,
"temperature": 1,
"top_p": 1,
"presence_penalty": 0,
"frequency_penalty": 0
}
})
"completion_params": {}
}
}
},
# advanced-chat default mode
AppMode.ADVANCED_CHAT: {
'app': {
'mode': AppMode.ADVANCED_CHAT.value,
'enable_site': True,
'enable_api': True
}
},
# agent-chat default mode
AppMode.AGENT_CHAT: {
'app': {
'mode': AppMode.AGENT_CHAT.value,
'enable_site': True,
'enable_api': True
},
'model_config': {
'model': {
"provider": "openai",
"name": "gpt-4",
"mode": "chat",
"completion_params": {}
}
}
}
}

File diff suppressed because one or more lines are too long


@ -5,10 +5,10 @@ bp = Blueprint('console', __name__, url_prefix='/console/api')
api = ExternalApi(bp)
# Import other controllers
from . import admin, apikey, extension, feature, setup, version
from . import admin, apikey, extension, feature, setup, version, ping
# Import app controllers
from .app import (advanced_prompt_template, annotation, app, audio, completion, conversation, generator, message,
model_config, site, statistic)
model_config, site, statistic, workflow, workflow_run, workflow_app_log, workflow_statistic, agent)
# Import auth controllers
from .auth import activate, data_source_oauth, login, oauth
# Import billing controllers
@ -16,6 +16,7 @@ from .billing import billing
# Import datasets controllers
from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing
# Import explore controllers
from .explore import audio, completion, conversation, installed_app, message, parameter, recommended_app, saved_message
from .explore import (audio, completion, conversation, installed_app, message, parameter, recommended_app,
saved_message, workflow)
# Import workspace controllers
from .workspace import account, members, model_providers, models, tool_providers, workspace
from .workspace import account, members, model_providers, models, tool_providers, workspace


@ -1,21 +0,0 @@
from controllers.console.app.error import AppUnavailableError
from extensions.ext_database import db
from flask_login import current_user
from models.model import App
from werkzeug.exceptions import NotFound
def _get_app(app_id, mode=None):
app = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == current_user.current_tenant_id,
App.status == 'normal'
).first()
if not app:
raise NotFound("App not found")
if mode and app.mode != mode:
raise NotFound("The {} app not found".format(mode))
return app


@ -0,0 +1,32 @@
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from libs.helper import uuid_value
from libs.login import login_required
from models.model import AppMode
from services.agent_service import AgentService
class AgentLogApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.AGENT_CHAT])
def get(self, app_model):
"""Get agent logs"""
parser = reqparse.RequestParser()
parser.add_argument('message_id', type=uuid_value, required=True, location='args')
parser.add_argument('conversation_id', type=uuid_value, required=True, location='args')
args = parser.parse_args()
return AgentService.get_agent_logs(
app_model,
args['conversation_id'],
args['message_id']
)
api.add_resource(AgentLogApi, '/apps/<uuid:app_id>/agent/logs')
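The new `AgentLogApi` resource is mounted at `/console/api/apps/<uuid:app_id>/agent/logs` and requires `message_id` and `conversation_id` query parameters. A hedged client sketch follows; console authentication is elided, and the base URL, IDs, and header are placeholders rather than the real auth scheme:
```python
# Placeholder values throughout; the console API expects a logged-in session.
import requests

BASE = "http://localhost:5001/console/api"   # assumed local API address
app_id = "00000000-0000-0000-0000-000000000000"

resp = requests.get(
    f"{BASE}/apps/{app_id}/agent/logs",
    params={
        "message_id": "00000000-0000-0000-0000-000000000001",
        "conversation_id": "00000000-0000-0000-0000-000000000002",
    },
    headers={"Authorization": "Bearer <console-session-token>"},  # placeholder
    timeout=10,
)
print(resp.status_code, resp.json())
```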


@ -1,40 +1,28 @@
# -*- coding:utf-8 -*-
import json
import logging
from datetime import datetime
from flask_login import current_user
from flask_restful import Resource, abort, inputs, marshal_with, reqparse
from werkzeug.exceptions import Forbidden
from flask_restful import Resource, inputs, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, BadRequest
from constants.languages import demo_model_templates, languages
from constants.model_template import model_templates
from controllers.console import api
from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from events.app_event import app_was_created, app_was_deleted
from core.agent.entities import AgentToolEntity
from extensions.ext_database import db
from fields.app_fields import (
app_detail_fields,
app_detail_fields_with_site,
app_pagination_fields,
template_list_fields,
)
from libs.login import login_required
from models.model import App, AppModelConfig, Site
from services.app_model_config_service import AppModelConfigService
from services.app_service import AppService
from models.model import App, AppModelConfig, AppMode
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.tools.tool_manager import ToolManager
def _get_app(app_id, tenant_id):
app = db.session.query(App).filter(App.id == app_id, App.tenant_id == tenant_id).first()
if not app:
raise AppNotFoundError
return app
ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']
class AppListApi(Resource):
@ -48,33 +36,15 @@ class AppListApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('page', type=inputs.int_range(1, 99999), required=False, default=1, location='args')
parser.add_argument('limit', type=inputs.int_range(1, 100), required=False, default=20, location='args')
parser.add_argument('mode', type=str, choices=['chat', 'completion', 'all'], default='all', location='args', required=False)
parser.add_argument('mode', type=str, choices=['chat', 'workflow', 'agent-chat', 'channel', 'all'], default='all', location='args', required=False)
parser.add_argument('name', type=str, location='args', required=False)
args = parser.parse_args()
filters = [
App.tenant_id == current_user.current_tenant_id,
App.is_universal == False
]
# get app list
app_service = AppService()
app_pagination = app_service.get_paginate_apps(current_user.current_tenant_id, args)
if args['mode'] == 'completion':
filters.append(App.mode == 'completion')
elif args['mode'] == 'chat':
filters.append(App.mode == 'chat')
else:
pass
if 'name' in args and args['name']:
filters.append(App.name.ilike(f'%{args["name"]}%'))
app_models = db.paginate(
db.select(App).where(*filters).order_by(App.created_at.desc()),
page=args['page'],
per_page=args['limit'],
error_out=False
)
return app_models
return app_pagination
@setup_required
@login_required
@ -85,153 +55,49 @@ class AppListApi(Resource):
"""Create app"""
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('mode', type=str, choices=['completion', 'chat', 'assistant'], location='json')
parser.add_argument('description', type=str, location='json')
parser.add_argument('mode', type=str, choices=ALLOW_CREATE_APP_MODES, location='json')
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
parser.add_argument('model_config', type=dict, location='json')
args = parser.parse_args()
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
try:
provider_manager = ProviderManager()
default_model_entity = provider_manager.get_default_model(
tenant_id=current_user.current_tenant_id,
model_type=ModelType.LLM
)
except (ProviderTokenNotInitError, LLMBadRequestError):
default_model_entity = None
except Exception as e:
logging.exception(e)
default_model_entity = None
if 'mode' not in args or args['mode'] is None:
raise BadRequest("mode is required")
if args['model_config'] is not None:
# validate config
model_config_dict = args['model_config']
# Get provider configurations
provider_manager = ProviderManager()
provider_configurations = provider_manager.get_configurations(current_user.current_tenant_id)
# get available models from provider_configurations
available_models = provider_configurations.get_models(
model_type=ModelType.LLM,
only_active=True
)
# check if model is available
available_models_names = [f'{model.provider.provider}.{model.model}' for model in available_models]
provider_model = f"{model_config_dict['model']['provider']}.{model_config_dict['model']['name']}"
if provider_model not in available_models_names:
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=current_user.current_tenant_id,
model_type=ModelType.LLM
)
if not model_instance:
raise ProviderNotInitializeError(
f"No Default System Reasoning Model available. Please configure "
f"in the Settings -> Model Provider.")
else:
model_config_dict["model"]["provider"] = model_instance.provider
model_config_dict["model"]["name"] = model_instance.model
model_configuration = AppModelConfigService.validate_configuration(
tenant_id=current_user.current_tenant_id,
account=current_user,
config=model_config_dict,
app_mode=args['mode']
)
app = App(
enable_site=True,
enable_api=True,
is_demo=False,
api_rpm=0,
api_rph=0,
status='normal'
)
app_model_config = AppModelConfig()
app_model_config = app_model_config.from_model_config_dict(model_configuration)
else:
if 'mode' not in args or args['mode'] is None:
abort(400, message="mode is required")
model_config_template = model_templates[args['mode'] + '_default']
app = App(**model_config_template['app'])
app_model_config = AppModelConfig(**model_config_template['model_config'])
# get model provider
model_manager = ModelManager()
try:
model_instance = model_manager.get_default_model_instance(
tenant_id=current_user.current_tenant_id,
model_type=ModelType.LLM
)
except ProviderTokenNotInitError:
model_instance = None
if model_instance:
model_dict = app_model_config.model_dict
model_dict['provider'] = model_instance.provider
model_dict['name'] = model_instance.model
app_model_config.model = json.dumps(model_dict)
app.name = args['name']
app.mode = args['mode']
app.icon = args['icon']
app.icon_background = args['icon_background']
app.tenant_id = current_user.current_tenant_id
db.session.add(app)
db.session.flush()
app_model_config.app_id = app.id
db.session.add(app_model_config)
db.session.flush()
app.app_model_config_id = app_model_config.id
account = current_user
site = Site(
app_id=app.id,
title=app.name,
default_language=account.interface_language,
customize_token_strategy='not_allow',
code=Site.generate_code(16)
)
db.session.add(site)
db.session.commit()
app_was_created.send(app)
app_service = AppService()
app = app_service.create_app(current_user.current_tenant_id, args, current_user)
return app, 201
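A minimal client sketch of creating an app through this endpoint, assuming the console API is mounted at /console/api on a local deployment and authenticated with a bearer token (both placeholders, not part of the diff):

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed deployment URL
TOKEN = "<console-access-token>"                    # placeholder credential

resp = requests.post(
    f"{CONSOLE_API}/apps",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={
        "name": "demo-workflow-app",
        "mode": "workflow",            # must be one of ALLOW_CREATE_APP_MODES
        "icon": "🤖",
        "icon_background": "#FFEAD5",
    },
)
resp.raise_for_status()
print(resp.json())  # created app detail, handled by AppService.create_app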
class AppTemplateApi(Resource):
class AppImportApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(template_list_fields)
def get(self):
"""Get app demo templates"""
account = current_user
interface_language = account.interface_language
@marshal_with(app_detail_fields_with_site)
@cloud_edition_billing_resource_check('apps')
def post(self):
"""Import app"""
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
templates = demo_model_templates.get(interface_language)
if not templates:
templates = demo_model_templates.get(languages[0])
parser = reqparse.RequestParser()
parser.add_argument('data', type=str, required=True, nullable=False, location='json')
parser.add_argument('name', type=str, location='json')
parser.add_argument('description', type=str, location='json')
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
return {'data': templates}
app_service = AppService()
app = app_service.import_app(current_user.current_tenant_id, args['data'], args, current_user)
return app, 201
class AppApi(Resource):
@ -239,176 +105,198 @@ class AppApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields_with_site)
def get(self, app_id):
def get(self, app_model):
"""Get app detail"""
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
# get original app model config
if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
model_config: AppModelConfig = app_model.app_model_config
agent_mode = model_config.agent_mode_dict
# decrypt agent tool parameters if it's secret-input
for tool in agent_mode.get('tools') or []:
if not isinstance(tool, dict) or len(tool.keys()) <= 3:
continue
agent_tool_entity = AgentToolEntity(**tool)
# get tool
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
agent_tool=agent_tool_entity,
)
manager = ToolParameterConfigurationManager(
tenant_id=current_user.current_tenant_id,
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
)
return app
# get decrypted parameters
if agent_tool_entity.tool_parameters:
parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
masked_parameter = manager.mask_tool_parameters(parameters or {})
else:
masked_parameter = {}
# override tool parameters
tool['tool_parameters'] = masked_parameter
except Exception as e:
pass
# override agent mode
model_config.agent_mode = json.dumps(agent_mode)
db.session.commit()
return app_model
@setup_required
@login_required
@account_initialization_required
def delete(self, app_id):
"""Delete app"""
app_id = str(app_id)
@get_app_model
@marshal_with(app_detail_fields_with_site)
def put(self, app_model):
"""Update app"""
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, nullable=False, location='json')
parser.add_argument('description', type=str, location='json')
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
app_service = AppService()
app_model = app_service.update_app(app_model, args)
return app_model
@setup_required
@login_required
@account_initialization_required
@get_app_model
def delete(self, app_model):
"""Delete app"""
if not current_user.is_admin_or_owner:
raise Forbidden()
app = _get_app(app_id, current_user.current_tenant_id)
db.session.delete(app)
db.session.commit()
# todo delete related data??
# model_config, site, api_token, conversation, message, message_feedback, message_annotation
app_was_deleted.send(app)
app_service = AppService()
app_service.delete_app(app_model)
return {'result': 'success'}, 204
class AppCopyApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields_with_site)
def post(self, app_model):
"""Copy app"""
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, location='json')
parser.add_argument('description', type=str, location='json')
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
app_service = AppService()
data = app_service.export_app(app_model)
app = app_service.import_app(current_user.current_tenant_id, data, args, current_user)
return app, 201
class AppExportApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
def get(self, app_model):
"""Export app"""
app_service = AppService()
return {
"data": app_service.export_app(app_model)
}
class AppNameApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_id):
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
args = parser.parse_args()
app.name = args.get('name')
app.updated_at = datetime.utcnow()
db.session.commit()
return app
app_service = AppService()
app_model = app_service.update_app_name(app_model, args.get('name'))
return app_model
class AppIconApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_id):
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
args = parser.parse_args()
app.icon = args.get('icon')
app.icon_background = args.get('icon_background')
app.updated_at = datetime.utcnow()
db.session.commit()
app_service = AppService()
app_model = app_service.update_app_icon(app_model, args.get('icon'), args.get('icon_background'))
return app
return app_model
class AppSiteStatus(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_id):
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('enable_site', type=bool, required=True, location='json')
args = parser.parse_args()
app_id = str(app_id)
app = db.session.query(App).filter(App.id == app_id, App.tenant_id == current_user.current_tenant_id).first()
if not app:
raise AppNotFoundError
if args.get('enable_site') == app.enable_site:
return app
app_service = AppService()
app_model = app_service.update_app_site_status(app_model, args.get('enable_site'))
app.enable_site = args.get('enable_site')
app.updated_at = datetime.utcnow()
db.session.commit()
return app
return app_model
class AppApiStatus(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_id):
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('enable_api', type=bool, required=True, location='json')
args = parser.parse_args()
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
app_service = AppService()
app_model = app_service.update_app_api_status(app_model, args.get('enable_api'))
if args.get('enable_api') == app.enable_api:
return app
app.enable_api = args.get('enable_api')
app.updated_at = datetime.utcnow()
db.session.commit()
return app
class AppCopy(Resource):
@staticmethod
def create_app_copy(app):
copy_app = App(
name=app.name + ' copy',
icon=app.icon,
icon_background=app.icon_background,
tenant_id=app.tenant_id,
mode=app.mode,
app_model_config_id=app.app_model_config_id,
enable_site=app.enable_site,
enable_api=app.enable_api,
api_rpm=app.api_rpm,
api_rph=app.api_rph
)
return copy_app
@staticmethod
def create_app_model_config_copy(app_config, copy_app_id):
copy_app_model_config = app_config.copy()
copy_app_model_config.app_id = copy_app_id
return copy_app_model_config
@setup_required
@login_required
@account_initialization_required
@marshal_with(app_detail_fields)
def post(self, app_id):
app_id = str(app_id)
app = _get_app(app_id, current_user.current_tenant_id)
copy_app = self.create_app_copy(app)
db.session.add(copy_app)
app_config = db.session.query(AppModelConfig). \
filter(AppModelConfig.app_id == app_id). \
one_or_none()
if app_config:
copy_app_model_config = self.create_app_model_config_copy(app_config, copy_app.id)
db.session.add(copy_app_model_config)
db.session.commit()
copy_app.app_model_config_id = copy_app_model_config.id
db.session.commit()
return copy_app, 201
return app_model
api.add_resource(AppListApi, '/apps')
api.add_resource(AppTemplateApi, '/app-templates')
api.add_resource(AppImportApi, '/apps/import')
api.add_resource(AppApi, '/apps/<uuid:app_id>')
api.add_resource(AppCopy, '/apps/<uuid:app_id>/copy')
api.add_resource(AppCopyApi, '/apps/<uuid:app_id>/copy')
api.add_resource(AppExportApi, '/apps/<uuid:app_id>/export')
api.add_resource(AppNameApi, '/apps/<uuid:app_id>/name')
api.add_resource(AppIconApi, '/apps/<uuid:app_id>/icon')
api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable')
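An export/import round trip is what AppCopyApi performs server-side; the same pair of endpoints can be driven from a client. A sketch with the same assumed base URL and token placeholders:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
HEADERS = {"Authorization": f"Bearer {TOKEN}"}
app_id = "<existing-app-uuid>"                      # placeholder

# GET /apps/<app_id>/export returns {"data": <exported app definition>}
exported = requests.get(f"{CONSOLE_API}/apps/{app_id}/export", headers=HEADERS).json()["data"]

# POST /apps/import recreates an app from that exported data
copy = requests.post(
    f"{CONSOLE_API}/apps/import",
    headers=HEADERS,
    json={"data": exported, "name": "copied app"},
).json()
print(copy)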


@ -1,13 +1,11 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
from flask_restful import Resource
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError
import services
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import (
AppUnavailableError,
AudioTooLargeError,
@ -19,11 +17,13 @@ from controllers.console.app.error import (
ProviderQuotaExceededError,
UnsupportedAudioTypeError,
)
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs.login import login_required
from models.model import AppMode
from services.audio_service import AudioService
from services.errors.audio import (
AudioTooLargeServiceError,
@ -37,16 +37,15 @@ class ChatMessageAudioApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
app_model = _get_app(app_id, 'chat')
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def post(self, app_model):
file = request.files['file']
try:
response = AudioService.transcript_asr(
tenant_id=app_model.tenant_id,
file=file
app_model=app_model,
file=file,
end_user=None,
)
return response
@ -72,7 +71,7 @@ class ChatMessageAudioApi(Resource):
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
logging.exception(f"internal server error, {str(e)}.")
raise InternalServerError()
@ -80,13 +79,13 @@ class ChatMessageTextApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
app_model = _get_app(app_id, None)
@get_app_model
def post(self, app_model):
try:
response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id,
app_model=app_model,
text=request.form['text'],
voice=request.form.get('voice'),
streaming=False
)
@ -113,9 +112,52 @@ class ChatMessageTextApi(Resource):
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
logging.exception(f"internal server error, {str(e)}.")
raise InternalServerError()
class TextModesApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
def get(self, app_model):
try:
parser = reqparse.RequestParser()
parser.add_argument('language', type=str, required=True, location='args')
args = parser.parse_args()
response = AudioService.transcript_tts_voices(
tenant_id=app_model.tenant_id,
language=args['language'],
)
return response
except services.errors.audio.ProviderNotSupportTextToSpeechLanageServiceError:
raise AppUnavailableError("Text to audio voices language parameter loss.")
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:
raise e
except Exception as e:
logging.exception(f"internal server error, {str(e)}.")
raise InternalServerError()
api.add_resource(ChatMessageAudioApi, '/apps/<uuid:app_id>/audio-to-text')
api.add_resource(ChatMessageTextApi, '/apps/<uuid:app_id>/text-to-audio')
api.add_resource(TextModesApi, '/apps/<uuid:app_id>/text-to-audio/voices')
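The new TextModesApi exposes the available TTS voices and requires a language query argument. A client sketch with the same assumed placeholders:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
app_id = "<app-uuid>"                               # placeholder

resp = requests.get(
    f"{CONSOLE_API}/apps/{app_id}/text-to-audio/voices",
    headers={"Authorization": f"Bearer {TOKEN}"},
    params={"language": "en-US"},   # required query argument
)
print(resp.json())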


@ -1,16 +1,11 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union
import flask_login
from flask import Response, stream_with_context
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import (
AppUnavailableError,
CompletionRequestError,
@ -19,15 +14,18 @@ from controllers.console.app.error import (
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.application_queue_manager import ApplicationQueueManager
from core.entities.application_entities import InvokeFrom
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from libs.helper import uuid_value
from libs.login import login_required
from services.completion_service import CompletionService
from models.model import AppMode
from services.app_generate_service import AppGenerateService
# define completion message api for user
@ -36,12 +34,8 @@ class CompletionMessageApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
# get app info
app_model = _get_app(app_id, 'completion')
@get_app_model(mode=AppMode.COMPLETION)
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, location='json', default='')
@ -57,16 +51,15 @@ class CompletionMessageApi(Resource):
account = flask_login.current_user
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=account,
args=args,
invoke_from=InvokeFrom.DEBUGGER,
streaming=streaming,
is_model_config_override=True
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -93,15 +86,11 @@ class CompletionMessageStopApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id, task_id):
app_id = str(app_id)
# get app info
_get_app(app_id, 'completion')
@get_app_model(mode=AppMode.COMPLETION)
def post(self, app_model, task_id):
account = flask_login.current_user
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
return {'result': 'success'}, 200
@ -110,12 +99,8 @@ class ChatMessageApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
# get app info
app_model = _get_app(app_id, 'chat')
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT])
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, required=True, location='json')
@ -132,16 +117,15 @@ class ChatMessageApi(Resource):
account = flask_login.current_user
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=account,
args=args,
invoke_from=InvokeFrom.DEBUGGER,
streaming=streaming,
is_model_config_override=True
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -164,31 +148,15 @@ class ChatMessageApi(Resource):
raise InternalServerError()
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
class ChatMessageStopApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id, task_id):
app_id = str(app_id)
# get app info
_get_app(app_id, 'chat')
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def post(self, app_model, task_id):
account = flask_login.current_user
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, account.id)
return {'result': 'success'}, 200
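The local compact_response helper removed above is replaced by helper.compact_generate_response. A sketch of what such a shared helper looks like, modeled on the removed code; the exact implementation in libs.helper may differ:

import json
from typing import Generator, Union

from flask import Response, stream_with_context


def compact_generate_response(response: Union[dict, Generator]) -> Response:
    if isinstance(response, dict):
        # blocking mode: return the whole result as one JSON body
        return Response(response=json.dumps(response), status=200, mimetype='application/json')

    def generate() -> Generator:
        # streaming mode: forward chunks as server-sent events
        yield from response

    return Response(stream_with_context(generate()), status=200, mimetype='text/event-stream')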


@ -9,7 +9,7 @@ from sqlalchemy.orm import joinedload
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
@ -21,7 +21,7 @@ from fields.conversation_fields import (
)
from libs.helper import datetime_string
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation
from models.model import AppMode, Conversation, Message, MessageAnnotation
class CompletionConversationApi(Resource):
@ -29,10 +29,9 @@ class CompletionConversationApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=AppMode.COMPLETION)
@marshal_with(conversation_pagination_fields)
def get(self, app_id):
app_id = str(app_id)
def get(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -43,10 +42,7 @@ class CompletionConversationApi(Resource):
parser.add_argument('limit', type=int_range(1, 100), default=20, location='args')
args = parser.parse_args()
# get app info
app = _get_app(app_id, 'completion')
query = db.select(Conversation).where(Conversation.app_id == app.id, Conversation.mode == 'completion')
query = db.select(Conversation).where(Conversation.app_id == app_model.id, Conversation.mode == 'completion')
if args['keyword']:
query = query.join(
@ -106,24 +102,22 @@ class CompletionConversationDetailApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=AppMode.COMPLETION)
@marshal_with(conversation_message_detail_fields)
def get(self, app_id, conversation_id):
app_id = str(app_id)
def get(self, app_model, conversation_id):
conversation_id = str(conversation_id)
return _get_conversation(app_id, conversation_id, 'completion')
return _get_conversation(app_model, conversation_id)
@setup_required
@login_required
@account_initialization_required
def delete(self, app_id, conversation_id):
app_id = str(app_id)
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def delete(self, app_model, conversation_id):
conversation_id = str(conversation_id)
app = _get_app(app_id, 'chat')
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
.filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
@ -139,10 +133,9 @@ class ChatConversationApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_with_summary_pagination_fields)
def get(self, app_id):
app_id = str(app_id)
def get(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -154,10 +147,7 @@ class ChatConversationApi(Resource):
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
# get app info
app = _get_app(app_id, 'chat')
query = db.select(Conversation).where(Conversation.app_id == app.id, Conversation.mode == 'chat')
query = db.select(Conversation).where(Conversation.app_id == app_model.id)
if args['keyword']:
query = query.join(
@ -211,6 +201,9 @@ class ChatConversationApi(Resource):
.having(func.count(Message.id) >= args['message_count_gte'])
)
if app_model.mode == AppMode.ADVANCED_CHAT.value:
query = query.where(Conversation.override_model_configs.is_(None))
query = query.order_by(Conversation.created_at.desc())
conversations = db.paginate(
@ -228,25 +221,22 @@ class ChatConversationDetailApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@marshal_with(conversation_detail_fields)
def get(self, app_id, conversation_id):
app_id = str(app_id)
def get(self, app_model, conversation_id):
conversation_id = str(conversation_id)
return _get_conversation(app_id, conversation_id, 'chat')
return _get_conversation(app_model, conversation_id)
@setup_required
@login_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@account_initialization_required
def delete(self, app_id, conversation_id):
app_id = str(app_id)
def delete(self, app_model, conversation_id):
conversation_id = str(conversation_id)
# get app info
app = _get_app(app_id, 'chat')
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
.filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
@ -263,12 +253,9 @@ api.add_resource(ChatConversationApi, '/apps/<uuid:app_id>/chat-conversations')
api.add_resource(ChatConversationDetailApi, '/apps/<uuid:app_id>/chat-conversations/<uuid:conversation_id>')
def _get_conversation(app_id, conversation_id, mode):
# get app info
app = _get_app(app_id, mode)
def _get_conversation(app_model, conversation_id):
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
.filter(Conversation.id == conversation_id, Conversation.app_id == app_model.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
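Browsing chat conversations for an app through ChatConversationApi, a sketch with assumed placeholders; start/end use the datetime_string('%Y-%m-%d %H:%M') format declared above:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
app_id = "<app-uuid>"                               # placeholder

resp = requests.get(
    f"{CONSOLE_API}/apps/{app_id}/chat-conversations",
    headers={"Authorization": f"Bearer {TOKEN}"},
    params={
        "keyword": "refund",
        "start": "2024-04-01 00:00",
        "limit": 20,
    },
)
print(resp.json())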


@ -85,3 +85,9 @@ class TooManyFilesError(BaseHTTPException):
error_code = 'too_many_files'
description = "Only one file is allowed."
code = 400
class DraftWorkflowNotExist(BaseHTTPException):
error_code = 'draft_workflow_not_exist'
description = "Draft workflow need to be initialized."
code = 400


@ -11,7 +11,7 @@ from controllers.console.app.error import (
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.generator.llm_generator import LLMGenerator
from core.llm_generator.llm_generator import LLMGenerator
from core.model_runtime.errors.invoke import InvokeError
from libs.login import login_required


@ -1,25 +1,22 @@
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import Resource, fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import (
AppMoreLikeThisDisabledError,
CompletionRequestError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.app.wraps import get_app_model
from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.entities.application_entities import InvokeFrom
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from extensions.ext_database import db
@ -27,12 +24,10 @@ from fields.conversation_fields import annotation_fields, message_detail_fields
from libs.helper import uuid_value
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation, MessageFeedback
from models.model import AppMode, Conversation, Message, MessageAnnotation, MessageFeedback
from services.annotation_service import AppAnnotationService
from services.completion_service import CompletionService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService
@ -45,14 +40,10 @@ class ChatMessageListApi(Resource):
@setup_required
@login_required
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
@account_initialization_required
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, app_id):
app_id = str(app_id)
# get app info
app = _get_app(app_id, 'chat')
def get(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
parser.add_argument('first_id', type=uuid_value, location='args')
@ -61,7 +52,7 @@ class ChatMessageListApi(Resource):
conversation = db.session.query(Conversation).filter(
Conversation.id == args['conversation_id'],
Conversation.app_id == app.id
Conversation.app_id == app_model.id
).first()
if not conversation:
@ -109,12 +100,8 @@ class MessageFeedbackApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
# get app info
app = _get_app(app_id)
@get_app_model
def post(self, app_model):
parser = reqparse.RequestParser()
parser.add_argument('message_id', required=True, type=uuid_value, location='json')
parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
@ -124,7 +111,7 @@ class MessageFeedbackApi(Resource):
message = db.session.query(Message).filter(
Message.id == message_id,
Message.app_id == app.id
Message.app_id == app_model.id
).first()
if not message:
@ -140,7 +127,7 @@ class MessageFeedbackApi(Resource):
raise ValueError('rating cannot be None when feedback not exists')
else:
feedback = MessageFeedback(
app_id=app.id,
app_id=app_model.id,
conversation_id=message.conversation_id,
message_id=message.id,
rating=args['rating'],
@ -159,21 +146,20 @@ class MessageAnnotationApi(Resource):
@login_required
@account_initialization_required
@cloud_edition_billing_resource_check('annotation')
@get_app_model
@marshal_with(annotation_fields)
def post(self, app_id):
def post(self, app_model):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
app_id = str(app_id)
parser = reqparse.RequestParser()
parser.add_argument('message_id', required=False, type=uuid_value, location='json')
parser.add_argument('question', required=True, type=str, location='json')
parser.add_argument('answer', required=True, type=str, location='json')
parser.add_argument('annotation_reply', required=False, type=dict, location='json')
args = parser.parse_args()
annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_id)
annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_model.id)
return annotation
@ -182,94 +168,29 @@ class MessageAnnotationCountApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
app_id = str(app_id)
# get app info
app = _get_app(app_id)
@get_app_model
def get(self, app_model):
count = db.session.query(MessageAnnotation).filter(
MessageAnnotation.app_id == app.id
MessageAnnotation.app_id == app_model.id
).count()
return {'count': count}
class MessageMoreLikeThisApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id, message_id):
app_id = str(app_id)
message_id = str(message_id)
parser = reqparse.RequestParser()
parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'],
location='args')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
# get app info
app_model = _get_app(app_id, 'completion')
try:
response = CompletionService.generate_more_like_this(
app_model=app_model,
user=current_user,
message_id=message_id,
invoke_from=InvokeFrom.DEBUGGER,
streaming=streaming
)
return compact_response(response)
except MessageNotExistsError:
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
class MessageSuggestedQuestionApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id, message_id):
app_id = str(app_id)
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def get(self, app_model, message_id):
message_id = str(message_id)
# get app info
app_model = _get_app(app_id, 'chat')
try:
questions = MessageService.get_suggested_questions_after_answer(
app_model=app_model,
message_id=message_id,
user=current_user,
check_enabled=False
invoke_from=InvokeFrom.DEBUGGER
)
except MessageNotExistsError:
raise NotFound("Message not found")
@ -283,6 +204,8 @@ class MessageSuggestedQuestionApi(Resource):
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError()
except Exception:
logging.exception("internal server error.")
raise InternalServerError()
@ -294,14 +217,11 @@ class MessageApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(message_detail_fields)
def get(self, app_id, message_id):
app_id = str(app_id)
def get(self, app_model, message_id):
message_id = str(message_id)
# get app info
app_model = _get_app(app_id)
message = db.session.query(Message).filter(
Message.id == message_id,
Message.app_id == app_model.id
@ -313,7 +233,6 @@ class MessageApi(Resource):
return message
api.add_resource(MessageMoreLikeThisApi, '/apps/<uuid:app_id>/completion-messages/<uuid:message_id>/more-like-this')
api.add_resource(MessageSuggestedQuestionApi, '/apps/<uuid:app_id>/chat-messages/<uuid:message_id>/suggested-questions')
api.add_resource(ChatMessageListApi, '/apps/<uuid:app_id>/chat-messages', endpoint='console_chat_messages')
api.add_resource(MessageFeedbackApi, '/apps/<uuid:app_id>/feedbacks')
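Submitting message feedback against the endpoint registered above, a sketch with assumed placeholders; rating must be 'like', 'dislike', or null as declared in the parser:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
app_id = "<app-uuid>"                               # placeholder
message_id = "<message-uuid>"                       # placeholder

resp = requests.post(
    f"{CONSOLE_API}/apps/{app_id}/feedbacks",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={"message_id": message_id, "rating": "like"},
)
print(resp.status_code)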


@ -1,17 +1,20 @@
# -*- coding:utf-8 -*-
import json
from flask import request
from flask_login import current_user
from flask_restful import Resource
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.agent.entities import AgentToolEntity
from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ToolParameterConfigurationManager
from events.app_event import app_model_config_was_updated
from extensions.ext_database import db
from libs.login import login_required
from models.model import AppModelConfig
from models.model import AppMode, AppModelConfig
from services.app_model_config_service import AppModelConfigService
@ -20,33 +23,113 @@ class ModelConfigResource(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
@get_app_model(mode=[AppMode.AGENT_CHAT, AppMode.CHAT, AppMode.COMPLETION])
def post(self, app_model):
"""Modify app model config"""
app_id = str(app_id)
app = _get_app(app_id)
# validate config
model_configuration = AppModelConfigService.validate_configuration(
tenant_id=current_user.current_tenant_id,
account=current_user,
config=request.json,
app_mode=app.mode
app_mode=AppMode.value_of(app_model.mode)
)
new_app_model_config = AppModelConfig(
app_id=app.id,
app_id=app_model.id,
)
new_app_model_config = new_app_model_config.from_model_config_dict(model_configuration)
if app_model.mode == AppMode.AGENT_CHAT.value or app_model.is_agent:
# get original app model config
original_app_model_config: AppModelConfig = db.session.query(AppModelConfig).filter(
AppModelConfig.id == app_model.app_model_config_id
).first()
agent_mode = original_app_model_config.agent_mode_dict
# decrypt agent tool parameters if it's secret-input
parameter_map = {}
masked_parameter_map = {}
tool_map = {}
for tool in agent_mode.get('tools') or []:
if not isinstance(tool, dict) or len(tool.keys()) <= 3:
continue
agent_tool_entity = AgentToolEntity(**tool)
# get tool
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
agent_tool=agent_tool_entity,
)
manager = ToolParameterConfigurationManager(
tenant_id=current_user.current_tenant_id,
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
)
except Exception as e:
continue
# get decrypted parameters
if agent_tool_entity.tool_parameters:
parameters = manager.decrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
masked_parameter = manager.mask_tool_parameters(parameters or {})
else:
parameters = {}
masked_parameter = {}
key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
masked_parameter_map[key] = masked_parameter
parameter_map[key] = parameters
tool_map[key] = tool_runtime
# encrypt agent tool parameters if it's secret-input
agent_mode = new_app_model_config.agent_mode_dict
for tool in agent_mode.get('tools') or []:
agent_tool_entity = AgentToolEntity(**tool)
# get tool
key = f'{agent_tool_entity.provider_id}.{agent_tool_entity.provider_type}.{agent_tool_entity.tool_name}'
if key in tool_map:
tool_runtime = tool_map[key]
else:
try:
tool_runtime = ToolManager.get_agent_tool_runtime(
tenant_id=current_user.current_tenant_id,
agent_tool=agent_tool_entity,
)
except Exception as e:
continue
manager = ToolParameterConfigurationManager(
tenant_id=current_user.current_tenant_id,
tool_runtime=tool_runtime,
provider_name=agent_tool_entity.provider_id,
provider_type=agent_tool_entity.provider_type,
)
manager.delete_tool_parameters_cache()
# override parameters if it equals to masked parameters
if agent_tool_entity.tool_parameters:
if key not in masked_parameter_map:
continue
if agent_tool_entity.tool_parameters == masked_parameter_map[key]:
agent_tool_entity.tool_parameters = parameter_map[key]
# encrypt parameters
if agent_tool_entity.tool_parameters:
tool['tool_parameters'] = manager.encrypt_tool_parameters(agent_tool_entity.tool_parameters or {})
# update app model config
new_app_model_config.agent_mode = json.dumps(agent_mode)
db.session.add(new_app_model_config)
db.session.flush()
app.app_model_config_id = new_app_model_config.id
app_model.app_model_config_id = new_app_model_config.id
db.session.commit()
app_model_config_was_updated.send(
app,
app_model,
app_model_config=new_app_model_config
)
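The block above decrypts the stored secret-input tool parameters, builds their masked echoes, and on save keeps the previously stored secret whenever the submitted value still equals the mask; only genuinely new values are re-encrypted. A stand-alone sketch of that decision flow, where mask_value, decrypt_value and encrypt_value are hypothetical stand-ins for the ToolParameterConfigurationManager methods:

def mask_value(plaintext: str) -> str:
    # hypothetical masking: keep a short prefix, hide the rest
    return plaintext[:2] + "*" * max(len(plaintext) - 2, 0)


def resolve_secret(submitted: str, stored_ciphertext: str, decrypt_value, encrypt_value) -> str:
    stored_plaintext = decrypt_value(stored_ciphertext)
    if submitted == mask_value(stored_plaintext):
        # the client echoed the mask back unchanged: keep the existing secret
        return stored_ciphertext
    # the client typed a new value: encrypt and store it
    return encrypt_value(submitted)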


@ -1,11 +1,10 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound
from constants.languages import supported_language
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
@ -35,13 +34,11 @@ class AppSite(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_site_fields)
def post(self, app_id):
def post(self, app_model):
args = parse_app_site_args()
app_id = str(app_id)
app_model = _get_app(app_id)
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
@ -83,11 +80,9 @@ class AppSiteAccessTokenReset(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
@marshal_with(app_site_fields)
def post(self, app_id):
app_id = str(app_id)
app_model = _get_app(app_id)
def post(self, app_model):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()


@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from datetime import datetime
from decimal import Decimal
@ -8,12 +7,13 @@ from flask_login import current_user
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import datetime_string
from libs.login import login_required
from models.model import AppMode
class DailyConversationStatistic(Resource):
@ -21,10 +21,9 @@ class DailyConversationStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -82,10 +81,9 @@ class DailyTerminalsStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -142,10 +140,9 @@ class DailyTokenCostStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -206,10 +203,9 @@ class AverageSessionInteractionStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT])
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id, 'chat')
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -272,10 +268,9 @@ class UserSatisfactionRateStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -335,10 +330,9 @@ class AverageResponseTimeStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model(mode=AppMode.COMPLETION)
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id, 'completion')
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
@ -397,10 +391,9 @@ class TokensPerSecondStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, app_id):
@get_app_model
def get(self, app_model):
account = current_user
app_id = str(app_id)
app_model = _get_app(app_id)
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')


@ -0,0 +1,324 @@
import json
import logging
from flask import abort, request
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.workflow_fields import workflow_fields
from fields.workflow_run_fields import workflow_run_node_execution_fields
from libs import helper
from libs.helper import TimestampField, uuid_value
from libs.login import current_user, login_required
from models.model import App, AppMode
from services.app_generate_service import AppGenerateService
from services.workflow_service import WorkflowService
logger = logging.getLogger(__name__)
class DraftWorkflowApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_fields)
def get(self, app_model: App):
"""
Get draft workflow
"""
# fetch draft workflow by app_model
workflow_service = WorkflowService()
workflow = workflow_service.get_draft_workflow(app_model=app_model)
if not workflow:
raise DraftWorkflowNotExist()
# return workflow, if not found, return None (initiate graph by frontend)
return workflow
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
def post(self, app_model: App):
"""
Sync draft workflow
"""
content_type = request.headers.get('Content-Type')
if 'application/json' in content_type:
parser = reqparse.RequestParser()
parser.add_argument('graph', type=dict, required=True, nullable=False, location='json')
parser.add_argument('features', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
elif 'text/plain' in content_type:
try:
data = json.loads(request.data.decode('utf-8'))
if 'graph' not in data or 'features' not in data:
raise ValueError('graph or features not found in data')
if not isinstance(data.get('graph'), dict) or not isinstance(data.get('features'), dict):
raise ValueError('graph or features is not a dict')
args = {
'graph': data.get('graph'),
'features': data.get('features')
}
except json.JSONDecodeError:
return {'message': 'Invalid JSON data'}, 400
else:
abort(415)
workflow_service = WorkflowService()
workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=args.get('graph'),
features=args.get('features'),
account=current_user
)
return {
"result": "success",
"updated_at": TimestampField().format(workflow.updated_at or workflow.created_at)
}
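Syncing a draft workflow graph from a client, a sketch with assumed placeholders; the endpoint accepts application/json (or text/plain carrying the same JSON), and the graph below is deliberately empty for illustration:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
app_id = "<app-uuid>"                               # placeholder

payload = {
    "graph": {"nodes": [], "edges": []},   # minimal illustrative graph
    "features": {},
}
resp = requests.post(
    f"{CONSOLE_API}/apps/{app_id}/workflows/draft",
    headers={"Authorization": f"Bearer {TOKEN}", "Content-Type": "application/json"},
    json=payload,
)
print(resp.json())   # {"result": "success", "updated_at": ...}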
class AdvancedChatDraftWorkflowRunApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT])
def post(self, app_model: App):
"""
Run draft workflow
"""
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
parser.add_argument('query', type=str, required=True, location='json', default='')
parser.add_argument('files', type=list, location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
invoke_from=InvokeFrom.DEBUGGER,
streaming=True
)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class DraftWorkflowRunApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.WORKFLOW])
def post(self, app_model: App):
"""
Run draft workflow
"""
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
parser.add_argument('files', type=list, required=False, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
invoke_from=InvokeFrom.DEBUGGER,
streaming=True
)
return helper.compact_generate_response(response)
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class WorkflowTaskStopApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
def post(self, app_model: App, task_id: str):
"""
Stop workflow task
"""
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, current_user.id)
return {
"result": "success"
}
class DraftWorkflowNodeRunApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_run_node_execution_fields)
def post(self, app_model: App, node_id: str):
"""
Run draft workflow node
"""
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
workflow_service = WorkflowService()
workflow_node_execution = workflow_service.run_draft_workflow_node(
app_model=app_model,
node_id=node_id,
user_inputs=args.get('inputs'),
account=current_user
)
return workflow_node_execution
class PublishedWorkflowApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_fields)
def get(self, app_model: App):
"""
Get published workflow
"""
# fetch published workflow by app_model
workflow_service = WorkflowService()
workflow = workflow_service.get_published_workflow(app_model=app_model)
# return workflow, if not found, return None
return workflow
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
def post(self, app_model: App):
"""
Publish workflow
"""
workflow_service = WorkflowService()
workflow = workflow_service.publish_workflow(app_model=app_model, account=current_user)
return {
"result": "success",
"created_at": TimestampField().format(workflow.created_at)
}
class DefaultBlockConfigsApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
def get(self, app_model: App):
"""
Get default block config
"""
# Get default block configs
workflow_service = WorkflowService()
return workflow_service.get_default_block_configs()
class DefaultBlockConfigApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
def get(self, app_model: App, block_type: str):
"""
Get default block config
"""
parser = reqparse.RequestParser()
parser.add_argument('q', type=str, location='args')
args = parser.parse_args()
filters = None
if args.get('q'):
try:
filters = json.loads(args.get('q'))
except json.JSONDecodeError:
raise ValueError('Invalid filters')
# Get default block configs
workflow_service = WorkflowService()
return workflow_service.get_default_block_config(
node_type=block_type,
filters=filters
)
class ConvertToWorkflowApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.CHAT, AppMode.COMPLETION])
def post(self, app_model: App):
"""
Convert basic mode of chatbot app to workflow mode
Convert expert mode of chatbot app to workflow mode
Convert Completion App to Workflow App
"""
if request.data:
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=False, nullable=True, location='json')
parser.add_argument('icon', type=str, required=False, nullable=True, location='json')
parser.add_argument('icon_background', type=str, required=False, nullable=True, location='json')
args = parser.parse_args()
else:
args = {}
# convert to workflow mode
workflow_service = WorkflowService()
new_app_model = workflow_service.convert_to_workflow(
app_model=app_model,
account=current_user,
args=args
)
# return app id
return {
'new_app_id': new_app_model.id,
}
api.add_resource(DraftWorkflowApi, '/apps/<uuid:app_id>/workflows/draft')
api.add_resource(AdvancedChatDraftWorkflowRunApi, '/apps/<uuid:app_id>/advanced-chat/workflows/draft/run')
api.add_resource(DraftWorkflowRunApi, '/apps/<uuid:app_id>/workflows/draft/run')
api.add_resource(WorkflowTaskStopApi, '/apps/<uuid:app_id>/workflow-runs/tasks/<string:task_id>/stop')
api.add_resource(DraftWorkflowNodeRunApi, '/apps/<uuid:app_id>/workflows/draft/nodes/<string:node_id>/run')
api.add_resource(PublishedWorkflowApi, '/apps/<uuid:app_id>/workflows/publish')
api.add_resource(DefaultBlockConfigsApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs')
api.add_resource(DefaultBlockConfigApi, '/apps/<uuid:app_id>/workflows/default-workflow-block-configs'
'/<string:block_type>')
api.add_resource(ConvertToWorkflowApi, '/apps/<uuid:app_id>/convert-to-workflow')
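Publishing the current draft and then test-running it from the debugger, a sketch with the same assumed placeholders; the draft run endpoint streams server-sent events, so the response is read incrementally:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
HEADERS = {"Authorization": f"Bearer {TOKEN}"}
app_id = "<workflow-app-uuid>"                      # placeholder

requests.post(f"{CONSOLE_API}/apps/{app_id}/workflows/publish", headers=HEADERS)

with requests.post(
    f"{CONSOLE_API}/apps/{app_id}/workflows/draft/run",
    headers=HEADERS,
    json={"inputs": {"query": "hello"}},   # illustrative inputs only
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode("utf-8"))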


@ -0,0 +1,41 @@
from flask_restful import Resource, marshal_with, reqparse
from flask_restful.inputs import int_range
from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
from libs.login import login_required
from models.model import App, AppMode
from services.workflow_app_service import WorkflowAppService
class WorkflowAppLogApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.WORKFLOW])
@marshal_with(workflow_app_log_pagination_fields)
def get(self, app_model: App):
"""
Get workflow app logs
"""
parser = reqparse.RequestParser()
parser.add_argument('keyword', type=str, location='args')
parser.add_argument('status', type=str, choices=['succeeded', 'failed', 'stopped'], location='args')
parser.add_argument('page', type=int_range(1, 99999), default=1, location='args')
parser.add_argument('limit', type=int_range(1, 100), default=20, location='args')
args = parser.parse_args()
# get paginate workflow app logs
workflow_app_service = WorkflowAppService()
workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs(
app_model=app_model,
args=args
)
return workflow_app_log_pagination
api.add_resource(WorkflowAppLogApi, '/apps/<uuid:app_id>/workflow-app-logs')
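Paging through workflow app logs with the status filter declared above, a sketch with assumed placeholders:

import requests

CONSOLE_API = "http://localhost:5001/console/api"  # assumed
TOKEN = "<console-access-token>"                    # placeholder
app_id = "<workflow-app-uuid>"                      # placeholder

resp = requests.get(
    f"{CONSOLE_API}/apps/{app_id}/workflow-app-logs",
    headers={"Authorization": f"Bearer {TOKEN}"},
    params={"status": "failed", "page": 1, "limit": 20},
)
print(resp.json())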


@ -0,0 +1,109 @@
from flask_restful import Resource, marshal_with, reqparse
from flask_restful.inputs import int_range
from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_run_fields import (
advanced_chat_workflow_run_pagination_fields,
workflow_run_detail_fields,
workflow_run_node_execution_list_fields,
workflow_run_pagination_fields,
)
from libs.helper import uuid_value
from libs.login import login_required
from models.model import App, AppMode
from services.workflow_run_service import WorkflowRunService
class AdvancedChatAppWorkflowRunListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT])
@marshal_with(advanced_chat_workflow_run_pagination_fields)
def get(self, app_model: App):
"""
Get advanced chat app workflow run list
"""
parser = reqparse.RequestParser()
parser.add_argument('last_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
workflow_run_service = WorkflowRunService()
result = workflow_run_service.get_paginate_advanced_chat_workflow_runs(
app_model=app_model,
args=args
)
return result
class WorkflowRunListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_run_pagination_fields)
def get(self, app_model: App):
"""
Get workflow run list
"""
parser = reqparse.RequestParser()
parser.add_argument('last_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
workflow_run_service = WorkflowRunService()
result = workflow_run_service.get_paginate_workflow_runs(
app_model=app_model,
args=args
)
return result
class WorkflowRunDetailApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_run_detail_fields)
def get(self, app_model: App, run_id):
"""
Get workflow run detail
"""
run_id = str(run_id)
workflow_run_service = WorkflowRunService()
workflow_run = workflow_run_service.get_workflow_run(app_model=app_model, run_id=run_id)
return workflow_run
class WorkflowRunNodeExecutionListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW])
@marshal_with(workflow_run_node_execution_list_fields)
def get(self, app_model: App, run_id):
"""
Get workflow run node execution list
"""
run_id = str(run_id)
workflow_run_service = WorkflowRunService()
node_executions = workflow_run_service.get_workflow_run_node_executions(app_model=app_model, run_id=run_id)
return {
'data': node_executions
}
api.add_resource(AdvancedChatAppWorkflowRunListApi, '/apps/<uuid:app_id>/advanced-chat/workflow-runs')
api.add_resource(WorkflowRunListApi, '/apps/<uuid:app_id>/workflow-runs')
api.add_resource(WorkflowRunDetailApi, '/apps/<uuid:app_id>/workflow-runs/<uuid:run_id>')
api.add_resource(WorkflowRunNodeExecutionListApi, '/apps/<uuid:app_id>/workflow-runs/<uuid:run_id>/node-executions')
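A short sketch of paging through workflow runs and drilling into a single run via the routes above; last_id acts as a cursor (UUID of the last run already fetched) and limit is capped at 100. The base URL, token and ids are hypothetical, and the sketch assumes the pagination payload exposes a data list of runs with an id field.
import requests

CONSOLE_API = 'http://localhost:5001/console/api'  # assumed console API base URL
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # hypothetical auth token
APP_ID = '<app-uuid>'  # hypothetical app id

# First page of runs (cursor-style pagination via last_id).
runs = requests.get(f'{CONSOLE_API}/apps/{APP_ID}/workflow-runs',
                    headers=HEADERS, params={'limit': 20}).json()

# Fetch detail and node executions for the first run, if any.
if runs.get('data'):
    run_id = runs['data'][0]['id']
    detail = requests.get(f'{CONSOLE_API}/apps/{APP_ID}/workflow-runs/{run_id}',
                          headers=HEADERS).json()
    nodes = requests.get(f'{CONSOLE_API}/apps/{APP_ID}/workflow-runs/{run_id}/node-executions',
                         headers=HEADERS).json()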

View File

@ -0,0 +1,278 @@
from datetime import datetime
from decimal import Decimal
import pytz
from flask import jsonify
from flask_login import current_user
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import datetime_string
from libs.login import login_required
from models.model import AppMode
from models.workflow import WorkflowRunTriggeredFrom
class WorkflowDailyRunsStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
def get(self, app_model):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
response_data = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
for i in rs:
response_data.append({
'date': str(i.date),
'runs': i.runs
})
return jsonify({
'data': response_data
})
class WorkflowDailyTerminalsStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
def get(self, app_model):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
response_data = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
for i in rs:
response_data.append({
'date': str(i.date),
'terminal_count': i.terminal_count
})
return jsonify({
'data': response_data
})
class WorkflowDailyTokenCostStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model
def get(self, app_model):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = '''
SELECT
date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
SUM(workflow_runs.total_tokens) as token_count
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
'''
arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at >= :start'
arg_dict['start'] = start_datetime_utc
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += ' and created_at < :end'
arg_dict['end'] = end_datetime_utc
sql_query += ' GROUP BY date order by date'
response_data = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
for i in rs:
response_data.append({
'date': str(i.date),
'token_count': i.token_count,
})
return jsonify({
'data': response_data
})
class WorkflowAverageAppInteractionStatistic(Resource):
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.WORKFLOW])
def get(self, app_model):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
args = parser.parse_args()
sql_query = """
SELECT
AVG(sub.interactions) as interactions,
sub.date
FROM
(SELECT
date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
c.created_by,
COUNT(c.id) AS interactions
FROM workflow_runs c
WHERE c.app_id = :app_id
AND c.triggered_from = :triggered_from
{{start}}
{{end}}
GROUP BY date, c.created_by) sub
GROUP BY sub.date
"""
arg_dict = {'tz': account.timezone, 'app_id': app_model.id, 'triggered_from': WorkflowRunTriggeredFrom.APP_RUN.value}
timezone = pytz.timezone(account.timezone)
utc_timezone = pytz.utc
if args['start']:
start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
start_datetime = start_datetime.replace(second=0)
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query = sql_query.replace('{{start}}', ' AND c.created_at >= :start')
arg_dict['start'] = start_datetime_utc
else:
sql_query = sql_query.replace('{{start}}', '')
if args['end']:
end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
end_datetime = end_datetime.replace(second=0)
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query = sql_query.replace('{{end}}', ' and c.created_at < :end')
arg_dict['end'] = end_datetime_utc
else:
sql_query = sql_query.replace('{{end}}', '')
response_data = []
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql_query), arg_dict)
for i in rs:
response_data.append({
'date': str(i.date),
'interactions': float(i.interactions.quantize(Decimal('0.01')))
})
return jsonify({
'data': response_data
})
api.add_resource(WorkflowDailyRunsStatistic, '/apps/<uuid:app_id>/workflow/statistics/daily-conversations')
api.add_resource(WorkflowDailyTerminalsStatistic, '/apps/<uuid:app_id>/workflow/statistics/daily-terminals')
api.add_resource(WorkflowDailyTokenCostStatistic, '/apps/<uuid:app_id>/workflow/statistics/token-costs')
api.add_resource(WorkflowAverageAppInteractionStatistic, '/apps/<uuid:app_id>/workflow/statistics/average-app-interactions')
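All four statistics resources above parse start/end as '%Y-%m-%d %H:%M' strings in the account's timezone and convert them to UTC before filtering. A minimal sketch of querying one of them, with a hypothetical base URL, token and app id:
import requests

CONSOLE_API = 'http://localhost:5001/console/api'  # assumed console API base URL
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # hypothetical auth token

resp = requests.get(
    f'{CONSOLE_API}/apps/<app-uuid>/workflow/statistics/daily-conversations',
    headers=HEADERS,
    params={'start': '2024-04-01 00:00', 'end': '2024-04-03 00:00'},  # window in the account's local time
)
print(resp.json()['data'])  # e.g. [{'date': '2024-04-01', 'runs': 12}, ...]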

View File

@ -0,0 +1,55 @@
from collections.abc import Callable
from functools import wraps
from typing import Optional, Union
from controllers.console.app.error import AppNotFoundError
from extensions.ext_database import db
from libs.login import current_user
from models.model import App, AppMode
def get_app_model(view: Optional[Callable] = None, *,
mode: Optional[Union[AppMode, list[AppMode]]] = None):
def decorator(view_func):
@wraps(view_func)
def decorated_view(*args, **kwargs):
if not kwargs.get('app_id'):
raise ValueError('missing app_id in path parameters')
app_id = kwargs.get('app_id')
app_id = str(app_id)
del kwargs['app_id']
app_model = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == current_user.current_tenant_id,
App.status == 'normal'
).first()
if not app_model:
raise AppNotFoundError()
app_mode = AppMode.value_of(app_model.mode)
if app_mode == AppMode.CHANNEL:
raise AppNotFoundError()
if mode is not None:
if isinstance(mode, list):
modes = mode
else:
modes = [mode]
if app_mode not in modes:
mode_values = {m.value for m in modes}
raise AppNotFoundError(f"App mode is not in the supported list: {mode_values}")
kwargs['app_model'] = app_model
return view_func(*args, **kwargs)
return decorated_view
if view is None:
return decorator
else:
return decorator(view)
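A minimal sketch of how a console resource applies this decorator, based on the usages in the files above: it resolves app_id from the path, checks tenant ownership and status, optionally restricts the app mode, and injects the loaded App row as app_model. The resource names below are hypothetical.
from flask_restful import Resource

from controllers.console.app.wraps import get_app_model
from models.model import App, AppMode


class ExampleWorkflowOnlyApi(Resource):  # hypothetical resource
    @get_app_model(mode=[AppMode.WORKFLOW])  # raises AppNotFoundError unless the app is a workflow app
    def get(self, app_model: App):
        # app_id was consumed by the decorator; the loaded App row is injected instead.
        return {'app_id': app_model.id, 'mode': app_model.mode}


class ExampleAnyModeApi(Resource):  # hypothetical resource
    @get_app_model  # bare usage: no mode restriction
    def get(self, app_model: App):
        return {'name': app_model.name}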

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import flask_login
from flask import current_app, request
from flask_restful import Resource, reqparse
@ -8,7 +7,7 @@ from controllers.console import api
from controllers.console.setup import setup_required
from libs.helper import email
from libs.password import valid_password
from services.account_service import AccountService
from services.account_service import AccountService, TenantService
class LoginApi(Resource):
@ -30,6 +29,8 @@ class LoginApi(Resource):
except services.errors.account.AccountLoginError:
return {'code': 'unauthorized', 'message': 'Invalid email or password'}, 401
TenantService.create_owner_tenant_if_not_exist(account)
AccountService.update_last_login(account, request)
# todo: return the user info

View File

@ -10,7 +10,7 @@ from constants.languages import languages
from extensions.ext_database import db
from libs.oauth import GitHubOAuth, GoogleOAuth, OAuthUserInfo
from models.account import Account, AccountStatus
from services.account_service import AccountService, RegisterService
from services.account_service import AccountService, RegisterService, TenantService
from .. import api
@ -76,6 +76,8 @@ class OAuthCallback(Resource):
account.initialized_at = datetime.utcnow()
db.session.commit()
TenantService.create_owner_tenant_if_not_exist(account)
AccountService.update_last_login(account, request)
token = AccountService.get_account_jwt_token(account)

View File

@ -9,8 +9,9 @@ from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.data_loader.loader.notion import NotionLoader
from core.indexing_runner import IndexingRunner
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.extractor.notion_extractor import NotionExtractor
from extensions.ext_database import db
from fields.data_source_fields import integrate_list_fields, integrate_notion_info_list_fields
from libs.login import login_required
@ -173,14 +174,15 @@ class DataSourceNotionApi(Resource):
if not data_source_binding:
raise NotFound('Data source binding not found.')
loader = NotionLoader(
notion_access_token=data_source_binding.access_token,
extractor = NotionExtractor(
notion_workspace_id=workspace_id,
notion_obj_id=page_id,
notion_page_type=page_type
notion_page_type=page_type,
notion_access_token=data_source_binding.access_token,
tenant_id=current_user.current_tenant_id
)
text_docs = loader.load()
text_docs = extractor.extract()
return {
'content': "\n".join([doc.page_content for doc in text_docs])
}, 200
@ -192,11 +194,31 @@ class DataSourceNotionApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('notion_info_list', type=list, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
args = parser.parse_args()
# validate args
DocumentService.estimate_args_validate(args)
notion_info_list = args['notion_info_list']
extract_settings = []
for notion_info in notion_info_list:
workspace_id = notion_info['workspace_id']
for page in notion_info['pages']:
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": workspace_id,
"notion_obj_id": page['page_id'],
"notion_page_type": page['type'],
"tenant_id": current_user.current_tenant_id
},
document_model=args['doc_form']
)
extract_settings.append(extract_setting)
indexing_runner = IndexingRunner()
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id, args['notion_info_list'], args['process_rule'])
response = indexing_runner.indexing_estimate(current_user.current_tenant_id, extract_settings,
args['process_rule'], args['doc_form'],
args['doc_language'])
return response, 200

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import flask_restful
from flask import current_app, request
from flask_login import current_user
@ -16,6 +15,7 @@ from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
@ -179,9 +179,9 @@ class DatasetApi(Resource):
location='json', store_missing=False,
type=_validate_description_length)
parser.add_argument('indexing_technique', type=str, location='json',
choices=Dataset.INDEXING_TECHNIQUE_LIST,
nullable=True,
help='Invalid indexing technique.')
choices=Dataset.INDEXING_TECHNIQUE_LIST,
nullable=True,
help='Invalid indexing technique.')
parser.add_argument('permission', type=str, location='json', choices=(
'only_me', 'all_team_members'), help='Invalid permission.')
parser.add_argument('retrieval_model', type=dict, location='json', help='Invalid retrieval model.')
@ -259,7 +259,7 @@ class DatasetIndexingEstimateApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('info_list', type=dict, required=True, nullable=True, location='json')
parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
parser.add_argument('indexing_technique', type=str, required=True,
parser.add_argument('indexing_technique', type=str, required=True,
choices=Dataset.INDEXING_TECHNIQUE_LIST,
nullable=True, location='json')
parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
@ -269,6 +269,7 @@ class DatasetIndexingEstimateApi(Resource):
args = parser.parse_args()
# validate args
DocumentService.estimate_args_validate(args)
extract_settings = []
if args['info_list']['data_source_type'] == 'upload_file':
file_ids = args['info_list']['file_info_list']['file_ids']
file_details = db.session.query(UploadFile).filter(
@ -279,37 +280,45 @@ class DatasetIndexingEstimateApi(Resource):
if file_details is None:
raise NotFound("File not found.")
indexing_runner = IndexingRunner()
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
args['process_rule'], args['doc_form'],
args['doc_language'], args['dataset_id'],
args['indexing_technique'])
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
if file_details:
for file_detail in file_details:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=args['doc_form']
)
extract_settings.append(extract_setting)
elif args['info_list']['data_source_type'] == 'notion_import':
indexing_runner = IndexingRunner()
try:
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
args['info_list']['notion_info_list'],
args['process_rule'], args['doc_form'],
args['doc_language'], args['dataset_id'],
args['indexing_technique'])
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
notion_info_list = args['info_list']['notion_info_list']
for notion_info in notion_info_list:
workspace_id = notion_info['workspace_id']
for page in notion_info['pages']:
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": workspace_id,
"notion_obj_id": page['page_id'],
"notion_page_type": page['type'],
"tenant_id": current_user.current_tenant_id
},
document_model=args['doc_form']
)
extract_settings.append(extract_setting)
else:
raise ValueError('Data source type not support')
indexing_runner = IndexingRunner()
try:
response = indexing_runner.indexing_estimate(current_user.current_tenant_id, extract_settings,
args['process_rule'], args['doc_form'],
args['doc_language'], args['dataset_id'],
args['indexing_technique'])
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
return response, 200
@ -509,4 +518,3 @@ api.add_resource(DatasetApiDeleteApi, '/datasets/api-keys/<uuid:api_key_id>')
api.add_resource(DatasetApiBaseUrlApi, '/datasets/api-base-info')
api.add_resource(DatasetRetrievalSettingApi, '/datasets/retrieval-setting')
api.add_resource(DatasetRetrievalSettingMockApi, '/datasets/retrieval-setting/<string:vector_type>')

View File

@ -1,6 +1,4 @@
# -*- coding:utf-8 -*-
from datetime import datetime
from typing import List
from flask import request
from flask_login import current_user
@ -34,6 +32,7 @@ from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.document_fields import (
@ -71,7 +70,7 @@ class DocumentResource(Resource):
return document
def get_batch_documents(self, dataset_id: str, batch: str) -> List[Document]:
def get_batch_documents(self, dataset_id: str, batch: str) -> list[Document]:
dataset = DatasetService.get_dataset(dataset_id)
if not dataset:
raise NotFound('Dataset not found.')
@ -97,7 +96,7 @@ class GetProcessRuleApi(Resource):
req_data = request.args
document_id = req_data.get('document_id')
# get default rules
mode = DocumentService.DEFAULT_RULES['mode']
rules = DocumentService.DEFAULT_RULES['rules']
@ -296,8 +295,8 @@ class DatasetInitApi(Resource):
)
except InvokeAuthorizationError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -364,16 +363,22 @@ class DocumentIndexingEstimateApi(DocumentResource):
if not file:
raise NotFound('File not found.')
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file,
document_model=document.doc_form
)
indexing_runner = IndexingRunner()
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
data_process_rule_dict, None,
'English', dataset_id)
response = indexing_runner.indexing_estimate(current_user.current_tenant_id, [extract_setting],
data_process_rule_dict, document.doc_form,
'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -404,6 +409,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
data_process_rule = documents[0].dataset_process_rule
data_process_rule_dict = data_process_rule.to_dict()
info_list = []
extract_settings = []
for document in documents:
if document.indexing_status in ['completed', 'error']:
raise DocumentAlreadyFinishedError()
@ -426,42 +432,49 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
}
info_list.append(notion_info)
if dataset.data_source_type == 'upload_file':
file_details = db.session.query(UploadFile).filter(
UploadFile.tenant_id == current_user.current_tenant_id,
UploadFile.id.in_(info_list)
).all()
if document.data_source_type == 'upload_file':
file_id = data_source_info['upload_file_id']
file_detail = db.session.query(UploadFile).filter(
UploadFile.tenant_id == current_user.current_tenant_id,
UploadFile.id == file_id
).first()
if file_details is None:
raise NotFound("File not found.")
if file_detail is None:
raise NotFound("File not found.")
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=document.doc_form
)
extract_settings.append(extract_setting)
elif document.data_source_type == 'notion_import':
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"tenant_id": current_user.current_tenant_id
},
document_model=document.doc_form
)
extract_settings.append(extract_setting)
else:
raise ValueError('Data source type not support')
indexing_runner = IndexingRunner()
try:
response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
data_process_rule_dict, None,
'English', dataset_id)
response = indexing_runner.indexing_estimate(current_user.current_tenant_id, extract_settings,
data_process_rule_dict, document.doc_form,
'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
elif dataset.data_source_type == 'notion_import':
indexing_runner = IndexingRunner()
try:
response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
info_list,
data_process_rule_dict,
None, 'English', dataset_id)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
else:
raise ValueError('Data source type not support')
return response

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import uuid
from datetime import datetime
@ -13,7 +12,11 @@ from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import InvalidActionError, NoFileUploadedError, TooManyFilesError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from controllers.console.wraps import (
account_initialization_required,
cloud_edition_billing_knowledge_limit_check,
cloud_edition_billing_resource_check,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
@ -143,8 +146,8 @@ class DatasetDocumentSegmentApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -208,6 +211,7 @@ class DatasetDocumentSegmentAddApi(Resource):
@login_required
@account_initialization_required
@cloud_edition_billing_resource_check('vector_space')
@cloud_edition_billing_knowledge_limit_check('add_segment')
def post(self, dataset_id, document_id):
# check dataset
dataset_id = str(dataset_id)
@ -234,8 +238,8 @@ class DatasetDocumentSegmentAddApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
try:
@ -286,8 +290,8 @@ class DatasetDocumentSegmentUpdateApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# check segment
@ -358,6 +362,7 @@ class DatasetDocumentSegmentBatchImportApi(Resource):
@login_required
@account_initialization_required
@cloud_edition_billing_resource_check('vector_space')
@cloud_edition_billing_knowledge_limit_check('add_segment')
def post(self, dataset_id, document_id):
# check dataset
dataset_id = str(dataset_id)

View File

@ -11,7 +11,7 @@ from controllers.console.datasets.error import (
UnsupportedFileTypeError,
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from fields.file_fields import file_fields, upload_config_fields
from libs.login import login_required
from services.file_service import ALLOWED_EXTENSIONS, UNSTRUSTURED_ALLOWED_EXTENSIONS, FileService
@ -39,6 +39,7 @@ class FileApi(Resource):
@login_required
@account_initialization_required
@marshal_with(file_fields)
@cloud_edition_billing_resource_check(resource='documents')
def post(self):
# get file from request

View File

@ -76,8 +76,8 @@ class HitTestingApi(Resource):
raise ProviderModelCurrentlyNotSupportError()
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model or Reranking Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model or Reranking Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
@ -20,7 +19,6 @@ from controllers.console.app.error import (
from controllers.console.explore.wraps import InstalledAppResource
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from models.model import AppModelConfig
from services.audio_service import AudioService
from services.errors.audio import (
AudioTooLargeServiceError,
@ -33,16 +31,12 @@ from services.errors.audio import (
class ChatAudioApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript_asr(
tenant_id=app_model.tenant_id,
app_model=app_model,
file=file,
end_user=None
)
@ -77,15 +71,12 @@ class ChatAudioApi(InstalledAppResource):
class ChatTextApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.text_to_speech_dict['enabled']:
raise AppUnavailableError()
try:
response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id,
app_model=app_model,
text=request.form['text'],
voice=request.form.get('voice'),
streaming=False
)
return {'data': response.data.decode('latin1')}

View File

@ -1,10 +1,6 @@
# -*- coding:utf-8 -*-
import json
import logging
from datetime import datetime
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound
@ -21,13 +17,15 @@ from controllers.console.app.error import (
)
from controllers.console.explore.error import NotChatAppError, NotCompletionAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.application_queue_manager import ApplicationQueueManager
from core.entities.application_entities import InvokeFrom
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from extensions.ext_database import db
from libs import helper
from libs.helper import uuid_value
from services.completion_service import CompletionService
from models.model import AppMode
from services.app_generate_service import AppGenerateService
# define completion api for user
@ -53,7 +51,7 @@ class CompletionApi(InstalledAppResource):
db.session.commit()
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
@ -61,7 +59,7 @@ class CompletionApi(InstalledAppResource):
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -90,7 +88,7 @@ class CompletionStopApi(InstalledAppResource):
if app_model.mode != 'completion':
raise NotCompletionAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
return {'result': 'success'}, 200
@ -98,34 +96,33 @@ class CompletionStopApi(InstalledAppResource):
class ChatApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, location='json')
parser.add_argument('query', type=str, required=True, location='json')
parser.add_argument('files', type=list, required=False, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='explore_app', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
args['auto_generate_name'] = False
installed_app.last_used_at = datetime.utcnow()
db.session.commit()
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
invoke_from=InvokeFrom.EXPLORE,
streaming=streaming
streaming=True
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -151,26 +148,15 @@ class ChatApi(InstalledAppResource):
class ChatStopApi(InstalledAppResource):
def post(self, installed_app, task_id):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
return {'result': 'success'}, 200
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
api.add_resource(CompletionApi, '/installed-apps/<uuid:installed_app_id>/completion-messages', endpoint='installed_app_completion')
api.add_resource(CompletionStopApi, '/installed-apps/<uuid:installed_app_id>/completion-messages/<string:task_id>/stop', endpoint='installed_app_stop_completion')
api.add_resource(ChatApi, '/installed-apps/<uuid:installed_app_id>/chat-messages', endpoint='installed_app_chat_completion')
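A minimal sketch of posting a chat message to an installed explore app through the route above; inputs and query are required, and the updated handler always streams the chat response (streaming=True) regardless of response_mode. The base URL, token and installed app id are hypothetical placeholders.
import requests

CONSOLE_API = 'http://localhost:5001/console/api'  # assumed console API base URL
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # hypothetical auth token

resp = requests.post(
    f'{CONSOLE_API}/installed-apps/<installed-app-uuid>/chat-messages',
    headers=HEADERS,
    json={'inputs': {}, 'query': 'Hello!', 'response_mode': 'streaming'},
    stream=True,  # response is a server-sent event stream
)
for line in resp.iter_lines():
    if line:
        print(line.decode())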

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import marshal_with, reqparse
from flask_restful.inputs import int_range
@ -9,6 +8,7 @@ from controllers.console.explore.error import NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import uuid_value
from models.model import AppMode
from services.conversation_service import ConversationService
from services.errors.conversation import ConversationNotExistsError, LastConversationNotExistsError
from services.web_conversation_service import WebConversationService
@ -19,7 +19,8 @@ class ConversationListApi(InstalledAppResource):
@marshal_with(conversation_infinite_scroll_pagination_fields)
def get(self, installed_app):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -48,7 +49,8 @@ class ConversationListApi(InstalledAppResource):
class ConversationApi(InstalledAppResource):
def delete(self, installed_app, c_id):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@ -66,7 +68,8 @@ class ConversationRenameApi(InstalledAppResource):
@marshal_with(simple_conversation_fields)
def post(self, installed_app, c_id):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@ -92,7 +95,8 @@ class ConversationPinApi(InstalledAppResource):
def patch(self, installed_app, c_id):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@ -108,7 +112,8 @@ class ConversationPinApi(InstalledAppResource):
class ConversationUnPinApi(InstalledAppResource):
def patch(self, installed_app, c_id):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException
@ -10,7 +9,13 @@ class NotCompletionAppError(BaseHTTPException):
class NotChatAppError(BaseHTTPException):
error_code = 'not_chat_app'
description = "Not Chat App"
description = "App mode is invalid."
code = 400
class NotWorkflowAppError(BaseHTTPException):
error_code = 'not_workflow_app'
description = "Only support workflow app."
code = 400

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from datetime import datetime
from flask_login import current_user
@ -35,8 +34,7 @@ class InstalledAppsListApi(Resource):
'is_pinned': installed_app.is_pinned,
'last_used_at': installed_app.last_used_at,
'editable': current_user.role in ["owner", "admin"],
'uninstallable': current_tenant_id == installed_app.app_owner_tenant_id,
'is_agent': installed_app.is_agent
'uninstallable': current_tenant_id == installed_app.app_owner_tenant_id
}
for installed_app in installed_apps
]

View File

@ -1,9 +1,5 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import marshal_with, reqparse
from flask_restful.inputs import int_range
@ -24,12 +20,14 @@ from controllers.console.explore.error import (
NotCompletionAppError,
)
from controllers.console.explore.wraps import InstalledAppResource
from core.entities.application_entities import InvokeFrom
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from fields.message_fields import message_infinite_scroll_pagination_fields
from libs import helper
from libs.helper import uuid_value
from services.completion_service import CompletionService
from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
@ -41,7 +39,8 @@ class MessageListApi(InstalledAppResource):
def get(self, installed_app):
app_model = installed_app.app
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -91,14 +90,14 @@ class MessageMoreLikeThisApi(InstalledAppResource):
streaming = args['response_mode'] == 'streaming'
try:
response = CompletionService.generate_more_like_this(
response = AppGenerateService.generate_more_like_this(
app_model=app_model,
user=current_user,
message_id=message_id,
invoke_from=InvokeFrom.EXPLORE,
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except MessageNotExistsError:
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
@ -118,23 +117,12 @@ class MessageMoreLikeThisApi(InstalledAppResource):
raise InternalServerError()
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
class MessageSuggestedQuestionApi(InstalledAppResource):
def get(self, installed_app, message_id):
app_model = installed_app.app
if app_model.mode != 'chat':
raise NotCompletionAppError()
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
message_id = str(message_id)
@ -142,7 +130,8 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
questions = MessageService.get_suggested_questions_after_answer(
app_model=app_model,
user=current_user,
message_id=message_id
message_id=message_id,
invoke_from=InvokeFrom.EXPLORE
)
except MessageNotExistsError:
raise NotFound("Message not found")

View File

@ -1,14 +1,12 @@
# -*- coding:utf-8 -*-
import json
from flask import current_app
from flask_restful import fields, marshal_with
from controllers.console import api
from controllers.console.app.error import AppUnavailableError
from controllers.console.explore.wraps import InstalledAppResource
from extensions.ext_database import db
from models.model import AppModelConfig, InstalledApp
from models.tools import ApiToolProvider
from models.model import AppMode, InstalledApp
from services.app_service import AppService
class AppParameterApi(InstalledAppResource):
@ -46,61 +44,52 @@ class AppParameterApi(InstalledAppResource):
def get(self, installed_app: InstalledApp):
"""Retrieve app parameters."""
app_model = installed_app.app
app_model_config = app_model.app_model_config
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()
features_dict = workflow.features_dict
user_input_form = workflow.user_input_form(to_old_structure=True)
else:
app_model_config = app_model.app_model_config
features_dict = app_model_config.to_dict()
user_input_form = features_dict.get('user_input_form', [])
return {
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'text_to_speech': app_model_config.text_to_speech_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
'annotation_reply': app_model_config.annotation_reply_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list,
'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
'file_upload': app_model_config.file_upload_dict,
'opening_statement': features_dict.get('opening_statement'),
'suggested_questions': features_dict.get('suggested_questions', []),
'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
{"enabled": False}),
'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
'user_input_form': user_input_form,
'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
{"enabled": False, "type": "", "configs": []}),
'file_upload': features_dict.get('file_upload', {"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"]
}}),
'system_parameters': {
'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
}
}
class ExploreAppMetaApi(InstalledAppResource):
def get(self, installed_app: InstalledApp):
"""Get app meta"""
app_model_config: AppModelConfig = installed_app.app.app_model_config
app_model = installed_app.app
return AppService().get_app_meta(app_model)
agent_config = app_model_config.agent_mode_dict or {}
meta = {
'tool_icons': {}
}
# get all tools
tools = agent_config.get('tools', [])
url_prefix = (current_app.config.get("CONSOLE_API_URL")
+ f"/console/api/workspaces/current/tool-provider/builtin/")
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
# current tool standard
provider_type = tool.get('provider_type')
provider_id = tool.get('provider_id')
tool_name = tool.get('tool_name')
if provider_type == 'builtin':
meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
elif provider_type == 'api':
try:
provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
ApiToolProvider.id == provider_id
)
meta['tool_icons'][tool_name] = json.loads(provider.icon)
except:
meta['tool_icons'][tool_name] = {
"background": "#252525",
"content": "\ud83d\ude01"
}
return meta
api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters', endpoint='installed_app_parameters')
api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters',
endpoint='installed_app_parameters')
api.add_resource(ExploreAppMetaApi, '/installed-apps/<uuid:installed_app_id>/meta', endpoint='installed_app_meta')
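A minimal sketch of fetching the parameters of an installed app through the route above; for advanced-chat and workflow apps the fields now come from the published workflow's features_dict rather than an app_model_config. The base URL, token and installed app id are hypothetical placeholders.
import requests

CONSOLE_API = 'http://localhost:5001/console/api'  # assumed console API base URL
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # hypothetical auth token

params = requests.get(
    f'{CONSOLE_API}/installed-apps/<installed-app-uuid>/parameters',
    headers=HEADERS,
).json()
print(params['user_input_form'], params['file_upload'])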

View File

@ -1,16 +1,11 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import Resource, fields, marshal_with
from sqlalchemy import and_
from flask_restful import Resource, fields, marshal_with, reqparse
from constants.languages import languages
from controllers.console import api
from controllers.console.app.error import AppNotFoundError
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.login import login_required
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService
from services.recommended_app_service import RecommendedAppService
app_fields = {
'id': fields.String,
@ -28,11 +23,7 @@ recommended_app_fields = {
'privacy_policy': fields.String,
'category': fields.String,
'position': fields.Integer,
'is_listed': fields.Boolean,
'install_count': fields.Integer,
'installed': fields.Boolean,
'editable': fields.Boolean,
'is_agent': fields.Boolean
'is_listed': fields.Boolean
}
recommended_app_list_fields = {
@ -46,96 +37,27 @@ class RecommendedAppListApi(Resource):
@account_initialization_required
@marshal_with(recommended_app_list_fields)
def get(self):
language_prefix = current_user.interface_language if current_user.interface_language else languages[0]
# language args
parser = reqparse.RequestParser()
parser.add_argument('language', type=str, location='args')
args = parser.parse_args()
recommended_apps = db.session.query(RecommendedApp).filter(
RecommendedApp.is_listed == True,
RecommendedApp.language == language_prefix
).all()
if args.get('language') and args.get('language') in languages:
language_prefix = args.get('language')
elif current_user and current_user.interface_language:
language_prefix = current_user.interface_language
else:
language_prefix = languages[0]
categories = set()
current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
recommended_apps_result = []
for recommended_app in recommended_apps:
installed = db.session.query(InstalledApp).filter(
and_(
InstalledApp.app_id == recommended_app.app_id,
InstalledApp.tenant_id == current_user.current_tenant_id
)
).first() is not None
app = recommended_app.app
if not app or not app.is_public:
continue
site = app.site
if not site:
continue
recommended_app_result = {
'id': recommended_app.id,
'app': app,
'app_id': recommended_app.app_id,
'description': site.description,
'copyright': site.copyright,
'privacy_policy': site.privacy_policy,
'category': recommended_app.category,
'position': recommended_app.position,
'is_listed': recommended_app.is_listed,
'install_count': recommended_app.install_count,
'installed': installed,
'editable': current_user.role in ['owner', 'admin'],
"is_agent": app.is_agent
}
recommended_apps_result.append(recommended_app_result)
categories.add(recommended_app.category) # add category to categories
return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}
return RecommendedAppService.get_recommended_apps_and_categories(language_prefix)
class RecommendedAppApi(Resource):
model_config_fields = {
'opening_statement': fields.String,
'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
'more_like_this': fields.Raw(attribute='more_like_this_dict'),
'model': fields.Raw(attribute='model_dict'),
'user_input_form': fields.Raw(attribute='user_input_form_list'),
'pre_prompt': fields.String,
'agent_mode': fields.Raw(attribute='agent_mode_dict'),
}
app_simple_detail_fields = {
'id': fields.String,
'name': fields.String,
'icon': fields.String,
'icon_background': fields.String,
'mode': fields.String,
'app_model_config': fields.Nested(model_config_fields),
}
@login_required
@account_initialization_required
@marshal_with(app_simple_detail_fields)
def get(self, app_id):
app_id = str(app_id)
# is in public recommended list
recommended_app = db.session.query(RecommendedApp).filter(
RecommendedApp.is_listed == True,
RecommendedApp.app_id == app_id
).first()
if not recommended_app:
raise AppNotFoundError
# get app detail
app = db.session.query(App).filter(App.id == app_id).first()
if not app or not app.is_public:
raise AppNotFoundError
return app
return RecommendedAppService.get_recommend_app_detail(app_id)
api.add_resource(RecommendedAppListApi, '/explore/apps')

View File

@ -0,0 +1,85 @@
import logging
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError
from controllers.console import api
from controllers.console.app.error import (
CompletionRequestError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.explore.error import NotWorkflowAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from libs.login import current_user
from models.model import AppMode, InstalledApp
from services.app_generate_service import AppGenerateService
logger = logging.getLogger(__name__)
class InstalledAppWorkflowRunApi(InstalledAppResource):
def post(self, installed_app: InstalledApp):
"""
Run workflow
"""
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
parser.add_argument('files', type=list, required=False, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate(
app_model=app_model,
user=current_user,
args=args,
invoke_from=InvokeFrom.EXPLORE,
streaming=True
)
return helper.compact_generate_response(response)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:
raise e
except Exception as e:
logger.exception("internal server error.")
raise InternalServerError()
class InstalledAppWorkflowTaskStopApi(InstalledAppResource):
def post(self, installed_app: InstalledApp, task_id: str):
"""
Stop workflow task
"""
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)
return {
"result": "success"
}
api.add_resource(InstalledAppWorkflowRunApi, '/installed-apps/<uuid:installed_app_id>/workflows/run')
api.add_resource(InstalledAppWorkflowTaskStopApi, '/installed-apps/<uuid:installed_app_id>/workflows/tasks/<string:task_id>/stop')
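A minimal sketch of triggering a workflow run for an installed explore app and stopping an in-flight task via the two routes above; inputs is required, its keys depend on the workflow's start variables, and the response is a streamed event stream. The base URL, token, ids and task id are hypothetical placeholders (the task id would normally be read from the streamed events).
import requests

CONSOLE_API = 'http://localhost:5001/console/api'  # assumed console API base URL
HEADERS = {'Authorization': 'Bearer <console-session-token>'}  # hypothetical auth token
INSTALLED_APP_ID = '<installed-app-uuid>'  # hypothetical installed app id

run = requests.post(
    f'{CONSOLE_API}/installed-apps/{INSTALLED_APP_ID}/workflows/run',
    headers=HEADERS,
    json={'inputs': {'query': 'hello'}, 'files': []},
    stream=True,  # streamed workflow events
)

# Later, stop a running task.
requests.post(
    f'{CONSOLE_API}/installed-apps/{INSTALLED_APP_ID}/workflows/tasks/<task-id>/stop',
    headers=HEADERS,
)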

View File

@ -0,0 +1,17 @@
from flask_restful import Resource
from controllers.console import api
class PingApi(Resource):
def get(self):
"""
For connection health check
"""
return {
"result": "pong"
}
api.add_resource(PingApi, '/ping')
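A one-line health check against the new ping route, assuming a local console API base URL; the handler takes no auth decorators and returns {"result": "pong"} as shown above.
import requests

assert requests.get('http://localhost:5001/console/api/ping').json() == {'result': 'pong'}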

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from functools import wraps
from flask import current_app, request

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import json
import logging

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from datetime import datetime
import pytz
@ -17,26 +16,13 @@ from controllers.console.workspace.error import (
)
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from fields.member_fields import account_fields
from libs.helper import TimestampField, timezone
from libs.login import login_required
from models.account import AccountIntegrate, InvitationCode
from services.account_service import AccountService
from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError
account_fields = {
'id': fields.String,
'name': fields.String,
'avatar': fields.String,
'email': fields.String,
'is_password_set': fields.Boolean,
'interface_language': fields.String,
'interface_theme': fields.String,
'timezone': fields.String,
'last_login_at': TimestampField,
'last_login_ip': fields.String,
'created_at': TimestampField
}
class AccountInitApi(Resource):

View File

@ -1,32 +1,17 @@
# -*- coding:utf-8 -*-
from flask import current_app
from flask_login import current_user
from flask_restful import Resource, abort, fields, marshal_with, reqparse
from flask_restful import Resource, abort, marshal_with, reqparse
import services
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from extensions.ext_database import db
from libs.helper import TimestampField
from fields.member_fields import account_with_role_list_fields
from libs.login import login_required
from models.account import Account
from services.account_service import RegisterService, TenantService
account_fields = {
'id': fields.String,
'name': fields.String,
'avatar': fields.String,
'email': fields.String,
'last_login_at': TimestampField,
'created_at': TimestampField,
'role': fields.String,
'status': fields.String,
}
account_list_fields = {
'accounts': fields.List(fields.Nested(account_fields))
}
from services.errors.account import AccountAlreadyInTenantError
class MemberListApi(Resource):
@ -35,7 +20,7 @@ class MemberListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@marshal_with(account_list_fields)
@marshal_with(account_with_role_list_fields)
def get(self):
members = TenantService.get_tenant_members(current_user.current_tenant)
return {'result': 'success', 'accounts': members}, 200
@ -72,6 +57,13 @@ class MemberInviteEmailApi(Resource):
'email': invitee_email,
'url': f'{console_web_url}/activate?email={invitee_email}&token={token}'
})
except AccountAlreadyInTenantError:
invitation_results.append({
'status': 'success',
'email': invitee_email,
'url': f'{console_web_url}/signin'
})
break
except Exception as e:
invitation_results.append({
'status': 'failed',

View File

@ -1,6 +1,6 @@
import io
from flask import send_file
from flask import current_app, send_file
from flask_login import current_user
from flask_restful import Resource, reqparse
from werkzeug.exceptions import Forbidden
@ -8,6 +8,7 @@ from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.model_runtime.utils.encoders import jsonable_encoder
from libs.login import login_required
from services.tools_manage_service import ToolManageService
@ -30,11 +31,11 @@ class ToolBuiltinProviderListToolsApi(Resource):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return ToolManageService.list_builtin_tool_provider_tools(
return jsonable_encoder(ToolManageService.list_builtin_tool_provider_tools(
user_id,
tenant_id,
provider,
)
))
class ToolBuiltinProviderDeleteApi(Resource):
@setup_required
@ -75,13 +76,52 @@ class ToolBuiltinProviderUpdateApi(Resource):
provider,
args['credentials'],
)
class ToolBuiltinProviderGetCredentialsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return ToolManageService.get_builtin_tool_provider_credentials(
user_id,
tenant_id,
provider,
)
class ToolBuiltinProviderIconApi(Resource):
@setup_required
def get(self, provider):
icon_bytes, minetype = ToolManageService.get_builtin_tool_provider_icon(provider)
return send_file(io.BytesIO(icon_bytes), mimetype=minetype)
icon_bytes, mimetype = ToolManageService.get_builtin_tool_provider_icon(provider)
icon_cache_max_age = int(current_app.config.get('TOOL_ICON_CACHE_MAX_AGE'))
return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age)
class ToolModelProviderIconApi(Resource):
@setup_required
def get(self, provider):
icon_bytes, mimetype = ToolManageService.get_model_tool_provider_icon(provider)
return send_file(io.BytesIO(icon_bytes), mimetype=mimetype)
class ToolModelProviderListToolsApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser()
parser.add_argument('provider', type=str, required=True, nullable=False, location='args')
args = parser.parse_args()
return jsonable_encoder(ToolManageService.list_model_tool_provider_tools(
user_id,
tenant_id,
args['provider'],
))
class ToolApiProviderAddApi(Resource):
@setup_required
@ -146,11 +186,11 @@ class ToolApiProviderListToolsApi(Resource):
args = parser.parse_args()
return ToolManageService.list_api_tool_provider_tools(
return jsonable_encoder(ToolManageService.list_api_tool_provider_tools(
user_id,
tenant_id,
args['provider'],
)
))
class ToolApiProviderUpdateApi(Resource):
@setup_required
@ -259,6 +299,7 @@ class ToolApiProviderPreviousTestApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument('tool_name', type=str, required=True, nullable=False, location='json')
parser.add_argument('provider_name', type=str, required=False, nullable=False, location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
parser.add_argument('parameters', type=dict, required=True, nullable=False, location='json')
parser.add_argument('schema_type', type=str, required=True, nullable=False, location='json')
@ -268,6 +309,7 @@ class ToolApiProviderPreviousTestApi(Resource):
return ToolManageService.test_api_tool_preview(
current_user.current_tenant_id,
args['provider_name'] if args['provider_name'] else '',
args['tool_name'],
args['credentials'],
args['parameters'],
@ -275,17 +317,49 @@ class ToolApiProviderPreviousTestApi(Resource):
args['schema'],
)
class ToolBuiltinListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_builtin_tools(
user_id,
tenant_id,
)])
class ToolApiListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user_id = current_user.id
tenant_id = current_user.current_tenant_id
return jsonable_encoder([provider.to_dict() for provider in ToolManageService.list_api_tools(
user_id,
tenant_id,
)])
api.add_resource(ToolProviderListApi, '/workspaces/current/tool-providers')
api.add_resource(ToolBuiltinProviderListToolsApi, '/workspaces/current/tool-provider/builtin/<provider>/tools')
api.add_resource(ToolBuiltinProviderDeleteApi, '/workspaces/current/tool-provider/builtin/<provider>/delete')
api.add_resource(ToolBuiltinProviderUpdateApi, '/workspaces/current/tool-provider/builtin/<provider>/update')
api.add_resource(ToolBuiltinProviderGetCredentialsApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials')
api.add_resource(ToolBuiltinProviderCredentialsSchemaApi, '/workspaces/current/tool-provider/builtin/<provider>/credentials_schema')
api.add_resource(ToolBuiltinProviderIconApi, '/workspaces/current/tool-provider/builtin/<provider>/icon')
api.add_resource(ToolModelProviderIconApi, '/workspaces/current/tool-provider/model/<provider>/icon')
api.add_resource(ToolModelProviderListToolsApi, '/workspaces/current/tool-provider/model/tools')
api.add_resource(ToolApiProviderAddApi, '/workspaces/current/tool-provider/api/add')
api.add_resource(ToolApiProviderGetRemoteSchemaApi, '/workspaces/current/tool-provider/api/remote')
api.add_resource(ToolApiProviderListToolsApi, '/workspaces/current/tool-provider/api/tools')
api.add_resource(ToolApiProviderUpdateApi, '/workspaces/current/tool-provider/api/update')
api.add_resource(ToolApiProviderDeleteApi, '/workspaces/current/tool-provider/api/delete')
api.add_resource(ToolApiProviderGetApi, '/workspaces/current/tool-provider/api/get')
api.add_resource(ToolApiProviderSchemaApi, '/workspaces/current/tool-provider/api/schema')
api.add_resource(ToolApiProviderPreviousTestApi, '/workspaces/current/tool-provider/api/test/pre')
api.add_resource(ToolBuiltinListApi, '/workspaces/current/tools/builtin')
api.add_resource(ToolApiListApi, '/workspaces/current/tools/api')

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import logging
from flask import request

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import json
from functools import wraps
@ -52,19 +51,25 @@ def cloud_edition_billing_resource_check(resource: str,
@wraps(view)
def decorated(*args, **kwargs):
features = FeatureService.get_features(current_user.current_tenant_id)
if features.billing.enabled:
members = features.members
apps = features.apps
vector_space = features.vector_space
documents_upload_quota = features.documents_upload_quota
annotation_quota_limit = features.annotation_quota_limit
if resource == 'members' and 0 < members.limit <= members.size:
abort(403, error_msg)
elif resource == 'apps' and 0 < apps.limit <= apps.size:
abort(403, error_msg)
elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size:
abort(403, error_msg)
elif resource == 'documents' and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
# The file upload API is used in multiple places, so we need to check whether the request comes from datasets
source = request.args.get('source')
if source == 'datasets':
abort(403, error_msg)
else:
return view(*args, **kwargs)
elif resource == 'workspace_custom' and not features.can_replace_logo:
abort(403, error_msg)
elif resource == 'annotation' and 0 < annotation_quota_limit.limit < annotation_quota_limit.size:
@ -73,7 +78,29 @@ def cloud_edition_billing_resource_check(resource: str,
return view(*args, **kwargs)
return view(*args, **kwargs)
return decorated
return interceptor
def cloud_edition_billing_knowledge_limit_check(resource: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan."):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
features = FeatureService.get_features(current_user.current_tenant_id)
if features.billing.enabled:
if resource == 'add_segment':
if features.billing.subscription.plan == 'sandbox':
abort(403, error_msg)
else:
return view(*args, **kwargs)
return view(*args, **kwargs)
return decorated
return interceptor
@ -92,4 +119,5 @@ def cloud_utm_record(view):
except Exception as e:
pass
return view(*args, **kwargs)
return decorated

View File

@ -41,7 +41,7 @@ class WorkspaceWebappLogoApi(Resource):
webapp_logo_file_id = custom_config.get('replace_webapp_logo') if custom_config is not None else None
if not webapp_logo_file_id:
raise NotFound(f'webapp logo is not found')
raise NotFound('webapp logo is not found')
try:
generator, mimetype = FileService.get_public_image_preview(

View File

@ -27,12 +27,12 @@ class ToolFilePreviewApi(Resource):
raise Forbidden('Invalid request.')
try:
result = ToolFileManager.get_file_generator_by_message_file_id(
result = ToolFileManager.get_file_generator_by_tool_file_id(
file_id,
)
if not result:
raise NotFound(f'file is not found')
raise NotFound('file is not found')
generator, mimetype = result
except Exception:

View File

@ -7,5 +7,5 @@ api = ExternalApi(bp)
from . import index
from .app import app, audio, completion, conversation, file, message
from .app import app, audio, completion, conversation, file, message, workflow
from .dataset import dataset, document, segment

View File

@ -1,27 +0,0 @@
from extensions.ext_database import db
from models.model import EndUser
def create_or_update_end_user_for_user_id(app_model, user_id):
"""
Create or update session terminal based on user ID.
"""
end_user = db.session.query(EndUser) \
.filter(
EndUser.tenant_id == app_model.tenant_id,
EndUser.session_id == user_id,
EndUser.type == 'service_api'
).first()
if end_user is None:
end_user = EndUser(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
type='service_api',
is_anonymous=True,
session_id=user_id
)
db.session.add(end_user)
db.session.commit()
return end_user

View File

@ -1,17 +1,18 @@
# -*- coding:utf-8 -*-
import json
from flask import current_app
from flask_restful import fields, marshal_with
from flask_restful import fields, marshal_with, Resource
from controllers.service_api import api
from controllers.service_api.wraps import AppApiResource
from controllers.service_api.app.error import AppUnavailableError
from controllers.service_api.wraps import validate_app_token
from extensions.ext_database import db
from models.model import App, AppModelConfig
from models.model import App, AppModelConfig, AppMode
from models.tools import ApiToolProvider
from services.app_service import AppService
class AppParameterApi(AppApiResource):
class AppParameterApi(Resource):
"""Resource for app variables."""
variable_fields = {
@ -43,64 +44,54 @@ class AppParameterApi(AppApiResource):
'system_parameters': fields.Nested(system_parameters_fields)
}
@validate_app_token
@marshal_with(parameters_fields)
def get(self, app_model: App, end_user):
def get(self, app_model: App):
"""Retrieve app parameters."""
app_model_config = app_model.app_model_config
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()
features_dict = workflow.features_dict
user_input_form = workflow.user_input_form(to_old_structure=True)
else:
app_model_config = app_model.app_model_config
features_dict = app_model_config.to_dict()
user_input_form = features_dict.get('user_input_form', [])
return {
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'text_to_speech': app_model_config.text_to_speech_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
'annotation_reply': app_model_config.annotation_reply_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list,
'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
'file_upload': app_model_config.file_upload_dict,
'opening_statement': features_dict.get('opening_statement'),
'suggested_questions': features_dict.get('suggested_questions', []),
'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
{"enabled": False}),
'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
'user_input_form': user_input_form,
'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
{"enabled": False, "type": "", "configs": []}),
'file_upload': features_dict.get('file_upload', {"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"]
}}),
'system_parameters': {
'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
}
}
class AppMetaApi(AppApiResource):
def get(self, app_model: App, end_user):
class AppMetaApi(Resource):
@validate_app_token
def get(self, app_model: App):
"""Get app meta"""
app_model_config: AppModelConfig = app_model.app_model_config
return AppService().get_app_meta(app_model)
agent_config = app_model_config.agent_mode_dict or {}
meta = {
'tool_icons': {}
}
# get all tools
tools = agent_config.get('tools', [])
url_prefix = (current_app.config.get("CONSOLE_API_URL")
+ f"/console/api/workspaces/current/tool-provider/builtin/")
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
# current tool standard
provider_type = tool.get('provider_type')
provider_id = tool.get('provider_id')
tool_name = tool.get('tool_name')
if provider_type == 'builtin':
meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
elif provider_type == 'api':
try:
provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
ApiToolProvider.id == provider_id
)
meta['tool_icons'][tool_name] = json.loads(provider.icon)
except:
meta['tool_icons'][tool_name] = {
"background": "#252525",
"content": "\ud83d\ude01"
}
return meta
api.add_resource(AppParameterApi, '/parameters')
api.add_resource(AppMetaApi, '/meta')

View File

@ -1,7 +1,7 @@
import logging
from flask import request
from flask_restful import reqparse
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError
import services
@ -17,10 +17,10 @@ from controllers.service_api.app.error import (
ProviderQuotaExceededError,
UnsupportedAudioTypeError,
)
from controllers.service_api.wraps import AppApiResource
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from models.model import App, AppModelConfig
from models.model import App, EndUser
from services.audio_service import AudioService
from services.errors.audio import (
AudioTooLargeServiceError,
@ -30,18 +30,14 @@ from services.errors.audio import (
)
class AudioApi(AppApiResource):
def post(self, app_model: App, end_user):
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
class AudioApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.FORM))
def post(self, app_model: App, end_user: EndUser):
file = request.files['file']
try:
response = AudioService.transcript_asr(
tenant_id=app_model.tenant_id,
app_model=app_model,
file=file,
end_user=end_user
)
@ -73,19 +69,21 @@ class AudioApi(AppApiResource):
raise InternalServerError()
class TextApi(AppApiResource):
def post(self, app_model: App, end_user):
class TextApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
def post(self, app_model: App, end_user: EndUser):
parser = reqparse.RequestParser()
parser.add_argument('text', type=str, required=True, nullable=False, location='json')
parser.add_argument('user', type=str, required=True, nullable=False, location='json')
parser.add_argument('voice', type=str, location='json')
parser.add_argument('streaming', type=bool, required=False, nullable=False, location='json')
args = parser.parse_args()
try:
response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id,
app_model=app_model,
text=args['text'],
end_user=args['user'],
end_user=end_user,
voice=args.get('voice'),
streaming=args['streaming']
)

View File

@ -1,14 +1,10 @@
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_restful import reqparse
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import (
AppUnavailableError,
CompletionRequestError,
@ -18,17 +14,20 @@ from controllers.service_api.app.error import (
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.service_api.wraps import AppApiResource
from core.application_queue_manager import ApplicationQueueManager
from core.entities.application_entities import InvokeFrom
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from libs.helper import uuid_value
from services.completion_service import CompletionService
from models.model import App, AppMode, EndUser
from services.app_generate_service import AppGenerateService
class CompletionApi(AppApiResource):
def post(self, app_model, end_user):
class CompletionApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser):
if app_model.mode != 'completion':
raise AppUnavailableError()
@ -37,20 +36,16 @@ class CompletionApi(AppApiResource):
parser.add_argument('query', type=str, location='json', default='')
parser.add_argument('files', type=list, required=False, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('user', required=True, nullable=False, type=str, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
args = parser.parse_args()
streaming = args['response_mode'] == 'streaming'
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
args['auto_generate_name'] = False
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
@ -58,7 +53,7 @@ class CompletionApi(AppApiResource):
streaming=streaming,
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -81,30 +76,22 @@ class CompletionApi(AppApiResource):
raise InternalServerError()
class CompletionStopApi(AppApiResource):
def post(self, app_model, end_user, task_id):
class CompletionStopApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser, task_id):
if app_model.mode != 'completion':
raise AppUnavailableError()
if end_user is None:
parser = reqparse.RequestParser()
parser.add_argument('user', required=True, nullable=False, type=str, location='json')
args = parser.parse_args()
user = args.get('user')
if user is not None:
end_user = create_or_update_end_user_for_user_id(app_model, user)
else:
raise ValueError("arg user muse be input.")
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
return {'result': 'success'}, 200
class ChatApi(AppApiResource):
def post(self, app_model, end_user):
if app_model.mode != 'chat':
class ChatApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -113,7 +100,6 @@ class ChatApi(AppApiResource):
parser.add_argument('files', type=list, required=False, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
parser.add_argument('conversation_id', type=uuid_value, location='json')
parser.add_argument('user', type=str, required=True, nullable=False, location='json')
parser.add_argument('retriever_from', type=str, required=False, default='dev', location='json')
parser.add_argument('auto_generate_name', type=bool, required=False, default=True, location='json')
@ -121,11 +107,8 @@ class ChatApi(AppApiResource):
streaming = args['response_mode'] == 'streaming'
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
@ -133,7 +116,7 @@ class ChatApi(AppApiResource):
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -156,39 +139,18 @@ class ChatApi(AppApiResource):
raise InternalServerError()
class ChatStopApi(AppApiResource):
def post(self, app_model, end_user, task_id):
if app_model.mode != 'chat':
class ChatStopApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser, task_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
if end_user is None:
parser = reqparse.RequestParser()
parser.add_argument('user', required=True, nullable=False, type=str, location='json')
args = parser.parse_args()
user = args.get('user')
if user is not None:
end_user = create_or_update_end_user_for_user_id(app_model, user)
else:
raise ValueError("arg user muse be input.")
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
return {'result': 'success'}, 200
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
api.add_resource(CompletionApi, '/completion-messages')
api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
api.add_resource(ChatApi, '/chat-messages')
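With the user argument now resolved by validate_app_token(fetch_user_arg=...) instead of a per-endpoint parser, callers still pass user in the JSON body and the decorator turns it into an EndUser. A sketch of calling the refactored chat endpoint follows; the base URL and API key are placeholders, and the service API is assumed to be mounted under /v1 with Bearer app-key auth.
import requests
# Placeholders (assumptions), not values taken from this diff.
SERVICE_API_BASE = "http://localhost:5001/v1"
APP_API_KEY = "app-xxxxxxxx"
resp = requests.post(
    f"{SERVICE_API_BASE}/chat-messages",
    headers={"Authorization": f"Bearer {APP_API_KEY}"},
    json={
        "query": "What can you do?",
        "inputs": {},
        "response_mode": "blocking",
        "user": "end-user-123",  # resolved to an EndUser by validate_app_token(WhereisUserArg.JSON)
    },
)
print(resp.json())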

View File

@ -1,53 +1,47 @@
# -*- coding:utf-8 -*-
from flask import request
from flask_restful import marshal_with, reqparse
from flask_restful import Resource, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
import services
from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import AppApiResource
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import uuid_value
from models.model import App, AppMode, EndUser
from services.conversation_service import ConversationService
class ConversationApi(AppApiResource):
class ConversationApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
@marshal_with(conversation_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
if app_model.mode != 'chat':
def get(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
parser.add_argument('last_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
parser.add_argument('user', type=str, location='args')
args = parser.parse_args()
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
try:
return ConversationService.pagination_by_last_id(app_model, end_user, args['last_id'], args['limit'])
except services.errors.conversation.LastConversationNotExistsError:
raise NotFound("Last Conversation Not Exists.")
class ConversationDetailApi(AppApiResource):
class ConversationDetailApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
@marshal_with(simple_conversation_fields)
def delete(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
def delete(self, app_model: App, end_user: EndUser, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
user = request.get_json().get('user')
if end_user is None and user is not None:
end_user = create_or_update_end_user_for_user_id(app_model, user)
try:
ConversationService.delete(app_model, conversation_id, end_user)
except services.errors.conversation.ConversationNotExistsError:
@ -55,24 +49,22 @@ class ConversationDetailApi(AppApiResource):
return {"result": "success"}, 204
class ConversationRenameApi(AppApiResource):
class ConversationRenameApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
@marshal_with(simple_conversation_fields)
def post(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
def post(self, app_model: App, end_user: EndUser, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=False, location='json')
parser.add_argument('user', type=str, location='json')
parser.add_argument('auto_generate', type=bool, required=False, default=False, location='json')
args = parser.parse_args()
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
try:
return ConversationService.rename(
app_model,

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException
@ -16,7 +15,13 @@ class NotCompletionAppError(BaseHTTPException):
class NotChatAppError(BaseHTTPException):
error_code = 'not_chat_app'
description = "Please check if your Chat app mode matches the right API route."
description = "Please check if your app mode matches the right API route."
code = 400
class NotWorkflowAppError(BaseHTTPException):
error_code = 'not_workflow_app'
description = "Please check if your app mode matches the right API route."
code = 400

View File

@ -1,30 +1,27 @@
from flask import request
from flask_restful import marshal_with
from flask_restful import Resource, marshal_with
import services
from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import (
FileTooLargeError,
NoFileUploadedError,
TooManyFilesError,
UnsupportedFileTypeError,
)
from controllers.service_api.wraps import AppApiResource
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from fields.file_fields import file_fields
from models.model import App, EndUser
from services.file_service import FileService
class FileApi(AppApiResource):
class FileApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.FORM))
@marshal_with(file_fields)
def post(self, app_model, end_user):
def post(self, app_model: App, end_user: EndUser):
file = request.files['file']
user_args = request.form.get('user')
if end_user is None and user_args is not None:
end_user = create_or_update_end_user_for_user_id(app_model, user_args)
# check file
if 'file' not in request.files:

View File

@ -1,21 +1,22 @@
# -*- coding:utf-8 -*-
from flask_restful import fields, marshal_with, reqparse
import logging
from flask_restful import Resource, fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
import services
from controllers.service_api import api
from controllers.service_api.app import create_or_update_end_user_for_user_id
from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import AppApiResource
from extensions.ext_database import db
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import message_file_fields
from libs.helper import TimestampField, uuid_value
from models.model import EndUser, Message
from models.model import App, AppMode, EndUser
from services.errors.message import SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService
class MessageListApi(AppApiResource):
class MessageListApi(Resource):
feedback_fields = {
'rating': fields.String
}
@ -57,12 +58,14 @@ class MessageListApi(AppApiResource):
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'answer': fields.String,
'answer': fields.String(attribute='re_sign_file_url_answer'),
'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
'created_at': TimestampField,
'agent_thoughts': fields.List(fields.Nested(agent_thought_fields))
'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
'status': fields.String,
'error': fields.String,
}
message_infinite_scroll_pagination_fields = {
@ -71,21 +74,19 @@ class MessageListApi(AppApiResource):
'data': fields.List(fields.Nested(message_fields))
}
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
if app_model.mode != 'chat':
def get(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
parser.add_argument('first_id', type=uuid_value, location='args')
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
parser.add_argument('user', type=str, location='args')
args = parser.parse_args()
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
try:
return MessageService.pagination_by_first_id(app_model, end_user,
args['conversation_id'], args['first_id'], args['limit'])
@ -95,18 +96,15 @@ class MessageListApi(AppApiResource):
raise NotFound("First Message Not Exists.")
class MessageFeedbackApi(AppApiResource):
def post(self, app_model, end_user, message_id):
class MessageFeedbackApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
def post(self, app_model: App, end_user: EndUser, message_id):
message_id = str(message_id)
parser = reqparse.RequestParser()
parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
parser.add_argument('user', type=str, location='json')
args = parser.parse_args()
if end_user is None and args['user'] is not None:
end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
try:
MessageService.create_feedback(app_model, message_id, end_user, args['rating'])
except services.errors.message.MessageNotExistsError:
@ -115,34 +113,28 @@ class MessageFeedbackApi(AppApiResource):
return {'result': 'success'}
class MessageSuggestedApi(AppApiResource):
def get(self, app_model, end_user, message_id):
class MessageSuggestedApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY))
def get(self, app_model: App, end_user: EndUser, message_id):
message_id = str(message_id)
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
try:
message = db.session.query(Message).filter(
Message.id == message_id,
Message.app_id == app_model.id,
).first()
if end_user is None and message.from_end_user_id is not None:
user = db.session.query(EndUser) \
.filter(
EndUser.tenant_id == app_model.tenant_id,
EndUser.id == message.from_end_user_id,
EndUser.type == 'service_api'
).first()
else:
user = end_user
try:
questions = MessageService.get_suggested_questions_after_answer(
app_model=app_model,
user=user,
user=end_user,
message_id=message_id,
check_enabled=False
invoke_from=InvokeFrom.SERVICE_API
)
except services.errors.message.MessageNotExistsError:
raise NotFound("Message Not Exists.")
except SuggestedQuestionsAfterAnswerDisabledError:
raise BadRequest("Suggested questions after answer is disabled.")
except Exception:
logging.exception("internal server error.")
raise InternalServerError()
return {'result': 'success', 'data': questions}

View File

@ -0,0 +1,87 @@
import logging
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError
from controllers.service_api import api
from controllers.service_api.app.error import (
CompletionRequestError,
NotWorkflowAppError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from models.model import App, AppMode, EndUser
from services.app_generate_service import AppGenerateService
logger = logging.getLogger(__name__)
class WorkflowRunApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser):
"""
Run workflow
"""
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
parser.add_argument('files', type=list, required=False, location='json')
parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
args = parser.parse_args()
streaming = args.get('response_mode') == 'streaming'
try:
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
invoke_from=InvokeFrom.SERVICE_API,
streaming=streaming
)
return helper.compact_generate_response(response)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class WorkflowTaskStopApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser, task_id: str):
"""
Stop workflow task
"""
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)
return {
"result": "success"
}
api.add_resource(WorkflowRunApi, '/workflows/run')
api.add_resource(WorkflowTaskStopApi, '/workflows/tasks/<string:task_id>/stop')
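A sketch of invoking the new workflow routes registered above; the base URL, app API key and task id are assumptions, and the payload fields mirror the RequestParser in WorkflowRunApi.
import requests
# Placeholders (assumptions), not values taken from this diff.
SERVICE_API_BASE = "http://localhost:5001/v1"
APP_API_KEY = "app-xxxxxxxx"
run = requests.post(
    f"{SERVICE_API_BASE}/workflows/run",
    headers={"Authorization": f"Bearer {APP_API_KEY}"},
    json={
        "inputs": {"query": "hello"},  # required dict
        "response_mode": "streaming",  # or 'blocking'
        "user": "end-user-123",        # required here: FetchUserArg(..., required=True)
    },
    stream=True,
)
for line in run.iter_lines():
    print(line.decode())
# A running task could then be stopped via the companion route:
# POST {SERVICE_API_BASE}/workflows/tasks/<task_id>/stop, same auth, with a 'user' field in the JSON body.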

View File

@ -1,7 +1,6 @@
import json
from flask import request
from flask_login import current_user
from flask_restful import marshal, reqparse
from sqlalchemy import desc
from werkzeug.exceptions import NotFound
@ -29,6 +28,7 @@ class DocumentAddByTextApi(DatasetApiResource):
"""Resource for documents."""
@cloud_edition_billing_resource_check('vector_space', 'dataset')
@cloud_edition_billing_resource_check('documents', 'dataset')
def post(self, tenant_id, dataset_id):
"""Create document by text."""
parser = reqparse.RequestParser()
@ -154,6 +154,7 @@ class DocumentUpdateByTextApi(DatasetApiResource):
class DocumentAddByFileApi(DatasetApiResource):
"""Resource for documents."""
@cloud_edition_billing_resource_check('vector_space', 'dataset')
@cloud_edition_billing_resource_check('documents', 'dataset')
def post(self, tenant_id, dataset_id):
"""Create document by upload file."""
args = {}

View File

@ -4,7 +4,11 @@ from werkzeug.exceptions import NotFound
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.wraps import DatasetApiResource, cloud_edition_billing_resource_check
from controllers.service_api.wraps import (
DatasetApiResource,
cloud_edition_billing_knowledge_limit_check,
cloud_edition_billing_resource_check,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
@ -18,6 +22,7 @@ class SegmentApi(DatasetApiResource):
"""Resource for segments."""
@cloud_edition_billing_resource_check('vector_space', 'dataset')
@cloud_edition_billing_knowledge_limit_check('add_segment', 'dataset')
def post(self, tenant_id, dataset_id, document_id):
"""Create single segment."""
# check dataset
@ -46,8 +51,8 @@ class SegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# validate args
@ -90,8 +95,8 @@ class SegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -182,8 +187,8 @@ class DatasetSegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
f"No Embedding Model available. Please configure a valid provider "
f"in the Settings -> Model Provider.")
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider.")
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
# check segment
@ -197,11 +202,11 @@ class DatasetSegmentApi(DatasetApiResource):
# validate args
parser = reqparse.RequestParser()
parser.add_argument('segments', type=dict, required=False, nullable=True, location='json')
parser.add_argument('segment', type=dict, required=False, nullable=True, location='json')
args = parser.parse_args()
SegmentService.segment_create_args_validate(args['segments'], document)
segment = SegmentService.update_segment(args['segments'], segment, document, dataset)
SegmentService.segment_create_args_validate(args['segment'], document)
segment = SegmentService.update_segment(args['segment'], segment, document, dataset)
return {
'data': marshal(segment, segment_fields),
'doc_form': document.doc_form

View File

@ -1,23 +1,40 @@
# -*- coding:utf-8 -*-
from collections.abc import Callable
from datetime import datetime
from enum import Enum
from functools import wraps
from typing import Optional
from flask import current_app, request
from flask_login import user_logged_in
from flask_restful import Resource
from werkzeug.exceptions import NotFound, Unauthorized
from pydantic import BaseModel
from werkzeug.exceptions import Forbidden, NotFound, Unauthorized
from extensions.ext_database import db
from libs.login import _get_user
from models.account import Account, Tenant, TenantAccountJoin
from models.model import ApiToken, App
from models.model import ApiToken, App, EndUser
from services.feature_service import FeatureService
def validate_app_token(view=None):
def decorator(view):
@wraps(view)
def decorated(*args, **kwargs):
class WhereisUserArg(Enum):
"""
Enum for whereis_user_arg.
"""
QUERY = 'query'
JSON = 'json'
FORM = 'form'
class FetchUserArg(BaseModel):
fetch_from: WhereisUserArg
required: bool = False
def validate_app_token(view: Optional[Callable] = None, *, fetch_user_arg: Optional[FetchUserArg] = None):
def decorator(view_func):
@wraps(view_func)
def decorated_view(*args, **kwargs):
api_token = validate_and_get_api_token('app')
app_model = db.session.query(App).filter(App.id == api_token.app_id).first()
@ -30,16 +47,35 @@ def validate_app_token(view=None):
if not app_model.enable_api:
raise NotFound()
return view(app_model, None, *args, **kwargs)
return decorated
kwargs['app_model'] = app_model
if view:
if fetch_user_arg:
if fetch_user_arg.fetch_from == WhereisUserArg.QUERY:
user_id = request.args.get('user')
elif fetch_user_arg.fetch_from == WhereisUserArg.JSON:
user_id = request.get_json().get('user')
elif fetch_user_arg.fetch_from == WhereisUserArg.FORM:
user_id = request.form.get('user')
else:
# use default-user
user_id = None
if not user_id and fetch_user_arg.required:
raise ValueError("Arg user must be provided.")
if user_id:
user_id = str(user_id)
kwargs['end_user'] = create_or_update_end_user_for_user_id(app_model, user_id)
return view_func(*args, **kwargs)
return decorated_view
if view is None:
return decorator
else:
return decorator(view)
# if view is None, it means that the decorator is used without parentheses
# use the decorator as a function for method_decorators
return decorator
def cloud_edition_billing_resource_check(resource: str,
api_token_type: str,
@ -53,13 +89,16 @@ def cloud_edition_billing_resource_check(resource: str,
members = features.members
apps = features.apps
vector_space = features.vector_space
documents_upload_quota = features.documents_upload_quota
if resource == 'members' and 0 < members.limit <= members.size:
raise Unauthorized(error_msg)
raise Forbidden(error_msg)
elif resource == 'apps' and 0 < apps.limit <= apps.size:
raise Unauthorized(error_msg)
raise Forbidden(error_msg)
elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size:
raise Unauthorized(error_msg)
raise Forbidden(error_msg)
elif resource == 'documents' and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
raise Forbidden(error_msg)
else:
return view(*args, **kwargs)
@ -68,6 +107,27 @@ def cloud_edition_billing_resource_check(resource: str,
return interceptor
def cloud_edition_billing_knowledge_limit_check(resource: str,
api_token_type: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan."):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
api_token = validate_and_get_api_token(api_token_type)
features = FeatureService.get_features(api_token.tenant_id)
if features.billing.enabled:
if resource == 'add_segment':
if features.billing.subscription.plan == 'sandbox':
raise Forbidden(error_msg)
else:
return view(*args, **kwargs)
return view(*args, **kwargs)
return decorated
return interceptor
def validate_dataset_token(view=None):
def decorator(view):
@wraps(view)
@ -129,8 +189,33 @@ def validate_and_get_api_token(scope=None):
return api_token
class AppApiResource(Resource):
method_decorators = [validate_app_token]
def create_or_update_end_user_for_user_id(app_model: App, user_id: Optional[str] = None) -> EndUser:
"""
Create or update session terminal based on user ID.
"""
if not user_id:
user_id = 'DEFAULT-USER'
end_user = db.session.query(EndUser) \
.filter(
EndUser.tenant_id == app_model.tenant_id,
EndUser.app_id == app_model.id,
EndUser.session_id == user_id,
EndUser.type == 'service_api'
).first()
if end_user is None:
end_user = EndUser(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
type='service_api',
is_anonymous=True if user_id == 'DEFAULT-USER' else False,
session_id=user_id
)
db.session.add(end_user)
db.session.commit()
return end_user
class DatasetApiResource(Resource):

View File

@ -6,4 +6,4 @@ bp = Blueprint('web', __name__, url_prefix='/api')
api = ExternalApi(bp)
from . import app, audio, completion, conversation, file, message, passport, saved_message, site
from . import app, audio, completion, conversation, file, message, passport, saved_message, site, workflow

View File

@ -1,14 +1,15 @@
# -*- coding:utf-8 -*-
import json
from flask import current_app
from flask_restful import fields, marshal_with
from controllers.web import api
from controllers.web.error import AppUnavailableError
from controllers.web.wraps import WebApiResource
from extensions.ext_database import db
from models.model import App, AppModelConfig
from models.model import App, AppModelConfig, AppMode
from models.tools import ApiToolProvider
from services.app_service import AppService
class AppParameterApi(WebApiResource):
@ -45,61 +46,49 @@ class AppParameterApi(WebApiResource):
@marshal_with(parameters_fields)
def get(self, app_model: App, end_user):
"""Retrieve app parameters."""
app_model_config = app_model.app_model_config
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()
features_dict = workflow.features_dict
user_input_form = workflow.user_input_form(to_old_structure=True)
else:
app_model_config = app_model.app_model_config
features_dict = app_model_config.to_dict()
user_input_form = features_dict.get('user_input_form', [])
return {
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'text_to_speech': app_model_config.text_to_speech_dict,
'retriever_resource': app_model_config.retriever_resource_dict,
'annotation_reply': app_model_config.annotation_reply_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list,
'sensitive_word_avoidance': app_model_config.sensitive_word_avoidance_dict,
'file_upload': app_model_config.file_upload_dict,
'opening_statement': features_dict.get('opening_statement'),
'suggested_questions': features_dict.get('suggested_questions', []),
'suggested_questions_after_answer': features_dict.get('suggested_questions_after_answer',
{"enabled": False}),
'speech_to_text': features_dict.get('speech_to_text', {"enabled": False}),
'text_to_speech': features_dict.get('text_to_speech', {"enabled": False}),
'retriever_resource': features_dict.get('retriever_resource', {"enabled": False}),
'annotation_reply': features_dict.get('annotation_reply', {"enabled": False}),
'more_like_this': features_dict.get('more_like_this', {"enabled": False}),
'user_input_form': user_input_form,
'sensitive_word_avoidance': features_dict.get('sensitive_word_avoidance',
{"enabled": False, "type": "", "configs": []}),
'file_upload': features_dict.get('file_upload', {"image": {
"enabled": False,
"number_limits": 3,
"detail": "high",
"transfer_methods": ["remote_url", "local_file"]
}}),
'system_parameters': {
'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT')
}
}
class AppMeta(WebApiResource):
def get(self, app_model: App, end_user):
"""Get app meta"""
app_model_config: AppModelConfig = app_model.app_model_config
return AppService().get_app_meta(app_model)
agent_config = app_model_config.agent_mode_dict or {}
meta = {
'tool_icons': {}
}
# get all tools
tools = agent_config.get('tools', [])
url_prefix = (current_app.config.get("CONSOLE_API_URL")
+ f"/console/api/workspaces/current/tool-provider/builtin/")
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
# current tool standard
provider_type = tool.get('provider_type')
provider_id = tool.get('provider_id')
tool_name = tool.get('tool_name')
if provider_type == 'builtin':
meta['tool_icons'][tool_name] = url_prefix + provider_id + '/icon'
elif provider_type == 'api':
try:
provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
ApiToolProvider.id == provider_id
)
meta['tool_icons'][tool_name] = json.loads(provider.icon)
except:
meta['tool_icons'][tool_name] = {
"background": "#252525",
"content": "\ud83d\ude01"
}
return meta
api.add_resource(AppParameterApi, '/parameters')
api.add_resource(AppMeta, '/meta')

View File

@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import logging
from flask import request
@ -20,7 +19,7 @@ from controllers.web.error import (
from controllers.web.wraps import WebApiResource
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from models.model import App, AppModelConfig
from models.model import App
from services.audio_service import AudioService
from services.errors.audio import (
AudioTooLargeServiceError,
@ -32,16 +31,11 @@ from services.errors.audio import (
class AudioApi(WebApiResource):
def post(self, app_model: App, end_user):
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript_asr(
tenant_id=app_model.tenant_id,
app_model=app_model,
file=file,
end_user=end_user
)
@ -69,7 +63,7 @@ class AudioApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
logging.exception(f"internal server error: {str(e)}")
raise InternalServerError()
@ -77,9 +71,10 @@ class TextApi(WebApiResource):
def post(self, app_model: App, end_user):
try:
response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id,
app_model=app_model,
text=request.form['text'],
end_user=end_user.external_user_id,
voice=request.form.get('voice'),
streaming=False
)
@ -106,7 +101,7 @@ class TextApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
logging.exception(f"internal server error: {str(e)}")
raise InternalServerError()

View File

@ -1,9 +1,5 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound
@ -20,12 +16,14 @@ from controllers.web.error import (
ProviderQuotaExceededError,
)
from controllers.web.wraps import WebApiResource
from core.application_queue_manager import ApplicationQueueManager
from core.entities.application_entities import InvokeFrom
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from libs.helper import uuid_value
from services.completion_service import CompletionService
from models.model import AppMode
from services.app_generate_service import AppGenerateService
# define completion api for user
@ -48,7 +46,7 @@ class CompletionApi(WebApiResource):
args['auto_generate_name'] = False
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
@ -56,7 +54,7 @@ class CompletionApi(WebApiResource):
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@ -84,14 +82,15 @@ class CompletionStopApi(WebApiResource):
if app_model.mode != 'completion':
raise NotCompletionAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
return {'result': 'success'}, 200
class ChatApi(WebApiResource):
def post(self, app_model, end_user):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@@ -108,7 +107,7 @@ class ChatApi(WebApiResource):
args['auto_generate_name'] = False
try:
response = CompletionService.completion(
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
@@ -116,7 +115,7 @@ class ChatApi(WebApiResource):
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
except services.errors.conversation.ConversationCompletedError:
@@ -141,26 +140,15 @@ class ChatApi(WebApiResource):
class ChatStopApi(WebApiResource):
def post(self, app_model, end_user, task_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
ApplicationQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
return {'result': 'success'}, 200
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
api.add_resource(CompletionApi, '/completion-messages')
api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
api.add_resource(ChatApi, '/chat-messages')
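The completion and chat routes now return helper.compact_generate_response(response), replacing the local compact_response helper deleted above. A minimal sketch of such a shared helper, modeled directly on the removed function; the actual code in libs/helper.py may differ:

import json
from typing import Generator, Union

from flask import Response, stream_with_context


def compact_generate_response(response: Union[dict, Generator]) -> Response:
    # Blocking invocations come back as a dict and are returned as plain JSON;
    # streaming invocations yield chunks that are relayed as server-sent events.
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')

    def generate() -> Generator:
        yield from response

    return Response(stream_with_context(generate()), status=200, mimetype='text/event-stream')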

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from flask_restful import marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
@@ -8,6 +7,7 @@ from controllers.web.error import NotChatAppError
from controllers.web.wraps import WebApiResource
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import uuid_value
from models.model import AppMode
from services.conversation_service import ConversationService
from services.errors.conversation import ConversationNotExistsError, LastConversationNotExistsError
from services.web_conversation_service import WebConversationService
@@ -17,7 +17,8 @@ class ConversationListApi(WebApiResource):
@marshal_with(conversation_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@@ -44,7 +45,8 @@ class ConversationListApi(WebApiResource):
class ConversationApi(WebApiResource):
def delete(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@@ -61,7 +63,8 @@ class ConversationRenameApi(WebApiResource):
@marshal_with(simple_conversation_fields)
def post(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@@ -86,7 +89,8 @@ class ConversationRenameApi(WebApiResource):
class ConversationPinApi(WebApiResource):
def patch(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
@@ -101,7 +105,8 @@ class ConversationPinApi(WebApiResource):
class ConversationUnPinApi(WebApiResource):
def patch(self, app_model, end_user, c_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
conversation_id = str(c_id)
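Every conversation endpoint above now repeats the same three-mode guard. A small illustrative helper, built only from names already imported in this file, shows how that check reads in isolation; it is not part of the change itself:

from controllers.web.error import NotChatAppError
from models.model import AppMode


def ensure_chat_like_app(app_model) -> None:
    # Chat-style web routes accept basic chat, agent chat and advanced chat apps.
    app_mode = AppMode.value_of(app_model.mode)
    if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
        raise NotChatAppError()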

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException
@@ -16,7 +15,13 @@ class NotCompletionAppError(BaseHTTPException):
class NotChatAppError(BaseHTTPException):
error_code = 'not_chat_app'
description = "Please check if your Chat app mode matches the right API route."
description = "Please check if your app mode matches the right API route."
code = 400
class NotWorkflowAppError(BaseHTTPException):
error_code = 'not_workflow_app'
description = "Please check if your Workflow app mode matches the right API route."
code = 400
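For a workflow-only route, the new error class surfaces to web clients as a 400 response built from the fields above; the exact envelope is determined by BaseHTTPException, which is outside this diff, so the shape below is an assumption:

# Illustrative only; field names mirror error_code/description/code above.
expected_not_workflow_error = {
    'code': 'not_workflow_app',
    'message': 'Please check if your Workflow app mode matches the right API route.',
    'status': 400,
}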

@@ -1,9 +1,5 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union
from flask import Response, stream_with_context
from flask_restful import fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import InternalServerError, NotFound
@@ -21,13 +17,15 @@ from controllers.web.error import (
ProviderQuotaExceededError,
)
from controllers.web.wraps import WebApiResource
from core.entities.application_entities import InvokeFrom
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from fields.conversation_fields import message_file_fields
from fields.message_fields import agent_thought_fields
from libs import helper
from libs.helper import TimestampField, uuid_value
from services.completion_service import CompletionService
from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
@@ -63,12 +61,14 @@ class MessageListApi(WebApiResource):
'conversation_id': fields.String,
'inputs': fields.Raw,
'query': fields.String,
'answer': fields.String,
'answer': fields.String(attribute='re_sign_file_url_answer'),
'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
'created_at': TimestampField,
'agent_thoughts': fields.List(fields.Nested(agent_thought_fields))
'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
'status': fields.String,
'error': fields.String,
}
message_infinite_scroll_pagination_fields = {
@@ -79,7 +79,8 @@ class MessageListApi(WebApiResource):
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotChatAppError()
parser = reqparse.RequestParser()
@@ -127,7 +128,7 @@ class MessageMoreLikeThisApi(WebApiResource):
streaming = args['response_mode'] == 'streaming'
try:
response = CompletionService.generate_more_like_this(
response = AppGenerateService.generate_more_like_this(
app_model=app_model,
user=end_user,
message_id=message_id,
@@ -135,7 +136,7 @@ class MessageMoreLikeThisApi(WebApiResource):
streaming=streaming
)
return compact_response(response)
return helper.compact_generate_response(response)
except MessageNotExistsError:
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
@@ -155,21 +156,10 @@ class MessageMoreLikeThisApi(WebApiResource):
raise InternalServerError()
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
for chunk in response:
yield chunk
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream')
class MessageSuggestedQuestionApi(WebApiResource):
def get(self, app_model, end_user, message_id):
if app_model.mode != 'chat':
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
raise NotCompletionAppError()
message_id = str(message_id)
@@ -178,7 +168,8 @@ class MessageSuggestedQuestionApi(WebApiResource):
questions = MessageService.get_suggested_questions_after_answer(
app_model=app_model,
user=end_user,
message_id=message_id
message_id=message_id,
invoke_from=InvokeFrom.WEB_APP
)
except MessageNotExistsError:
raise NotFound("Message not found")

View File

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
import uuid
from flask import request

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from flask import current_app
from flask_restful import fields, marshal_with
@@ -84,7 +83,3 @@ class AppSiteInfo:
'remove_webapp_brand': remove_webapp_brand,
'replace_webapp_logo': replace_webapp_logo,
}
if app.enable_site and site.prompt_public:
app_model_config = app.app_model_config
self.model_config = app_model_config

@@ -0,0 +1,82 @@
import logging
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError
from controllers.web import api
from controllers.web.error import (
CompletionRequestError,
NotWorkflowAppError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.web.wraps import WebApiResource
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from models.model import App, AppMode, EndUser
from services.app_generate_service import AppGenerateService
logger = logging.getLogger(__name__)
class WorkflowRunApi(WebApiResource):
def post(self, app_model: App, end_user: EndUser):
"""
Run workflow
"""
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
parser.add_argument('files', type=list, required=False, location='json')
args = parser.parse_args()
try:
response = AppGenerateService.generate(
app_model=app_model,
user=end_user,
args=args,
invoke_from=InvokeFrom.WEB_APP,
streaming=True
)
return helper.compact_generate_response(response)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except InvokeError as e:
raise CompletionRequestError(e.description)
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
class WorkflowTaskStopApi(WebApiResource):
def post(self, app_model: App, end_user: EndUser, task_id: str):
"""
Stop workflow task
"""
app_mode = AppMode.value_of(app_model.mode)
if app_mode != AppMode.WORKFLOW:
raise NotWorkflowAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)
return {
"result": "success"
}
api.add_resource(WorkflowRunApi, '/workflows/run')
api.add_resource(WorkflowTaskStopApi, '/workflows/tasks/<string:task_id>/stop')
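A hedged client-side sketch of calling the new route: the host, auth header and input keys are placeholders rather than anything defined in this file, and the response arrives as a server-sent event stream because streaming=True is hard-coded above:

import requests  # assumption: any HTTP client works; requests is used for brevity

resp = requests.post(
    'https://dify.example.com/api/workflows/run',          # placeholder host and prefix
    headers={'Authorization': 'Bearer <web-app-token>'},    # placeholder auth scheme
    json={'inputs': {'query': 'hello'}, 'files': []},       # 'inputs' is required, 'files' is optional
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode('utf-8'))  # each line carries one streamed workflow event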

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
from functools import wraps
from flask import request

@@ -1,101 +0,0 @@
import logging
from typing import List, Optional
from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
from core.model_runtime.model_providers.__base.ai_model import AIModel
logger = logging.getLogger(__name__)
class AgentLLMCallback(Callback):
def __init__(self, agent_callback: AgentLoopGatherCallbackHandler) -> None:
self.agent_callback = agent_callback
def on_before_invoke(self, llm_instance: AIModel, model: str, credentials: dict,
prompt_messages: list[PromptMessage], model_parameters: dict,
tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
stream: bool = True, user: Optional[str] = None) -> None:
"""
Before invoke callback
:param llm_instance: LLM instance
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self.agent_callback.on_llm_before_invoke(
prompt_messages=prompt_messages
)
def on_new_chunk(self, llm_instance: AIModel, chunk: LLMResultChunk, model: str, credentials: dict,
prompt_messages: list[PromptMessage], model_parameters: dict,
tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
stream: bool = True, user: Optional[str] = None):
"""
On new chunk callback
:param llm_instance: LLM instance
:param chunk: chunk
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
pass
def on_after_invoke(self, llm_instance: AIModel, result: LLMResult, model: str, credentials: dict,
prompt_messages: list[PromptMessage], model_parameters: dict,
tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
stream: bool = True, user: Optional[str] = None) -> None:
"""
After invoke callback
:param llm_instance: LLM instance
:param result: result
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self.agent_callback.on_llm_after_invoke(
result=result
)
def on_invoke_error(self, llm_instance: AIModel, ex: Exception, model: str, credentials: dict,
prompt_messages: list[PromptMessage], model_parameters: dict,
tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
stream: bool = True, user: Optional[str] = None) -> None:
"""
Invoke error callback
:param llm_instance: LLM instance
:param ex: exception
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param tools: tools for tool calling
:param stop: stop words
:param stream: is stream response
:param user: unique user id
"""
self.agent_callback.on_llm_error(
error=ex
)

@@ -1,49 +0,0 @@
from typing import List, cast
from core.entities.application_entities import ModelConfigEntity
from core.model_runtime.entities.message_entities import PromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
class CalcTokenMixin:
def get_message_rest_tokens(self, model_config: ModelConfigEntity, messages: List[PromptMessage], **kwargs) -> int:
"""
Got the rest tokens available for the model after excluding messages tokens and completion max tokens
:param model_config:
:param messages:
:return:
"""
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
max_tokens = 0
for parameter_rule in model_config.model_schema.parameter_rules:
if (parameter_rule.name == 'max_tokens'
or (parameter_rule.use_template and parameter_rule.use_template == 'max_tokens')):
max_tokens = (model_config.parameters.get(parameter_rule.name)
or model_config.parameters.get(parameter_rule.use_template)) or 0
if model_context_tokens is None:
return 0
if max_tokens is None:
max_tokens = 0
prompt_tokens = model_type_instance.get_num_tokens(
model_config.model,
model_config.credentials,
messages
)
rest_tokens = model_context_tokens - max_tokens - prompt_tokens
return rest_tokens
class ExceededLLMTokensLimitError(Exception):
pass
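The removed get_message_rest_tokens budgets prompts as context window minus configured max_tokens minus prompt tokens. As a made-up illustration: with a 4,096-token context window, max_tokens = 512 and a 1,000-token prompt, rest_tokens = 4096 - 512 - 1000 = 2584 tokens of headroom.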

Some files were not shown because too many files have changed in this diff.