Compare commits

..

361 Commits

SHA1 Message Date
4b5586375f feat: add node id in agent log 2025-01-09 17:20:53 +08:00
94a62f6b4e enhancement: remove unrequired deps 2025-01-09 17:06:38 +08:00
d76af08784 feat: add agent log icon 2025-01-09 16:55:17 +08:00
f748d6c7c4 fix: mypy issues 2025-01-09 16:53:30 +08:00
76e24d91c0 fix: migrations 2025-01-09 13:30:43 +08:00
5ce4ddc0ed fix: change the agent strategy category 2025-01-09 11:13:00 +08:00
491d641485 feat: add agent node log 2025-01-09 08:32:32 +08:00
172c5f19cc fix: formatter 2025-01-08 21:11:58 +08:00
b7d168ac59 fix: mypy linter 2025-01-08 21:11:42 +08:00
fb309462ad Merge branch 'main' into fix/chore-fix 2025-01-08 20:36:22 +08:00
b56d2b739b feat: add fc agent mode support 2025-01-08 07:41:17 +08:00
fb7b2c8ff3 fix: backwards invoke nodes 2025-01-07 20:52:25 +08:00
c3440a27fb fix 2025-01-07 18:59:13 +08:00
ff3d3f71fb fix: use host.docker.internal as the default plugin daemon middleware endpoint 2025-01-07 14:56:03 +08:00
9685b9a302 refactor: docker-compose-middleware.yaml 2025-01-07 14:44:08 +08:00
07c7b7b886 fix: remove 5002 port from docker mapping 2025-01-06 21:45:44 +08:00
8d75abc976 fix: correct fetch_from for customizable models (#12400) 2025-01-06 21:16:39 +08:00
aa6452b3bf fix: use session to manage AppSite 2025-01-06 21:12:50 +08:00
3799d40937 feat: support docker deployment for plugin 2025-01-06 20:28:50 +08:00
d2ff8a2381 fix: bugs 2025-01-06 14:59:40 +08:00
5f51a19de2 fix: allow meta to be None 2025-01-03 14:48:19 +08:00
71e0bfcbd8 fix: updating tool credentials does not works as expected 2025-01-03 14:09:17 +08:00
d815c74fc5 fix: ruff 2024-12-31 16:48:20 +08:00
107e44c8fb Merge branch 'main' into fix/chore-fix 2024-12-31 16:47:56 +08:00
adf7eea7fe fix: ruff 2024-12-31 16:40:26 +08:00
6e73ad2fc6 feat: plugin migrations 2024-12-31 16:38:02 +08:00
06412b37d3 fix: no attribbute identity 2024-12-30 21:14:24 +08:00
63665a5ff1 feat: add conversation_id to invoke 2024-12-30 13:41:54 +08:00
05a43e3e80 fix: rebaseing to main 2024-12-30 13:34:45 +08:00
83fdb42520 fix: variable message 2024-12-27 22:56:39 +08:00
cbf405beea fix: remigrate 2024-12-27 18:37:34 +08:00
af2aede783 feat: support precision to PluginParameter 2024-12-27 18:07:28 +08:00
e359ace633 fix: add agent logs 2024-12-27 17:55:41 +08:00
a5555f90c6 fix: models 2024-12-27 17:34:17 +08:00
78664c8903 Merge branch 'main' into fix/chore-fix 2024-12-27 17:33:58 +08:00
45070535bd fix: linter 2024-12-27 14:47:48 +08:00
048e8cf0d1 fix: remove validate credentials 2024-12-27 12:16:58 +08:00
598d208e54 fix: agent error handling 2024-12-27 12:09:39 +08:00
8102cee8df fix: unbound reference 2024-12-27 11:33:04 +08:00
c9eb9c14d7 fix: block call to flask_app 2024-12-26 22:58:34 +08:00
e77cd87842 fix: linter 2024-12-26 22:30:22 +08:00
ac5e3caebc optimize: migrate speed 2024-12-26 22:30:06 +08:00
23066a9ba8 feat: support extracting plugins into local files 2024-12-26 18:05:14 +08:00
0249f15609 fix: linter 2024-12-26 17:39:21 +08:00
2f523dd29f optimize: add friendly logs 2024-12-26 17:39:13 +08:00
b34d815883 feat: support auto generate and template 2024-12-26 17:25:56 +08:00
51cc63d9ce fix: undefined dereference to ApiTool 2024-12-26 14:12:43 +08:00
430af95b53 fix: linter 2024-12-26 14:07:29 +08:00
0164d1410a migrations for plugins 2024-12-26 14:07:12 +08:00
cbc5045b7a fix: ruff formatter 2024-12-26 13:23:56 +08:00
b980c07af8 fix: ruff formatter 2024-12-26 13:22:18 +08:00
e231cf2c48 fix: errors occrus during rebasing 2024-12-26 13:20:12 +08:00
80d8e47e42 fix: skip json transforming if error occurs 2024-12-25 18:23:31 +08:00
fee4dd7d7a fix: unused stream variable 2024-12-25 15:32:59 +08:00
00cf5f3841 fix: linters 2024-12-25 15:18:29 +08:00
9ee0c7a694 merge 2024-12-25 14:39:15 +08:00
6ee7ca1890 fix: add specific exceptions 2024-12-24 22:00:45 +08:00
f589397f25 fix: import Optional 2024-12-24 21:56:55 +08:00
ee080dddf9 fix: rebase 2024-12-24 21:48:49 +08:00
ee6841648c fix: migrations and imports recycle 2024-12-24 21:36:42 +08:00
5a57dad93c fix: linter 2024-12-24 21:29:24 +08:00
4199998c7e Merge branch 'main' into fix/chore-fix 2024-12-24 21:28:56 +08:00
39656f7f84 fix: linter and formatter 2024-12-24 18:38:34 +08:00
bf39e314d8 fix: add install count 2024-12-24 18:38:12 +08:00
8cc4c109d0 fix: return types of builtin tools 2024-12-19 01:09:15 +08:00
a1cdca02e3 fix: formatter 2024-12-19 01:02:44 +08:00
1b21d7513d fix: reduce model provider fetchs 2024-12-19 01:02:08 +08:00
d5c708c62b feat: add plugin_model_providers context 2024-12-19 00:50:46 +08:00
342d4060ff fix: add additional parameters to exists tools 2024-12-18 23:54:48 +08:00
05232d36f0 fix: add default values to WorkflowAppGenerator 2024-12-17 15:49:33 +08:00
636dde94c7 fix: migrations 2024-12-16 14:17:39 +08:00
75fe785d88 Merge branch 'main' into fix/chore-fix 2024-12-16 14:08:18 +08:00
a61da6cf95 fix: replace Enum with StrEnum 2024-12-16 13:40:02 +08:00
93c3699128 feat: add label to agent log 2024-12-15 18:12:29 +08:00
6357450a7a feat: support hidden parameters 2024-12-13 22:53:08 +08:00
6339706c68 fix: ruff reformatter 2024-12-13 19:51:09 +08:00
65a4cb769b refactor: tool entities 2024-12-13 19:50:54 +08:00
63206a7967 fix: incorrect use of node execution id 2024-12-13 00:05:57 +08:00
9a6f120e5c feat: support agent log event 2024-12-12 23:46:26 +08:00
dedc1b0c3a refactor: agent strategy parameter 2024-12-12 19:16:06 +08:00
46bb246ecc refactor: rename agent to agent strategy 2024-12-12 18:27:43 +08:00
3c628d0c26 refactor: rename agent to agent strategy 2024-12-12 18:27:31 +08:00
c2983ecbb7 fix: rename stream to streaming 2024-12-12 13:50:34 +08:00
527c1cf608 fix: deduplicate provider id 2024-12-10 02:21:46 +08:00
93786f516c apply ruff 2024-12-10 00:22:54 +08:00
a175d6b2d7 feat: agent management 2024-12-10 00:22:41 +08:00
296fd82bbf fix: agent node 2024-12-09 23:26:16 +08:00
4ccd571364 fix: ruff 2024-12-09 23:02:25 +08:00
ae72514cb4 feat: support agent node 2024-12-09 23:02:11 +08:00
16b49ac436 Merge branch 'main' into fix/chore-fix 2024-12-09 16:08:19 +08:00
c377eb8c28 fix: unbound variable in tool node 2024-12-09 15:43:01 +08:00
337eff2b79 Merge branch 'main' into fix/chore-fix 2024-12-06 16:45:25 +08:00
b7ac287fec fix: use default_factory for list fields 2024-12-05 20:57:30 +08:00
c1a85b0208 fix: add default value to plugin permission field 2024-12-05 14:48:34 +08:00
01efdee1dd fix: support other file types for Tool 2024-12-04 19:26:01 +08:00
0af9c4fd9d chore: reformat 2024-12-04 19:02:28 +08:00
ee38bd8817 refactor: check dependencies 2024-12-04 19:01:54 +08:00
86291c13e4 Merge branch 'main' into fix/chore-fix 2024-12-04 15:34:39 +08:00
7679a57f18 fix: agent type errors 2024-12-03 19:44:57 +08:00
dcf19549cb feat: move audio and webscraper back to dify 2024-12-03 19:27:57 +08:00
574a6c1ded fix: add extension, filename and size to PluginFileEntity 2024-12-03 16:51:51 +08:00
c34877aecf fix: update tool provider credentials 2024-12-03 16:28:36 +08:00
632b2bac2a fix: invoke-email 2024-12-02 21:59:52 +08:00
77a62f33b3 fix: Lookup errors for contextvars used in ToolManager 2024-12-02 21:25:47 +08:00
ad899844a1 fix: workflow loads tool provider icon 2024-12-02 21:08:36 +08:00
b10d6051ba fix: summary and create_file_by_url 2024-12-02 16:51:37 +08:00
fb44cd87e7 fix: image url message 2024-11-29 18:20:36 +08:00
89af726985 fix: cot agent 2024-11-29 16:48:39 +08:00
6f2d5ff099 fix: add tenant_id to invoke tts 2024-11-29 15:59:07 +08:00
687455ca31 fix: tool file id 2024-11-29 14:09:34 +08:00
8c5928da2f fix: unify error handling 2024-11-28 20:44:06 +08:00
772009115d fix: keep process_data with None if not 2024-11-28 19:35:30 +08:00
0452dfd029 fix: missing tool invoke messages 2024-11-28 19:09:04 +08:00
eead6abe85 fix: tool image url response 2024-11-28 18:23:28 +08:00
5c6d919a4a fix: handle detailed error type 2024-11-28 17:12:29 +08:00
e39eddab03 fix: change to use convert_stream_full_response 2024-11-27 14:48:44 +08:00
db726e02a0 feat: support multi token count 2024-11-26 18:59:03 +08:00
e4b8220bc2 Merge branch 'main' into fix/chore-fix 2024-11-26 18:02:41 +08:00
08cfcb453c fix: missing marshal fields of leaked+dependencies 2024-11-26 13:59:52 +08:00
992e1eedde fix: export agent dsl 2024-11-25 23:36:19 +08:00
c2ce8e638e fix: deleted_tools 2024-11-25 23:22:17 +08:00
ba3659a792 feat: support delete all install tasks 2024-11-25 17:11:41 +08:00
965fabd578 fix: rename dependencies 2024-11-25 16:57:38 +08:00
accbbae755 cleanup: remove get_interates 2024-11-25 16:47:49 +08:00
49bd1a7a49 fix: riff 2024-11-25 16:44:08 +08:00
5ff9cee326 Merge branch 'main' into fix/chore-fix 2024-11-25 15:37:19 +08:00
200f9af5d8 optimize error messages 2024-11-22 20:04:20 +08:00
1443fd6739 optimize: indexing-estimate 2024-11-22 19:39:07 +08:00
e63ae36665 fix 2024-11-22 18:19:02 +08:00
cfa7c89dfe refactor: text-embedding interfaces to returns list[int] 2024-11-22 18:09:33 +08:00
a6835ac64d fix: add detailed error messages 2024-11-21 17:00:00 +08:00
a700b49461 fix: migration 2024-11-21 13:55:08 +08:00
22df86fe8a fix: ruff 2024-11-21 13:53:08 +08:00
24734009b9 Merge branch 'main' into fix/chore-fix 2024-11-21 13:52:28 +08:00
959d060a44 fix: remove signature verify 2024-11-21 00:30:28 +08:00
4492295683 fix: remove plugin files 2024-11-20 18:12:12 +08:00
88fac0d898 fix: add tenant_id to plugin upload files url 2024-11-19 16:50:14 +08:00
8b30099672 fix: convert backwards invocation into BaseBackwardsResponse 2024-11-19 14:03:40 +08:00
97a3727962 fix: optimize DEFAULT-USER 2024-11-18 17:21:17 +08:00
2cb640de15 refactor: load tools cache 2024-11-15 19:53:50 +08:00
fb4ee813c7 fix: agent 2024-11-15 18:37:33 +08:00
6300e506fb fix: rag 2024-11-15 15:54:14 +08:00
a0543ab8fb Merge branch 'main' into fix/chore-fix 2024-11-15 15:43:32 +08:00
634cb6233e feat: sypport batch fetch plugin installations 2024-11-15 00:47:25 +08:00
db68ae4a73 feat: support upload bundle 2024-11-14 22:58:57 +08:00
d25e79e794 feat: support uploading images through plugin 2024-11-14 18:32:51 +08:00
183b943803 feat: support check dependencies through url 2024-11-13 15:19:20 +08:00
5828abcd62 fix: uses to check if the tools are already loaded 2024-11-12 21:43:19 +08:00
56bd0dedfe fix: incorrect paths to upgrade plugins 2024-11-12 20:48:28 +08:00
f6136427a4 feat: export dsl with dependencies 2024-11-12 19:50:56 +08:00
21fd58caf9 Merge branch 'fix/chore-fix' of github.com:langgenius/dify into fix/chore-fix 2024-11-12 18:53:45 +08:00
9a69d03fbe feat: add icon and labels to plugin install task 2024-11-11 20:59:31 +08:00
1d2118fc5d fix: hosted moderation 2024-11-11 20:31:11 +08:00
bc0724b499 chore: fix typo 2024-11-11 19:50:39 +08:00
5cdbfe2f41 Merge branch 'main' into fix/chore-fix 2024-11-11 14:00:53 +08:00
5fd82084f9 fix: avoid empty plugin entity 2024-11-11 13:30:11 +08:00
f0637ba332 fix: create basic app causing internal error when default model is not exist 2024-11-08 23:09:52 +08:00
115c9486c3 fix hosted issues 2024-11-08 19:23:49 +08:00
8b5231b7ee fix: invalid key of marketplace response 2024-11-08 17:27:16 +08:00
38cae29757 fix: wrap marketplace apis with try catch 2024-11-08 17:20:54 +08:00
7a2b2a04c9 Merge branch 'main' into fix/chore-fix 2024-11-08 13:47:24 +08:00
fe677cc5f9 Merge branch 'main' into fix/chore-fix 2024-11-07 17:06:29 +08:00
28c9ec3f4f feat: support fetch tool provider info 2024-11-06 17:30:50 +08:00
6baa98f166 feat: support app-selector, model-selector and tool-selector as parameters 2024-11-06 17:13:05 +08:00
e9d69f020a feat: cast files into correct type while invoking 2024-11-05 20:30:13 +08:00
3c89d45a2d fix: iteration none output error (#10295) 2024-11-05 20:30:13 +08:00
baab81714e fix(http_request): improve parameter initialization and reorganize tests (#10297) 2024-11-05 20:30:13 +08:00
507bb3549a fix typo: writeOpner to writeOpener (#10290) 2024-11-05 20:30:13 +08:00
2d1e5fb4e0 fix: handle KeyError when accessing rules in CleanProcessor.clean (#10258) 2024-11-05 20:30:12 +08:00
b9198639e2 fix: borken faq url in CONTRIBUTING.md (#10275) 2024-11-05 20:30:12 +08:00
43c7739b88 feat: add xAI model provider (#10272) 2024-11-05 20:30:12 +08:00
f65d577f54 fix(model_runtime): fix wrong max_tokens for Claude 3.5 Haiku on Amazon Bedrock (#10286) 2024-11-05 20:30:00 +08:00
b88145096f feat(model): add validation for custom disclaimer length (#10287) 2024-11-05 20:30:00 +08:00
33219e850a fix(node): correct file property name in function switch (#10284) 2024-11-05 20:30:00 +08:00
3040d538f7 refactor the logic of refreshing access_token (#10068) 2024-11-05 20:30:00 +08:00
4e1af81e11 chore: translate i18n files (#10273)
Co-authored-by: laipz8200 <16485841+laipz8200@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-11-05 20:30:00 +08:00
56e19fd8f5 Updates: Add mplfonts library for customizing matplotlib fonts and Va… (#9903) 2024-11-05 20:30:00 +08:00
d330d31ee5 feat: Iteration node support parallel mode (#9493) 2024-11-05 20:29:59 +08:00
0858108423 fix(workflow): handle else condition branch addition error in if-else node (#10257) 2024-11-05 20:29:59 +08:00
2cd976846a feat(document_extractor): support tool file in document extractor (#10217) 2024-11-05 20:29:59 +08:00
5d2c88ef59 feat: support Claude 3.5 Haiku on Amazon Bedrock (#10265) 2024-11-05 20:29:59 +08:00
fe3cde973e refactor(parameter_extractor): implement custom error classes (#10260) 2024-11-05 20:29:59 +08:00
794f495ef2 fix(validation): allow to use 0 in the inputs form (#10255) 2024-11-05 20:29:32 +08:00
0dda682033 chore(Dockerfile): upgrade zlib arm64 (#10244) 2024-11-05 20:29:31 +08:00
01d8d10f1c Using a dedicated interface to obtain the token credential for the gitee.ai provider (#10243) 2024-11-05 20:29:12 +08:00
c711c5e36e feat(workflow): add configurable workflow file upload limit (#10176)
Co-authored-by: JzoNg <jzongcode@gmail.com>
2024-11-05 20:29:09 +08:00
1e27557865 fix: missing working directory parameter in script (#10226) 2024-11-05 20:28:29 +08:00
2d9632d8b9 refactor(list_operator): replace ValueError with InvalidKeyError (#10222) 2024-11-05 20:28:29 +08:00
7e42de1e7b refactor(workflow): introduce specific error handling for LLM nodes (#10221) 2024-11-05 20:28:29 +08:00
bd674d27be refactor(http_request): add custom exception handling for HTTP request nodes (#10219) 2024-11-05 20:28:29 +08:00
5735761920 refactor(workflow): introduce specific exceptions for code validation (#10218) 2024-11-05 20:28:29 +08:00
405b704f02 chore(llm_node): remove unnecessary type ignore for context assignment (#10216) 2024-11-05 20:28:29 +08:00
f38abaaa6a fix the ssrf of docx file extractor external images (#10237) 2024-11-05 20:28:28 +08:00
c8a5fee622 Modify translation (#10213) 2024-11-05 20:28:28 +08:00
fe1c0ac602 Add Lindorm as a VDB choice (#10202)
Co-authored-by: jiangzhijie <jiangzhijie.jzj@alibaba-inc.com>
2024-11-05 20:28:28 +08:00
e79c3e4531 Fix/10199 application error a client side exception has occurred see the browser console for more information (#10211) 2024-11-05 20:28:28 +08:00
3ea3df7189 refactor(validation): improve input validation logic (#10175) 2024-11-05 20:28:28 +08:00
b01e7d778e chore(list_operator): refine exception handling for error specificity (#10206) 2024-11-05 20:28:28 +08:00
7c45859594 fix(document_extractor): update base exception class (#10208) 2024-11-05 20:28:27 +08:00
aa9fd76072 Feat : add LLM model indicator in prompt generator (#10187) 2024-11-05 20:28:27 +08:00
e7d947379f chore : code generator preview hint (#10188) 2024-11-05 20:28:17 +08:00
8cd386f2c1 fix: webapp upload file (#10195) 2024-11-05 20:28:17 +08:00
987e1b9ced fix(api): replace current_user with end_user in file upload (#10194) 2024-11-05 20:28:17 +08:00
81a77d0623 feat(document_extractor): integrate unstructured API for PPTX extraction (#10180) 2024-11-05 20:28:17 +08:00
ac1f93e3d5 [fix] fix the bug that modify document name not effective (#10154) 2024-11-05 20:27:58 +08:00
0d5c0b4fe4 fix(workflow model): ensure consistent timestamp updating (#10172) 2024-11-05 20:27:57 +08:00
d1c480a7d8 fix: Cannot find declaration to go to CLEAN_DAY_SETTING (#10157)
Co-authored-by: 刘江波 <liujiangbo1@xiaomi.com>
2024-11-05 20:27:57 +08:00
007b561e32 feat: add gpustack model provider (#10158) 2024-11-05 20:27:57 +08:00
c100f24f7d compatible model daemon request exception 2024-11-01 19:20:26 -07:00
d92cb994a9 fix voice list 2024-11-01 01:56:15 -07:00
413326905e rebase migrations 2024-11-01 16:55:07 +08:00
5605ff9803 fix voice list 2024-11-01 16:42:32 +08:00
84b7a4607a fix: setup_required 2024-11-01 16:28:17 +08:00
10cc4e758c Merge branch 'main' into fix/chore-fix 2024-11-01 16:23:04 +08:00
8070be9b76 fix: missing 'follow_redirects' argument while download plugin packages 2024-11-01 15:38:45 +08:00
f1f1baae9c feat: support plugin tags 2024-11-01 15:07:11 +08:00
f20c9ef763 fix 2024-11-01 00:01:05 -07:00
f798add31c compatible with original provider name 2024-11-01 00:00:53 -07:00
8c2dbe876f fix: custom tool parser 2024-11-01 14:26:56 +08:00
6fd0a55b00 fix: correct dockerfile dependencies 2024-10-31 15:32:25 +08:00
bb58f5c6e5 fix: avoid None to be assigned to WorkflowToolProviderController as provider id 2024-10-31 15:18:45 +08:00
18edeb8e0a integrate model provider with plugin daemon 2024-10-30 18:56:52 -07:00
459cb9dd72 fix: transform plugin icon incorrect 2024-10-30 16:09:17 +08:00
f9e2c738b0 fix: permission change api should not wraps a permission decorator 2024-10-29 17:16:32 +08:00
739e15f88b feat: support tool plugin id 2024-10-29 12:32:11 +08:00
5bf86ff66d feat: support latest package identifier 2024-10-28 15:56:15 +08:00
c657378d06 feat: support plugin permission management 2024-10-28 15:54:34 +08:00
685e8cdc7d refactor: document segment query 2024-10-28 15:07:33 +08:00
d36dece0af feat: support upgrade interfaces 2024-10-25 18:56:38 +08:00
5f61aa85db feat: add latest version 2024-10-25 13:52:33 +08:00
e5837b88e0 fix: add subpath 2024-10-25 13:26:32 +08:00
ffdc6f5c60 feat: support remove single item from installation task 2024-10-25 13:22:37 +08:00
99c8f364ae fix: temp fix for empty redis password 2024-10-24 13:20:26 +08:00
a0a1243c90 cleanup: remove hacked code 2024-10-22 17:56:13 +08:00
b916b4064a Merge remote-tracking branch 'origin/fix/tool-use-file' into fix/chore-fix 2024-10-22 17:47:01 +08:00
dea2962a79 Merge main into feat/plugin 2024-10-22 17:35:11 +08:00
1450e5d5cb feat: add supports for multimodal 2024-10-22 17:26:00 +08:00
43a2d4335b fix: tool use file caused error 2024-10-22 16:51:11 +08:00
11270a7ef2 Migrate to DeclarativeBaseModel 2024-10-21 20:38:27 +08:00
53e1b45d40 fix: remove .query reference of db.Model 2024-10-21 20:23:27 +08:00
bedbd658fe Merge main into fix/chore-fix 2024-10-21 20:01:49 +08:00
7b62b5578e refactor: add manifest into upload interfaces 2024-10-21 18:48:03 +08:00
ccbe42eb5f feat: add plugin id into tool api entities 2024-10-17 20:46:29 +08:00
45f8651a3d feat: support backwards invoke summary 2024-10-17 19:44:30 +08:00
7754431a34 feat: support plugin max package size 2024-10-17 18:44:16 +08:00
fa7215cfea Merge branch 'main' into fix/chore-fix 2024-10-17 13:46:43 +08:00
678c89891a feat: support verified 2024-10-17 13:40:33 +08:00
beebcbd962 feat: add description 2024-10-17 12:59:11 +08:00
8495ed3348 add conversation id, app id and message id into plugin session 2024-10-16 15:10:50 +08:00
31cca4a849 fix: add marketplace switch 2024-10-16 14:47:48 +08:00
43ffccc8fd fix: install plugins 2024-10-16 14:02:05 +08:00
a81293cf5a feat: add category for plugins 2024-10-16 13:03:50 +08:00
276701e1b7 refactor: plugin installation 2024-10-14 17:52:29 +08:00
8e1cf3233c fix: missing openai moderation 2024-10-14 16:42:36 +08:00
dd551e6ca8 Ruff: reformatter 2024-10-14 16:25:51 +08:00
ae1eeb9b2a Mergin main into fix/chore-fix 2024-10-14 16:22:12 +08:00
b58f8dd7b4 feat: download pkg from marketplace (#9184) 2024-10-11 02:00:02 +08:00
118fa66567 feat: backwards invoke tools 2024-10-10 18:09:06 +08:00
699d41deec fix: add source to plugin entity 2024-10-10 16:47:25 +08:00
dd0462c1dc feat: support two install source 2024-10-10 16:35:36 +08:00
a470e0e60e fix: missing detailed paths of endpoints 2024-10-10 00:12:46 +08:00
2622159763 feat: support verify signature 2024-10-09 23:13:01 +08:00
dfaf639790 feat: support endpoint url template 2024-10-09 22:58:36 +08:00
ae96f66a08 feat: support list endpoints for single plugin, fix: failed to clear endpoint credentials 2024-10-09 22:33:18 +08:00
570b7d18ac fix: endpoint apis 2024-10-08 23:48:38 +08:00
a9c21ef929 feat: uninstall plugins 2024-10-08 22:38:33 +08:00
e27a03ae15 feat: support install plugin 2024-10-08 21:28:59 +08:00
56b7853afe feat: compat tool provider credentials to updated data 2024-09-30 23:22:23 +08:00
e12f4009d3 feat: optimize icon url 2024-09-30 17:46:40 +08:00
6dfc31a542 refactor: credentials schemas to array 2024-09-30 17:39:13 +08:00
c9f80b46a1 fix: add endpoint name 2024-09-30 16:57:09 +08:00
0025b27200 fix: tool invocation logs 2024-09-29 21:09:01 +08:00
0dd05d7b6d feat: tool output schema 2024-09-29 20:58:07 +08:00
7c83d5ce76 feat: add dockerignore items 2024-09-29 20:16:21 +08:00
a57f60a6e0 feat: remove unused codes 2024-09-29 19:47:47 +08:00
2f36692bf9 fix: get tool runtime parameters 2024-09-29 19:37:03 +08:00
bcdb407be8 feat: remove unused codes 2024-09-29 18:24:33 +08:00
d4e007f9db feat: support get tool runtime parameters 2024-09-29 18:19:03 +08:00
8563155d1b feat: remove unused codes 2024-09-29 18:18:01 +08:00
8236373498 feat: remove unused codes 2024-09-29 18:16:21 +08:00
196bfeaaf4 Merge branch 'main' into fix/chore-fix 2024-09-29 17:14:10 +08:00
957ab093c9 enhancement: reduce requests to plugin daemon 2024-09-29 17:07:40 +08:00
e9e5c8806a refactor: using DeclarativeBase as parent class of models, refactored tools 2024-09-29 17:00:58 +08:00
c8bc3892b3 refactor: invoke tool from dify 2024-09-29 14:44:22 +08:00
735e57b73a fix: transform generic error message into correct type 2024-09-29 13:46:16 +08:00
635a53ea38 fix: import undefined types 2024-09-29 13:23:14 +08:00
7b76b1ff82 Merge fix/chore-fix into fix/chore-fix 2024-09-29 13:12:22 +08:00
47c8824be6 feat: move model request to plugin daemon 2024-09-29 00:15:17 +08:00
1c3213184e feat: move model request to plugin daemon 2024-09-29 00:15:14 +08:00
d9cced8419 Merge branch 'main' into fix/chore-fix 2024-09-28 20:18:28 +08:00
c3359a9291 refactor: using plugin id to dispatch request instead 2024-09-27 21:48:48 +08:00
2da32e49d0 fix: tests 2024-09-26 17:51:13 +08:00
1837692a66 fix: sse error message 2024-09-26 17:40:27 +08:00
5dcd25a613 fix: missing error message 2024-09-26 17:22:39 +08:00
507fff0259 fix: tts file was deleted before invocation 2024-09-26 15:47:16 +08:00
0ad9dbea63 feat: backwards invoke model 2024-09-26 15:38:22 +08:00
4c28034224 refactor: encryption 2024-09-26 14:51:10 +08:00
1d575524c3 fix: missing user id 2024-09-26 14:20:05 +08:00
dc255cc154 Merge main into feat/plugin 2024-09-26 12:59:06 +08:00
ea497f828f feat: endpoint management 2024-09-26 12:49:00 +08:00
153dc5b3f3 feat: endpoint apis 2024-09-26 10:26:45 +08:00
a91951b374 feat: invoke node 2024-09-24 20:15:13 +08:00
68c10a1672 feat: add backwards invoke node api 2024-09-24 18:03:48 +08:00
592f85f7a9 formatter 2024-09-24 16:40:42 +08:00
cda9f6ec6b Merge main into fix/chore-fix 2024-09-24 16:38:38 +08:00
64706c709c fix 2024-09-24 16:35:01 +08:00
9722e6bcb1 fix: allow duplicate tool providers 2024-09-24 16:33:19 +08:00
1907d791e1 enhance: add gzip 2024-09-24 16:15:50 +08:00
fb3a701c86 fix: stream with empty line 2024-09-24 16:02:01 +08:00
947bfdc807 feat: validate credentials 2024-09-23 21:13:02 +08:00
7a3e756020 refactor: list tools 2024-09-23 18:06:16 +08:00
435e71eb60 refactor 2024-09-23 13:09:46 +08:00
91cb80f795 refactor: tool 2024-09-20 23:48:48 +08:00
3c1d32e3ac feat: uninstall plugin 2024-09-20 21:50:44 +08:00
eef79a5196 feat: support install plugin 2024-09-20 21:35:19 +08:00
2223dfb266 feat: get debugging key 2024-09-20 15:08:39 +08:00
9693b5ad0c feat: debugging key 2024-09-20 14:43:01 +08:00
d4bf575d0a impl: basic plugin manager 2024-09-20 13:55:09 +08:00
73ce692e24 feat: add inner api key 2024-09-20 13:32:11 +08:00
661392eaef refactor: tool 2024-09-20 02:25:14 +08:00
c472ea6c67 fix: pydantic 2024-09-19 18:02:24 +08:00
4eaba3049a Merge main 2024-09-19 17:54:08 +08:00
00d1c45518 Merge main 2024-09-14 02:47:01 +08:00
87c746f6bb tmp 2024-09-14 01:26:22 +08:00
70c001436e support variable 2024-09-10 18:13:33 +08:00
cf73374c1b refactor: stream output 2024-09-10 17:16:55 +08:00
b0d53c0ac4 Merge main 2024-09-10 15:42:59 +08:00
9c7bcd5abc Merge main 2024-09-10 14:05:20 +08:00
b7c5abc5dd reformatter 2024-08-30 23:29:04 +08:00
de01ca8d55 feat: inner api encrypt 2024-08-30 21:25:58 +08:00
60e75dc748 fix: linter 2024-08-30 21:11:39 +08:00
279dee485d feat: type 2024-08-30 21:10:19 +08:00
db8bf2a85e Merge branch 'main' into feat/plugin 2024-08-30 18:21:22 +08:00
46ba16fe90 fix: reformatter 2024-08-30 18:21:03 +08:00
886a160115 fix: invoke tool streamingly 2024-08-30 18:11:38 +08:00
cf4e9f317e refactor: tool models 2024-08-30 15:55:10 +08:00
1fa3b9cfd8 refactor tools 2024-08-30 14:23:14 +08:00
50a5cfe56a fix: endpoint using default user 2024-08-29 21:48:20 +08:00
ece82b87bf feat: invoke app 2024-08-29 21:14:23 +08:00
12ea085e22 feat: implement invoke app args 2024-08-29 20:50:36 +08:00
41ed2e0cc2 feat: backwards invoke app 2024-08-29 20:17:17 +08:00
113ff27d07 fix: types 2024-08-29 20:06:14 +08:00
ec711d094d refactor: enforce return object in app generator 2024-08-29 19:49:57 +08:00
a073de44e9 Merge branch 'main' into feat/plugin 2024-08-29 17:08:44 +08:00
6ce02b07d3 feat: add type annatation 2024-08-29 14:23:19 +08:00
f47712beae feat: add type annatation 2024-08-29 14:18:00 +08:00
4a8d3c54ca fix: workflow as tool type 2024-08-29 14:09:47 +08:00
c8b0160ea9 fix: tool type 2024-08-29 14:06:10 +08:00
531ffaec4f fix: tool node 2024-08-29 13:56:48 +08:00
c28998a6f0 refactor: tool message transformer 2024-08-29 13:42:31 +08:00
4b4741f7ed Merge main into feat/plugin 2024-08-29 13:09:13 +08:00
25b8a512bf feat: invoke app 2024-08-29 12:55:00 +08:00
02d26818ad Merge branch 'main' into feat/plugin 2024-07-31 14:51:36 +08:00
31e8b134d1 feat: backwards invoke llm 2024-07-29 22:08:14 +08:00
d52476c1c9 feat: support backwards invocation 2024-07-29 18:57:34 +08:00
f29b44acd8 feat: support plugin inner api 2024-07-29 16:40:04 +08:00
ed7fcc5f7d Merge branch 'main' into feat/plugin 2024-07-29 16:07:19 +08:00
c6f34f5c17 Merge branch 'main' into feat/plugin 2024-07-15 16:03:11 +08:00
e1db77eec2 fix 2024-07-15 16:00:11 +08:00
563d81277b refactor: tool response to generator 2024-07-09 15:37:56 +08:00
364df36ac4 feat: plugin call dify 2024-07-08 22:37:20 +08:00
1421 changed files with 25963 additions and 68561 deletions

View File

@@ -1,12 +1,11 @@
#!/bin/bash
npm add -g pnpm@9.12.2
cd web && pnpm install
cd web && npm install
pipx install poetry
echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc

View File

@@ -8,7 +8,7 @@ inputs:
poetry-version:
description: Poetry version to set up
required: true
default: '2.0.1'
default: '1.8.4'
poetry-lockfile:
description: Path to the Poetry lockfile to restore cache from
required: true

View File

@@ -26,9 +26,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry
@@ -45,17 +42,25 @@ jobs:
run: poetry install -C api --with dev
- name: Check dependencies in pyproject.toml
run: poetry run -P api bash dev/pytest/pytest_artifacts.sh
run: poetry run -C api bash dev/pytest/pytest_artifacts.sh
- name: Run Unit tests
run: poetry run -P api bash dev/pytest/pytest_unit_tests.sh
run: poetry run -C api bash dev/pytest/pytest_unit_tests.sh
- name: Run ModelRuntime
run: poetry run -C api bash dev/pytest/pytest_model_runtime.sh
- name: Run dify config tests
run: poetry run -P api python dev/pytest/pytest_config_tests.py
run: poetry run -C api python dev/pytest/pytest_config_tests.py
- name: Run Tool
run: poetry run -C api bash dev/pytest/pytest_tools.sh
- name: Run mypy
run: |
poetry run -C api python -m mypy --install-types --non-interactive .
pushd api
poetry run python -m mypy --install-types --non-interactive .
popd
- name: Set up dotenvs
run: |
@@ -75,4 +80,4 @@ jobs:
ssrf_proxy
- name: Run Workflow
run: poetry run -P api bash dev/pytest/pytest_workflow.sh
run: poetry run -C api bash dev/pytest/pytest_workflow.sh

View File

@@ -5,7 +5,6 @@ on:
branches:
- "main"
- "deploy/dev"
- "dev/plugin-deploy"
release:
types: [published]
@@ -80,12 +79,10 @@ jobs:
cache-to: type=gha,mode=max,scope=${{ matrix.service_name }}
- name: Export digest
env:
DIGEST: ${{ steps.build.outputs.digest }}
run: |
mkdir -p /tmp/digests
sanitized_digest=${DIGEST#sha256:}
touch "/tmp/digests/${sanitized_digest}"
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
@@ -135,15 +132,10 @@ jobs:
- name: Create manifest list and push
working-directory: /tmp/digests
env:
IMAGE_NAME: ${{ env[matrix.image_name_env] }}
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "$IMAGE_NAME@sha256:%s " *)
$(printf '${{ env[matrix.image_name_env] }}@sha256:%s ' *)
- name: Inspect image
env:
IMAGE_NAME: ${{ env[matrix.image_name_env] }}
IMAGE_VERSION: ${{ steps.meta.outputs.version }}
run: |
docker buildx imagetools inspect "$IMAGE_NAME:$IMAGE_VERSION"
docker buildx imagetools inspect ${{ env[matrix.image_name_env] }}:${{ steps.meta.outputs.version }}

View File

@@ -4,7 +4,6 @@ on:
pull_request:
branches:
- main
- plugins/beta
paths:
- api/migrations/**
- .github/workflows/db-migration-test.yml
@@ -20,9 +19,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python
uses: ./.github/actions/setup-poetry

View File

@@ -1,47 +0,0 @@
name: Build docker image
on:
pull_request:
branches:
- "main"
paths:
- api/Dockerfile
- web/Dockerfile
concurrency:
group: docker-build-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-docker:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- service_name: "api-amd64"
platform: linux/amd64
context: "api"
- service_name: "api-arm64"
platform: linux/arm64
context: "api"
- service_name: "web-amd64"
platform: linux/amd64
context: "web"
- service_name: "web-arm64"
platform: linux/arm64
context: "web"
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image
uses: docker/build-push-action@v6
with:
push: false
context: "{{defaultContext}}:${{ matrix.context }}"
platforms: ${{ matrix.platform }}
cache-from: type=gha
cache-to: type=gha,mode=max

View File

@@ -9,6 +9,6 @@ yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compos
yq eval '.services["elasticsearch"].ports += ["9200:9200"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["8091-8096:8091-8096"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["11210:11210"]' -i docker/docker-compose.yaml
yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/tidb/docker-compose.yaml
yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/docker-compose.yaml
echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase"

View File

@@ -17,9 +17,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files
@@ -41,12 +38,12 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
run: |
poetry run -C api ruff --version
poetry run -C api ruff check ./
poetry run -C api ruff format --check ./
poetry run -C api ruff check ./api
poetry run -C api ruff format --check ./api
- name: Dotenv check
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -P api dotenv-linter ./api/.env.example ./web/.env.example
run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
- name: Lint hints
if: failure()
@@ -62,9 +59,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files
@@ -72,58 +66,22 @@ jobs:
with:
files: web/**
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Setup NodeJS
uses: actions/setup-node@v4
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 20
cache: pnpm
cache: yarn
cache-dependency-path: ./web/package.json
- name: Web dependencies
if: steps.changed-files.outputs.any_changed == 'true'
run: pnpm install --frozen-lockfile
run: yarn install --frozen-lockfile
- name: Web style check
if: steps.changed-files.outputs.any_changed == 'true'
run: pnpm run lint
run: yarn run lint
docker-compose-template:
name: Docker Compose Template
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v45
with:
files: |
docker/generate_docker_compose
docker/.env.example
docker/docker-compose-template.yaml
docker/docker-compose.yaml
- name: Generate Docker Compose
if: steps.changed-files.outputs.any_changed == 'true'
run: |
cd docker
./generate_docker_compose
- name: Check for changes
if: steps.changed-files.outputs.any_changed == 'true'
run: git diff --exit-code
superlinter:
name: SuperLinter
@@ -132,9 +90,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files

View File

@@ -26,19 +26,16 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: ''
cache-dependency-path: 'pnpm-lock.yaml'
cache-dependency-path: 'yarn.lock'
- name: Install Dependencies
run: pnpm install --frozen-lockfile
run: yarn install
- name: Test
run: pnpm test
run: yarn test

View File

@@ -16,7 +16,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 2 # last 2 commits
persist-credentials: false
- name: Check for file changes in i18n/en-US
id: check_files
@@ -39,11 +38,11 @@
- name: Install dependencies
if: env.FILES_CHANGED == 'true'
run: pnpm install --frozen-lockfile
run: yarn install --frozen-lockfile
- name: Run npm script
if: env.FILES_CHANGED == 'true'
run: pnpm run auto-gen-i18n
run: npm run auto-gen-i18n
- name: Create Pull Request
if: env.FILES_CHANGED == 'true'

View File

@@ -28,9 +28,6 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry
@@ -54,15 +51,7 @@
- name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh
- name: Set up Vector Store (TiDB)
uses: hoverkraft-tech/compose-action@v2.0.2
with:
compose-file: docker/tidb/docker-compose.yaml
services: |
tidb
tiflash
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
- name: Set up Vector Stores (TiDB, Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.2
with:
compose-file: |
@@ -78,9 +67,7 @@
pgvector
chroma
elasticsearch
- name: Check TiDB Ready
run: poetry run -P api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
tidb
- name: Test Vector Stores
run: poetry run -P api bash dev/pytest/pytest_vdb.sh
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

View File

@@ -22,34 +22,25 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v45
with:
files: web/**
# to run pnpm, should install package canvas, but it always install failed on amd64 under ubuntu-latest
# - name: Install pnpm
# uses: pnpm/action-setup@v4
# with:
# version: 10
# run_install: false
# - name: Setup Node.js
# uses: actions/setup-node@v4
# if: steps.changed-files.outputs.any_changed == 'true'
# with:
# node-version: 20
# cache: pnpm
# cache-dependency-path: ./web/package.json
- name: Setup Node.js
uses: actions/setup-node@v4
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 20
cache: yarn
cache-dependency-path: ./web/package.json
# - name: Install dependencies
# if: steps.changed-files.outputs.any_changed == 'true'
# run: pnpm install --frozen-lockfile
- name: Install dependencies
if: steps.changed-files.outputs.any_changed == 'true'
run: yarn install --frozen-lockfile
# - name: Run tests
# if: steps.changed-files.outputs.any_changed == 'true'
# run: pnpm test
- name: Run tests
if: steps.changed-files.outputs.any_changed == 'true'
run: yarn test

.gitignore (vendored) · 7 changes
View File

@@ -163,7 +163,6 @@ docker/volumes/db/data/*
docker/volumes/redis/data/*
docker/volumes/weaviate/*
docker/volumes/qdrant/*
docker/tidb/volumes/*
docker/volumes/etcd/*
docker/volumes/minio/*
docker/volumes/milvus/*
@@ -195,9 +194,3 @@ api/.vscode
.idea/
.vscode
# pnpm
/.pnpm-store
# plugin migrate
plugins.jsonl

View File

@@ -73,7 +73,7 @@ Dify requires the following dependencies to build, make sure they're installed o
* [Docker](https://www.docker.com/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Node.js v18.x (LTS)](http://nodejs.org)
* [pnpm](https://pnpm.io/)
* [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
* [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. Installations

View File

@@ -70,7 +70,7 @@ Dify 依赖以下工具和库：
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. 安装

View File

@@ -73,7 +73,7 @@ Dify を構築するには次の依存関係が必要です。それらがシス
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. インストール

View File

@@ -72,7 +72,7 @@ Dify yêu cầu các phụ thuộc sau để build, hãy đảm bảo chúng đ
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/)
- [npm](https://www.npmjs.com/) phiên bản 8.x.x hoặc [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) phiên bản 3.11.x hoặc 3.12.x
### 4. Cài đặt

LICENSE · 23 changes
View File

@@ -1,12 +1,12 @@
# Open Source License
Dify is licensed under a modified version of the Apache License 2.0, with the following additional conditions:
Dify is licensed under the Apache License 2.0, with the following additional conditions:
1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer:
a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
- Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend.
- Frontend Definition: For the purposes of this license, the "frontend" of Dify includes all components located in the `web/` directory when running Dify from the raw source code, or the "web" image when running Dify with Docker.
@@ -21,4 +21,19 @@ Apart from the specific conditions mentioned above, all other rights and restric
The interactive design of this product is protected by appearance patent.
© 2025 LangGenius, Inc.
© 2024 LangGenius, Inc.
----------
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -25,9 +25,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@@ -108,72 +105,6 @@ Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-host
**7. Backend-as-a-Service**:
All of Dify's offerings come with corresponding APIs, so you could effortlessly integrate Dify into your own business logic.
## Feature Comparison
<table style="width: 100%;">
<tr>
<th align="center">Feature</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">Programming Approach</td>
<td align="center">API + App-oriented</td>
<td align="center">Python Code</td>
<td align="center">App-oriented</td>
<td align="center">API-oriented</td>
</tr>
<tr>
<td align="center">Supported LLMs</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">OpenAI-only</td>
</tr>
<tr>
<td align="center">RAG Engine</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Workflow</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Observability</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Enterprise Feature (SSO/Access control)</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Local Deployment</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
## Using Dify

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="seguir en X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="seguir en LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Descargas de Docker" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="suivre sur X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="suivre sur LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Tirages Docker" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@@ -55,7 +52,7 @@
Dify est une plateforme de développement d'applications LLM open source. Son interface intuitive combine un flux de travail d'IA, un pipeline RAG, des capacités d'agent, une gestion de modèles, des fonctionnalités d'observabilité, et plus encore, vous permettant de passer rapidement du prototype à la production. Voici une liste des fonctionnalités principales:
</br> </br>
**1. Flux de travail** :
**1. Flux de travail**:
Construisez et testez des flux de travail d'IA puissants sur un canevas visuel, en utilisant toutes les fonctionnalités suivantes et plus encore.
@@ -63,25 +60,27 @@ Dify est une plateforme de développement d'applications LLM open source. Son in
**2. Prise en charge complète des modèles** :
**2. Prise en charge complète des modèles**:
Intégration transparente avec des centaines de LLM propriétaires / open source provenant de dizaines de fournisseurs d'inférence et de solutions auto-hébergées, couvrant GPT, Mistral, Llama3, et tous les modèles compatibles avec l'API OpenAI. Une liste complète des fournisseurs de modèles pris en charge se trouve [ici](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
**3. IDE de prompt** :
**3. IDE de prompt**:
Interface intuitive pour créer des prompts, comparer les performances des modèles et ajouter des fonctionnalités supplémentaires telles que la synthèse vocale à une application basée sur des chats.
**4. Pipeline RAG** :
**4. Pipeline RAG**:
Des capacités RAG étendues qui couvrent tout, de l'ingestion de documents à la récupération, avec un support prêt à l'emploi pour l'extraction de texte à partir de PDF, PPT et autres formats de document courants.
**5. Capacités d'agent** :
**5. Capac
ités d'agent**:
Vous pouvez définir des agents basés sur l'appel de fonction LLM ou ReAct, et ajouter des outils pré-construits ou personnalisés pour l'agent. Dify fournit plus de 50 outils intégrés pour les agents d'IA, tels que la recherche Google, DALL·E, Stable Diffusion et WolframAlpha.
**6. LLMOps** :
**6. LLMOps**:
Surveillez et analysez les journaux d'application et les performances au fil du temps. Vous pouvez continuellement améliorer les prompts, les ensembles de données et les modèles en fonction des données de production et des annotations.
**7. Backend-as-a-Service** :
**7. Backend-as-a-Service**:
Toutes les offres de Dify sont accompagnées d'API correspondantes, vous permettant d'intégrer facilement Dify dans votre propre logique métier.

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="X(Twitter)でフォロー"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="LinkedInでフォロー"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@@ -164,7 +161,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
- **企業/組織向けのDify</br>**
企業中心の機能を提供しています。[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。 </br>
> AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t23mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。
> AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。
## 最新の情報を入手

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@@ -87,7 +84,9 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
## Feature Comparison
<table style="width: 100%;">
<tr>
<tr
>
<th align="center">Feature</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@@ -25,9 +25,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@@ -22,9 +22,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@@ -106,73 +103,6 @@ Prosimo, glejte naša pogosta vprašanja [FAQ](https://docs.dify.ai/getting-star
**7. Backend-as-a-Service**:
AVse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko.
## Primerjava Funkcij
<table style="width: 100%;">
<tr>
<th align="center">Funkcija</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">Programski pristop</td>
<td align="center">API + usmerjeno v aplikacije</td>
<td align="center">Python koda</td>
<td align="center">Usmerjeno v aplikacije</td>
<td align="center">Usmerjeno v API</td>
</tr>
<tr>
<td align="center">Podprti LLM-ji</td>
<td align="center">Bogata izbira</td>
<td align="center">Bogata izbira</td>
<td align="center">Bogata izbira</td>
<td align="center">Samo OpenAI</td>
</tr>
<tr>
<td align="center">RAG pogon</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Potek dela</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Spremljanje</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Funkcija za podjetja (SSO/nadzor dostopa)</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Lokalna namestitev</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
## Uporaba Dify
@@ -254,4 +184,4 @@ Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj
## Licenca
To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.

View File

@@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="X(Twitter)'da takip et"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="LinkedIn'da takip et"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Çekmeleri" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@ -65,6 +62,8 @@ Build and test powerful AI workflows on a visual interface
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
I apologize, you're right. Let me try to produce a more meaningful and fluent translation. Here is the updated translation:
**3. Prompt IDE**:
A user-friendly interface for crafting prompts, comparing model performance, and adding extra features such as text-to-speech to chat-based applications.
@ -151,6 +150,8 @@ Build and test powerful AI workflows on a visual interface
## Using Dify
- **Cloud </br>**
Here is the Turkish translation of the text you provided, inside a code block:
-
We offer a [Dify Cloud](https://dify.ai) service that anyone can try with zero setup. It provides all the capabilities of the self-deployed version and includes 200 free GPT-4 calls on the sandbox plan.
- **Self-hosting Dify Community Edition</br>**
@ -176,6 +177,8 @@ Star Dify on GitHub and get notified of new releases instantly.
>- RAM >= 4GB
</br>
Here is the Turkish translation of the text you provided, inside a code block:
The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
```bash

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="theo dõi trên X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="theo dõi trên LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -422,8 +422,8 @@ POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=
# Plugin configuration
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
PLUGIN_API_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_API_URL=http://127.0.0.1:5002
PLUGIN_REMOTE_INSTALL_PORT=5003
PLUGIN_REMOTE_INSTALL_HOST=localhost
PLUGIN_MAX_PACKAGE_SIZE=15728640
@ -435,7 +435,7 @@ MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai
# Endpoint configuration
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
# Reset password token expiry minutes
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5

View File

@ -53,12 +53,10 @@ ignore = [
"FURB152", # math-constant
"UP007", # non-pep604-annotation
"UP032", # f-string
"UP045", # non-pep604-annotation-optional
"B005", # strip-with-multi-characters
"B006", # mutable-argument-default
"B007", # unused-loop-control-variable
"B026", # star-arg-unpacking-after-keyword-arg
"B903", # class-as-data-structure
"B904", # raise-without-from-inside-except
"B905", # zip-without-explicit-strict
"N806", # non-lowercase-variable-in-function

View File

@ -4,7 +4,7 @@ FROM python:3.12-slim-bookworm AS base
WORKDIR /app/api
# Install Poetry
ENV POETRY_VERSION=2.0.1
ENV POETRY_VERSION=1.8.4
# if you are located in China, you can use the aliyun mirror to speed up
# RUN pip install --no-cache-dir poetry==${POETRY_VERSION} -i https://mirrors.aliyun.com/pypi/simple/
@ -48,20 +48,16 @@ ENV TZ=UTC
WORKDIR /app/api
RUN \
apt-get update \
# Install dependencies
&& apt-get install -y --no-install-recommends \
# basic environment
curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
# For Security
expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
# install a Chinese font to support the use of tools like matplotlib
fonts-noto-cjk \
# install a package to improve the accuracy of guessing mime type and file extension
media-types \
# install libmagic so python-magic can guess MIME types
libmagic1 \
RUN apt-get update \
&& apt-get install -y --no-install-recommends curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
# if you are located in China, you can use the aliyun mirror to speed up
# && echo "deb http://mirrors.aliyun.com/debian testing main" > /etc/apt/sources.list \
&& echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
&& apt-get update \
# For Security
&& apt-get install -y --no-install-recommends expat=2.6.4-1 libldap-2.5-0=2.5.19+dfsg-1 perl=5.40.0-8 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
# install a Chinese font to support the use of tools like matplotlib
&& apt-get install -y fonts-noto-cjk \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
@ -84,6 +80,7 @@ COPY . /app/api/
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}

View File

@ -37,13 +37,7 @@
4. Create environment.
The Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. To run in a virtual environment, first add the poetry shell plugin if you don't already have it. (Note: `poetry shell` is no longer a native command, so the plugin must be installed beforehand.)
```bash
poetry self add poetry-plugin-shell
```
Then you can execute `poetry shell` to activate the environment.
The Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. You can execute `poetry shell` to activate the environment.
5. Install dependencies
@ -85,5 +79,5 @@
2. Run the tests locally with the mocked system environment variables from the `tool.pytest_env` section of `pyproject.toml`
```bash
poetry run -P api bash dev/pytest/pytest_all_tests.sh
poetry run -C api bash dev/pytest/pytest_all_tests.sh
```

View File

@ -2,7 +2,6 @@ import logging
import time
from configs import dify_config
from contexts.wrapper import RecyclableContextVar
from dify_app import DifyApp
@ -17,12 +16,6 @@ def create_flask_app_with_configs() -> DifyApp:
dify_app = DifyApp(__name__)
dify_app.config.from_mapping(dify_config.model_dump())
# add before request hook
@dify_app.before_request
def before_request():
# add a unique identifier to each request
RecyclableContextVar.increment_thread_recycles()
return dify_app

View File

@ -707,13 +707,12 @@ def extract_unique_plugins(output_file: str, input_file: str):
@click.option(
"--output_file", prompt=True, help="The file to store the installed plugins.", default="installed_plugins.jsonl"
)
@click.option("--workers", prompt=True, help="The number of workers to install plugins.", default=100)
def install_plugins(input_file: str, output_file: str, workers: int):
def install_plugins(input_file: str, output_file: str):
"""
Install plugins.
"""
click.echo(click.style("Starting install plugins.", fg="white"))
PluginMigration.install_plugins(input_file, output_file, workers)
PluginMigration.install_plugins(input_file, output_file)
click.echo(click.style("Install plugins completed.", fg="green"))

View File

@ -141,10 +141,10 @@ class PluginConfig(BaseSettings):
PLUGIN_DAEMON_URL: HttpUrl = Field(
description="Plugin API URL",
default="http://localhost:5002",
default="http://plugin:5002",
)
PLUGIN_DAEMON_KEY: str = Field(
PLUGIN_API_KEY: str = Field(
description="Plugin API key",
default="plugin-api-key",
)
@ -200,7 +200,7 @@ class EndpointConfig(BaseSettings):
)
CONSOLE_WEB_URL: str = Field(
description="Base URL for the console web interface,used for frontend references and CORS configuration",
description="Base URL for the console web interface," "used for frontend references and CORS configuration",
default="",
)
@ -373,8 +373,8 @@ class HttpConfig(BaseSettings):
)
RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
description="Enable handling of X-Forwarded-For, X-Forwarded-Proto, and X-Forwarded-Port headers"
" when the app is behind a single trusted reverse proxy.",
description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug"
" to respect X-* headers to redirect clients",
default=False,
)
@ -556,11 +556,6 @@ class AuthConfig(BaseSettings):
default=86400,
)
FORGOT_PASSWORD_LOCKOUT_DURATION: PositiveInt = Field(
description="Time (in seconds) a user must wait before retrying password reset after exceeding the rate limit.",
default=86400,
)
class ModerationConfig(BaseSettings):
"""

View File

@ -1,40 +1,9 @@
from typing import Optional
from pydantic import Field, NonNegativeInt, computed_field
from pydantic import Field, NonNegativeInt
from pydantic_settings import BaseSettings
class HostedCreditConfig(BaseSettings):
HOSTED_MODEL_CREDIT_CONFIG: str = Field(
description="Model credit configuration in format 'model:credits,model:credits', e.g., 'gpt-4:20,gpt-4o:10'",
default="",
)
def get_model_credits(self, model_name: str) -> int:
"""
Get credit value for a specific model name.
Returns 1 if the model is not found in the configuration (default credit).
:param model_name: The name of the model to search for
:return: The credit value for the model
"""
if not self.HOSTED_MODEL_CREDIT_CONFIG:
return 1
try:
credit_map = dict(
item.strip().split(":", 1) for item in self.HOSTED_MODEL_CREDIT_CONFIG.split(",") if ":" in item
)
# Search for matching model pattern
for pattern, credit in credit_map.items():
if pattern.strip() == model_name:
return int(credit)
return 1 # Default quota if no match found
except (ValueError, AttributeError):
return 1 # Return default quota if parsing fails
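For reference, the credit-string format that `get_model_credits` handles can be exercised standalone. A minimal sketch of the same parsing, with hypothetical config values:

```python
# A hypothetical HOSTED_MODEL_CREDIT_CONFIG value, per the field's docstring.
config = "gpt-4:20,gpt-4o:10"

# Same approach as get_model_credits: split entries on ",", then split each
# entry on the first ":" only, skipping malformed entries without a ":".
credit_map = dict(item.strip().split(":", 1) for item in config.split(",") if ":" in item)

assert int(credit_map["gpt-4"]) == 20
assert int(credit_map.get("gpt-4o", 1)) == 10
assert int(credit_map.get("gpt-3.5-turbo", 1)) == 1  # unknown models fall back to 1 credit
```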
class HostedOpenAiConfig(BaseSettings):
"""
Configuration for hosted OpenAI service
@ -212,7 +181,7 @@ class HostedFetchAppTemplateConfig(BaseSettings):
"""
HOSTED_FETCH_APP_TEMPLATES_MODE: str = Field(
description="Mode for fetching app templates: remote, db, or builtin default to remote,",
description="Mode for fetching app templates: remote, db, or builtin" " default to remote,",
default="remote",
)
@ -233,7 +202,5 @@ class HostedServiceConfig(
HostedZhipuAIConfig,
# moderation
HostedModerationConfig,
# credit config
HostedCreditConfig,
):
pass

View File

@ -1,4 +1,3 @@
import os
from typing import Any, Literal, Optional
from urllib.parse import quote_plus
@ -167,11 +166,6 @@ class DatabaseConfig(BaseSettings):
default=False,
)
RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field(
description="Number of processes for the retrieval service, default to CPU cores.",
default=os.cpu_count(),
)
@computed_field
def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
return {

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="1.0.0",
default="0.15.0",
)
COMMIT_SHA: str = Field(

View File

@ -15,7 +15,7 @@ AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS])
if dify_config.ETL_TYPE == "Unstructured":
DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"]
DOCUMENT_EXTENSIONS.extend(("doc", "docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
DOCUMENT_EXTENSIONS.extend(("docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
if dify_config.UNSTRUCTURED_API_URL:
DOCUMENT_EXTENSIONS.append("ppt")
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])

View File

@ -2,8 +2,6 @@ from contextvars import ContextVar
from threading import Lock
from typing import TYPE_CHECKING
from contexts.wrapper import RecyclableContextVar
if TYPE_CHECKING:
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.tools.plugin_tool.provider import PluginToolProviderController
@ -14,17 +12,8 @@ tenant_id: ContextVar[str] = ContextVar("tenant_id")
workflow_variable_pool: ContextVar["VariablePool"] = ContextVar("workflow_variable_pool")
"""
To avoid race conditions caused by gunicorn thread recycling, RecyclableContextVar is used as a replacement.
"""
plugin_tool_providers: RecyclableContextVar[dict[str, "PluginToolProviderController"]] = RecyclableContextVar(
ContextVar("plugin_tool_providers")
)
plugin_tool_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(ContextVar("plugin_tool_providers_lock"))
plugin_tool_providers: ContextVar[dict[str, "PluginToolProviderController"]] = ContextVar("plugin_tool_providers")
plugin_tool_providers_lock: ContextVar[Lock] = ContextVar("plugin_tool_providers_lock")
plugin_model_providers: RecyclableContextVar[list["PluginModelProviderEntity"] | None] = RecyclableContextVar(
ContextVar("plugin_model_providers")
)
plugin_model_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(
ContextVar("plugin_model_providers_lock")
)
plugin_model_providers: ContextVar[list["PluginModelProviderEntity"] | None] = ContextVar("plugin_model_providers")
plugin_model_providers_lock: ContextVar[Lock] = ContextVar("plugin_model_providers_lock")

View File

@ -1,65 +0,0 @@
from contextvars import ContextVar
from typing import Generic, TypeVar
T = TypeVar("T")
class HiddenValue:
pass
_default = HiddenValue()
class RecyclableContextVar(Generic[T]):
"""
RecyclableContextVar is a wrapper around ContextVar
It's safe to use in gunicorn with thread recycling, but features like `reset` are not available for now
NOTE: you need to call `increment_thread_recycles` before requests
"""
_thread_recycles: ContextVar[int] = ContextVar("thread_recycles")
@classmethod
def increment_thread_recycles(cls):
try:
recycles = cls._thread_recycles.get()
cls._thread_recycles.set(recycles + 1)
except LookupError:
cls._thread_recycles.set(0)
def __init__(self, context_var: ContextVar[T]):
self._context_var = context_var
self._updates = ContextVar[int](context_var.name + "_updates", default=0)
def get(self, default: T | HiddenValue = _default) -> T:
thread_recycles = self._thread_recycles.get(0)
self_updates = self._updates.get()
if thread_recycles > self_updates:
self._updates.set(thread_recycles)
# check if thread is recycled and should be updated
if thread_recycles < self_updates:
return self._context_var.get()
else:
# thread_recycles >= self_updates means the current context is invalid
if isinstance(default, HiddenValue) or default is _default:
raise LookupError
else:
return default
def set(self, value: T):
# if `set` was never called before, self._updates can lag behind cls._thread_recycles,
# so bring it up to date manually
thread_recycles = self._thread_recycles.get(0)
self_updates = self._updates.get()
if thread_recycles > self_updates:
self._updates.set(thread_recycles)
if self._updates.get() == self._thread_recycles.get(0):
# after the increment, self._updates exceeds _thread_recycles, marking the value as fresh
self._updates.set(self._updates.get() + 1)
# set the context
self._context_var.set(value)
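As the docstring above notes, `increment_thread_recycles` must run before each request (the `before_request` hook in the app factory diff earlier does exactly that). A minimal usage sketch, assuming the class as defined here:

```python
from contextvars import ContextVar

# A hypothetical per-request cache wrapped in RecyclableContextVar.
request_cache: RecyclableContextVar[dict] = RecyclableContextVar(ContextVar("request_cache"))

RecyclableContextVar.increment_thread_recycles()  # start of request 1
request_cache.set({"user_id": "abc"})
print(request_cache.get())    # {'user_id': 'abc'}

RecyclableContextVar.increment_thread_recycles()  # request 2 reuses the same thread
print(request_cache.get({}))  # {} -- the stale value from request 1 is hidden, not returned
```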

View File

@ -1,32 +1,12 @@
import mimetypes
import os
import platform
import re
import urllib.parse
import warnings
from collections.abc import Mapping
from typing import Any
from uuid import uuid4
import httpx
try:
import magic
except ImportError:
if platform.system() == "Windows":
warnings.warn(
"To use python-magic guess MIMETYPE, you need to run `pip install python-magic-bin`", stacklevel=2
)
elif platform.system() == "Darwin":
warnings.warn("To use python-magic guess MIMETYPE, you need to run `brew install libmagic`", stacklevel=2)
elif platform.system() == "Linux":
warnings.warn(
"To use python-magic guess MIMETYPE, you need to run `sudo apt-get install libmagic1`", stacklevel=2
)
else:
warnings.warn("To use python-magic guess MIMETYPE, you need to install `libmagic`", stacklevel=2)
magic = None # type: ignore
from pydantic import BaseModel
from configs import dify_config
@ -67,13 +47,6 @@ def guess_file_info_from_response(response: httpx.Response):
# If guessing fails, use Content-Type from response headers
mimetype = response.headers.get("Content-Type", "application/octet-stream")
# Use python-magic to guess MIME type if still unknown or generic
if mimetype == "application/octet-stream" and magic is not None:
try:
mimetype = magic.from_buffer(response.content[:1024], mime=True)
except magic.MagicException:
pass
extension = os.path.splitext(filename)[1]
# Ensure filename has an extension
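Taken together, the hunks above describe a three-step fallback for MIME detection: guess from the file name, fall back to the response's Content-Type, then sniff the leading bytes with libmagic when available. A condensed sketch of that chain, assuming the optional `magic` import from the top of this file:

```python
import mimetypes

def guess_mimetype(filename: str, content_type_header: str | None, content: bytes) -> str:
    # 1. guess from the file name
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
        # 2. fall back to the server-declared Content-Type
        mimetype = content_type_header or "application/octet-stream"
    # 3. last resort: sniff the leading bytes with libmagic, if importable
    if mimetype == "application/octet-stream" and magic is not None:
        try:
            mimetype = magic.from_buffer(content[:1024], mime=True)
        except magic.MagicException:
            pass
    return mimetype
```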

View File

@ -59,7 +59,7 @@ class InsertExploreAppListApi(Resource):
with Session(db.engine) as session:
app = session.execute(select(App).filter(App.id == args["app_id"])).scalar_one_or_none()
if not app:
raise NotFound(f"App '{args['app_id']}' is not found")
raise NotFound(f'App \'{args["app_id"]}\' is not found')
site = app.site
if not site:

View File

@ -22,7 +22,7 @@ from controllers.console.wraps import account_initialization_required, setup_req
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs.login import login_required
from models import App, AppMode
from models.model import AppMode
from services.audio_service import AudioService
from services.errors.audio import (
AudioTooLargeServiceError,
@ -79,7 +79,7 @@ class ChatMessageTextApi(Resource):
@login_required
@account_initialization_required
@get_app_model
def post(self, app_model: App):
def post(self, app_model):
from werkzeug.exceptions import InternalServerError
try:
@ -98,13 +98,9 @@ class ChatMessageTextApi(Resource):
and app_model.workflow.features_dict
):
text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
if text_to_speech is None:
raise ValueError("TTS is not enabled")
voice = args.get("voice") or text_to_speech.get("voice")
else:
try:
if app_model.app_model_config is None:
raise ValueError("AppModelConfig not found")
voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
except Exception:
voice = None

View File

@ -59,9 +59,3 @@ class EmailCodeAccountDeletionRateLimitExceededError(BaseHTTPException):
error_code = "email_code_account_deletion_rate_limit_exceeded"
description = "Too many account deletion emails have been sent. Please try again in 5 minutes."
code = 429
class EmailPasswordResetLimitError(BaseHTTPException):
error_code = "email_password_reset_limit"
description = "Too many failed password reset attempts. Please try again in 24 hours."
code = 429

View File

@ -8,13 +8,7 @@ from sqlalchemy.orm import Session
from constants.languages import languages
from controllers.console import api
from controllers.console.auth.error import (
EmailCodeError,
EmailPasswordResetLimitError,
InvalidEmailError,
InvalidTokenError,
PasswordMismatchError,
)
from controllers.console.auth.error import EmailCodeError, InvalidEmailError, InvalidTokenError, PasswordMismatchError
from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError
from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created
@ -71,10 +65,6 @@ class ForgotPasswordCheckApi(Resource):
user_email = args["email"]
is_forgot_password_error_rate_limit = AccountService.is_forgot_password_error_rate_limit(args["email"])
if is_forgot_password_error_rate_limit:
raise EmailPasswordResetLimitError()
token_data = AccountService.get_reset_password_data(args["token"])
if token_data is None:
raise InvalidTokenError()
@ -83,10 +73,8 @@ class ForgotPasswordCheckApi(Resource):
raise InvalidEmailError()
if args["code"] != token_data.get("code"):
AccountService.add_forgot_password_error_rate_limit(args["email"])
raise EmailCodeError()
AccountService.reset_forgot_password_error_rate_limit(args["email"])
return {"is_valid": True, "email": token_data.get("email")}

View File

@ -135,7 +135,7 @@ class DataSourceNotionListApi(Resource):
data_source_info = json.loads(document.data_source_info)
exist_page_ids.append(data_source_info["notion_page_id"])
# get all authorized pages
data_source_bindings = session.scalars(
data_source_bindings = session.execute(
select(DataSourceOauthBinding).filter_by(
tenant_id=current_user.current_tenant_id, provider="notion", disabled=False
)
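The `scalars`/`execute` distinction in this hunk is worth spelling out: `session.execute(select(Model))` yields `Row` objects that wrap the entity, while `session.scalars(...)` yields the mapped instances directly. A minimal sketch, assuming a configured engine and the `DataSourceOauthBinding` model:

```python
from sqlalchemy import select
from sqlalchemy.orm import Session

with Session(engine) as session:  # `engine` assumed to be configured elsewhere
    stmt = select(DataSourceOauthBinding).filter_by(provider="notion", disabled=False)

    bindings = session.scalars(stmt).all()        # list[DataSourceOauthBinding]
    rows = session.execute(stmt).all()            # list[Row], each wrapping one binding
    same = session.execute(stmt).scalars().all()  # equivalent to the first form
```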

View File

@ -14,7 +14,6 @@ from controllers.console.wraps import account_initialization_required, enterpris
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
@ -53,12 +52,12 @@ class DatasetListApi(Resource):
# provider = request.args.get("provider", default="vendor")
search = request.args.get("keyword", default=None, type=str)
tag_ids = request.args.getlist("tag_ids")
include_all = request.args.get("include_all", default="false").lower() == "true"
if ids:
datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
else:
datasets, total = DatasetService.get_datasets(
page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
page, limit, current_user.current_tenant_id, current_user, search, tag_ids
)
# check embedding setting
@ -73,9 +72,7 @@ class DatasetListApi(Resource):
data = marshal(datasets, dataset_detail_fields)
for item in data:
# convert embedding_model_provider to plugin standard format
if item["indexing_technique"] == "high_quality":
item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
if item_model in model_names:
item["embedding_available"] = True
@ -460,7 +457,7 @@ class DatasetIndexingEstimateApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -622,7 +619,9 @@ class DatasetRetrievalSettingApi(Resource):
vector_type = dify_config.VECTOR_STORE
match vector_type:
case (
VectorType.RELYT
VectorType.MILVUS
| VectorType.RELYT
| VectorType.PGVECTOR
| VectorType.TIDB_VECTOR
| VectorType.CHROMA
| VectorType.TENCENT
@ -646,7 +645,6 @@ class DatasetRetrievalSettingApi(Resource):
| VectorType.TIDB_ON_QDRANT
| VectorType.LINDORM
| VectorType.COUCHBASE
| VectorType.MILVUS
):
return {
"retrieval_method": [

View File

@ -362,7 +362,8 @@ class DatasetInitApi(Resource):
)
except InvokeAuthorizationError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -539,7 +540,8 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
return response.model_dump(), 200
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -617,7 +619,7 @@ class DocumentDetailApi(DocumentResource):
raise InvalidMetadataError(f"Invalid metadata value: {metadata}")
if metadata == "only":
response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata_details}
response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata}
elif metadata == "without":
dataset_process_rules = DatasetService.get_process_rules(dataset_id)
document_process_rules = document.dataset_process_rule.to_dict()
@ -678,7 +680,7 @@ class DocumentDetailApi(DocumentResource):
"disabled_by": document.disabled_by,
"archived": document.archived,
"doc_type": document.doc_type,
"doc_metadata": document.doc_metadata_details,
"doc_metadata": document.doc_metadata,
"segment_count": document.segment_count,
"average_segment_length": document.average_segment_length,
"hit_count": document.hit_count,

View File

@ -168,7 +168,8 @@ class DatasetDocumentSegmentApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -216,7 +217,8 @@ class DatasetDocumentSegmentAddApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -265,7 +267,8 @@ class DatasetDocumentSegmentUpdateApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -365,9 +368,9 @@ class DatasetDocumentSegmentBatchImportApi(Resource):
result = []
for index, row in df.iterrows():
if document.doc_form == "qa_model":
data = {"content": row.iloc[0], "answer": row.iloc[1]}
data = {"content": row[0], "answer": row[1]}
else:
data = {"content": row.iloc[0]}
data = {"content": row[0]}
result.append(data)
if len(result) == 0:
raise ValueError("The CSV file is empty.")
@ -434,7 +437,8 @@ class ChildChunkAddApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)

View File

@ -1,143 +0,0 @@
from flask_login import current_user  # type: ignore
from flask_restful import Resource, marshal_with, reqparse # type: ignore
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.wraps import account_initialization_required, enterprise_license_required, setup_required
from fields.dataset_fields import dataset_metadata_fields
from libs.login import login_required
from services.dataset_service import DatasetService
from services.entities.knowledge_entities.knowledge_entities import (
MetadataArgs,
MetadataOperationData,
)
from services.metadata_service import MetadataService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 40:
raise ValueError("Name must be between 1 to 40 characters.")
return name
def _validate_description_length(description):
if len(description) > 400:
raise ValueError("Description cannot exceed 400 characters.")
return description
class DatasetListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
@marshal_with(dataset_metadata_fields)
def post(self, dataset_id):
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=True, location="json")
parser.add_argument("name", type=str, required=True, nullable=True, location="json")
args = parser.parse_args()
metadata_args = MetadataArgs(**args)
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
metadata = MetadataService.create_metadata(dataset_id_str, metadata_args)
return metadata, 201
class DatasetMetadataApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def patch(self, dataset_id, metadata_id):
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, nullable=True, location="json")
args = parser.parse_args()
dataset_id_str = str(dataset_id)
metadata_id_str = str(metadata_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
metadata = MetadataService.update_metadata_name(dataset_id_str, metadata_id_str, args.get("name"))
return metadata, 200
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def delete(self, dataset_id, metadata_id):
dataset_id_str = str(dataset_id)
metadata_id_str = str(metadata_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
return 200
class DatasetMetadataBuiltInFieldApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def get(self):
built_in_fields = MetadataService.get_built_in_fields()
return built_in_fields, 200
class DatasetMetadataBuiltInFieldActionApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def post(self, dataset_id, action):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
if action == "enable":
MetadataService.enable_built_in_field(dataset)
elif action == "disable":
MetadataService.disable_built_in_field(dataset)
return 200
class DocumentMetadataApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def post(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
parser = reqparse.RequestParser()
parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json")
args = parser.parse_args()
metadata_args = MetadataOperationData(**args)
MetadataService.update_documents_metadata(dataset, metadata_args)
return 200
api.add_resource(DatasetListApi, "/datasets/<uuid:dataset_id>/metadata")
api.add_resource(DatasetMetadataApi, "/datasets/<uuid:dataset_id>/metadata/<uuid:metadata_id>")
api.add_resource(DatasetMetadataBuiltInFieldApi, "/datasets/metadata/built-in")
api.add_resource(DatasetMetadataBuiltInFieldActionApi, "/datasets/metadata/built-in/<string:action>")
api.add_resource(DocumentMetadataApi, "/datasets/<uuid:dataset_id>/documents/metadata")

View File

@ -32,7 +32,7 @@ class ConversationListApi(InstalledAppResource):
pinned = None
if "pinned" in args and args["pinned"] is not None:
pinned = args["pinned"] == "true"
pinned = True if args["pinned"] == "true" else False
try:
with Session(db.engine) as session:

View File

@ -50,7 +50,7 @@ class MessageListApi(InstalledAppResource):
try:
return MessageService.pagination_by_first_id(
app_model, current_user, args["conversation_id"], args["first_id"], args["limit"]
app_model, current_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")

View File

@ -1,5 +1,3 @@
from urllib.parse import quote
from flask import Response, request
from flask_restful import Resource, reqparse # type: ignore
from werkzeug.exceptions import NotFound
@ -73,8 +71,7 @@ class FilePreviewApi(Resource):
if upload_file.size > 0:
response.headers["Content-Length"] = str(upload_file.size)
if args["as_attachment"]:
encoded_filename = quote(upload_file.name)
response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
response.headers["Content-Disposition"] = f"attachment; filename={upload_file.name}"
return response
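The `filename*=UTF-8''...` form in this hunk is the RFC 5987 encoding for Content-Disposition, which keeps non-ASCII file names intact where a bare `filename=` would not. A minimal sketch:

```python
from urllib.parse import quote

name = "报告.pdf"  # hypothetical non-ASCII file name
header = f"attachment; filename*=UTF-8''{quote(name)}"
print(header)  # attachment; filename*=UTF-8''%E6%8A%A5%E5%91%8A.pdf
```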

View File

@ -1,5 +1,3 @@
import json
from flask_restful import Resource, reqparse # type: ignore
from controllers.console.wraps import setup_required
@ -31,34 +29,4 @@ class EnterpriseWorkspace(Resource):
return {"message": "enterprise workspace created."}
class EnterpriseWorkspaceNoOwnerEmail(Resource):
@setup_required
@enterprise_inner_api_only
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, location="json")
args = parser.parse_args()
tenant = TenantService.create_tenant(args["name"], is_from_dashboard=True)
tenant_was_created.send(tenant)
resp = {
"id": tenant.id,
"name": tenant.name,
"encrypt_public_key": tenant.encrypt_public_key,
"plan": tenant.plan,
"status": tenant.status,
"custom_config": json.loads(tenant.custom_config) if tenant.custom_config else {},
"created_at": tenant.created_at.isoformat() + "Z" if tenant.created_at else None,
"updated_at": tenant.updated_at.isoformat() + "Z" if tenant.updated_at else None,
}
return {
"message": "enterprise workspace created.",
"tenant": resp,
}
api.add_resource(EnterpriseWorkspace, "/enterprise/workspace")
api.add_resource(EnterpriseWorkspaceNoOwnerEmail, "/enterprise/workspace/ownerless")

View File

@ -65,7 +65,7 @@ def enterprise_inner_api_user_auth(view):
def plugin_inner_api_only(view):
@wraps(view)
def decorated(*args, **kwargs):
if not dify_config.PLUGIN_DAEMON_KEY:
if not dify_config.PLUGIN_API_KEY:
abort(404)
# get header 'X-Inner-Api-Key'

View File

@ -7,4 +7,4 @@ api = ExternalApi(bp)
from . import index
from .app import app, audio, completion, conversation, file, message, workflow
from .dataset import dataset, document, hit_testing, segment, upload_file
from .dataset import dataset, document, hit_testing, segment

View File

@ -10,7 +10,6 @@ from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import message_file_fields
from fields.message_fields import feedback_fields, retriever_resource_fields
from fields.raws import FilesContainedField
from libs.helper import TimestampField, uuid_value
from models.model import App, AppMode, EndUser
@ -19,6 +18,26 @@ from services.message_service import MessageService
class MessageListApi(Resource):
feedback_fields = {"rating": fields.String}
retriever_resource_fields = {
"id": fields.String,
"message_id": fields.String,
"position": fields.Integer,
"dataset_id": fields.String,
"dataset_name": fields.String,
"document_id": fields.String,
"document_name": fields.String,
"data_source_type": fields.String,
"segment_id": fields.String,
"score": fields.Float,
"hit_count": fields.Integer,
"word_count": fields.Integer,
"segment_position": fields.Integer,
"index_node_hash": fields.String,
"content": fields.String,
"created_at": TimestampField,
}
agent_thought_fields = {
"id": fields.String,
"chain_id": fields.String,
@ -70,7 +89,7 @@ class MessageListApi(Resource):
try:
return MessageService.pagination_by_first_id(
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"]
)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")

View File

@ -31,11 +31,8 @@ class DatasetListApi(DatasetApiResource):
# provider = request.args.get("provider", default="vendor")
search = request.args.get("keyword", default=None, type=str)
tag_ids = request.args.getlist("tag_ids")
include_all = request.args.get("include_all", default="false").lower() == "true"
datasets, total = DatasetService.get_datasets(
page, limit, tenant_id, current_user, search, tag_ids, include_all
)
datasets, total = DatasetService.get_datasets(page, limit, tenant_id, current_user, search, tag_ids)
# check embedding setting
provider_manager = ProviderManager()
configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

View File

@ -18,7 +18,6 @@ from controllers.service_api.app.error import (
from controllers.service_api.dataset.error import (
ArchivedDocumentImmutableError,
DocumentIndexingError,
InvalidMetadataError,
)
from controllers.service_api.wraps import DatasetApiResource, cloud_edition_billing_resource_check
from core.errors.error import ProviderTokenNotInitError
@ -51,9 +50,6 @@ class DocumentAddByTextApi(DatasetApiResource):
"indexing_technique", type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False, location="json"
)
parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
parser.add_argument("doc_type", type=str, required=False, nullable=True, location="json")
parser.add_argument("doc_metadata", type=dict, required=False, nullable=True, location="json")
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
@ -65,28 +61,6 @@ class DocumentAddByTextApi(DatasetApiResource):
if not dataset.indexing_technique and not args["indexing_technique"]:
raise ValueError("indexing_technique is required.")
# Validate metadata if provided
if args.get("doc_type") or args.get("doc_metadata"):
if not args.get("doc_type") or not args.get("doc_metadata"):
raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")
if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
raise InvalidMetadataError(
"Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
)
if not isinstance(args["doc_metadata"], dict):
raise InvalidMetadataError("doc_metadata must be a dictionary")
# Validate metadata schema based on doc_type
if args["doc_type"] != "others":
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
for key, value in args["doc_metadata"].items():
if key in metadata_schema and not isinstance(value, metadata_schema[key]):
raise InvalidMetadataError(f"Invalid type for metadata field {key}")
# set to MetaDataConfig
args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}
text = args.get("text")
name = args.get("name")
if text is None or name is None:
@ -133,8 +107,6 @@ class DocumentUpdateByTextApi(DatasetApiResource):
"doc_language", type=str, default="English", required=False, nullable=False, location="json"
)
parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
parser.add_argument("doc_type", type=str, required=False, nullable=True, location="json")
parser.add_argument("doc_metadata", type=dict, required=False, nullable=True, location="json")
args = parser.parse_args()
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
@ -143,32 +115,6 @@ class DocumentUpdateByTextApi(DatasetApiResource):
if not dataset:
raise ValueError("Dataset is not exist.")
# indexing_technique is already set in dataset since this is an update
args["indexing_technique"] = dataset.indexing_technique
# Validate metadata if provided
if args.get("doc_type") or args.get("doc_metadata"):
if not args.get("doc_type") or not args.get("doc_metadata"):
raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")
if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
raise InvalidMetadataError(
"Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
)
if not isinstance(args["doc_metadata"], dict):
raise InvalidMetadataError("doc_metadata must be a dictionary")
# Validate metadata schema based on doc_type
if args["doc_type"] != "others":
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
for key, value in args["doc_metadata"].items():
if key in metadata_schema and not isinstance(value, metadata_schema[key]):
raise InvalidMetadataError(f"Invalid type for metadata field {key}")
# set to MetaDataConfig
args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}
if args["text"]:
text = args.get("text")
name = args.get("name")
@ -215,30 +161,6 @@ class DocumentAddByFileApi(DatasetApiResource):
args["doc_form"] = "text_model"
if "doc_language" not in args:
args["doc_language"] = "English"
# Validate metadata if provided
if args.get("doc_type") or args.get("doc_metadata"):
if not args.get("doc_type") or not args.get("doc_metadata"):
raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")
if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
raise InvalidMetadataError(
"Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
)
if not isinstance(args["doc_metadata"], dict):
raise InvalidMetadataError("doc_metadata must be a dictionary")
# Validate metadata schema based on doc_type
if args["doc_type"] != "others":
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
for key, value in args["doc_metadata"].items():
if key in metadata_schema and not isinstance(value, metadata_schema[key]):
raise InvalidMetadataError(f"Invalid type for metadata field {key}")
# set to MetaDataConfig
args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}
# get dataset info
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
@ -306,29 +228,6 @@ class DocumentUpdateByFileApi(DatasetApiResource):
if "doc_language" not in args:
args["doc_language"] = "English"
# Validate metadata if provided
if args.get("doc_type") or args.get("doc_metadata"):
if not args.get("doc_type") or not args.get("doc_metadata"):
raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")
if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
raise InvalidMetadataError(
"Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
)
if not isinstance(args["doc_metadata"], dict):
raise InvalidMetadataError("doc_metadata must be a dictionary")
# Validate metadata schema based on doc_type
if args["doc_type"] != "others":
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
for key, value in args["doc_metadata"].items():
if key in metadata_schema and not isinstance(value, metadata_schema[key]):
raise InvalidMetadataError(f"Invalid type for metadata field {key}")
# set to MetaDataConfig
args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}
# get dataset info
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
@ -336,10 +235,6 @@ class DocumentUpdateByFileApi(DatasetApiResource):
if not dataset:
raise ValueError("Dataset is not exist.")
# indexing_technique is already set in dataset since this is an update
args["indexing_technique"] = dataset.indexing_technique
if "file" in request.files:
# save file info
file = request.files["file"]

View File

@ -53,7 +53,8 @@ class SegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -94,7 +95,8 @@ class SegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -173,7 +175,8 @@ class DatasetSegmentApi(DatasetApiResource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider "
"in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)

View File

@ -1,54 +0,0 @@
from werkzeug.exceptions import NotFound
from controllers.service_api import api
from controllers.service_api.wraps import (
DatasetApiResource,
)
from core.file import helpers as file_helpers
from extensions.ext_database import db
from models.dataset import Dataset
from models.model import UploadFile
from services.dataset_service import DocumentService
class UploadFileApi(DatasetApiResource):
def get(self, tenant_id, dataset_id, document_id):
"""Get upload file."""
# check dataset
dataset_id = str(dataset_id)
tenant_id = str(tenant_id)
dataset = db.session.query(Dataset).filter(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
if not dataset:
raise NotFound("Dataset not found.")
# check document
document_id = str(document_id)
document = DocumentService.get_document(dataset.id, document_id)
if not document:
raise NotFound("Document not found.")
# check upload file
if document.data_source_type != "upload_file":
raise ValueError(f"Document data source type ({document.data_source_type}) is not upload_file.")
data_source_info = document.data_source_info_dict
if data_source_info and "upload_file_id" in data_source_info:
file_id = data_source_info["upload_file_id"]
upload_file = db.session.query(UploadFile).filter(UploadFile.id == file_id).first()
if not upload_file:
raise NotFound("UploadFile not found.")
else:
raise ValueError("Upload file id not found in document data source info.")
url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id)
return {
"id": upload_file.id,
"name": upload_file.name,
"size": upload_file.size,
"extension": upload_file.extension,
"url": url,
"download_url": f"{url}&as_attachment=true",
"mime_type": upload_file.mime_type,
"created_by": upload_file.created_by,
"created_at": upload_file.created_at.timestamp(),
}, 200
api.add_resource(UploadFileApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/upload-file")

View File

@ -154,7 +154,7 @@ def validate_dataset_token(view=None):
) # TODO: only owner information is required, so only one is returned.
if tenant_account_join:
tenant, ta = tenant_account_join
account = db.session.query(Account).filter(Account.id == ta.account_id).first()
account = Account.query.filter_by(id=ta.account_id).first()
# Login admin
if account:
account.current_tenant = tenant
@ -195,11 +195,7 @@ def validate_and_get_api_token(scope: str | None = None):
with Session(db.engine, expire_on_commit=False) as session:
update_stmt = (
update(ApiToken)
.where(
ApiToken.token == auth_token,
(ApiToken.last_used_at.is_(None) | (ApiToken.last_used_at < cutoff_time)),
ApiToken.type == scope,
)
.where(ApiToken.token == auth_token, ApiToken.last_used_at < cutoff_time, ApiToken.type == scope)
.values(last_used_at=current_time)
.returning(ApiToken)
)
@ -240,7 +236,7 @@ def create_or_update_end_user_for_user_id(app_model: App, user_id: Optional[str]
tenant_id=app_model.tenant_id,
app_id=app_model.id,
type="service_api",
is_anonymous=user_id == "DEFAULT-USER",
is_anonymous=True if user_id == "DEFAULT-USER" else False,
session_id=user_id,
)
db.session.add(end_user)

View File

@ -39,7 +39,7 @@ class ConversationListApi(WebApiResource):
pinned = None
if "pinned" in args and args["pinned"] is not None:
pinned = args["pinned"] == "true"
pinned = True if args["pinned"] == "true" else False
try:
with Session(db.engine) as session:

View File

@ -21,7 +21,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from fields.conversation_fields import message_file_fields
from fields.message_fields import agent_thought_fields, feedback_fields, retriever_resource_fields
from fields.message_fields import agent_thought_fields
from fields.raws import FilesContainedField
from libs import helper
from libs.helper import TimestampField, uuid_value
@ -34,6 +34,27 @@ from services.message_service import MessageService
class MessageListApi(WebApiResource):
feedback_fields = {"rating": fields.String}
retriever_resource_fields = {
"id": fields.String,
"message_id": fields.String,
"position": fields.Integer,
"dataset_id": fields.String,
"dataset_name": fields.String,
"document_id": fields.String,
"document_name": fields.String,
"data_source_type": fields.String,
"segment_id": fields.String,
"score": fields.Float,
"hit_count": fields.Integer,
"word_count": fields.Integer,
"segment_position": fields.Integer,
"index_node_hash": fields.String,
"content": fields.String,
"created_at": TimestampField,
}
message_fields = {
"id": fields.String,
"conversation_id": fields.String,
@ -70,7 +91,7 @@ class MessageListApi(WebApiResource):
try:
return MessageService.pagination_by_first_id(
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"]
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")

View File

@ -329,7 +329,6 @@ class BaseAgentRunner(AppRunner):
)
if not updated_agent_thought:
raise ValueError("agent thought not found")
agent_thought = updated_agent_thought
if thought:
agent_thought.thought = thought

View File

@ -168,7 +168,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
self.save_agent_thought(
agent_thought=agent_thought,
tool_name=(scratchpad.action.action_name if scratchpad.action and not scratchpad.is_final() else ""),
tool_name=scratchpad.action.action_name if scratchpad.action else "",
tool_input={scratchpad.action.action_name: scratchpad.action.action_input} if scratchpad.action else {},
tool_invoke_meta={},
thought=scratchpad.thought or "",

View File

@ -1,7 +1,7 @@
from enum import StrEnum
from typing import Any, Optional, Union
from pydantic import BaseModel, Field
from pydantic import BaseModel
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolProviderType
@ -14,7 +14,7 @@ class AgentToolEntity(BaseModel):
provider_type: ToolProviderType
provider_id: str
tool_name: str
tool_parameters: dict[str, Any] = Field(default_factory=dict)
tool_parameters: dict[str, Any] = {}
plugin_unique_identifier: str | None = None
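On the `Field(default_factory=dict)` versus `= {}` change in this hunk: Pydantic copies mutable defaults per instance, so both spellings behave the same for a `BaseModel`, but `default_factory` is the idiom linters expect, because plain mutable class defaults are a classic pitfall elsewhere in Python. A minimal sketch of that pitfall, with hypothetical classes:

```python
from dataclasses import dataclass, field

class Plain:
    params: dict = {}  # a single dict shared through the class attribute

a, b = Plain(), Plain()
a.params["k"] = "v"
print(b.params)  # {'k': 'v'} -- the mutation leaks across instances

@dataclass
class Safe:
    params: dict = field(default_factory=dict)  # a fresh dict per instance

c, d = Safe(), Safe()
c.params["k"] = "v"
print(d.params)  # {}
```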

View File

@ -2,9 +2,9 @@ from collections.abc import Mapping
from typing import Any
from core.app.app_config.entities import ModelConfigEntity
from core.entities import DEFAULT_PLUGIN_ID
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
@ -61,7 +61,9 @@ class ModelConfigManager:
raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}")
if "/" not in config["model"]["provider"]:
config["model"]["provider"] = str(ModelProviderID(config["model"]["provider"]))
config["model"]["provider"] = (
f"{DEFAULT_PLUGIN_ID}/{config['model']['provider']}/{config['model']['provider']}"
)
if config["model"]["provider"] not in model_provider_names:
raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}")

View File

@ -17,8 +17,8 @@ class ModelConfigEntity(BaseModel):
provider: str
model: str
mode: Optional[str] = None
parameters: dict[str, Any] = Field(default_factory=dict)
stop: list[str] = Field(default_factory=list)
parameters: dict[str, Any] = {}
stop: list[str] = []
class AdvancedChatMessageEntity(BaseModel):
@ -132,7 +132,7 @@ class ExternalDataVariableEntity(BaseModel):
variable: str
type: str
config: dict[str, Any] = Field(default_factory=dict)
config: dict[str, Any] = {}
class DatasetRetrieveConfigEntity(BaseModel):
@ -188,7 +188,7 @@ class SensitiveWordAvoidanceEntity(BaseModel):
"""
type: str
config: dict[str, Any] = Field(default_factory=dict)
config: dict[str, Any] = {}
class TextToSpeechEntity(BaseModel):

View File

@ -140,7 +140,9 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
app_config=app_config,
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs(
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,

View File

@ -149,7 +149,9 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs(
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,

View File

@ -8,16 +8,16 @@ from core.agent.fc_agent_runner import FunctionCallAgentRunner
from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
from core.app.apps.base_app_runner import AppRunner
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, ModelConfigWithCredentialsEntity
from core.app.entities.queue_entities import QueueAnnotationReplyEvent
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMMode
from core.model_runtime.entities.llm_entities import LLMMode, LLMUsage
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.moderation.base import ModerationError
from extensions.ext_database import db
from models.model import App, Conversation, Message
from models.model import App, Conversation, Message, MessageAgentThought
logger = logging.getLogger(__name__)
@ -191,8 +191,7 @@ class AgentChatAppRunner(AppRunner):
# change function call strategy based on LLM model
llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
model_schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials)
if not model_schema:
raise ValueError("Model schema not found")
assert model_schema is not None
if {ModelFeature.MULTI_TOOL_CALL, ModelFeature.TOOL_CALL}.intersection(model_schema.features or []):
agent_entity.strategy = AgentEntity.Strategy.FUNCTION_CALLING
@ -248,3 +247,29 @@ class AgentChatAppRunner(AppRunner):
stream=application_generate_entity.stream,
agent=True,
)
def _get_usage_of_all_agent_thoughts(
self, model_config: ModelConfigWithCredentialsEntity, message: Message
) -> LLMUsage:
"""
Get usage of all agent thoughts
:param model_config: model config
:param message: message
:return:
"""
agent_thoughts = (
db.session.query(MessageAgentThought).filter(MessageAgentThought.message_id == message.id).all()
)
all_message_tokens = 0
all_answer_tokens = 0
for agent_thought in agent_thoughts:
all_message_tokens += agent_thought.message_tokens
all_answer_tokens += agent_thought.answer_tokens
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
return model_type_instance._calc_response_usage(
model_config.model, model_config.credentials, all_message_tokens, all_answer_tokens
)

View File

@ -167,7 +167,8 @@ class AppQueueManager:
else:
if isinstance(data, DeclarativeMeta) or hasattr(data, "_sa_instance_state"):
raise TypeError(
"Critical Error: Passing SQLAlchemy Model instances that cause thread safety issues is not allowed."
"Critical Error: Passing SQLAlchemy Model instances "
"that cause thread safety issues is not allowed."
)

View File

@ -141,7 +141,9 @@ class ChatAppGenerator(MessageBasedAppGenerator):
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs(
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
),
query=query,

View File

@ -42,6 +42,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
ChatAppGenerateEntity,
CompletionAppGenerateEntity,
AgentChatAppGenerateEntity,
AgentChatAppGenerateEntity,
],
queue_manager: AppQueueManager,
conversation: Conversation,
@ -88,7 +89,6 @@ class MessageBasedAppGenerator(BaseAppGenerator):
Conversation.id == conversation_id,
Conversation.app_id == app_model.id,
Conversation.status == "normal",
Conversation.is_deleted.is_(False),
]
if isinstance(user, Account):

View File

@ -241,7 +241,6 @@ class WorkflowBasedAppRunner(AppRunner):
predecessor_node_id=event.predecessor_node_id,
in_iteration_id=event.in_iteration_id,
parallel_mode_run_id=event.parallel_mode_run_id,
agent_strategy=event.agent_strategy,
)
)
elif isinstance(event, NodeRunSucceededEvent):
@ -387,6 +386,7 @@ class WorkflowBasedAppRunner(AppRunner):
status=event.status,
data=event.data,
metadata=event.metadata,
node_id=event.node_id,
)
)
elif isinstance(event, ParallelBranchRunStartedEvent):

View File

@ -63,9 +63,9 @@ class ModelConfigWithCredentialsEntity(BaseModel):
model_schema: AIModelEntity
mode: str
provider_model_bundle: ProviderModelBundle
credentials: dict[str, Any] = Field(default_factory=dict)
parameters: dict[str, Any] = Field(default_factory=dict)
stop: list[str] = Field(default_factory=list)
credentials: dict[str, Any] = {}
parameters: dict[str, Any] = {}
stop: list[str] = []
# pydantic configs
model_config = ConfigDict(protected_namespaces=())
@ -94,7 +94,7 @@ class AppGenerateEntity(BaseModel):
call_depth: int = 0
# extra parameters, like: auto_generate_conversation_name
extras: dict[str, Any] = Field(default_factory=dict)
extras: dict[str, Any] = {}
# tracing instance
trace_manager: Optional[TraceQueueManager] = None
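
These hunks trade `Field(default_factory=...)` for plain mutable defaults (`{}`, `[]`). In Pydantic the two are interchangeable for this purpose, because the model copies a mutable default for each instance rather than sharing it; a small demonstration, assuming Pydantic v2 is installed:

# Pydantic copies mutable defaults per instance, so `extras: dict = {}`
# and `extras: dict = Field(default_factory=dict)` behave alike here.
from pydantic import BaseModel, Field

class WithLiteral(BaseModel):
    extras: dict = {}

class WithFactory(BaseModel):
    extras: dict = Field(default_factory=dict)

a, b = WithLiteral(), WithLiteral()
a.extras["k"] = "v"
assert b.extras == {}  # not shared: the default is copied per instance

c, d = WithFactory(), WithFactory()
c.extras["k"] = "v"
assert d.extras == {}  # default_factory builds a fresh dict each time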

View File

@ -6,7 +6,7 @@ from typing import Any, Optional
from pydantic import BaseModel
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.workflow.entities.node_entities import AgentNodeStrategyInit, NodeRunMetadataKey
from core.workflow.entities.node_entities import NodeRunMetadataKey
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes import NodeType
from core.workflow.nodes.base import BaseNodeData
@ -281,7 +281,6 @@ class QueueNodeStartedEvent(AppQueueEvent):
start_at: datetime
parallel_mode_run_id: Optional[str] = None
"""iteratoin run in parallel mode run id"""
agent_strategy: Optional[AgentNodeStrategyInit] = None
class QueueNodeSucceededEvent(AppQueueEvent):
@ -331,6 +330,7 @@ class QueueAgentLogEvent(AppQueueEvent):
status: str
data: Mapping[str, Any]
metadata: Optional[Mapping[str, Any]] = None
node_id: str
class QueueNodeRetryEvent(QueueNodeStartedEvent):

View File

@ -6,7 +6,6 @@ from pydantic import BaseModel, ConfigDict
from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.utils.encoders import jsonable_encoder
from core.workflow.entities.node_entities import AgentNodeStrategyInit
from models.workflow import WorkflowNodeExecutionStatus
@ -249,7 +248,6 @@ class NodeStartStreamResponse(StreamResponse):
parent_parallel_start_node_id: Optional[str] = None
iteration_id: Optional[str] = None
parallel_run_id: Optional[str] = None
agent_strategy: Optional[AgentNodeStrategyInit] = None
event: StreamEvent = StreamEvent.NODE_STARTED
workflow_run_id: str
@ -719,6 +717,7 @@ class AgentLogStreamResponse(StreamResponse):
status: str
data: Mapping[str, Any]
metadata: Optional[Mapping[str, Any]] = None
node_id: str
event: StreamEvent = StreamEvent.AGENT_LOG
data: Data

View File

@ -145,7 +145,7 @@ class MessageCycleManage:
# get extension
if "." in message_file.url:
extension = f".{message_file.url.split('.')[-1]}"
extension = f'.{message_file.url.split(".")[-1]}'
if len(extension) > 10:
extension = ".bin"
else:

View File

@ -541,7 +541,6 @@ class WorkflowCycleManage:
parent_parallel_start_node_id=event.parent_parallel_start_node_id,
iteration_id=event.in_iteration_id,
parallel_run_id=event.parallel_mode_run_id,
agent_strategy=event.agent_strategy,
),
)
@ -844,7 +843,7 @@ class WorkflowCycleManage:
if node_execution_id not in self._workflow_node_executions:
raise ValueError(f"Workflow node execution not found: {node_execution_id}")
cached_workflow_node_execution = self._workflow_node_executions[node_execution_id]
return session.merge(cached_workflow_node_execution)
return cached_workflow_node_execution
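
The hunk above decides whether the cached `WorkflowNodeExecution` is reconciled into the caller's session (`session.merge`) or handed back as-is. A minimal sketch of what `Session.merge` does with an object built outside the session; the model and table names below are hypothetical, not Dify's:

# Session.merge copies the state of an outside object onto a persistent
# instance owned by *this* session, keyed by primary key.
from sqlalchemy import create_engine, Integer, String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, Session

class Base(DeclarativeBase):
    pass

class NodeExecution(Base):
    __tablename__ = "node_execution"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    status: Mapped[str] = mapped_column(String(16))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as s:
    s.add(NodeExecution(id=1, status="running"))
    s.commit()

cached = NodeExecution(id=1, status="succeeded")  # built outside any session, e.g. from a cache

with Session(engine) as s:
    managed = s.merge(cached)  # reconciled copy, attached to this session
    s.commit()                 # persists status="succeeded"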
def _handle_agent_log(self, task_id: str, event: QueueAgentLogEvent) -> AgentLogStreamResponse:
"""
@ -864,5 +863,6 @@ class WorkflowCycleManage:
status=event.status,
data=event.data,
metadata=event.metadata,
node_id=event.node_id,
),
)

View File

@ -6,10 +6,10 @@ from collections.abc import Iterator, Sequence
from json import JSONDecodeError
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field
from sqlalchemy import or_
from pydantic import BaseModel, ConfigDict
from constants import HIDDEN_VALUE
from core.entities import DEFAULT_PLUGIN_ID
from core.entities.model_entities import ModelStatus, ModelWithProviderEntity, SimpleModelProviderEntity
from core.entities.provider_entities import (
CustomConfiguration,
@ -28,7 +28,6 @@ from core.model_runtime.entities.provider_entities import (
)
from core.model_runtime.model_providers.__base.ai_model import AIModel
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.plugin.entities.plugin import ModelProviderID
from extensions.ext_database import db
from models.provider import (
LoadBalancingModelConfig,
@ -191,11 +190,8 @@ class ProviderConfiguration(BaseModel):
db.session.query(Provider)
.filter(
Provider.tenant_id == self.tenant_id,
Provider.provider_name == self.provider.provider,
Provider.provider_type == ProviderType.CUSTOM.value,
or_(
Provider.provider_name == ModelProviderID(self.provider.provider).plugin_name,
Provider.provider_name == self.provider.provider,
),
)
.first()
)
@ -283,10 +279,7 @@ class ProviderConfiguration(BaseModel):
db.session.query(Provider)
.filter(
Provider.tenant_id == self.tenant_id,
or_(
Provider.provider_name == ModelProviderID(self.provider.provider).plugin_name,
Provider.provider_name == self.provider.provider,
),
Provider.provider_name == self.provider.provider,
Provider.provider_type == ProviderType.CUSTOM.value,
)
.first()
@ -1003,7 +996,7 @@ class ProviderConfigurations(BaseModel):
"""
tenant_id: str
configurations: dict[str, ProviderConfiguration] = Field(default_factory=dict)
configurations: dict[str, ProviderConfiguration] = {}
def __init__(self, tenant_id: str):
super().__init__(tenant_id=tenant_id)
@ -1059,7 +1052,7 @@ class ProviderConfigurations(BaseModel):
def __getitem__(self, key):
if "/" not in key:
key = str(ModelProviderID(key))
key = f"{DEFAULT_PLUGIN_ID}/{key}/{key}"
return self.configurations[key]
@ -1074,7 +1067,7 @@ class ProviderConfigurations(BaseModel):
def get(self, key, default=None) -> ProviderConfiguration | None:
if "/" not in key:
key = str(ModelProviderID(key))
key = f"{DEFAULT_PLUGIN_ID}/{key}/{key}"
return self.configurations.get(key, default) # type: ignore

View File

@ -62,9 +62,8 @@ class ApiExternalDataTool(ExternalDataTool):
if not api_based_extension:
raise ValueError(
"[External data tool] API query failed, variable: {}, error: api_based_extension_id is invalid".format(
self.variable
)
"[External data tool] API query failed, variable: {}, "
"error: api_based_extension_id is invalid".format(self.variable)
)
# decrypt api_key

View File

@ -33,7 +33,7 @@ def get_signed_file_url_for_plugin(filename: str, mimetype: str, tenant_id: str,
sign = hmac.new(key, msg.encode(), hashlib.sha256).digest()
encoded_sign = base64.urlsafe_b64encode(sign).decode()
return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}&user_id={user_id}&tenant_id={tenant_id}"
return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}&user_id={user_id}"
def verify_plugin_file_signature(
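
The signed URL carries an HMAC-SHA256 digest over the request parameters, base64url-encoded; verification rebuilds the same message and compares digests. A hedged sketch of such a sign/verify pair (the exact message layout is an assumption, since it is not shown in this hunk):

# Illustrative only: the `msg` format below is assumed, not Dify's actual layout.
import base64, hashlib, hmac, os, time

SECRET = os.urandom(32)  # stand-in for the shared signing key

def sign(filename: str, timestamp: str, nonce: str) -> str:
    msg = f"file={filename}&timestamp={timestamp}&nonce={nonce}"  # assumed layout
    digest = hmac.new(SECRET, msg.encode(), hashlib.sha256).digest()
    return base64.urlsafe_b64encode(digest).decode()

def verify(filename: str, timestamp: str, nonce: str, sign_param: str,
           max_age: int = 300) -> bool:
    expected = sign(filename, timestamp, nonce)
    fresh = time.time() - int(timestamp) <= max_age
    # compare_digest avoids leaking the signature via timing differences
    return fresh and hmac.compare_digest(expected, sign_param)

ts, nonce = str(int(time.time())), "abc123"
s = sign("report.pdf", ts, nonce)
assert verify("report.pdf", ts, nonce, s)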

View File

@ -90,7 +90,7 @@ class File(BaseModel):
def markdown(self) -> str:
url = self.generate_url()
if self.type == FileType.IMAGE:
text = f"![{self.filename or ''}]({url})"
text = f'![{self.filename or ""}]({url})'
else:
text = f"[{self.filename or url}]({url})"

View File

@ -11,6 +11,15 @@ from configs import dify_config
SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES
proxy_mounts = (
{
"http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
}
if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL
else None
)
BACKOFF_FACTOR = 0.5
STATUS_FORCELIST = [429, 500, 502, 503, 504]
@ -41,11 +50,7 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if dify_config.SSRF_PROXY_ALL_URL:
with httpx.Client(proxy=dify_config.SSRF_PROXY_ALL_URL) as client:
response = client.request(method=method, url=url, **kwargs)
elif dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL:
proxy_mounts = {
"http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
}
elif proxy_mounts:
with httpx.Client(mounts=proxy_mounts) as client:
response = client.request(method=method, url=url, **kwargs)
else:
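
Hoisting `proxy_mounts` to module level means each request only selects a pre-built transport map instead of reconstructing it per call. A minimal sketch of httpx's mounts mechanism, with placeholder proxy URLs:

# Requests are routed to the transport whose URL-pattern prefix matches,
# so plain HTTP and HTTPS traffic can use different proxies.
import httpx

proxy_mounts = {
    "http://": httpx.HTTPTransport(proxy="http://proxy.internal:3128"),
    "https://": httpx.HTTPTransport(proxy="http://proxy.internal:3128"),
}

with httpx.Client(mounts=proxy_mounts) as client:
    resp = client.get("https://example.com/")
    print(resp.status_code)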

View File

@ -41,13 +41,9 @@ class HostedModerationConfig(BaseModel):
class HostingConfiguration:
provider_map: dict[str, HostingProvider]
provider_map: dict[str, HostingProvider] = {}
moderation_config: Optional[HostedModerationConfig] = None
def __init__(self) -> None:
self.provider_map = {}
self.moderation_config = None
def init_app(self, app: Flask) -> None:
if dify_config.EDITION != "CLOUD":
return

View File

@ -530,6 +530,7 @@ class IndexingRunner:
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 10
if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX:
# create keyword index
create_keyword_thread = threading.Thread(
@ -538,22 +539,11 @@ class IndexingRunner:
)
create_keyword_thread.start()
max_workers = 10
if dataset.indexing_technique == "high_quality":
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = []
# Distribute documents into multiple groups based on the hash values of page_content
# This is done to prevent multiple threads from processing the same document,
# Thereby avoiding potential database insertion deadlocks
document_groups: list[list[Document]] = [[] for _ in range(max_workers)]
for document in documents:
hash = helper.generate_text_hash(document.page_content)
group_index = int(hash, 16) % max_workers
document_groups[group_index].append(document)
for chunk_documents in document_groups:
if len(chunk_documents) == 0:
continue
for i in range(0, len(documents), chunk_size):
chunk_documents = documents[i : i + chunk_size]
futures.append(
executor.submit(
self._process_chunk,
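
The removed branch partitions documents by a hash of their content so that identical chunks always land in the same worker, which is what prevents two threads from inserting the same row and deadlocking. A standalone sketch of that partitioning, with `text_hash` standing in for `helper.generate_text_hash`:

# Identical payloads always map to the same group, so no two workers
# can race on the same database row.
import concurrent.futures
import hashlib

def text_hash(text: str) -> str:
    return hashlib.sha256(text.encode()).hexdigest()

def process_group(group: list[str]) -> int:
    return len(group)  # placeholder for the real chunk-processing work

documents = ["alpha", "beta", "gamma", "alpha", "delta"]
max_workers = 4
groups: list[list[str]] = [[] for _ in range(max_workers)]
for doc in documents:
    groups[int(text_hash(doc), 16) % max_workers].append(doc)

with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
    futures = [pool.submit(process_group, g) for g in groups if g]
    print(sum(f.result() for f in futures))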

View File

@ -131,7 +131,7 @@ JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE = (
SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
"Please help me predict the three most likely questions that human would ask, "
"and keeping each question under 20 characters.\n"
"MAKE SURE your output is the SAME language as the Assistant's latest response. "
"MAKE SURE your output is the SAME language as the Assistant's latest response"
"The output must be an array in JSON format following the specified schema:\n"
'["question1","question2","question3"]\n'
)

View File

@ -1,4 +1,4 @@
from .llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from .llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from .message_entities import (
AssistantPromptMessage,
AudioPromptMessageContent,
@ -23,7 +23,6 @@ __all__ = [
"AudioPromptMessageContent",
"DocumentPromptMessageContent",
"ImagePromptMessageContent",
"LLMMode",
"LLMResult",
"LLMResultChunk",
"LLMResultChunkDelta",

View File

@ -1,5 +1,5 @@
from decimal import Decimal
from enum import StrEnum
from enum import Enum
from typing import Optional
from pydantic import BaseModel
@ -8,7 +8,7 @@ from core.model_runtime.entities.message_entities import AssistantPromptMessage,
from core.model_runtime.entities.model_entities import ModelUsage, PriceInfo
class LLMMode(StrEnum):
class LLMMode(Enum):
"""
Enum class for large language model mode.
"""

View File

@ -3,11 +3,8 @@ from typing import Optional
from pydantic import BaseModel, ConfigDict, Field
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE
from core.model_runtime.entities.model_entities import (
AIModelEntity,
DefaultParameterName,
ModelType,
PriceConfig,
PriceInfo,
@ -21,7 +18,6 @@ from core.model_runtime.errors.invoke import (
InvokeRateLimitError,
InvokeServerUnavailableError,
)
from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer
from core.plugin.entities.plugin_daemon import PluginDaemonInnerError, PluginModelProviderEntity
from core.plugin.manager.model import PluginModelManager
@ -148,102 +144,3 @@ class AIModel(BaseModel):
model=model,
credentials=credentials or {},
)
def get_customizable_model_schema_from_credentials(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
"""
Get customizable model schema from credentials
:param model: model name
:param credentials: model credentials
:return: model schema
"""
return self._get_customizable_model_schema(model, credentials)
def _get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
"""
Get customizable model schema and fill in the template
"""
schema = self.get_customizable_model_schema(model, credentials)
if not schema:
return None
# fill in the template
new_parameter_rules = []
for parameter_rule in schema.parameter_rules:
if parameter_rule.use_template:
try:
default_parameter_name = DefaultParameterName.value_of(parameter_rule.use_template)
default_parameter_rule = self._get_default_parameter_rule_variable_map(default_parameter_name)
if not parameter_rule.max and "max" in default_parameter_rule:
parameter_rule.max = default_parameter_rule["max"]
if not parameter_rule.min and "min" in default_parameter_rule:
parameter_rule.min = default_parameter_rule["min"]
if not parameter_rule.default and "default" in default_parameter_rule:
parameter_rule.default = default_parameter_rule["default"]
if not parameter_rule.precision and "precision" in default_parameter_rule:
parameter_rule.precision = default_parameter_rule["precision"]
if not parameter_rule.required and "required" in default_parameter_rule:
parameter_rule.required = default_parameter_rule["required"]
if not parameter_rule.help and "help" in default_parameter_rule:
parameter_rule.help = I18nObject(
en_US=default_parameter_rule["help"]["en_US"],
)
if (
parameter_rule.help
and not parameter_rule.help.en_US
and ("help" in default_parameter_rule and "en_US" in default_parameter_rule["help"])
):
parameter_rule.help.en_US = default_parameter_rule["help"]["en_US"]
if (
parameter_rule.help
and not parameter_rule.help.zh_Hans
and ("help" in default_parameter_rule and "zh_Hans" in default_parameter_rule["help"])
):
parameter_rule.help.zh_Hans = default_parameter_rule["help"].get(
"zh_Hans", default_parameter_rule["help"]["en_US"]
)
except ValueError:
pass
new_parameter_rules.append(parameter_rule)
schema.parameter_rules = new_parameter_rules
return schema
def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
"""
Get customizable model schema
:param model: model name
:param credentials: model credentials
:return: model schema
"""
return None
def _get_default_parameter_rule_variable_map(self, name: DefaultParameterName) -> dict:
"""
Get default parameter rule for given name
:param name: parameter name
:return: parameter rule
"""
default_parameter_rule = PARAMETER_RULE_TEMPLATE.get(name)
if not default_parameter_rule:
raise Exception(f"Invalid model parameter rule name {name}")
return default_parameter_rule
def _get_num_tokens_by_gpt2(self, text: str) -> int:
"""
Get number of tokens for given prompt messages by gpt2
Some provider models do not provide an interface for obtaining the number of tokens.
Here, the gpt2 tokenizer is used to calculate the number of tokens.
This method can be executed offline, and the gpt2 tokenizer has been cached in the project.
:param text: plain text of prompt. You need to convert the original message to plain text
:return: number of tokens
"""
return GPT2Tokenizer.get_num_tokens(text)
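
`_get_num_tokens_by_gpt2` counts tokens offline with a locally cached GPT-2 tokenizer, for providers that expose no token-counting API. A hedged sketch of the same idea using Hugging Face `transformers`; the `"gpt2"` checkpoint path is an assumption here, as Dify loads a vendored copy instead:

# Token counting only; no network call is needed once the files are cached.
from transformers import GPT2Tokenizer

_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

def num_tokens(text: str) -> int:
    return len(_tokenizer.encode(text))

print(num_tokens("Hello, world!"))  # e.g. 4 tokens with the GPT-2 BPE vocab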

View File

@ -107,46 +107,11 @@ class LargeLanguageModel(AIModel):
content_list = []
usage = LLMUsage.empty_usage()
system_fingerprint = None
tools_calls: list[AssistantPromptMessage.ToolCall] = []
def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
def get_tool_call(tool_name: str):
if not tool_name:
return tools_calls[-1]
tool_call = next(
(tool_call for tool_call in tools_calls if tool_call.function.name == tool_name), None
)
if tool_call is None:
tool_call = AssistantPromptMessage.ToolCall(
id="",
type="",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tool_name, arguments=""),
)
tools_calls.append(tool_call)
return tool_call
for new_tool_call in new_tool_calls:
# get tool call
tool_call = get_tool_call(new_tool_call.function.name)
# update tool call
if new_tool_call.id:
tool_call.id = new_tool_call.id
if new_tool_call.type:
tool_call.type = new_tool_call.type
if new_tool_call.function.name:
tool_call.function.name = new_tool_call.function.name
if new_tool_call.function.arguments:
tool_call.function.arguments += new_tool_call.function.arguments
for chunk in result:
if isinstance(chunk.delta.message.content, str):
content += chunk.delta.message.content
elif isinstance(chunk.delta.message.content, list):
content_list.extend(chunk.delta.message.content)
if chunk.delta.message.tool_calls:
increase_tool_call(chunk.delta.message.tool_calls)
usage = chunk.delta.usage or LLMUsage.empty_usage()
system_fingerprint = chunk.system_fingerprint
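
The removed `increase_tool_call` helper reassembles streamed tool calls: a fragment that carries a name locates or creates the matching call, and a nameless fragment appends its argument text to the most recently seen call. A standalone sketch with a simplified `ToolCall` standing in for `AssistantPromptMessage.ToolCall`:

# Streamed tool-call fragments are merged by name; argument text accumulates.
from dataclasses import dataclass

@dataclass
class ToolCall:
    name: str = ""
    arguments: str = ""

def merge(fragments: list[ToolCall]) -> list[ToolCall]:
    calls: list[ToolCall] = []
    for frag in fragments:
        if frag.name:
            call = next((c for c in calls if c.name == frag.name), None)
            if call is None:
                call = ToolCall(name=frag.name)
                calls.append(call)
        else:
            call = calls[-1]  # nameless fragment continues the latest call
        call.arguments += frag.arguments
    return calls

stream = [ToolCall("get_weather", '{"city"'), ToolCall("", ': "Paris"}')]
print(merge(stream))  # one call with fully assembled JSON arguments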
@ -155,10 +120,7 @@ class LargeLanguageModel(AIModel):
result = LLMResult(
model=model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(
content=content or content_list,
tool_calls=tools_calls,
),
message=AssistantPromptMessage(content=content or content_list),
usage=usage,
system_fingerprint=system_fingerprint,
)
@ -228,7 +190,7 @@ class LargeLanguageModel(AIModel):
:return: result generator
"""
callbacks = callbacks or []
assistant_message = AssistantPromptMessage(content="")
prompt_message = AssistantPromptMessage(content="")
usage = None
system_fingerprint = None
real_model = model
@ -250,7 +212,7 @@ class LargeLanguageModel(AIModel):
callbacks=callbacks,
)
assistant_message.content += chunk.delta.message.content
prompt_message.content += chunk.delta.message.content
real_model = chunk.model
if chunk.delta.usage:
usage = chunk.delta.usage
@ -265,7 +227,7 @@ class LargeLanguageModel(AIModel):
result=LLMResult(
model=real_model,
prompt_messages=prompt_messages,
message=assistant_message,
message=prompt_message,
usage=usage or LLMUsage.empty_usage(),
system_fingerprint=system_fingerprint,
),

View File

@ -1,9 +1,6 @@
import logging
from threading import Lock
from typing import Any
logger = logging.getLogger(__name__)
_tokenizer: Any = None
_lock = Lock()
@ -46,6 +43,5 @@ class GPT2Tokenizer:
base_path = abspath(__file__)
gpt2_tokenizer_path = join(dirname(base_path), "gpt2")
_tokenizer = TransformerGPT2Tokenizer.from_pretrained(gpt2_tokenizer_path)
logger.info("Fallback to Transformers' GPT-2 tokenizer from tiktoken")
return _tokenizer

View File

@ -1,5 +1,4 @@
- openai
- deepseek
- anthropic
- azure_openai
- google
@ -33,6 +32,7 @@
- localai
- volcengine_maas
- openai_api_compatible
- deepseek
- hunyuan
- siliconflow
- perfxcloud

View File

@ -7,6 +7,7 @@ from typing import Optional
from pydantic import BaseModel
import contexts
from core.entities import DEFAULT_PLUGIN_ID
from core.helper.position_helper import get_provider_position_map, sort_to_dict_by_position_map
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity
@ -19,7 +20,6 @@ from core.model_runtime.model_providers.__base.text_embedding_model import TextE
from core.model_runtime.model_providers.__base.tts_model import TTSModel
from core.model_runtime.schema_validators.model_credential_schema_validator import ModelCredentialSchemaValidator
from core.model_runtime.schema_validators.provider_credential_schema_validator import ProviderCredentialSchemaValidator
from core.plugin.entities.plugin import ModelProviderID
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.plugin.manager.asset import PluginAssetManager
from core.plugin.manager.model import PluginModelManager
@ -33,11 +33,9 @@ class ModelProviderExtension(BaseModel):
class ModelProviderFactory:
provider_position_map: dict[str, int]
provider_position_map: dict[str, int] = {}
def __init__(self, tenant_id: str) -> None:
self.provider_position_map = {}
self.tenant_id = tenant_id
self.plugin_model_manager = PluginModelManager()
@ -114,9 +112,6 @@ class ModelProviderFactory:
:param provider: provider name
:return: provider schema
"""
if "/" not in provider:
provider = str(ModelProviderID(provider))
# fetch plugin model providers
plugin_model_provider_entities = self.get_plugin_model_providers()
@ -361,5 +356,11 @@ class ModelProviderFactory:
:param provider: provider name
:return: plugin id and provider name
"""
provider_id = ModelProviderID(provider)
return provider_id.plugin_id, provider_id.provider_name
plugin_id = DEFAULT_PLUGIN_ID
provider_name = provider
if "/" in provider:
# get the plugin_id before provider
plugin_id = "/".join(provider.split("/")[:-1])
provider_name = provider.split("/")[-1]
return plugin_id, provider_name
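
The fallback parser treats everything before the last `/` as the plugin id and the final segment as the provider name, defaulting to `DEFAULT_PLUGIN_ID` for bare names. A minimal sketch; the `"langgenius"` value used for `DEFAULT_PLUGIN_ID` below is an assumption:

DEFAULT_PLUGIN_ID = "langgenius"  # assumed value, for illustration only

def split_provider(provider: str) -> tuple[str, str]:
    if "/" in provider:
        plugin_id, _, provider_name = provider.rpartition("/")
        return plugin_id, provider_name
    return DEFAULT_PLUGIN_ID, provider

print(split_provider("langgenius/openai/openai"))  # ('langgenius/openai', 'openai')
print(split_provider("openai"))                    # ('langgenius', 'openai')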

View File

@ -1,22 +0,0 @@
- claude-3-haiku@20240307
- claude-3-opus@20240229
- claude-3-sonnet@20240229
- claude-3-5-sonnet-v2@20241022
- claude-3-5-sonnet@20240620
- gemini-1.0-pro-vision-001
- gemini-1.0-pro-002
- gemini-1.5-flash-001
- gemini-1.5-flash-002
- gemini-1.5-pro-001
- gemini-1.5-pro-002
- gemini-2.0-flash-001
- gemini-2.0-flash-exp
- gemini-2.0-flash-lite-preview-02-05
- gemini-2.0-flash-thinking-exp-01-21
- gemini-2.0-flash-thinking-exp-1219
- gemini-2.0-pro-exp-02-05
- gemini-exp-1114
- gemini-exp-1121
- gemini-exp-1206
- gemini-flash-experimental
- gemini-pro-experimental

Some files were not shown because too many files have changed in this diff.