Compare commits


368 Commits

Author SHA1 Message Date
Yi
9125971da2 fix: margin in rerank switch 2024-10-09 17:59:42 +08:00
Yi
6f9d6cd3e1 fix: edit external knowledge api warning message 2024-09-30 14:23:51 +08:00
Yi
f6074b6545 fix: chatbot rerank popup logics 2024-09-30 14:02:23 +08:00
Yi
fd4d7e9002 fix: edit dataset card from datasets page, naming 2024-09-30 11:58:46 +08:00
Yi
383a60a7df fix: rerank open logics added to chatgpt, modified the hit detail modal styling 2024-09-29 18:33:27 +08:00
Yi
918df23f64 Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-29 17:54:33 +08:00
Yi
bc81d2d30d fix: styling issues and create knowledge api from the knowledge base creation page 2024-09-29 17:26:49 +08:00
89290183c6 add score threshold enabled 2024-09-29 15:36:59 +08:00
Yi
6508e7e1e4 fix: retrieval config for rerank cases 2024-09-29 14:52:47 +08:00
1955de2463 add tidb on qdrant whitelist and batch job 2024-09-29 14:33:28 +08:00
4ee3743b20 add tidb on qdrant whitelist and batch job 2024-09-29 11:57:15 +08:00
Yi
e5d8c07508 add helper text 2024-09-29 11:12:03 +08:00
Yi
69c0f3f2ad fix: default selection issue & trigger retrieval setting unintentionally 2024-09-28 14:13:02 +08:00
Yi
b92fced974 Merge branch 'main' into feat/external-knowledge-api 2024-09-27 22:39:04 +08:00
Yi
644ab2df35 feat: add new external knowledge api from the knowledge create page 2024-09-27 22:38:13 +08:00
55e6123db9 feat: add min-connection and max-connection for pgvector (#8841) 2024-09-27 18:16:20 +08:00
020766a5e8 Merge branch 'main' into feat/external-knowledge-api
# Conflicts:
#	api/poetry.lock
2024-09-27 17:49:40 +08:00
Yi
c9e3a9e56a feat: add external api from the create external knowledge page 2024-09-27 17:44:01 +08:00
c828a5dfdf feat(Tools): add feishu tools (#8800)
Co-authored-by: 黎斌 <libin.23@bytedance.com>
2024-09-27 17:31:45 +08:00
9c9352bc73 update to external knowledge api 2024-09-27 16:17:45 +08:00
2a1cba9f4d Merge remote-tracking branch 'origin/feat/external-knowledge-api' into feat/external-knowledge-api 2024-09-27 16:03:18 +08:00
8e73844781 update to external knowledge api 2024-09-27 16:02:59 +08:00
Yi
5554cf7b20 feat: connect knowledge base to app 2024-09-27 15:50:22 +08:00
0603359e2d fix: delete harm catalog settings for gemini (#8829) 2024-09-27 13:49:03 +08:00
bb781764b8 Add Llama3.2 models in Groq provider (#8831) 2024-09-27 12:13:00 +08:00
29275c7447 feat: deprecate mistral model for siliconflow (#8828) 2024-09-27 12:11:56 +08:00
4c1063e1c5 fix: AnalyticdbVector retrieval scores (#8803) 2024-09-27 12:05:21 +08:00
d6b9587a97 fix: close log status option raise error (#8826) 2024-09-27 11:13:40 +08:00
6fbaabc1bc feat: add pgvecto-rs and analyticdb in docker/.env.example (#8823) 2024-09-27 11:13:29 +08:00
Yi
1597f34471 Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-27 10:11:19 +08:00
a36117e12d Updated the YouTube channel to Dify's (#8817) 2024-09-27 09:15:33 +08:00
e5efd09ebb chore: massive update of the Gemini models based on latest documentation (#8822) 2024-09-27 09:14:33 +08:00
Yi
1c7cb3fbc0 feat: external knowledge base 2024-09-27 00:33:56 +08:00
ecc951609d add more detailed doc for models of qwen series (#8799)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-26 22:32:33 +08:00
063474f408 Add llama3.2 model in fireworks provider (#8809) 2024-09-26 22:21:01 +08:00
3dfbc348e3 feat: improved SVG output UX (#8765) 2024-09-26 19:41:59 +08:00
9a4b53a212 feat: add stream for Gemini (#8678) 2024-09-26 19:08:59 +08:00
03edfbe6f5 feat: add qwen to add custom model parameters (#8759) 2024-09-26 19:04:25 +08:00
3d2cb25a67 fix: change wrong company name (#8801) 2024-09-26 17:53:11 +08:00
6df14e50b2 fix: workflow as tool always outdated (#8798) 2024-09-26 17:50:36 +08:00
611f0fb3f6 update to external knowledge api 2024-09-26 16:38:53 +08:00
008e0efeb0 refactor: update delete method as an abstract method (#8794) 2024-09-26 16:36:21 +08:00
cx
128a66f7fe fix: Ollama modelfeature set vision, and an exception occurred at the… (#8783) 2024-09-26 16:34:40 +08:00
62406991df fix: start node input config modal raise 'variable name is required' (#8793) 2024-09-26 16:28:20 +08:00
d1173a69f8 fix: the Image-1X tool (#8787) 2024-09-26 13:48:06 +08:00
a0b0809b1c Add more models for SiliconFlow (#8779) 2024-09-26 11:29:53 +08:00
4c9ef6e830 fix: update usage for Jina Embeddings v3 (#8771) 2024-09-26 11:29:35 +08:00
0c96f0aa51 fix: credential *** should be string (#8785) 2024-09-26 11:24:03 +08:00
ac73763726 chore: add input_type param desc for the _invoke method of text_embedding (#8778) 2024-09-26 11:23:09 +08:00
5ba19d64e9 fix: TavilySearch tool get api link (#8780) 2024-09-26 11:22:18 +08:00
Yi
ff0260e564 fix: minor issues 2024-09-26 10:23:06 +08:00
Qun
fefbc43fb0 chore: fix comfyui tool doc url (#8775) 2024-09-26 08:18:13 +08:00
Yi
85deb9d7af Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-26 01:01:30 +08:00
Yi
cfa4825073 feat: external knowledge api crud frontend & connect external knowledge base 2024-09-26 01:00:49 +08:00
a8b837c4a9 dep: bump ElasticSearch from 8.14.x to 8.15.x (#8197) 2024-09-25 22:55:24 +08:00
02ff6cca70 feat: add support for Vertex AI Gemini 1.5 002 and experimental models (#8767) 2024-09-25 21:27:26 +08:00
ef47f68e4a fix: the translation result may cause a different meaning (#8763) 2024-09-25 18:25:06 +08:00
2ef8b187fa Add GitHub Actions Workflow for Web Tests (#8753) 2024-09-25 15:50:51 +08:00
b0927c39fb fix: expose the configuration of HTTP request node to Docker (#8716)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-09-25 15:06:54 +08:00
d0e0111f88 fix:Spark's large language model token calculation error #7911 (#8755) 2024-09-25 14:51:42 +08:00
2328944987 chore: apply ruff reformat for python-client sdk (#8752) 2024-09-25 14:48:06 +08:00
5fa86074ed update to external knowledge api 2024-09-25 13:31:15 +08:00
Yi
d6c604a356 Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-25 13:05:57 +08:00
c927c97310 update to external knowledge api 2024-09-25 12:37:23 +08:00
cb1942c242 chore: make url display in the middle of http node (#8741) 2024-09-25 11:27:17 +08:00
a69dcb8bee add external_retrieval_model 2024-09-25 10:57:12 +08:00
bf64ff215b fix: . is missing in file_extension (#8736) 2024-09-25 10:09:20 +08:00
68c7e68a8a Fix Issue: switch LLM of SageMaker endpoint doesn't take effect (#8737)
Co-authored-by: Yuanbo Li <ybalbert@amazon.com>
2024-09-25 09:12:35 +08:00
91f70d0bd9 Add embedding models in fireworks provider (#8728) 2024-09-25 08:47:11 +08:00
02b06c420e add external_retrieval_model 2024-09-24 23:52:01 +08:00
a258f8dfdf remove description 2024-09-24 23:32:23 +08:00
a53b4fb2ff remove description 2024-09-24 22:28:23 +08:00
4669eb24be add embedding input type parameter (#8724) 2024-09-24 21:53:50 +08:00
680c1bd41d remove description 2024-09-24 21:37:55 +08:00
debe5953a8 Fix/update jina ai products labels and descriptions (#8730)
Co-authored-by: sa zhang <sa.zhang@jina.ai>
2024-09-24 21:19:49 +08:00
1c7877b048 fix: remove harm category setting from vertex ai (#8721) 2024-09-24 20:53:26 +08:00
Yi
b9b8ec1758 Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-24 20:09:07 +08:00
6452c34818 external knowledge api 2024-09-24 19:54:17 +08:00
Yi
2655dd2026 Merge branch 'feat/external-knowledge-api' of github.com:langgenius/dify into feat/external-knowledge-api 2024-09-24 19:33:15 +08:00
30dc137ccc Merge branch 'main' into feat/external-knowledge-api
# Conflicts:
#	api/core/rag/retrieval/dataset_retrieval.py
2024-09-24 18:03:14 +08:00
573b61b7e8 External knowledge api 2024-09-24 18:02:03 +08:00
089da063d4 External knowledge api 2024-09-24 18:00:45 +08:00
ed92c90a40 External knowledge api 2024-09-24 17:52:16 +08:00
9ca2e2c968 chore: remove windows platform timezone set (#8712) 2024-09-24 17:33:29 +08:00
f42ef0624d fix: embedded chat on ios (#8718) 2024-09-24 17:23:11 +08:00
64baedb484 fix: update nomic model provider token calculation (#8705) 2024-09-24 14:04:07 +08:00
4638f99aaa fix: change model provider name issue Ref #8691 (#8710) 2024-09-24 13:26:58 +08:00
aebe5fc68c fix: Remove unsupported parameters in qwen model (#8699) 2024-09-24 13:06:21 +08:00
1ecf70dca0 feat: add mixedbread as a new model provider (#8523) 2024-09-24 11:20:15 +08:00
7c485f8bb8 fix llm integration problem: It doesn't work on docker env (#8701)
Co-authored-by: Yuanbo Li <ybalbert@amazon.com>
2024-09-24 10:33:30 +08:00
21e9608b23 feat: add xinference sd web ui api tool (#8385)
Signed-off-by: themanforfree <themanforfree@gmail.com>
2024-09-24 10:20:06 +08:00
Yi
fbedd08292 feat: add external api 2024-09-23 23:34:01 +08:00
7f1b028840 fix: change the brand name to Jina AI (#8691)
Co-authored-by: sa zhang <sa.zhang@jina.ai>
2024-09-23 21:39:26 +08:00
bef83a4d2e fix: typos and improve naming conventions: (#8687) 2024-09-23 21:32:58 +08:00
8cc9e68363 fix: prompt for the follow-up suggestions (#8685) 2024-09-23 20:00:34 +08:00
d7aada38a1 Add nomic embedding model provider (#8640) 2024-09-23 19:57:21 +08:00
4f69adc8ab fix: document_create_args_validate (#8569) 2024-09-23 18:45:10 +08:00
52da5b16e7 fixbug tts(stream) not work on ios safari(17.1+) (#8645)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-23 18:44:24 +08:00
11d09a92d0 fix: send message error when last sent message not succeeded (#8682) 2024-09-23 18:44:09 +08:00
c7eacd1aac chore: Optimize I18nObject class for better performance and readability (#8681) 2024-09-23 18:40:40 +08:00
a126d535cf add Spark Max-32K (#8676) 2024-09-23 16:39:46 +08:00
3554a803e7 add zhipuai web search (#8668) 2024-09-23 16:19:42 +08:00
c66cecaa55 add Qwen model translate (#8674) 2024-09-23 16:18:55 +08:00
b37954b966 fix: png avatar upload as jpeg (#8665) 2024-09-23 15:33:06 +08:00
86f90fd9ff chore: skip PLR6201 linter rule (#8666) 2024-09-23 15:28:57 +08:00
4c7beb9d7b fix: Assignment exception (#8663)
Co-authored-by: fum <fum@investoday.com.cn>
2024-09-23 15:23:52 +08:00
3618a97c20 feat: extend api params for Jina Embeddings V3 (#8657) 2024-09-23 13:45:09 +08:00
03fdf5e7f8 chore: Enable Japanese descriptions for Tools (#8646) 2024-09-23 09:06:01 +08:00
cae73b9a32 Make WORKFLOW_* configurable as environment variables. (#8644) 2024-09-23 09:05:02 +08:00
e34f04380d feat: add deepseek-v2.5 for model provider siliconflow (#8639) 2024-09-22 21:44:06 +08:00
6df77038a2 docs: fix predefined_model_scale_out.md redirect error (#8633) 2024-09-22 16:45:45 +08:00
45c0a44411 feat: add qwen2.5 for model provider siliconflow (#8630) 2024-09-22 16:42:34 +08:00
2d869d6831 fix: send message error when chatting with opening statement (#8627) 2024-09-22 16:41:40 +08:00
eaa7e9b1f0 fix: llm_generator.py JSONDecodeError (#8504) 2024-09-22 14:02:12 +08:00
6e37750fbd fix: commands.py (#8483) 2024-09-22 13:41:09 +08:00
omr
8fd297f8b4 fix: redundant check for available_document_count (#8491) 2024-09-22 13:39:41 +08:00
ddf6569dc5 chore: enhance configuration descriptions (#8624) 2024-09-22 13:38:41 +08:00
97895ec41a chore: add Gemini newest experimental models (close #7121) (#8621) 2024-09-22 13:38:08 +08:00
6d56d5c1f6 feat: support o1 series models for openrouter (#8358) 2024-09-22 10:23:50 +08:00
HJY
6c2fa8defc fix: form input add tabIndex (#8478) 2024-09-22 10:14:43 +08:00
c9f1e18df1 Add model parameter translation (#8509)
Co-authored-by: swingchen01 <swings@126.com>
Co-authored-by: 陈长君 <chenchangjun@shuwen.com>
2024-09-22 10:14:33 +08:00
740fad06c1 feat(tools/cogview): Updated cogview tool to support cogview-3 and the latest cogview-3-plus (#8382) 2024-09-22 10:14:14 +08:00
0665268578 Add Fireworks AI as new model provider (#8428) 2024-09-22 10:13:00 +08:00
c8b9bdebfe feat:use xinference tts stream mode (#8616) 2024-09-22 10:08:35 +08:00
a587f0d3f1 docs: Add Japanese documentation for tools (#8469) 2024-09-22 09:04:00 +08:00
8c51d06222 feat: regenerate in Chat, agent and Chatflow app (#7661) 2024-09-22 03:15:11 +08:00
Joe
b32a7713e0 feat: update pyproject.toml (#8368) 2024-09-21 23:59:50 +08:00
831c5a93af refactor(ops): Optimize the iteration for filter_none_values and use logging.error to record logs when an exception occurs (#8461) 2024-09-21 22:56:37 +08:00
1a8dcae10e add Qwen custom add model interface (#8565) 2024-09-21 22:52:10 +08:00
8219f9e090 fix: api/core/ops/ops_trace_manager.py (#8501) 2024-09-21 20:49:01 +08:00
5ddb601e43 add MixtralAI Model (#8517) 2024-09-21 18:08:07 +08:00
5541248264 Update the PerfXCloud provider model list,Update PerfXCloudProvider validate_provider_credentials method. (#8587)
Co-authored-by: xhb <466010723@qq.com>
2024-09-21 17:33:15 +08:00
b3cb97f0ad docs: Update ssrf_proxy related doc link in docker-compose file (#8516) 2024-09-21 17:31:49 +08:00
e75c33a561 Enhance Readme Documentation to Clarify the Importance of Celery Service (#8558) 2024-09-21 17:30:58 +08:00
483ead55d5 chore: translate i18n files (#8557)
Co-authored-by: iamjoel <2120155+iamjoel@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-09-21 17:30:43 +08:00
d63a5a1c3c fix: a helper link error (#8508) 2024-09-21 17:30:30 +08:00
e0a3307563 fix(workflow): "Max submit count reached" error occurred when executing workflow as tool in iteration (#8595) 2024-09-20 19:47:25 +08:00
7f3282ec04 Update version to 0.8.3 in packaging and docker-compose files (#8590) 2024-09-20 18:24:03 +08:00
b773ebdab1 chore: fix webpack dependencies order (#8542) 2024-09-20 18:09:35 +08:00
Qun
1583283635 ComfyUI tool use the new internal enumeration class "VariableKey" (#8533) 2024-09-20 17:42:47 +08:00
c87f710d58 Fix: update qwen model and model config (#8584)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-20 17:05:57 +08:00
1568c5cae9 fix: fix qwen series model type (#8580) 2024-09-20 15:29:33 +08:00
a03919c3b3 feat: add hunyuan-vision (#8529) 2024-09-19 18:08:01 +08:00
7411bcf167 chore: improve delimiter (#8552) 2024-09-19 17:40:20 +08:00
d96f5ba1ca add storage error log (#8556) 2024-09-19 17:34:12 +08:00
d6de96c4b4 feat: sync Qwen API with Aliyun Bailian (#8538) 2024-09-19 17:08:59 +08:00
19c526120c external knowledge api 2024-09-19 17:07:33 +08:00
ffd2f61dd9 fix: thread_pool submit count in parallel workflow not releasing (#8549) 2024-09-19 15:34:56 +08:00
54b9e1f6d1 fix: ci issues(missing duckduckgo-search==6.2.11, ruff lint issue) (#8543) 2024-09-19 11:43:00 +08:00
HJY
2721cb8dee feat: add format util unit and add pre-commit unit check (#8427) 2024-09-19 10:39:27 +08:00
41bea4cafa validate user permission before enter app detail page (#8527) 2024-09-18 16:54:04 +08:00
37f7d5732a external knowledge api 2024-09-18 15:29:30 +08:00
6f222b49f2 refactor: rename task_type to task for jina embeddings v3 (#8488) 2024-09-18 14:53:15 +08:00
dcb033d221 Merge branch 'main' into feat/external-knowledge
# Conflicts:
#	api/core/rag/datasource/retrieval_service.py
#	api/models/dataset.py
#	api/services/dataset_service.py
2024-09-18 14:40:43 +08:00
8dfe8c773a chore: Deprecate gpt-3.5-turbo-0613 and gpt-3.5-turbo-16k-0613 models (#8500) 2024-09-18 14:38:09 +08:00
9f894bb3b3 external knowledge api 2024-09-18 14:36:51 +08:00
Qun
cf645c3ba1 feat: Add ComfyUI tool for Stable Diffusion (#8160) 2024-09-18 10:56:29 +08:00
e896d1e9d7 chore: update the .gitignore file to include opensearch,pgvector,and myscale (#8470) 2024-09-17 22:54:22 +08:00
6dba68f62d feat: Add base URL settings and secure_ascii options to the Brave search tool (#8463)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-15 17:38:43 +08:00
3d083b758f feat: add flux dev of siliconflow image-gen tool (#8450) 2024-09-15 17:14:12 +08:00
aa5b2db10a chore: workflow BRANCH, PARALLEL i18n (#8452) 2024-09-15 17:13:39 +08:00
b73faae0d0 fix(RunOnce): change to form submission instead of onKeyDown and onClick (#8460) 2024-09-15 17:09:47 +08:00
4788e1c8c8 [Python SDK] Add KnowledgeBaseClient and the corresponding test cases. (#8465)
Co-authored-by: Wang Ying <wangying@xkool.org>
2024-09-15 17:08:52 +08:00
bf16de50fe fix: internal error when tool authorization (#8449) 2024-09-14 21:50:02 +08:00
7e611ffbf3 multi-retrival use dataset's top-k (#8416) 2024-09-14 21:48:44 +08:00
65162a87b6 fix:docker-compose.middleware.yaml start the Weaviate container by default (#8446) (#8447) 2024-09-14 21:48:24 +08:00
445497cf89 add svg render & Image preview optimization (#8387)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-14 19:24:53 +08:00
fa1af8e47b add WorkflowClient.get_result, increase version number (#8435)
Co-authored-by: wangying <wangying@xkool.org>
2024-09-14 19:06:37 +08:00
624331472a fix: Improve scrolling behavior for Conversation Opener (#8437)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-14 19:05:19 +08:00
72b7f8a949 Bugfix/fix feishu plugins (#8443)
Co-authored-by: 黎斌 <libin.23@bytedance.com>
2024-09-14 18:59:06 +08:00
88c9834ef2 chore(workflow): Optimize the iteration when selecting a variable from a branch in the output variable causes iteration index err (#8440) 2024-09-14 18:02:43 +08:00
d882348f39 fix: delete the delay for the tooltips inside the add tool panel (#8436) 2024-09-14 17:24:31 +08:00
b6ad7a1e06 Fix: https://github.com/langgenius/dify/issues/8190 (Update Model nam… (#8426)
Co-authored-by: Yuanbo Li <ybalbert@amazon.com>
2024-09-14 17:14:18 +08:00
6f7625fa47 chore: update Jina embedding model (#8376) 2024-09-14 16:21:17 +08:00
de7bc22649 fix: sys_var startwith 'sys.' not 'sys' #8421 (#8422)
Co-authored-by: wuling <wuling@ke.com>
2024-09-14 15:16:12 +08:00
52857dc0a6 feat: allow users to specify timeout for text generations and workflows by environment variable (#8395) 2024-09-14 14:11:45 +08:00
032dd93b2f Fix: operation postion of answer in logs (#8411)
Co-authored-by: Yi <yxiaoisme@gmail.com>
2024-09-14 14:08:31 +08:00
5b18e851d2 fix: when the variable does not exist, an error should be prompted (#8413)
Co-authored-by: Chen(MAC) <chenchen404@outlook.com>
2024-09-14 14:08:10 +08:00
f01602b570 fix(workflow): the answer node after the iteration node containing the answer was output prematurely (#8419) 2024-09-14 14:02:09 +08:00
0123498452 fix:logs and rm unused codes in CacheEmbedding (#8409) 2024-09-14 12:56:45 +08:00
f55e06d8bf fix: resolve runtime error when self.folder is None (#8401)
Co-authored-by: 陈长君 <chenchangjun@shuwen.com>
2024-09-14 11:07:16 +08:00
b613b11422 Fix: Support Bedrock cross region inference #8190 (Update Model name to distinguish between different region groups) (#8402)
Co-authored-by: Yuanbo Li <ybalbert@amazon.com>
2024-09-14 11:06:20 +08:00
8efae1cba2 fix(docker): aliyun oss path env key (#8394) 2024-09-14 09:52:59 +08:00
bf55b1910f fix: pyproject.toml typo (#8396) 2024-09-14 09:45:49 +08:00
71b4480c4a fix: o1-mini 65563 -> 65536 (#8388) 2024-09-14 02:39:58 +08:00
b6b1057a18 fix: sandbox issue related httpx and requests (#8397) 2024-09-14 02:02:55 +08:00
5b98acde2f chore: improve usage of striping prefix or suffix of string with Ruff 0.6.5 (#8392) 2024-09-13 23:34:39 +08:00
aad6f340b3 fix (#8322 followup): resolve the violation of pylint rules (#8391) 2024-09-13 23:19:36 +08:00
a1104ab97e chore: refurish python code by applying Pylint linter rules (#8322) 2024-09-13 22:42:08 +08:00
1ab81b4972 support hunyuan-turbo (#8372)
Co-authored-by: sunkesi <sunkesi@hosecloud.com>
2024-09-13 20:21:48 +08:00
06b66216d7 chore: update firecrawl scrape to V1 api (#8367) 2024-09-13 20:02:00 +08:00
cd3eaed335 fix(workflow): both parallel and single branch errors occur in if-else (#8378) 2024-09-13 19:55:54 +08:00
9d80d7def7 fix: edit load balancing not pass id (#8370) 2024-09-13 17:15:03 +08:00
Joe
84ac5ccc8f fix: add before send to remove langfuse defaultErrorResponse (#8361) 2024-09-13 16:08:08 +08:00
5dfd7abb2b fix: when edit load balancing config not pass the empty filed value hidden (#8366) 2024-09-13 16:05:26 +08:00
24af4b9313 fix: o1-series model encounters an error when the generate mode is blocking (#8363) 2024-09-13 15:37:54 +08:00
6613b8f2e0 chore: fix unnecessary string concatation in single line (#8311) 2024-09-13 14:24:49 +08:00
08c486452f fix: score_threshold handling in vector search methods (#8356) 2024-09-13 14:24:35 +08:00
a45ac6ab98 fix: ark token usage is none (#8351) 2024-09-13 14:19:24 +08:00
80a322aaa2 chore: update version to 0.8.2 in packaging and docker-compose files (#8352) 2024-09-13 13:45:13 +08:00
Joe
82f7875a52 feat: add langfuse sentry ignore error (#8353) 2024-09-13 13:44:19 +08:00
4637ddaa7f feat: add o1-series models support in Agent App (ReACT only) (#8350) 2024-09-13 13:08:27 +08:00
8d2269f762 fix: copy and paste shortcut in the textarea of the workflow run panel (#8345) 2024-09-13 12:20:56 +08:00
5f03e66489 Feature/service api workflow logs (#8323) 2024-09-13 11:03:57 +08:00
a9c1f1a041 fix(workflow): fix var-selector not update when edges change (#8259)
Co-authored-by: Chen(MAC) <chenchen404@outlook.com>
2024-09-13 11:03:39 +08:00
49cee773c5 fixed score threshold is none (#8342) 2024-09-13 10:21:58 +08:00
89e81873c4 merge error 2024-09-13 09:49:24 +08:00
c78828ab7c chore: update Dify version to 0.8.1 (#8329) 2024-09-13 02:48:24 +08:00
e90d3c29ab feat: add OpenAI o1 series models support (#8328) 2024-09-13 02:15:19 +08:00
153807f243 fix: response_format label (#8326) 2024-09-12 23:17:29 +08:00
5db0b56c5b docs: update lambda_translate_utils.yaml (#8293) 2024-09-12 20:33:07 +08:00
404db1ae5b Fix VariableEntityType Bug external-data-tool -> external_data_tool (#8299) 2024-09-12 20:27:55 +08:00
02c4b1af71 chore:add Azure openai api version 2024-08-01-preview (#8291) 2024-09-12 20:22:57 +08:00
aa11659062 Revert "Feat: update app published time after clicking publish button" (#8320) 2024-09-12 20:06:06 +08:00
d4985fb3aa Fix: Support Bedrock cross region inference [#8190](https://github.com/langgenius/dify/issues/8190) (#8317) 2024-09-12 19:15:20 +08:00
8815511ccb chore: apply flake8-pytest-style linter rules (#8307) 2024-09-12 18:09:16 +08:00
40fb4d16ef chore: refurbish Python code by applying refurb linter rules (#8296) 2024-09-12 15:50:49 +08:00
c69f5b07ba chore: apply ruff E501 line-too-long linter rule (#8275)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-12 14:00:36 +08:00
56c90e212a fix(workflow): missing content in the answer node stream output during iterations (#8292)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-12 13:59:48 +08:00
0f14873255 chore: cleanup ruff flake8-simplify linter rules (#8286)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-12 12:55:45 +08:00
0bb7569d46 fix: markdown paragraph margin (#8289) 2024-09-12 11:28:14 +08:00
ec57922bb6 fix(workflow/hooks/use-shortcuts): resolve issue of copy shortcut not working in workflow debug and preview panel (#8249)
Co-authored-by: Yi <yxiaoisme@gmail.com>
2024-09-12 10:39:18 +08:00
781d294f49 chore: cleanup pycodestyle E rules (#8269) 2024-09-11 18:55:00 +08:00
f515af2232 let claude models in bedrock support the response_format parameter (#8220)
Co-authored-by: duyalei <>
2024-09-11 18:24:50 +08:00
fe8191b899 enhance: improve empty data display for detail panel (#8266) 2024-09-11 18:24:18 +08:00
4d2cd6703b chore: remove useless code (#8198) 2024-09-11 18:19:34 +08:00
9ca0e56a8a external dataset binding 2024-09-11 16:59:19 +08:00
292220c596 chore: apply pep8-naming rules for naming convention (#8261) 2024-09-11 16:40:52 +08:00
53f37a6704 fix:ollama text embedding 500 error (#8252) 2024-09-11 16:23:19 +08:00
75c1a82556 Update Gitlab query field, add query by path (#8244) 2024-09-11 16:09:53 +08:00
c5b3777d93 editor can also create api key (#8214) 2024-09-11 16:07:15 +08:00
678bbf8fe8 fix: upload img icon mis-align in the chat input area (#8263) 2024-09-11 15:58:20 +08:00
342607f4a4 fix: truthy value (#8208) 2024-09-11 15:44:53 +08:00
5f4cdd66fa fix(workflow): IF-ELSE nodes connected to the same subsequent node cause execution to stop (#8247) 2024-09-11 12:28:32 +08:00
91942e37ff fix: workflow parallel limit in ifelse node (#8242) 2024-09-11 11:30:33 +08:00
60913970dc fix: CHECK_UPDATE_URL comment (#8235) 2024-09-11 10:58:35 +08:00
82c42b9ec5 fix:error when adding the ollama embedding model (#8236)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-11 10:25:45 +08:00
2a3d8c25bc fix: improving the regionalization of translation (#8231)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-11 08:55:32 +08:00
cee0c51dbb feat: add from_variable_selector for stream chunk / message event (#8228) 2024-09-10 22:15:50 +08:00
fdbbdb706f fix(workflow): answers are output simultaneously across different braches in the question classifier node. (#8225) 2024-09-10 21:11:35 +08:00
f6dfe23cf8 fix(workflow): in multi-parallel execution with multiple conditional branches (#8221) 2024-09-10 21:09:18 +08:00
ffd4bf8bf0 fix(docker/docker-compose.yaml): Set default value for REDIS_SENTINEL_SOCKET_TIMEOUT and CELERY_SENTINEL_SOCKET_TIMEOUT (#8218) 2024-09-10 18:47:59 +08:00
bb3002b173 revert page column (#8217) 2024-09-10 18:21:22 +08:00
d4dc54447a fix the tooltip in tool nodes (#8215) 2024-09-10 17:53:44 +08:00
d109881410 chore(api/models): apply ruff reformatting (#7600)
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-10 17:08:06 +08:00
d1605952b0 fix: input chat input wrong padding (#8207) 2024-09-10 17:01:32 +08:00
2cf1187b32 chore(api/core): apply ruff reformatting (#7624) 2024-09-10 17:00:20 +08:00
178730266d chore: translate i18n files (#8202)
Co-authored-by: takatost <5485478+takatost@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-09-10 16:13:26 +08:00
dabfd74622 feat: Parallel Execution of Nodes in Workflows (#8192)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Yi <yxiaoisme@gmail.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
2024-09-10 15:23:16 +08:00
5da0182800 docs: replace docker-compose with docker compose (#8195) 2024-09-10 15:02:52 +08:00
ed37439ef7 refactor(api/core): Improve type hints and apply ruff formatter in agent runner and model manager. (#8166) 2024-09-10 15:00:25 +08:00
af92f19291 filter excel empty sheet (#8194) 2024-09-10 14:55:08 +08:00
86f7f245e4 fix: The length of the tag should between 1 and 50 (#8187) (#8188) 2024-09-10 14:07:06 +08:00
2d690801d1 nvidia rerank top n missed (#8185) 2024-09-10 13:17:48 +08:00
fede54be77 fix: Version '2.6.2-2' for 'expat' was not found (#8182) 2024-09-10 13:00:37 +08:00
85ff82a694 code merge error (#8183)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-10 12:52:50 +08:00
c8df92d0eb add volcengine tos storage (#8164) 2024-09-10 09:19:47 +08:00
144d30d7ef chore: bump super-linter to v7 (#8148) 2024-09-10 09:13:48 +08:00
4313d92e6b feat(api/core/model_runtime/entities/defaults.py): Add TOP_K in default parameters. (#8167) 2024-09-10 09:11:31 +08:00
0695543f63 Fix variable typo (cont) (#8161) 2024-09-09 23:46:13 +08:00
0bec6a037c update qwen-long (#8157) 2024-09-09 19:09:42 +08:00
3ff9a1f24a Update LICENSE - remove 'SaaS' from restriction term definition (#8143) 2024-09-09 16:52:55 +08:00
a771eea4f6 fix: html raw render (#8138) 2024-09-09 16:12:59 +08:00
e7c77d961b Merge branch 'main' into feat/external-knowledge
# Conflicts:
#	api/controllers/console/auth/data_source_oauth.py
2024-09-09 15:54:43 +08:00
61a0ca9e0d chore: translate i18n files (#8135)
Co-authored-by: zxhlyh <16177003+zxhlyh@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-09 15:54:00 +08:00
551b33c8e5 fix: user-select style and pre-create iframe in embed.js (#8093) 2024-09-09 15:40:56 +08:00
fa34b9aed6 Modify model parameters in Spark LLMs and zhipuai LLMs (#8078)
Co-authored-by: Charlie.Wei <luowei@cvte.com>
2024-09-09 15:36:47 +08:00
bbb609179f chore: offline n to 1 retrieval (#8134) 2024-09-09 15:32:02 +08:00
a27d4d58ec fix: ollama text embedding 500 error (#8131) 2024-09-09 15:27:49 +08:00
50d92f0fd4 add dify-sandbox health check in docker-compose.yaml (#8121) (#8124) 2024-09-09 14:39:06 +08:00
a15791e788 Fix: tongyi code wrapper works not stable (#7871)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-09 11:15:17 +08:00
954580a4af feat: support more model types and builtin tools on aws/sagemaker (#8061)
Co-authored-by: Yuanbo Li <ybalbert@amazon.com>
2024-09-09 10:34:11 +08:00
ab7d79275e fix: Claude can not validate credientials (#8109) 2024-09-09 10:22:42 +08:00
d3658166fb Translate billing to PT-BR (#8105)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-09 10:16:22 +08:00
54b72bdd0a chore: keep dify compose file consistent format (#8102) 2024-09-09 08:30:03 +08:00
d28446301f feat:add fishaudio in xinference (#8100) 2024-09-08 23:58:02 +08:00
9050f92e5b fix: parameter input (#8076) 2024-09-08 15:43:55 +08:00
feefeb44d7 fix LangSmith project config error (#7996) 2024-09-08 13:25:27 +08:00
Zhi
d542b15cc0 feat: support redis sentinel mode (#7756) 2024-09-08 13:23:51 +08:00
2d7954c7da Fix variable typo (#8084) 2024-09-08 13:14:11 +08:00
b1918dae5e fix: knowledge input (#8065) 2024-09-07 17:53:39 +08:00
031a0b576d fix: i18n typo (#8077) 2024-09-07 16:59:38 +08:00
0cef25ef8c Revert "fix: parameter rule" (#8070) 2024-09-07 10:44:56 +08:00
cdb08be951 fix: overflow issues in chat history (#8062) 2024-09-06 19:20:18 +08:00
900fd82a92 fix: parameter rule (#8064) 2024-09-06 19:15:24 +08:00
44f963f281 If else add regexmatch (#8059)
Co-authored-by: 罗威 <luowei@cvte.com>
2024-09-06 18:35:51 +08:00
01858e1caf ifEsle node add regex match (#8007) 2024-09-06 17:44:09 +08:00
2060db8e11 fix: change milvus init args from (host, port) to (url, token) (#8019)
Signed-off-by: ChengZi <chen.zhang@zilliz.com>
2024-09-06 17:32:48 +08:00
9ded063417 chore: #7348, support query conversations by updated_at (#8047) 2024-09-06 17:31:51 +08:00
d72da2777c fix the tooltip in tools node (#8055) 2024-09-06 17:28:22 +08:00
89aede80cc Add OCI(Oracle Cloud Infrastructure) Generative AI Service as a Model Provider (#7775)
Co-authored-by: Walter Jin <jinshuhaicc@gmail.com>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: walter from vm <walter.jin@oracle.com>
2024-09-06 14:15:40 +08:00
e0d3cd91c6 support huawei cloud obs storage (#7980) (#7981) 2024-09-06 14:00:47 +08:00
1a054ac1f4 Update milvus-standalone version and expose required ports for the container. (#7709)
Co-authored-by: Jyong <76649700+JohnJyong@users.noreply.github.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-06 12:01:59 +08:00
3230f4a0ec Message rendering (#6868)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-09-05 21:00:09 +08:00
dadca0f91a Fix/datasets api description error (#8025) 2024-09-05 16:45:44 +08:00
d489b8b3e0 feat: return page number of pdf documents upon retrieval (#7749) 2024-09-05 16:43:26 +08:00
bd0992275c feat: support fish audio TTS (#7982) 2024-09-05 14:18:39 +08:00
3e7597f2bd feat: add gpt-4o-2024-08-06 and json_schema for azure openAI service (#7648) 2024-09-04 21:56:08 +08:00
0e71f6db84 fix spliter length missed (#7987) 2024-09-04 21:47:12 +08:00
f6b9982c23 Concurrent calls to the Wenxin model, and the exception problem when obtaining the token is fixed (#7976)
Co-authored-by: puqs1 <puqs1@lenovo.com>
2024-09-04 21:44:57 +08:00
fb113a9479 chore: translate i18n files (#7965)
Co-authored-by: JohnJyong <76649700+JohnJyong@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Hanqing Zhao <sherry9277@gmail.com>
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-04 17:45:12 +08:00
15791510c8 fix wrong error message (#7972) 2024-09-04 16:46:41 +08:00
0f72a8e89d chore: refactor the beichuan model (#7953) 2024-09-04 16:22:31 +08:00
14af87527f Feat:remove estimation of embedding cost (#7950)
Co-authored-by: jyong <718720800@qq.com>
2024-09-04 14:41:47 +08:00
83e84865be feat: add health check for pg and redis in docker-compose.middleware.yaml (#7961) (#7962) 2024-09-04 14:25:46 +08:00
c2a3c5a748 fix: get commit sha failed in translate action (#7959) 2024-09-04 13:13:21 +08:00
83494cb4f5 fix:empty voice occurs when xinference CosyVoice tts model (#7958) 2024-09-04 13:04:31 +08:00
0bc19c3fbf Feat: update app published time after clicking publish button (#7801) 2024-09-04 13:03:06 +08:00
571415d1a4 fix: split text keep separator (#7930) 2024-09-04 12:59:10 +08:00
7b2cf8215f chore: fix inverted index japanese translation (#7957) 2024-09-04 12:44:59 +08:00
Joe
fee4d3f6ca feat: ops trace add llm model (#7306) 2024-09-04 10:39:00 +08:00
161cc0cda9 Revert "fix: an issue of keyword search feature in application log list" (#7949) 2024-09-04 10:00:55 +08:00
71bff9fcf3 chore: #7943 i18n (#7948) 2024-09-04 09:42:25 +08:00
80d14c9b22 fix(api): Code-Based Extension cause error on position map sorting (#7934)
Signed-off-by: 陳鈞 <jim60105@gmail.com>
2024-09-04 08:41:12 +08:00
c5bdf08558 Chore/add roadmap (#7943) 2024-09-04 08:33:02 +08:00
596f160a1e Chore/add default step 1x url (#7933) 2024-09-04 08:32:22 +08:00
d8b6c053a2 fix rerank model value is empty string (#7937) 2024-09-03 21:25:21 +08:00
4b262cae58 chore: #7603 i18n (#7931) 2024-09-03 19:19:52 +08:00
1a5116cba0 Fix/segment create with api (#7928) 2024-09-03 18:14:47 +08:00
01581dd35f improve the notion table extract (#7925) 2024-09-03 17:52:07 +08:00
7fdd964379 fix: frontend handle sometimes server not generate the wrong follow up data struct (#7916) 2024-09-03 14:09:46 +08:00
0cfcc97e9d feat: support auto generate i18n translate (#6964)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-03 10:17:05 +08:00
8986be0aab chore: Update versions to 0.7.3 (#7895) 2024-09-03 09:49:32 +08:00
f76bbbf5e6 chore(Dockerfile): Bump expat to 2.6.2-2 (#7904) 2024-09-03 09:48:30 +08:00
fe217da05c fix: correct typo in the setting screen (#7897) 2024-09-02 22:49:56 +08:00
80aa7c4019 feat: allow users to use the app icon as the answer icon (#7888)
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-02 20:00:41 +08:00
6f33351eb3 ignore linked images when image id is none (#7890) 2024-09-02 19:37:05 +08:00
35f13c7327 Add Russian language (#7860)
Co-authored-by: d8rt8v <alex@ydertev.ru>
Co-authored-by: crazywoola <427733928@qq.com>
2024-09-02 19:09:41 +08:00
a8b9e01b3e fix: fixed typo on loading reranking_mode (#7887) 2024-09-02 16:18:47 +08:00
7193e189f3 Add perplexity search as a new tool (#7861) 2024-09-02 14:48:13 +08:00
3f2a806abe fix: glm models prices and max_tokens correction (#7882) 2024-09-02 14:29:09 +08:00
5e4907e940 fix: layout shift on app card hover (#7872) 2024-09-02 11:05:54 +08:00
omr
bf63c5d1e3 fix typo: langauge -> language (#7875) 2024-09-02 08:41:45 +08:00
78989e9049 Add ALIYUN_OSS_PATH configuration for Aliyun OSS (#7864)
Co-authored-by: seayon <zhaoxuyang@shouqianba.com>
2024-09-01 21:30:17 +08:00
1510bdbcf6 refactor: Remove typecasting by any (#7862) 2024-09-01 14:58:12 +08:00
024d688b77 fix(RetrievalConfig): Fix score threshold assignment for zero value (#7865) 2024-09-01 14:57:50 +08:00
ef82a29e23 fix: crash when ECharts accesses undefined objects (#7853) 2024-09-01 14:52:27 +08:00
1f56a20b62 feat: support auth by api key for ark provider (#7845) 2024-08-31 10:56:32 +08:00
0c2a62f847 fix: correct http timeout configs‘ default values and ignorance by HttpRequestNode (#7762) 2024-08-30 19:09:10 +08:00
ea748b50f2 fix: an issue of keyword search feature in application log list (#7816) 2024-08-30 18:48:05 +08:00
62bfc4dba6 fix: tooltip size sets improperly (#7836) 2024-08-30 18:13:54 +08:00
Zhi
ceb2b150ff enhance: include workspace name in create-tenant command (#7834) 2024-08-30 15:53:50 +08:00
dc015c380a feat: add zhipu glm_4_plus and glm_4v_plus model (#7824) 2024-08-30 15:08:31 +08:00
c9e0f0bf20 fix: correct typo in environment variable description (#7817) 2024-08-30 00:03:40 +08:00
bd6d4d0553 fix: filter out installed apps without an app (#7799) 2024-08-29 19:03:08 +08:00
f0273f00e1 Fixed when testing the openai compatible interface model, an error is reported when no object is returned (#7808) 2024-08-29 18:58:19 +08:00
962cdbbebd chore: add app generator overload (#7792) 2024-08-29 16:04:01 +08:00
2c51e3a327 fix: webapp sso setting may not the latest value when refresh (#7795) 2024-08-29 15:57:43 +08:00
8e311cc45c fixed permission is None (#7788) 2024-08-29 12:46:42 +08:00
c441bea4d1 fix: datasets permission is missing (#7787) 2024-08-29 12:46:33 +08:00
ad30668eb6 Sync Input component from feat/attachments branch (#7782) 2024-08-29 11:23:16 +08:00
62f4801523 Update ssrf_proxy related doc link in docker-compose file (#7778) 2024-08-29 11:22:39 +08:00
ec1408346e docs: navigate to open issues in contributing documents (#7781) 2024-08-29 11:18:49 +08:00
0e0a703496 chore: ignore openai error record in sentry (#7770) 2024-08-28 23:26:11 +08:00
54b693d5b1 feat: update saas billing hint. (#7760) 2024-08-28 18:55:47 +08:00
1262277714 chore: improve http executor configs (#7730) 2024-08-28 17:46:37 +08:00
3a67fc6c5a feat: add support for array types in available variable list (#7715) 2024-08-28 17:30:13 +08:00
26abbe8e5b feat(Tools): add a tool to query the stock price from Alpha Vantage (#7019) (#7752) 2024-08-28 17:27:20 +08:00
5d0914daea fix: not able to pass array of string/number/object into variable aggregator groups (#7757) 2024-08-28 17:25:20 +08:00
7541a492b7 fix: crawl options max length can not set 0 (#7758)
Co-authored-by: Yi <yxiaoisme@gmail.com>
2024-08-28 17:16:07 +08:00
a63e15081f update nltk version 2024-08-23 16:43:47 +08:00
0724640bbb fix rerank mode is none 2024-08-22 15:36:47 +08:00
cb70e12827 fix rerank mode is none 2024-08-22 15:33:43 +08:00
067b956b2c merge migration 2024-08-21 16:25:18 +08:00
e7762b731c external knowledge 2024-08-20 16:18:35 +08:00
f6c8390b0b external knowledge 2024-08-20 12:47:51 +08:00
4fd57929df Merge branch 'main' into feat/external-knowledge 2024-08-20 12:46:37 +08:00
517cdb2ca4 add external knowledge 2024-08-20 11:13:29 +08:00
2076 changed files with 78078 additions and 34792 deletions


@@ -20,7 +20,7 @@ jobs:
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
uses: tj-actions/changed-files@v45
with:
files: api/**
@@ -66,7 +66,7 @@ jobs:
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
uses: tj-actions/changed-files@v45
with:
files: web/**
@@ -97,7 +97,7 @@ jobs:
- name: Check changed files
id: changed-files
uses: tj-actions/changed-files@v44
uses: tj-actions/changed-files@v45
with:
files: |
**.sh
@@ -107,7 +107,7 @@
dev/**
- name: Super-linter
uses: super-linter/super-linter/slim@v6
uses: super-linter/super-linter/slim@v7
if: steps.changed-files.outputs.any_changed == 'true'
env:
BASH_SEVERITY: warning


@@ -0,0 +1,54 @@
name: Check i18n Files and Create PR
on:
  pull_request:
    types: [closed]
    branches: [main]
jobs:
  check-and-update:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: web
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 2 # last 2 commits
      - name: Check for file changes in i18n/en-US
        id: check_files
        run: |
          recent_commit_sha=$(git rev-parse HEAD)
          second_recent_commit_sha=$(git rev-parse HEAD~1)
          changed_files=$(git diff --name-only $recent_commit_sha $second_recent_commit_sha -- 'i18n/en-US/*.ts')
          echo "Changed files: $changed_files"
          if [ -n "$changed_files" ]; then
            echo "FILES_CHANGED=true" >> $GITHUB_ENV
          else
            echo "FILES_CHANGED=false" >> $GITHUB_ENV
          fi
      - name: Set up Node.js
        if: env.FILES_CHANGED == 'true'
        uses: actions/setup-node@v2
        with:
          node-version: 'lts/*'
      - name: Install dependencies
        if: env.FILES_CHANGED == 'true'
        run: yarn install --frozen-lockfile
      - name: Run npm script
        if: env.FILES_CHANGED == 'true'
        run: npm run auto-gen-i18n
      - name: Create Pull Request
        if: env.FILES_CHANGED == 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          commit-message: Update i18n files based on en-US changes
          title: 'chore: translate i18n files'
          body: This PR was automatically created to update i18n files based on changes in en-US locale.
          branch: chore/automated-i18n-updates

.github/workflows/web-tests.yml

@@ -0,0 +1,46 @@
name: Web Tests
on:
  pull_request:
    branches:
      - main
    paths:
      - web/**
concurrency:
  group: web-tests-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  test:
    name: Web Tests
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./web
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Check changed files
        id: changed-files
        uses: tj-actions/changed-files@v45
        with:
          files: web/**
      - name: Setup Node.js
        uses: actions/setup-node@v4
        if: steps.changed-files.outputs.any_changed == 'true'
        with:
          node-version: 20
          cache: yarn
          cache-dependency-path: ./web/package.json
      - name: Install dependencies
        if: steps.changed-files.outputs.any_changed == 'true'
        run: yarn install --frozen-lockfile
      - name: Run tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: yarn test

.gitignore

@@ -153,6 +153,9 @@ docker-legacy/volumes/etcd/*
docker-legacy/volumes/minio/*
docker-legacy/volumes/milvus/*
docker-legacy/volumes/chroma/*
docker-legacy/volumes/opensearch/data/*
docker-legacy/volumes/pgvectors/data/*
docker-legacy/volumes/pgvector/data/*
docker/volumes/app/storage/*
docker/volumes/certbot/*
@@ -164,6 +167,12 @@ docker/volumes/etcd/*
docker/volumes/minio/*
docker/volumes/milvus/*
docker/volumes/chroma/*
docker/volumes/opensearch/data/*
docker/volumes/myscale/data/*
docker/volumes/myscale/log/*
docker/volumes/unstructured/*
docker/volumes/pgvector/data/*
docker/volumes/pgvecto_rs/data/*
docker/nginx/conf.d/default.conf
docker/middleware.env


@@ -8,7 +8,7 @@ In terms of licensing, please take a minute to read our short [License and Contr
## Before you jump in
[Find](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) an existing issue, or [open](https://github.com/langgenius/dify/issues/new/choose) a new one. We categorize issues into 2 types:
[Find](https://github.com/langgenius/dify/issues?q=is:issue+is:open) an existing issue, or [open](https://github.com/langgenius/dify/issues/new/choose) a new one. We categorize issues into 2 types:
### Feature requests:


@@ -8,7 +8,7 @@
## 在开始之前
[查找](https://github.com/langgenius/dify/issues?q=is:issue+is:closed)现有问题,或 [创建](https://github.com/langgenius/dify/issues/new/choose) 一个新问题。我们将问题分为两类:
[查找](https://github.com/langgenius/dify/issues?q=is:issue+is:open)现有问题,或 [创建](https://github.com/langgenius/dify/issues/new/choose) 一个新问题。我们将问题分为两类:
### 功能请求:
@@ -36,7 +36,7 @@
| 被团队成员标记为高优先级的功能 | 高优先级 |
| 在 [community feedback board](https://github.com/langgenius/dify/discussions/categories/feedbacks) 内反馈的常见功能请求 | 中等优先级 |
| 非核心功能和小幅改进 | 低优先级 |
| 有价值不紧急 | 未来功能 |
| 有价值不紧急 | 未来功能 |
### 其他任何事情(例如 bug 报告、性能优化、拼写错误更正):
* 立即开始编码。
@@ -138,7 +138,7 @@ Dify 的后端使用 Python 编写,使用 [Flask](https://flask.palletsproject
├── models // 描述数据模型和 API 响应的形状
├── public // 如 favicon 等元资源
├── service // 定义 API 操作的形状
├── test
├── test
├── types // 函数参数和返回值的描述
└── utils // 共享的实用函数
```


@@ -10,7 +10,7 @@ Dify にコントリビュートしたいとお考えなのですね。それは
## 飛び込む前に
[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。
[既存の Issue](https://github.com/langgenius/dify/issues?q=is:issue+is:open) を探すか、[新しい Issue](https://github.com/langgenius/dify/issues/new/choose) を作成してください。私たちは Issue を 2 つのタイプに分類しています。
### 機能リクエスト


@@ -8,7 +8,7 @@ Về vấn đề cấp phép, xin vui lòng dành chút thời gian đọc qua [
## Trước khi bắt đầu
[Tìm kiếm](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) một vấn đề hiện có, hoặc [tạo mới](https://github.com/langgenius/dify/issues/new/choose) một vấn đề. Chúng tôi phân loại các vấn đề thành 2 loại:
[Tìm kiếm](https://github.com/langgenius/dify/issues?q=is:issue+is:open) một vấn đề hiện có, hoặc [tạo mới](https://github.com/langgenius/dify/issues/new/choose) một vấn đề. Chúng tôi phân loại các vấn đề thành 2 loại:
### Yêu cầu tính năng:


@@ -4,7 +4,7 @@ Dify is licensed under the Apache License 2.0, with the following additional con
1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer:
a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
- Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend components, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend components.


@@ -39,7 +39,7 @@ DB_DATABASE=dify
# Storage configuration
# use for store upload files, private keys...
# storage type: local, s3, azure-blob, google-storage
# storage type: local, s3, azure-blob, google-storage, tencent-cos, huawei-obs, volcengine-tos
STORAGE_TYPE=local
STORAGE_LOCAL_PATH=storage
S3_USE_AWS_MANAGED_IAM=false
@@ -60,7 +60,8 @@ ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=your-endpoint
ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=yout-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=your-google-service-account-json-base64-string
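The new ALIYUN_OSS_PATH comment above warns that OSS object names cannot begin with a slash. As a minimal sketch of guarding against a misconfigured prefix when building object keys (the helper below is our illustration, not code from this diff):

```python
def oss_object_key(path_prefix: str, filename: str) -> str:
    # OSS rejects object names with a leading '/', so strip stray slashes
    # from the configured ALIYUN_OSS_PATH value before joining.
    prefix = path_prefix.strip("/")
    return f"{prefix}/{filename}" if prefix else filename

# e.g. oss_object_key("/your-path/", "upload.txt") -> "your-path/upload.txt"
```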
@@ -72,6 +73,12 @@ TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
# Huawei OBS Storage Configuration
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
# OCI Storage configuration
OCI_ENDPOINT=your-endpoint
OCI_BUCKET_NAME=your-bucket-name
@@ -79,6 +86,13 @@ OCI_ACCESS_KEY=your-access-key
OCI_SECRET_KEY=your-secret-key
OCI_REGION=your-region
# Volcengine tos Storage configuration
VOLCENGINE_TOS_ENDPOINT=your-endpoint
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
VOLCENGINE_TOS_REGION=your-region
# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
@@ -100,11 +114,10 @@ QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
# Milvus configuration
MILVUS_HOST=127.0.0.1
MILVUS_PORT=19530
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_SECURE=false
# MyScale configuration
MYSCALE_HOST=127.0.0.1
@@ -149,6 +162,8 @@ PGVECTOR_PORT=5433
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=postgres
PGVECTOR_DATABASE=postgres
PGVECTOR_MIN_CONNECTION=1
PGVECTOR_MAX_CONNECTION=5
# Tidb Vector configuration
TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com
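Two of the vector-store changes in this file alter how connections are configured: Milvus moves from a host/port pair to a single MILVUS_URI plus MILVUS_TOKEN (commit 2060db8e11 above), and pgvector gains PGVECTOR_MIN_CONNECTION / PGVECTOR_MAX_CONNECTION bounds (#8841). A hedged sketch of how such values are typically consumed, assuming the pymilvus (>= 2.4) and psycopg2 packages and reachable servers; this is illustrative wiring, not Dify's actual code:

```python
import os

from psycopg2 import pool
from pymilvus import MilvusClient

# Milvus: one URI carries scheme, host, and port; the token replaces the
# old user/password/secure trio (e.g. "root:Milvus" when auth is enabled).
milvus = MilvusClient(
    uri=os.getenv("MILVUS_URI", "http://127.0.0.1:19530"),
    token=os.getenv("MILVUS_TOKEN", ""),
)

# pgvector: the new min/max settings map naturally onto pool bounds.
pg_pool = pool.SimpleConnectionPool(
    minconn=int(os.getenv("PGVECTOR_MIN_CONNECTION", "1")),
    maxconn=int(os.getenv("PGVECTOR_MAX_CONNECTION", "5")),
    host=os.getenv("PGVECTOR_HOST", "127.0.0.1"),
    port=int(os.getenv("PGVECTOR_PORT", "5433")),
    user=os.getenv("PGVECTOR_USER", "postgres"),
    password=os.getenv("PGVECTOR_PASSWORD", "postgres"),
    dbname=os.getenv("PGVECTOR_DATABASE", "postgres"),
)
```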


@@ -55,7 +55,7 @@ RUN apt-get update \
&& echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
&& apt-get update \
# For Security
&& apt-get install -y --no-install-recommends zlib1g=1:1.3.dfsg+really1.3.1-1 expat=2.6.2-1 libldap-2.5-0=2.5.18+dfsg-3 perl=5.38.2-5 libsqlite3-0=3.46.0-1 \
&& apt-get install -y --no-install-recommends zlib1g=1:1.3.dfsg+really1.3.1-1 expat=2.6.3-1 libldap-2.5-0=2.5.18+dfsg-3 perl=5.38.2-5 libsqlite3-0=3.46.0-1 \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*


@@ -65,14 +65,12 @@
8. Start Dify [web](../web) service.
9. Setup your application by visiting `http://localhost:3000`...
10. If you need to debug local async processing, please start the worker service.
10. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service.
```bash
poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion
```
The started celery app handles the async tasks, e.g. dataset importing and documents indexing.
## Testing
1. Install dependencies for both the backend and the test environment


@@ -53,11 +53,9 @@ from services.account_service import AccountService
warnings.simplefilter("ignore", ResourceWarning)
# fix windows platform
if os.name == "nt":
os.system('tzutil /s "UTC"')
else:
os.environ["TZ"] = "UTC"
os.environ["TZ"] = "UTC"
# windows platform not support tzset
if hasattr(time, "tzset"):
time.tzset()
@@ -164,7 +162,7 @@ def initialize_extensions(app):
@login_manager.request_loader
def load_user_from_request(request_from_flask_login):
"""Load user based on the request."""
if request.blueprint not in ["console", "inner_api"]:
if request.blueprint not in {"console", "inner_api"}:
return None
# Check if the user_id contains a dot, indicating the old format
auth_header = request.headers.get("Authorization", "")
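This hunk, like the scope checks in commands.py further down, replaces list literals with set literals in membership tests. A small illustration of the idiom with hypothetical names; a set literal gives O(1) hash lookup and signals that the collection is fixed:

```python
ALLOWED_BLUEPRINTS = {"console", "inner_api"}  # hypothetical constant

def is_console_or_inner_api(blueprint: str) -> bool:
    # Membership against a set literal: constant-time lookup, and the
    # reader sees at a glance that the allowed values are fixed.
    return blueprint in ALLOWED_BLUEPRINTS
```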


@ -28,28 +28,28 @@ from services.account_service import RegisterService, TenantService
@click.command("reset-password", help="Reset the account password.")
@click.option("--email", prompt=True, help="The email address of the account whose password you need to reset")
@click.option("--new-password", prompt=True, help="the new password.")
@click.option("--password-confirm", prompt=True, help="the new password confirm.")
@click.option("--email", prompt=True, help="Account email to reset password for")
@click.option("--new-password", prompt=True, help="New password")
@click.option("--password-confirm", prompt=True, help="Confirm new password")
def reset_password(email, new_password, password_confirm):
"""
Reset password of owner account
Only available in SELF_HOSTED mode
"""
if str(new_password).strip() != str(password_confirm).strip():
click.echo(click.style("sorry. The two passwords do not match.", fg="red"))
click.echo(click.style("Passwords do not match.", fg="red"))
return
account = db.session.query(Account).filter(Account.email == email).one_or_none()
if not account:
click.echo(click.style("sorry. the account: [{}] not exist .".format(email), fg="red"))
click.echo(click.style("Account not found for email: {}".format(email), fg="red"))
return
try:
valid_password(new_password)
except:
click.echo(click.style("sorry. The passwords must match {} ".format(password_pattern), fg="red"))
click.echo(click.style("Invalid password. Must match {}".format(password_pattern), fg="red"))
return
# generate password salt
@@ -62,37 +62,37 @@ def reset_password(email, new_password, password_confirm):
account.password = base64_password_hashed
account.password_salt = base64_salt
db.session.commit()
click.echo(click.style("Congratulations! Password has been reset.", fg="green"))
click.echo(click.style("Password reset successfully.", fg="green"))
@click.command("reset-email", help="Reset the account email.")
@click.option("--email", prompt=True, help="The old email address of the account whose email you need to reset")
@click.option("--new-email", prompt=True, help="the new email.")
@click.option("--email-confirm", prompt=True, help="the new email confirm.")
@click.option("--email", prompt=True, help="Current account email")
@click.option("--new-email", prompt=True, help="New email")
@click.option("--email-confirm", prompt=True, help="Confirm new email")
def reset_email(email, new_email, email_confirm):
"""
Replace account email
:return:
"""
if str(new_email).strip() != str(email_confirm).strip():
click.echo(click.style("Sorry, new email and confirm email do not match.", fg="red"))
click.echo(click.style("New emails do not match.", fg="red"))
return
account = db.session.query(Account).filter(Account.email == email).one_or_none()
if not account:
click.echo(click.style("sorry. the account: [{}] not exist .".format(email), fg="red"))
click.echo(click.style("Account not found for email: {}".format(email), fg="red"))
return
try:
email_validate(new_email)
except:
click.echo(click.style("sorry. {} is not a valid email. ".format(email), fg="red"))
click.echo(click.style("Invalid email: {}".format(new_email), fg="red"))
return
account.email = new_email
db.session.commit()
click.echo(click.style("Congratulations!, email has been reset.", fg="green"))
click.echo(click.style("Email updated successfully.", fg="green"))
@click.command(
@@ -104,7 +104,7 @@ def reset_email(email, new_email, email_confirm):
)
@click.confirmation_option(
prompt=click.style(
"Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
"Are you sure you want to reset encrypt key pair? This operation cannot be rolled back!", fg="red"
)
)
def reset_encrypt_key_pair():
@@ -114,13 +114,13 @@
Only support SELF_HOSTED mode.
"""
if dify_config.EDITION != "SELF_HOSTED":
click.echo(click.style("Sorry, only support SELF_HOSTED mode.", fg="red"))
click.echo(click.style("This command is only for SELF_HOSTED installations.", fg="red"))
return
tenants = db.session.query(Tenant).all()
for tenant in tenants:
if not tenant:
click.echo(click.style("Sorry, no workspace found. Please enter /install to initialize.", fg="red"))
click.echo(click.style("No workspaces found. Run /install first.", fg="red"))
return
tenant.encrypt_public_key = generate_key_pair(tenant.id)
@@ -131,18 +131,18 @@
click.echo(
click.style(
"Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
"Congratulations! The asymmetric key pair of workspace {} has been reset.".format(tenant.id),
fg="green",
)
)
@click.command("vdb-migrate", help="migrate vector db.")
@click.command("vdb-migrate", help="Migrate vector db.")
@click.option("--scope", default="all", prompt=False, help="The scope of vector database to migrate, Default is All.")
def vdb_migrate(scope: str):
if scope in ["knowledge", "all"]:
if scope in {"knowledge", "all"}:
migrate_knowledge_vector_database()
if scope in ["annotation", "all"]:
if scope in {"annotation", "all"}:
migrate_annotation_vector_database()
@@ -150,7 +150,7 @@ def migrate_annotation_vector_database():
"""
Migrate annotation datas to target vector database .
"""
click.echo(click.style("Start migrate annotation data.", fg="green"))
click.echo(click.style("Starting annotation data migration.", fg="green"))
create_count = 0
skipped_count = 0
total_count = 0
@@ -174,14 +174,14 @@ def migrate_annotation_vector_database():
f"Processing the {total_count} app {app.id}. " + f"{create_count} created, {skipped_count} skipped."
)
try:
click.echo("Create app annotation index: {}".format(app.id))
click.echo("Creating app annotation index: {}".format(app.id))
app_annotation_setting = (
db.session.query(AppAnnotationSetting).filter(AppAnnotationSetting.app_id == app.id).first()
)
if not app_annotation_setting:
skipped_count = skipped_count + 1
click.echo("App annotation setting is disabled: {}".format(app.id))
click.echo("App annotation setting disabled: {}".format(app.id))
continue
# get dataset_collection_binding info
dataset_collection_binding = (
@@ -190,7 +190,7 @@ def migrate_annotation_vector_database():
.first()
)
if not dataset_collection_binding:
click.echo("App annotation collection binding is not exist: {}".format(app.id))
click.echo("App annotation collection binding not found: {}".format(app.id))
continue
annotations = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app.id).all()
dataset = Dataset(
@@ -211,11 +211,11 @@ def migrate_annotation_vector_database():
documents.append(document)
vector = Vector(dataset, attributes=["doc_id", "annotation_id", "app_id"])
click.echo(f"Start to migrate annotation, app_id: {app.id}.")
click.echo(f"Migrating annotations for app: {app.id}.")
try:
vector.delete()
click.echo(click.style(f"Successfully delete vector index for app: {app.id}.", fg="green"))
click.echo(click.style(f"Deleted vector index for app {app.id}.", fg="green"))
except Exception as e:
click.echo(click.style(f"Failed to delete vector index for app {app.id}.", fg="red"))
raise e
@@ -223,12 +223,12 @@ def migrate_annotation_vector_database():
try:
click.echo(
click.style(
f"Start to created vector index with {len(documents)} annotations for app {app.id}.",
f"Creating vector index with {len(documents)} annotations for app {app.id}.",
fg="green",
)
)
vector.create(documents)
click.echo(click.style(f"Successfully created vector index for app {app.id}.", fg="green"))
click.echo(click.style(f"Created vector index for app {app.id}.", fg="green"))
except Exception as e:
click.echo(click.style(f"Failed to create vector index for app {app.id}.", fg="red"))
raise e
@@ -237,14 +237,14 @@ def migrate_annotation_vector_database():
except Exception as e:
click.echo(
click.style(
"Create app annotation index error: {} {}".format(e.__class__.__name__, str(e)), fg="red"
"Error creating app annotation index: {} {}".format(e.__class__.__name__, str(e)), fg="red"
)
)
continue
click.echo(
click.style(
f"Congratulations! Create {create_count} app annotation indexes, and skipped {skipped_count} apps.",
f"Migration complete. Created {create_count} app annotation indexes. Skipped {skipped_count} apps.",
fg="green",
)
)
@@ -254,7 +254,7 @@ def migrate_knowledge_vector_database():
"""
Migrate vector database data to the target vector database.
"""
click.echo(click.style("Start migrate vector db.", fg="green"))
click.echo(click.style("Starting vector database migration.", fg="green"))
create_count = 0
skipped_count = 0
total_count = 0
@@ -275,11 +275,10 @@ def migrate_knowledge_vector_database():
for dataset in datasets:
total_count = total_count + 1
click.echo(
f"Processing the {total_count} dataset {dataset.id}. "
+ f"{create_count} created, {skipped_count} skipped."
f"Processing the {total_count} dataset {dataset.id}. {create_count} created, {skipped_count} skipped."
)
try:
click.echo("Create dataset vdb index: {}".format(dataset.id))
click.echo("Creating dataset vector database index: {}".format(dataset.id))
if dataset.index_struct_dict:
if dataset.index_struct_dict["type"] == vector_type:
skipped_count = skipped_count + 1
@@ -300,7 +299,7 @@ def migrate_knowledge_vector_database():
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError("Dataset Collection Bindings is not exist!")
raise ValueError("Dataset Collection Binding not found")
else:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
@@ -352,14 +351,12 @@ def migrate_knowledge_vector_database():
raise ValueError(f"Vector store {vector_type} is not supported.")
vector = Vector(dataset)
click.echo(f"Start to migrate dataset {dataset.id}.")
click.echo(f"Migrating dataset {dataset.id}.")
try:
vector.delete()
click.echo(
click.style(
f"Successfully delete vector index {collection_name} for dataset {dataset.id}.", fg="green"
)
click.style(f"Deleted vector index {collection_name} for dataset {dataset.id}.", fg="green")
)
except Exception as e:
click.echo(
@@ -411,14 +408,13 @@ def migrate_knowledge_vector_database():
try:
click.echo(
click.style(
f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.",
f"Creating vector index with {len(documents)} documents of {segments_count}"
f" segments for dataset {dataset.id}.",
fg="green",
)
)
vector.create(documents)
click.echo(
click.style(f"Successfully created vector index for dataset {dataset.id}.", fg="green")
)
click.echo(click.style(f"Created vector index for dataset {dataset.id}.", fg="green"))
except Exception as e:
click.echo(click.style(f"Failed to create vector index for dataset {dataset.id}.", fg="red"))
raise e
@@ -429,13 +425,13 @@ def migrate_knowledge_vector_database():
except Exception as e:
db.session.rollback()
click.echo(
click.style("Create dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
click.style("Error creating dataset index: {} {}".format(e.__class__.__name__, str(e)), fg="red")
)
continue
click.echo(
click.style(
f"Congratulations! Create {create_count} dataset indexes, and skipped {skipped_count} datasets.", fg="green"
f"Migration complete. Created {create_count} dataset indexes. Skipped {skipped_count} datasets.", fg="green"
)
)
@@ -445,7 +441,7 @@ def convert_to_agent_apps():
"""
Convert Agent Assistant to Agent App.
"""
click.echo(click.style("Start convert to agent apps.", fg="green"))
click.echo(click.style("Starting conversion to agent apps.", fg="green"))
proceeded_app_ids = []
@@ -496,23 +492,23 @@ def convert_to_agent_apps():
except Exception as e:
click.echo(click.style("Convert app error: {} {}".format(e.__class__.__name__, str(e)), fg="red"))
click.echo(click.style("Congratulations! Converted {} agent apps.".format(len(proceeded_app_ids)), fg="green"))
click.echo(click.style("Conversion complete. Converted {} agent apps.".format(len(proceeded_app_ids)), fg="green"))
@click.command("add-qdrant-doc-id-index", help="add qdrant doc_id index.")
@click.option("--field", default="metadata.doc_id", prompt=False, help="index field , default is metadata.doc_id.")
@click.command("add-qdrant-doc-id-index", help="Add Qdrant doc_id index.")
@click.option("--field", default="metadata.doc_id", prompt=False, help="Index field, default is metadata.doc_id.")
def add_qdrant_doc_id_index(field: str):
click.echo(click.style("Start add qdrant doc_id index.", fg="green"))
click.echo(click.style("Starting Qdrant doc_id index creation.", fg="green"))
vector_type = dify_config.VECTOR_STORE
if vector_type != "qdrant":
click.echo(click.style("Sorry, only support qdrant vector store.", fg="red"))
click.echo(click.style("This command only supports Qdrant vector store.", fg="red"))
return
create_count = 0
try:
bindings = db.session.query(DatasetCollectionBinding).all()
if not bindings:
click.echo(click.style("Sorry, no dataset collection bindings found.", fg="red"))
click.echo(click.style("No dataset collection bindings found.", fg="red"))
return
import qdrant_client
from qdrant_client.http.exceptions import UnexpectedResponse
@@ -522,7 +518,7 @@ def add_qdrant_doc_id_index(field: str):
for binding in bindings:
if dify_config.QDRANT_URL is None:
raise ValueError("Qdrant url is required.")
raise ValueError("Qdrant URL is required.")
qdrant_config = QdrantConfig(
endpoint=dify_config.QDRANT_URL,
api_key=dify_config.QDRANT_API_KEY,
@@ -539,40 +535,39 @@ def add_qdrant_doc_id_index(field: str):
except UnexpectedResponse as e:
# Collection does not exist, so return
if e.status_code == 404:
click.echo(
click.style(f"Collection not found, collection_name:{binding.collection_name}.", fg="red")
)
click.echo(click.style(f"Collection not found: {binding.collection_name}.", fg="red"))
continue
# Some other error occurred, so re-raise the exception
else:
click.echo(
click.style(
f"Failed to create qdrant index, collection_name:{binding.collection_name}.", fg="red"
f"Failed to create Qdrant index for collection: {binding.collection_name}.", fg="red"
)
)
except Exception as e:
click.echo(click.style("Failed to create qdrant client.", fg="red"))
click.echo(click.style("Failed to create Qdrant client.", fg="red"))
click.echo(click.style(f"Congratulations! Create {create_count} collection indexes.", fg="green"))
click.echo(click.style(f"Index creation complete. Created {create_count} collection indexes.", fg="green"))
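For reference, a minimal sketch of the kind of payload index this command creates through the public qdrant_client API; the client URL and collection name are illustrative, not values from this codebase:
import qdrant_client
from qdrant_client.http import models

client = qdrant_client.QdrantClient(url="http://localhost:6333")
client.create_payload_index(
    collection_name="example_collection",           # illustrative name
    field_name="metadata.doc_id",                   # the --field default above
    field_schema=models.PayloadSchemaType.KEYWORD,  # index doc_id values as keywords
)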
@click.command("create-tenant", help="Create account and tenant.")
@click.option("--email", prompt=True, help="The email address of the tenant account.")
@click.option("--email", prompt=True, help="Tenant account email.")
@click.option("--name", prompt=True, help="Workspace name.")
@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, language: Optional[str] = None):
def create_tenant(email: str, language: Optional[str] = None, name: Optional[str] = None):
"""
Create tenant account
"""
if not email:
click.echo(click.style("Sorry, email is required.", fg="red"))
click.echo(click.style("Email is required.", fg="red"))
return
# Create account
email = email.strip()
if "@" not in email:
click.echo(click.style("Sorry, invalid email address.", fg="red"))
click.echo(click.style("Invalid email address.", fg="red"))
return
account_name = email.split("@")[0]
@@ -580,29 +575,31 @@ def create_tenant(email: str, language: Optional[str] = None):
if language not in languages:
language = "en-US"
name = name.strip()
# generate random password
new_password = secrets.token_urlsafe(16)
# register account
account = RegisterService.register(email=email, name=account_name, password=new_password, language=language)
TenantService.create_owner_tenant_if_not_exist(account)
TenantService.create_owner_tenant_if_not_exist(account, name)
click.echo(
click.style(
"Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
"Account and tenant created.\nAccount: {}\nPassword: {}".format(email, new_password),
fg="green",
)
)
@click.command("upgrade-db", help="upgrade the database")
@click.command("upgrade-db", help="Upgrade the database")
def upgrade_db():
click.echo("Preparing database migration...")
lock = redis_client.lock(name="db_upgrade_lock", timeout=60)
if lock.acquire(blocking=False):
try:
click.echo(click.style("Start database migration.", fg="green"))
click.echo(click.style("Starting database migration.", fg="green"))
# run db migration
import flask_migrate
@@ -612,7 +609,7 @@ def upgrade_db():
click.echo(click.style("Database migration successful!", fg="green"))
except Exception as e:
logging.exception(f"Database migration failed, error: {e}")
logging.exception(f"Database migration failed: {e}")
finally:
lock.release()
else:
@@ -624,7 +621,7 @@ def fix_app_site_missing():
"""
Fix app related site missing issue.
"""
click.echo(click.style("Start fix app related site missing issue.", fg="green"))
click.echo(click.style("Starting fix for missing app-related sites.", fg="green"))
failed_app_ids = []
while True:
@@ -647,22 +644,22 @@ where sites.id is null limit 1000"""
if tenant:
accounts = tenant.get_accounts()
if not accounts:
print("Fix app {} failed.".format(app.id))
print("Fix failed for app {}".format(app.id))
continue
account = accounts[0]
print("Fix app {} related site missing issue.".format(app.id))
print("Fixing missing site for app {}".format(app.id))
app_was_created.send(app, account=account)
except Exception as e:
failed_app_ids.append(app_id)
click.echo(click.style("Fix app {} related site missing issue failed!".format(app_id), fg="red"))
click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
logging.exception(f"Failed to fix missing site for app {app_id}: {e}")
continue
if not processed_count:
break
click.echo(click.style("Congratulations! Fix app related site missing issue successful!", fg="green"))
click.echo(click.style("Fix for missing app-related sites completed successfully!", fg="green"))
def register_commands(app):


@@ -4,30 +4,30 @@ from pydantic_settings import BaseSettings
class DeploymentConfig(BaseSettings):
"""
Deployment configs
Configuration settings for application deployment
"""
APPLICATION_NAME: str = Field(
description="application name",
description="Name of the application, used for identification and logging purposes",
default="langgenius/dify",
)
DEBUG: bool = Field(
description="whether to enable debug mode.",
description="Enable debug mode for additional logging and development features",
default=False,
)
TESTING: bool = Field(
description="",
description="Enable testing mode for running automated tests",
default=False,
)
EDITION: str = Field(
description="deployment edition",
description="Deployment edition of the application (e.g., 'SELF_HOSTED', 'CLOUD')",
default="SELF_HOSTED",
)
DEPLOY_ENV: str = Field(
description="deployment environment, default to PRODUCTION.",
description="Deployment environment (e.g., 'PRODUCTION', 'DEVELOPMENT'), default to PRODUCTION",
default="PRODUCTION",
)
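As a usage sketch (using the DeploymentConfig class above; the environment value is illustrative), pydantic-settings resolves each field from the process environment and falls back to the declared default:
import os

os.environ["DEBUG"] = "true"  # pydantic-settings parses the string into a bool
config = DeploymentConfig()   # the environment is read at instantiation time
assert config.DEBUG is True
assert config.APPLICATION_NAME == "langgenius/dify"  # unset variables keep their defaults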


@@ -4,17 +4,17 @@ from pydantic_settings import BaseSettings
class EnterpriseFeatureConfig(BaseSettings):
"""
Enterprise feature configs.
Configuration for enterprise-level features.
**Before using, please contact business@dify.ai by email to inquire about licensing matters.**
"""
ENTERPRISE_ENABLED: bool = Field(
description="whether to enable enterprise features."
description="Enable or disable enterprise-level features."
"Before using, please contact business@dify.ai by email to inquire about licensing matters.",
default=False,
)
CAN_REPLACE_LOGO: bool = Field(
description="whether to allow replacing enterprise logo.",
description="Allow customization of the enterprise logo.",
default=False,
)


@@ -6,30 +6,31 @@ from pydantic_settings import BaseSettings
class NotionConfig(BaseSettings):
"""
Notion integration configs
Configuration settings for Notion integration
"""
NOTION_CLIENT_ID: Optional[str] = Field(
description="Notion client ID",
description="Client ID for Notion API authentication. Required for OAuth 2.0 flow.",
default=None,
)
NOTION_CLIENT_SECRET: Optional[str] = Field(
description="Notion client secret key",
description="Client secret for Notion API authentication. Required for OAuth 2.0 flow.",
default=None,
)
NOTION_INTEGRATION_TYPE: Optional[str] = Field(
description="Notion integration type, default to None, available values: internal.",
description="Type of Notion integration."
" Set to 'internal' for internal integrations, or None for public integrations.",
default=None,
)
NOTION_INTERNAL_SECRET: Optional[str] = Field(
description="Notion internal secret key",
description="Secret key for internal Notion integrations. Required when NOTION_INTEGRATION_TYPE is 'internal'.",
default=None,
)
NOTION_INTEGRATION_TOKEN: Optional[str] = Field(
description="Notion integration token",
description="Integration token for Notion API access. Used for direct API calls without OAuth flow.",
default=None,
)


@@ -6,20 +6,23 @@ from pydantic_settings import BaseSettings
class SentryConfig(BaseSettings):
"""
Sentry configs
Configuration settings for Sentry error tracking and performance monitoring
"""
SENTRY_DSN: Optional[str] = Field(
description="Sentry DSN",
description="Sentry Data Source Name (DSN)."
" This is the unique identifier of your Sentry project, used to send events to the correct project.",
default=None,
)
SENTRY_TRACES_SAMPLE_RATE: NonNegativeFloat = Field(
description="Sentry trace sample rate",
description="Sample rate for Sentry performance monitoring traces."
" Value between 0.0 and 1.0, where 1.0 means 100% of traces are sent to Sentry.",
default=1.0,
)
SENTRY_PROFILES_SAMPLE_RATE: NonNegativeFloat = Field(
description="Sentry profiles sample rate",
description="Sample rate for Sentry profiling."
" Value between 0.0 and 1.0, where 1.0 means 100% of profiles are sent to Sentry.",
default=1.0,
)
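A short sketch of how these three settings are typically handed to the Sentry SDK; the init call is standard sentry_sdk usage, and the DSN shown is a placeholder:
import sentry_sdk

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # SENTRY_DSN (placeholder)
    traces_sample_rate=1.0,    # SENTRY_TRACES_SAMPLE_RATE
    profiles_sample_rate=1.0,  # SENTRY_PROFILES_SAMPLE_RATE
)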


@@ -1,4 +1,4 @@
from typing import Optional
from typing import Annotated, Optional
from pydantic import AliasChoices, Field, HttpUrl, NegativeInt, NonNegativeInt, PositiveInt, computed_field
from pydantic_settings import BaseSettings
@@ -8,145 +8,143 @@ from configs.feature.hosted_service import HostedServiceConfig
class SecurityConfig(BaseSettings):
"""
Secret Key configs
Security-related configurations for the application
"""
SECRET_KEY: Optional[str] = Field(
description="Your App secret key will be used for securely signing the session cookie"
description="Secret key for secure session cookie signing."
"Make sure you are changing this key for your deployment with a strong key."
"You can generate a strong key using `openssl rand -base64 42`."
"Alternatively you can set it with `SECRET_KEY` environment variable.",
"Generate a strong key using `openssl rand -base64 42` or set via the `SECRET_KEY` environment variable.",
default=None,
)
RESET_PASSWORD_TOKEN_EXPIRY_HOURS: PositiveInt = Field(
description="Expiry time in hours for reset token",
description="Duration in hours for which a password reset token remains valid",
default=24,
)
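A comparably strong key can also be generated in Python, as a rough equivalent of the `openssl rand -base64 42` command the description mentions:
import secrets

# 42 random bytes, URL-safe base64-encoded; usable as a SECRET_KEY value
print(secrets.token_urlsafe(42))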
class AppExecutionConfig(BaseSettings):
"""
App Execution configs
Configuration parameters for application execution
"""
APP_MAX_EXECUTION_TIME: PositiveInt = Field(
description="execution timeout in seconds for app execution",
description="Maximum allowed execution time for the application in seconds",
default=1200,
)
APP_MAX_ACTIVE_REQUESTS: NonNegativeInt = Field(
description="max active request per app, 0 means unlimited",
description="Maximum number of concurrent active requests per app (0 for unlimited)",
default=0,
)
class CodeExecutionSandboxConfig(BaseSettings):
"""
Code Execution Sandbox configs
Configuration for the code execution sandbox environment
"""
CODE_EXECUTION_ENDPOINT: HttpUrl = Field(
description="endpoint URL of code execution servcie",
description="URL endpoint for the code execution service",
default="http://sandbox:8194",
)
CODE_EXECUTION_API_KEY: str = Field(
description="API key for code execution service",
description="API key for accessing the code execution service",
default="dify-sandbox",
)
CODE_EXECUTION_CONNECT_TIMEOUT: Optional[float] = Field(
description="connect timeout in seconds for code execution request",
description="Connection timeout in seconds for code execution requests",
default=10.0,
)
CODE_EXECUTION_READ_TIMEOUT: Optional[float] = Field(
description="read timeout in seconds for code execution request",
description="Read timeout in seconds for code execution requests",
default=60.0,
)
CODE_EXECUTION_WRITE_TIMEOUT: Optional[float] = Field(
description="write timeout in seconds for code execution request",
description="Write timeout in seconds for code execution requests",
default=10.0,
)
CODE_MAX_NUMBER: PositiveInt = Field(
description="max depth for code execution",
description="Maximum allowed numeric value in code execution",
default=9223372036854775807,
)
CODE_MIN_NUMBER: NegativeInt = Field(
description="",
description="Minimum allowed numeric value in code execution",
default=-9223372036854775807,
)
CODE_MAX_DEPTH: PositiveInt = Field(
description="max depth for code execution",
description="Maximum allowed depth for nested structures in code execution",
default=5,
)
CODE_MAX_PRECISION: PositiveInt = Field(
description="max precision digits for float type in code execution",
description="Maximum number of decimal places for floating-point numbers in code execution",
default=20,
)
CODE_MAX_STRING_LENGTH: PositiveInt = Field(
description="max string length for code execution",
description="Maximum allowed length for strings in code execution",
default=80000,
)
CODE_MAX_STRING_ARRAY_LENGTH: PositiveInt = Field(
description="",
description="Maximum allowed length for string arrays in code execution",
default=30,
)
CODE_MAX_OBJECT_ARRAY_LENGTH: PositiveInt = Field(
description="",
description="Maximum allowed length for object arrays in code execution",
default=30,
)
CODE_MAX_NUMBER_ARRAY_LENGTH: PositiveInt = Field(
description="",
description="Maximum allowed length for numeric arrays in code execution",
default=1000,
)
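To illustrate how the three timeout settings can map onto an HTTP client, a minimal httpx sketch follows; the request path, header name, and body are assumptions for illustration, not the sandbox's documented contract:
import httpx

timeout = httpx.Timeout(connect=10.0, read=60.0, write=10.0, pool=10.0)
response = httpx.post(
    "http://sandbox:8194/v1/sandbox/run",   # CODE_EXECUTION_ENDPOINT default; path is hypothetical
    headers={"X-Api-Key": "dify-sandbox"},  # CODE_EXECUTION_API_KEY default; header name is hypothetical
    json={"language": "python3", "code": "print(1)"},
    timeout=timeout,
)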
class EndpointConfig(BaseSettings):
"""
Module URL configs
Configuration for various application endpoints and URLs
"""
CONSOLE_API_URL: str = Field(
description="The backend URL prefix of the console API."
"used to concatenate the login authorization callback or notion integration callback.",
description="Base URL for the console API,"
" used for login authentication callbacks and notion integration callbacks",
default="",
)
CONSOLE_WEB_URL: str = Field(
description="The front-end URL prefix of the console web."
"used to concatenate some front-end addresses and for CORS configuration use.",
description="Base URL for the console web interface, used for frontend references and CORS configuration",
default="",
)
SERVICE_API_URL: str = Field(
description="Service API Url prefix." "used to display Service API Base Url to the front-end.",
description="Base URL for the service API, displayed to users for API access",
default="",
)
APP_WEB_URL: str = Field(
description="WebApp Url prefix." "used to display WebAPP API Base Url to the front-end.",
description="Base URL for the web application, used for frontend references",
default="",
)
class FileAccessConfig(BaseSettings):
"""
File Access configs
Configuration for file access and handling
"""
FILES_URL: str = Field(
description="File preview or download Url prefix."
" used to display File preview or download Url to the front-end or as Multi-model inputs;"
description="Base URL for file preview or download,"
" used for frontend display and as multimodal inputs."
" The URL is signed and has an expiration time.",
validation_alias=AliasChoices("FILES_URL", "CONSOLE_API_URL"),
alias_priority=1,
@@ -154,49 +152,49 @@ class FileAccessConfig(BaseSettings):
)
FILES_ACCESS_TIMEOUT: int = Field(
description="timeout in seconds for file accessing",
description="Expiration time in seconds for file access URLs",
default=300,
)
class FileUploadConfig(BaseSettings):
"""
File Uploading configs
Configuration for file upload limitations
"""
UPLOAD_FILE_SIZE_LIMIT: NonNegativeInt = Field(
description="size limit in Megabytes for uploading files",
description="Maximum allowed file size for uploads in megabytes",
default=15,
)
UPLOAD_FILE_BATCH_LIMIT: NonNegativeInt = Field(
description="batch size limit for uploading files",
description="Maximum number of files allowed in a single upload batch",
default=5,
)
UPLOAD_IMAGE_FILE_SIZE_LIMIT: NonNegativeInt = Field(
description="image file size limit in Megabytes for uploading files",
description="Maximum allowed image file size for uploads in megabytes",
default=10,
)
BATCH_UPLOAD_LIMIT: NonNegativeInt = Field(
description="", # todo: to be clarified
description="Maximum number of files allowed in a batch upload operation",
default=20,
)
class HttpConfig(BaseSettings):
"""
HTTP configs
HTTP-related configurations for the application
"""
API_COMPRESSION_ENABLED: bool = Field(
description="whether to enable HTTP response compression of gzip",
description="Enable or disable gzip compression for HTTP responses",
default=False,
)
inner_CONSOLE_CORS_ALLOW_ORIGINS: str = Field(
description="",
description="Comma-separated list of allowed origins for CORS in the console",
validation_alias=AliasChoices("CONSOLE_CORS_ALLOW_ORIGINS", "CONSOLE_WEB_URL"),
default="",
)
@@ -217,363 +215,361 @@ class HttpConfig(BaseSettings):
def WEB_API_CORS_ALLOW_ORIGINS(self) -> list[str]:
return self.inner_WEB_API_CORS_ALLOW_ORIGINS.split(",")
HTTP_REQUEST_MAX_CONNECT_TIMEOUT: NonNegativeInt = Field(
description="",
default=300,
)
HTTP_REQUEST_MAX_CONNECT_TIMEOUT: Annotated[
PositiveInt, Field(ge=10, description="Maximum connection timeout in seconds for HTTP requests")
] = 10
HTTP_REQUEST_MAX_READ_TIMEOUT: NonNegativeInt = Field(
description="",
default=600,
)
HTTP_REQUEST_MAX_READ_TIMEOUT: Annotated[
PositiveInt, Field(ge=60, description="Maximum read timeout in seconds for HTTP requests")
] = 60
HTTP_REQUEST_MAX_WRITE_TIMEOUT: NonNegativeInt = Field(
description="",
default=600,
)
HTTP_REQUEST_MAX_WRITE_TIMEOUT: Annotated[
PositiveInt, Field(ge=10, description="Maximum write timeout in seconds for HTTP requests")
] = 20
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: PositiveInt = Field(
description="",
description="Maximum allowed size in bytes for binary data in HTTP requests",
default=10 * 1024 * 1024,
)
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: PositiveInt = Field(
description="",
description="Maximum allowed size in bytes for text data in HTTP requests",
default=1 * 1024 * 1024,
)
SSRF_PROXY_HTTP_URL: Optional[str] = Field(
description="HTTP URL for SSRF proxy",
description="Proxy URL for HTTP requests to prevent Server-Side Request Forgery (SSRF)",
default=None,
)
SSRF_PROXY_HTTPS_URL: Optional[str] = Field(
description="HTTPS URL for SSRF proxy",
description="Proxy URL for HTTPS requests to prevent Server-Side Request Forgery (SSRF)",
default=None,
)
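The Annotated form used for the timeout fields above deserves a short sketch: the Field metadata gives pydantic a validation floor, while the trailing `= 10` supplies the default. The demo class and values below are illustrative:
from typing import Annotated

from pydantic import Field, PositiveInt, ValidationError
from pydantic_settings import BaseSettings

class TimeoutDemo(BaseSettings):
    CONNECT_TIMEOUT: Annotated[PositiveInt, Field(ge=10)] = 10

TimeoutDemo(CONNECT_TIMEOUT=30)  # accepted
try:
    TimeoutDemo(CONNECT_TIMEOUT=5)  # rejected: violates the ge=10 constraint
except ValidationError as exc:
    print(exc)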
class InnerAPIConfig(BaseSettings):
"""
Inner API configs
Configuration for internal API functionality
"""
INNER_API: bool = Field(
description="whether to enable the inner API",
description="Enable or disable the internal API",
default=False,
)
INNER_API_KEY: Optional[str] = Field(
description="The inner API key is used to authenticate the inner API",
description="API key for accessing the internal API",
default=None,
)
class LoggingConfig(BaseSettings):
"""
Logging configs
Configuration for application logging
"""
LOG_LEVEL: str = Field(
description="Log output level, default to INFO." "It is recommended to set it to ERROR for production.",
description="Logging level, default to INFO. Set to ERROR for production environments.",
default="INFO",
)
LOG_FILE: Optional[str] = Field(
description="logging output file path",
description="File path for log output.",
default=None,
)
LOG_FORMAT: str = Field(
description="log format",
description="Format string for log messages",
default="%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] [%(filename)s:%(lineno)d] - %(message)s",
)
LOG_DATEFORMAT: Optional[str] = Field(
description="log date format",
description="Date format string for log timestamps",
default=None,
)
LOG_TZ: Optional[str] = Field(
description="specify log timezone, eg: America/New_York",
description="Timezone for log timestamps (e.g., 'America/New_York')",
default=None,
)
class ModelLoadBalanceConfig(BaseSettings):
"""
Model load balance configs
Configuration for model load balancing
"""
MODEL_LB_ENABLED: bool = Field(
description="whether to enable model load balancing",
description="Enable or disable load balancing for models",
default=False,
)
class BillingConfig(BaseSettings):
"""
Platform Billing Configurations
Configuration for platform billing features
"""
BILLING_ENABLED: bool = Field(
description="whether to enable billing",
description="Enable or disable billing functionality",
default=False,
)
class UpdateConfig(BaseSettings):
"""
Update configs
Configuration for application update checks
"""
CHECK_UPDATE_URL: str = Field(
description="url for checking updates",
description="URL to check for application updates",
default="https://updates.dify.ai",
)
class WorkflowConfig(BaseSettings):
"""
Workflow feature configs
Configuration for workflow execution
"""
WORKFLOW_MAX_EXECUTION_STEPS: PositiveInt = Field(
description="max execution steps in single workflow execution",
description="Maximum number of steps allowed in a single workflow execution",
default=500,
)
WORKFLOW_MAX_EXECUTION_TIME: PositiveInt = Field(
description="max execution time in seconds in single workflow execution",
description="Maximum execution time in seconds for a single workflow",
default=1200,
)
WORKFLOW_CALL_MAX_DEPTH: PositiveInt = Field(
description="max depth of calling in single workflow execution",
description="Maximum allowed depth for nested workflow calls",
default=5,
)
MAX_VARIABLE_SIZE: PositiveInt = Field(
description="The maximum size in bytes of a variable. default to 5KB.",
description="Maximum size in bytes for a single variable in workflows. Default to 5KB.",
default=5 * 1024,
)
class OAuthConfig(BaseSettings):
"""
oauth configs
Configuration for OAuth authentication
"""
OAUTH_REDIRECT_PATH: str = Field(
description="redirect path for OAuth",
description="Redirect path for OAuth authentication callbacks",
default="/console/api/oauth/authorize",
)
GITHUB_CLIENT_ID: Optional[str] = Field(
description="GitHub client id for OAuth",
description="GitHub OAuth client ID",
default=None,
)
GITHUB_CLIENT_SECRET: Optional[str] = Field(
description="GitHub client secret key for OAuth",
description="GitHub OAuth client secret",
default=None,
)
GOOGLE_CLIENT_ID: Optional[str] = Field(
description="Google client id for OAuth",
description="Google OAuth client ID",
default=None,
)
GOOGLE_CLIENT_SECRET: Optional[str] = Field(
description="Google client secret key for OAuth",
description="Google OAuth client secret",
default=None,
)
class ModerationConfig(BaseSettings):
"""
Moderation in app configs.
Configuration for content moderation
"""
MODERATION_BUFFER_SIZE: PositiveInt = Field(
description="buffer size for moderation",
description="Size of the buffer for content moderation processing",
default=300,
)
class ToolConfig(BaseSettings):
"""
Tool configs
Configuration for tool management
"""
TOOL_ICON_CACHE_MAX_AGE: PositiveInt = Field(
description="max age in seconds for tool icon caching",
description="Maximum age in seconds for caching tool icons",
default=3600,
)
class MailConfig(BaseSettings):
"""
Mail Configurations
Configuration for email services
"""
MAIL_TYPE: Optional[str] = Field(
description="Mail provider type name, default to None, availabile values are `smtp` and `resend`.",
description="Email service provider type ('smtp' or 'resend'), default to None.",
default=None,
)
MAIL_DEFAULT_SEND_FROM: Optional[str] = Field(
description="default email address for sending from ",
description="Default email address to use as the sender",
default=None,
)
RESEND_API_KEY: Optional[str] = Field(
description="API key for Resend",
description="API key for Resend email service",
default=None,
)
RESEND_API_URL: Optional[str] = Field(
description="API URL for Resend",
description="API URL for Resend email service",
default=None,
)
SMTP_SERVER: Optional[str] = Field(
description="smtp server host",
description="SMTP server hostname",
default=None,
)
SMTP_PORT: Optional[int] = Field(
description="smtp server port",
description="SMTP server port number",
default=465,
)
SMTP_USERNAME: Optional[str] = Field(
description="smtp server username",
description="Username for SMTP authentication",
default=None,
)
SMTP_PASSWORD: Optional[str] = Field(
description="smtp server password",
description="Password for SMTP authentication",
default=None,
)
SMTP_USE_TLS: bool = Field(
description="whether to use TLS connection to smtp server",
description="Enable TLS encryption for SMTP connections",
default=False,
)
SMTP_OPPORTUNISTIC_TLS: bool = Field(
description="whether to use opportunistic TLS connection to smtp server",
description="Enable opportunistic TLS for SMTP connections",
default=False,
)
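A minimal smtplib sketch of one common way the two TLS flags are interpreted; hosts and credentials are placeholders, and the branching is an assumption about intent, not the project's mail client:
import smtplib

use_tls, opportunistic_tls = True, False  # SMTP_USE_TLS, SMTP_OPPORTUNISTIC_TLS
if use_tls and not opportunistic_tls:
    smtp = smtplib.SMTP_SSL("smtp.example.com", 465)  # implicit TLS from the first byte
else:
    smtp = smtplib.SMTP("smtp.example.com", 587)
    if opportunistic_tls:
        smtp.starttls()  # upgrade the plain connection after connecting
smtp.login("user@example.com", "password")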
class RagEtlConfig(BaseSettings):
"""
RAG ETL Configurations.
Configuration for RAG ETL processes
"""
ETL_TYPE: str = Field(
description="RAG ETL type name, default to `dify`, available values are `dify` and `Unstructured`. ",
description="RAG ETL type ('dify' or 'Unstructured'), default to 'dify'",
default="dify",
)
KEYWORD_DATA_SOURCE_TYPE: str = Field(
description="source type for keyword data, default to `database`, available values are `database` .",
description="Data source type for keyword extraction"
" ('database' or other supported types), default to 'database'",
default="database",
)
UNSTRUCTURED_API_URL: Optional[str] = Field(
description="API URL for Unstructured",
description="API URL for Unstructured.io service",
default=None,
)
UNSTRUCTURED_API_KEY: Optional[str] = Field(
description="API key for Unstructured",
description="API key for Unstructured.io service",
default=None,
)
class DataSetConfig(BaseSettings):
"""
Dataset configs
Configuration for dataset management
"""
CLEAN_DAY_SETTING: PositiveInt = Field(
description="interval in days for cleaning up dataset",
description="Interval in days for dataset cleanup operations",
default=30,
)
DATASET_OPERATOR_ENABLED: bool = Field(
description="whether to enable dataset operator",
description="Enable or disable dataset operator functionality",
default=False,
)
class WorkspaceConfig(BaseSettings):
"""
Workspace configs
Configuration for workspace management
"""
INVITE_EXPIRY_HOURS: PositiveInt = Field(
description="workspaces invitation expiration in hours",
description="Expiration time in hours for workspace invitation links",
default=72,
)
class IndexingConfig(BaseSettings):
"""
Indexing configs.
Configuration for indexing operations
"""
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: PositiveInt = Field(
description="max segmentation token length for indexing",
description="Maximum token length for text segmentation during indexing",
default=1000,
)
class ImageFormatConfig(BaseSettings):
MULTIMODAL_SEND_IMAGE_FORMAT: str = Field(
description="multi model send image format, support base64, url, default is base64",
description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64",
default="base64",
)
class CeleryBeatConfig(BaseSettings):
CELERY_BEAT_SCHEDULER_TIME: int = Field(
description="the time of the celery scheduler, default to 1 day",
description="Interval in days for Celery Beat scheduler execution, default to 1 day",
default=1,
)
class PositionConfig(BaseSettings):
POSITION_PROVIDER_PINS: str = Field(
description="The heads of model providers",
description="Comma-separated list of pinned model providers",
default="",
)
POSITION_PROVIDER_INCLUDES: str = Field(
description="The included model providers",
description="Comma-separated list of included model providers",
default="",
)
POSITION_PROVIDER_EXCLUDES: str = Field(
description="The excluded model providers",
description="Comma-separated list of excluded model providers",
default="",
)
POSITION_TOOL_PINS: str = Field(
description="The heads of tools",
description="Comma-separated list of pinned tools",
default="",
)
POSITION_TOOL_INCLUDES: str = Field(
description="The included tools",
description="Comma-separated list of included tools",
default="",
)
POSITION_TOOL_EXCLUDES: str = Field(
description="The excluded tools",
description="Comma-separated list of excluded tools",
default="",
)
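Since each of these position fields is a comma-separated string, parsing is a one-liner; the helper name below is hypothetical:
def parse_position_list(value: str) -> list[str]:
    """Split a comma-separated setting into a clean list of names."""
    return [item.strip() for item in value.split(",") if item.strip()]

print(parse_position_list("openai, anthropic,,"))  # ['openai', 'anthropic']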


@@ -6,31 +6,31 @@ from pydantic_settings import BaseSettings
class HostedOpenAiConfig(BaseSettings):
"""
Hosted OpenAI service config
Configuration for hosted OpenAI service
"""
HOSTED_OPENAI_API_KEY: Optional[str] = Field(
description="",
description="API key for hosted OpenAI service",
default=None,
)
HOSTED_OPENAI_API_BASE: Optional[str] = Field(
description="",
description="Base URL for hosted OpenAI API",
default=None,
)
HOSTED_OPENAI_API_ORGANIZATION: Optional[str] = Field(
description="",
description="Organization ID for hosted OpenAI service",
default=None,
)
HOSTED_OPENAI_TRIAL_ENABLED: bool = Field(
description="",
description="Enable trial access to hosted OpenAI service",
default=False,
)
HOSTED_OPENAI_TRIAL_MODELS: str = Field(
description="",
description="Comma-separated list of available models for trial access",
default="gpt-3.5-turbo,"
"gpt-3.5-turbo-1106,"
"gpt-3.5-turbo-instruct,"
@@ -42,17 +42,17 @@ class HostedOpenAiConfig(BaseSettings):
)
HOSTED_OPENAI_QUOTA_LIMIT: NonNegativeInt = Field(
description="",
description="Quota limit for hosted OpenAI service usage",
default=200,
)
HOSTED_OPENAI_PAID_ENABLED: bool = Field(
description="",
description="Enable paid access to hosted OpenAI service",
default=False,
)
HOSTED_OPENAI_PAID_MODELS: str = Field(
description="",
description="Comma-separated list of available models for paid access",
default="gpt-4,"
"gpt-4-turbo-preview,"
"gpt-4-turbo-2024-04-09,"
@@ -71,124 +71,122 @@ class HostedOpenAiConfig(BaseSettings):
class HostedAzureOpenAiConfig(BaseSettings):
"""
Hosted OpenAI service config
Configuration for hosted Azure OpenAI service
"""
HOSTED_AZURE_OPENAI_ENABLED: bool = Field(
description="",
description="Enable hosted Azure OpenAI service",
default=False,
)
HOSTED_AZURE_OPENAI_API_KEY: Optional[str] = Field(
description="",
description="API key for hosted Azure OpenAI service",
default=None,
)
HOSTED_AZURE_OPENAI_API_BASE: Optional[str] = Field(
description="",
description="Base URL for hosted Azure OpenAI API",
default=None,
)
HOSTED_AZURE_OPENAI_QUOTA_LIMIT: NonNegativeInt = Field(
description="",
description="Quota limit for hosted Azure OpenAI service usage",
default=200,
)
class HostedAnthropicConfig(BaseSettings):
"""
Hosted Azure OpenAI service config
Configuration for hosted Anthropic service
"""
HOSTED_ANTHROPIC_API_BASE: Optional[str] = Field(
description="",
description="Base URL for hosted Anthropic API",
default=None,
)
HOSTED_ANTHROPIC_API_KEY: Optional[str] = Field(
description="",
description="API key for hosted Anthropic service",
default=None,
)
HOSTED_ANTHROPIC_TRIAL_ENABLED: bool = Field(
description="",
description="Enable trial access to hosted Anthropic service",
default=False,
)
HOSTED_ANTHROPIC_QUOTA_LIMIT: NonNegativeInt = Field(
description="",
description="Quota limit for hosted Anthropic service usage",
default=600000,
)
HOSTED_ANTHROPIC_PAID_ENABLED: bool = Field(
description="",
description="Enable paid access to hosted Anthropic service",
default=False,
)
class HostedMinmaxConfig(BaseSettings):
"""
Hosted Minmax service config
Configuration for hosted Minmax service
"""
HOSTED_MINIMAX_ENABLED: bool = Field(
description="",
description="Enable hosted Minmax service",
default=False,
)
class HostedSparkConfig(BaseSettings):
"""
Hosted Spark service config
Configuration for hosted Spark service
"""
HOSTED_SPARK_ENABLED: bool = Field(
description="",
description="Enable hosted Spark service",
default=False,
)
class HostedZhipuAIConfig(BaseSettings):
"""
Hosted Minmax service config
Configuration for hosted ZhipuAI service
"""
HOSTED_ZHIPUAI_ENABLED: bool = Field(
description="",
description="Enable hosted ZhipuAI service",
default=False,
)
class HostedModerationConfig(BaseSettings):
"""
Hosted Moderation service config
Configuration for hosted Moderation service
"""
HOSTED_MODERATION_ENABLED: bool = Field(
description="",
description="Enable hosted Moderation service",
default=False,
)
HOSTED_MODERATION_PROVIDERS: str = Field(
description="",
description="Comma-separated list of moderation providers",
default="",
)
class HostedFetchAppTemplateConfig(BaseSettings):
"""
Hosted Moderation service config
Configuration for fetching app templates
"""
HOSTED_FETCH_APP_TEMPLATES_MODE: str = Field(
description="the mode for fetching app templates,"
" default to remote,"
" available values: remote, db, builtin",
description="Mode for fetching app templates ('remote', 'db', or 'builtin'), default to remote",
default="remote",
)
HOSTED_FETCH_APP_TEMPLATES_REMOTE_DOMAIN: str = Field(
description="the domain for fetching remote app templates",
description="Domain for fetching remote app templates",
default="https://tmpl.dify.ai",
)


@@ -1,16 +1,19 @@
from typing import Any, Optional
from urllib.parse import quote_plus
from pydantic import Field, NonNegativeInt, PositiveInt, computed_field
from pydantic import Field, NonNegativeInt, PositiveFloat, PositiveInt, computed_field
from pydantic_settings import BaseSettings
from configs.middleware.cache.redis_config import RedisConfig
from configs.middleware.external.bedrock_config import BedrockConfig
from configs.middleware.storage.aliyun_oss_storage_config import AliyunOSSStorageConfig
from configs.middleware.storage.amazon_s3_storage_config import S3StorageConfig
from configs.middleware.storage.azure_blob_storage_config import AzureBlobStorageConfig
from configs.middleware.storage.google_cloud_storage_config import GoogleCloudStorageConfig
from configs.middleware.storage.huawei_obs_storage_config import HuaweiCloudOBSStorageConfig
from configs.middleware.storage.oci_storage_config import OCIStorageConfig
from configs.middleware.storage.tencent_cos_storage_config import TencentCloudCOSStorageConfig
from configs.middleware.storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig
from configs.middleware.vdb.analyticdb_config import AnalyticdbConfig
from configs.middleware.vdb.chroma_config import ChromaConfig
from configs.middleware.vdb.elasticsearch_config import ElasticsearchConfig
@@ -29,70 +32,71 @@ from configs.middleware.vdb.weaviate_config import WeaviateConfig
class StorageConfig(BaseSettings):
STORAGE_TYPE: str = Field(
description="storage type,"
" default to `local`,"
" available values are `local`, `s3`, `azure-blob`, `aliyun-oss`, `google-storage`.",
description="Type of storage to use."
" Options: 'local', 's3', 'azure-blob', 'aliyun-oss', 'google-storage'. Default is 'local'.",
default="local",
)
STORAGE_LOCAL_PATH: str = Field(
description="local storage path",
description="Path for local storage when STORAGE_TYPE is set to 'local'.",
default="storage",
)
class VectorStoreConfig(BaseSettings):
VECTOR_STORE: Optional[str] = Field(
description="vector store type",
description="Type of vector store to use for efficient similarity search."
" Set to None if not using a vector store.",
default=None,
)
class KeywordStoreConfig(BaseSettings):
KEYWORD_STORE: str = Field(
description="keyword store type",
description="Method for keyword extraction and storage."
" Default is 'jieba', a Chinese text segmentation library.",
default="jieba",
)
class DatabaseConfig:
DB_HOST: str = Field(
description="db host",
description="Hostname or IP address of the database server.",
default="localhost",
)
DB_PORT: PositiveInt = Field(
description="db port",
description="Port number for database connection.",
default=5432,
)
DB_USERNAME: str = Field(
description="db username",
description="Username for database authentication.",
default="postgres",
)
DB_PASSWORD: str = Field(
description="db password",
description="Password for database authentication.",
default="",
)
DB_DATABASE: str = Field(
description="db database",
description="Name of the database to connect to.",
default="dify",
)
DB_CHARSET: str = Field(
description="db charset",
description="Character set for database connection.",
default="",
)
DB_EXTRAS: str = Field(
description="db extras options. Example: keepalives_idle=60&keepalives=1",
description="Additional database connection parameters. Example: 'keepalives_idle=60&keepalives=1'",
default="",
)
SQLALCHEMY_DATABASE_URI_SCHEME: str = Field(
description="db uri scheme",
description="Database URI scheme for SQLAlchemy connection.",
default="postgresql",
)
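A sketch of how these DB_* fields usually combine into a SQLAlchemy URI, which is what the quote_plus import at the top of this file serves; the computed property itself is outside this hunk, so the values below are illustrative:
from urllib.parse import quote_plus

db_password = "p@ss/word"
uri = f"postgresql://postgres:{quote_plus(db_password)}@localhost:5432/dify"
print(uri)  # postgresql://postgres:p%40ss%2Fword@localhost:5432/dify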
@@ -110,27 +114,27 @@ class DatabaseConfig:
)
SQLALCHEMY_POOL_SIZE: NonNegativeInt = Field(
description="pool size of SqlAlchemy",
description="Maximum number of database connections in the pool.",
default=30,
)
SQLALCHEMY_MAX_OVERFLOW: NonNegativeInt = Field(
description="max overflows for SqlAlchemy",
description="Maximum number of connections that can be created beyond the pool_size.",
default=10,
)
SQLALCHEMY_POOL_RECYCLE: NonNegativeInt = Field(
description="SqlAlchemy pool recycle",
description="Number of seconds after which a connection is automatically recycled.",
default=3600,
)
SQLALCHEMY_POOL_PRE_PING: bool = Field(
description="whether to enable pool pre-ping in SqlAlchemy",
description="If True, enables connection pool pre-ping feature to check connections.",
default=False,
)
SQLALCHEMY_ECHO: bool | str = Field(
description="whether to enable SqlAlchemy echo",
description="If True, SQLAlchemy will log all SQL statements.",
default=False,
)
@@ -148,15 +152,30 @@ class DatabaseConfig:
class CeleryConfig(DatabaseConfig):
CELERY_BACKEND: str = Field(
description="Celery backend, available values are `database`, `redis`",
description="Backend for Celery task results. Options: 'database', 'redis'.",
default="database",
)
CELERY_BROKER_URL: Optional[str] = Field(
description="CELERY_BROKER_URL",
description="URL of the message broker for Celery tasks.",
default=None,
)
CELERY_USE_SENTINEL: Optional[bool] = Field(
description="Whether to use Redis Sentinel for high availability.",
default=False,
)
CELERY_SENTINEL_MASTER_NAME: Optional[str] = Field(
description="Name of the Redis Sentinel master.",
default=None,
)
CELERY_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
description="Timeout for Redis Sentinel socket operations in seconds.",
default=0.1,
)
@computed_field
@property
def CELERY_RESULT_BACKEND(self) -> str | None:
@@ -184,6 +203,8 @@ class MiddlewareConfig(
AzureBlobStorageConfig,
GoogleCloudStorageConfig,
TencentCloudCOSStorageConfig,
HuaweiCloudOBSStorageConfig,
VolcengineTOSStorageConfig,
S3StorageConfig,
OCIStorageConfig,
# configs of vdb and vdb providers
@@ -202,5 +223,6 @@ class MiddlewareConfig(
TiDBVectorConfig,
WeaviateConfig,
ElasticsearchConfig,
BedrockConfig,
):
pass
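MiddlewareConfig merges its many settings groups through plain multiple inheritance; a reduced sketch of the pattern with illustrative classes:
from pydantic import Field
from pydantic_settings import BaseSettings

class StorageSettings(BaseSettings):
    STORAGE_TYPE: str = Field(default="local")

class VectorStoreSettings(BaseSettings):
    VECTOR_STORE: str | None = Field(default=None)

class ComposedSettings(StorageSettings, VectorStoreSettings):
    pass

settings = ComposedSettings()
print(settings.STORAGE_TYPE, settings.VECTOR_STORE)  # fields from every mixin on one object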


@@ -1,40 +1,70 @@
from typing import Optional
from pydantic import Field, NonNegativeInt, PositiveInt
from pydantic import Field, NonNegativeInt, PositiveFloat, PositiveInt
from pydantic_settings import BaseSettings
class RedisConfig(BaseSettings):
"""
Redis configs
Configuration settings for Redis connection
"""
REDIS_HOST: str = Field(
description="Redis host",
description="Hostname or IP address of the Redis server",
default="localhost",
)
REDIS_PORT: PositiveInt = Field(
description="Redis port",
description="Port number on which the Redis server is listening",
default=6379,
)
REDIS_USERNAME: Optional[str] = Field(
description="Redis username",
description="Username for Redis authentication (if required)",
default=None,
)
REDIS_PASSWORD: Optional[str] = Field(
description="Redis password",
description="Password for Redis authentication (if required)",
default=None,
)
REDIS_DB: NonNegativeInt = Field(
description="Redis database id, default to 0",
description="Redis database number to use (0-15)",
default=0,
)
REDIS_USE_SSL: bool = Field(
description="whether to use SSL for Redis connection",
description="Enable SSL/TLS for the Redis connection",
default=False,
)
REDIS_USE_SENTINEL: Optional[bool] = Field(
description="Enable Redis Sentinel mode for high availability",
default=False,
)
REDIS_SENTINELS: Optional[str] = Field(
description="Comma-separated list of Redis Sentinel nodes (host:port)",
default=None,
)
REDIS_SENTINEL_SERVICE_NAME: Optional[str] = Field(
description="Name of the Redis Sentinel service to monitor",
default=None,
)
REDIS_SENTINEL_USERNAME: Optional[str] = Field(
description="Username for Redis Sentinel authentication (if required)",
default=None,
)
REDIS_SENTINEL_PASSWORD: Optional[str] = Field(
description="Password for Redis Sentinel authentication (if required)",
default=None,
)
REDIS_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
description="Socket timeout in seconds for Redis Sentinel connections",
default=0.1,
)
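A minimal sketch of how the sentinel settings above map onto redis-py; host names and the service name are placeholders:
from redis.sentinel import Sentinel

sentinel = Sentinel(
    [("sentinel-1", 26379), ("sentinel-2", 26379)],  # parsed from REDIS_SENTINELS
    socket_timeout=0.1,                              # REDIS_SENTINEL_SOCKET_TIMEOUT
    sentinel_kwargs={"username": None, "password": None},  # sentinel auth, if required
)
master = sentinel.master_for("mymaster", db=0)  # REDIS_SENTINEL_SERVICE_NAME, REDIS_DB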


@@ -0,0 +1,20 @@
from typing import Optional
from pydantic import Field
from pydantic_settings import BaseSettings
class BedrockConfig(BaseSettings):
"""
Configuration settings for AWS Bedrock
"""
AWS_SECRET_ACCESS_KEY: Optional[str] = Field(
description="AWS secret access key",
default=None,
)
AWS_ACCESS_KEY_ID: Optional[str] = Field(
description="AWS access key ID",
default=None,
)


@@ -6,35 +6,40 @@ from pydantic_settings import BaseSettings
class AliyunOSSStorageConfig(BaseSettings):
"""
Aliyun storage configs
Configuration settings for Aliyun Object Storage Service (OSS)
"""
ALIYUN_OSS_BUCKET_NAME: Optional[str] = Field(
description="Aliyun OSS bucket name",
description="Name of the Aliyun OSS bucket to store and retrieve objects",
default=None,
)
ALIYUN_OSS_ACCESS_KEY: Optional[str] = Field(
description="Aliyun OSS access key",
description="Access key ID for authenticating with Aliyun OSS",
default=None,
)
ALIYUN_OSS_SECRET_KEY: Optional[str] = Field(
description="Aliyun OSS secret key",
description="Secret access key for authenticating with Aliyun OSS",
default=None,
)
ALIYUN_OSS_ENDPOINT: Optional[str] = Field(
description="Aliyun OSS endpoint URL",
description="URL of the Aliyun OSS endpoint for your chosen region",
default=None,
)
ALIYUN_OSS_REGION: Optional[str] = Field(
description="Aliyun OSS region",
description="Aliyun OSS region where your bucket is located (e.g., 'oss-cn-hangzhou')",
default=None,
)
ALIYUN_OSS_AUTH_VERSION: Optional[str] = Field(
description="Aliyun OSS authentication version",
description="Version of the authentication protocol to use with Aliyun OSS (e.g., 'v4')",
default=None,
)
ALIYUN_OSS_PATH: Optional[str] = Field(
description="Base path within the bucket to store objects (e.g., 'my-app-data/')",
default=None,
)


@@ -6,40 +6,40 @@ from pydantic_settings import BaseSettings
class S3StorageConfig(BaseSettings):
"""
S3 storage configs
Configuration settings for S3-compatible object storage
"""
S3_ENDPOINT: Optional[str] = Field(
description="S3 storage endpoint",
description="URL of the S3-compatible storage endpoint (e.g., 'https://s3.amazonaws.com')",
default=None,
)
S3_REGION: Optional[str] = Field(
description="S3 storage region",
description="Region where the S3 bucket is located (e.g., 'us-east-1')",
default=None,
)
S3_BUCKET_NAME: Optional[str] = Field(
description="S3 storage bucket name",
description="Name of the S3 bucket to store and retrieve objects",
default=None,
)
S3_ACCESS_KEY: Optional[str] = Field(
description="S3 storage access key",
description="Access key ID for authenticating with the S3 service",
default=None,
)
S3_SECRET_KEY: Optional[str] = Field(
description="S3 storage secret key",
description="Secret access key for authenticating with the S3 service",
default=None,
)
S3_ADDRESS_STYLE: str = Field(
description="S3 storage address style",
description="S3 addressing style: 'auto', 'path', or 'virtual'",
default="auto",
)
S3_USE_AWS_MANAGED_IAM: bool = Field(
description="whether to use aws managed IAM for S3",
description="Use AWS managed IAM roles for authentication instead of access/secret keys",
default=False,
)
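For illustration, the S3_* fields map directly onto a boto3 client; all values are placeholders, and with S3_USE_AWS_MANAGED_IAM enabled the two key arguments would simply be omitted so the SDK falls back to the IAM role:
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="https://s3.amazonaws.com",  # S3_ENDPOINT
    region_name="us-east-1",                  # S3_REGION
    aws_access_key_id="AKIAEXAMPLE",          # S3_ACCESS_KEY
    aws_secret_access_key="secret-example",   # S3_SECRET_KEY
)
s3.list_objects_v2(Bucket="my-bucket")        # bucket name from S3_BUCKET_NAME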


@@ -6,25 +6,25 @@ from pydantic_settings import BaseSettings
class AzureBlobStorageConfig(BaseSettings):
"""
Azure Blob storage configs
Configuration settings for Azure Blob Storage
"""
AZURE_BLOB_ACCOUNT_NAME: Optional[str] = Field(
description="Azure Blob account name",
description="Name of the Azure Storage account (e.g., 'mystorageaccount')",
default=None,
)
AZURE_BLOB_ACCOUNT_KEY: Optional[str] = Field(
description="Azure Blob account key",
description="Access key for authenticating with the Azure Storage account",
default=None,
)
AZURE_BLOB_CONTAINER_NAME: Optional[str] = Field(
description="Azure Blob container name",
description="Name of the Azure Blob container to store and retrieve objects",
default=None,
)
AZURE_BLOB_ACCOUNT_URL: Optional[str] = Field(
description="Azure Blob account URL",
description="URL of the Azure Blob storage endpoint (e.g., 'https://mystorageaccount.blob.core.windows.net')",
default=None,
)


@@ -6,15 +6,15 @@ from pydantic_settings import BaseSettings
class GoogleCloudStorageConfig(BaseSettings):
"""
Google Cloud storage configs
Configuration settings for Google Cloud Storage
"""
GOOGLE_STORAGE_BUCKET_NAME: Optional[str] = Field(
description="Google Cloud storage bucket name",
description="Name of the Google Cloud Storage bucket to store and retrieve objects (e.g., 'my-gcs-bucket')",
default=None,
)
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: Optional[str] = Field(
description="Google Cloud storage service account json base64",
description="Base64-encoded JSON key file for Google Cloud service account authentication",
default=None,
)


@@ -0,0 +1,29 @@
from typing import Optional
from pydantic import BaseModel, Field
class HuaweiCloudOBSStorageConfig(BaseModel):
"""
Configuration settings for Huawei Cloud Object Storage Service (OBS)
"""
HUAWEI_OBS_BUCKET_NAME: Optional[str] = Field(
description="Name of the Huawei Cloud OBS bucket to store and retrieve objects (e.g., 'my-obs-bucket')",
default=None,
)
HUAWEI_OBS_ACCESS_KEY: Optional[str] = Field(
description="Access Key ID for authenticating with Huawei Cloud OBS",
default=None,
)
HUAWEI_OBS_SECRET_KEY: Optional[str] = Field(
description="Secret Access Key for authenticating with Huawei Cloud OBS",
default=None,
)
HUAWEI_OBS_SERVER: Optional[str] = Field(
description="Endpoint URL for Huawei Cloud OBS (e.g., 'https://obs.cn-north-4.myhuaweicloud.com')",
default=None,
)


@@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class OCIStorageConfig(BaseSettings):
"""
OCI storage configs
Configuration settings for Oracle Cloud Infrastructure (OCI) Object Storage
"""
OCI_ENDPOINT: Optional[str] = Field(
description="OCI storage endpoint",
description="URL of the OCI Object Storage endpoint (e.g., 'https://objectstorage.us-phoenix-1.oraclecloud.com')",
default=None,
)
OCI_REGION: Optional[str] = Field(
description="OCI storage region",
description="OCI region where the bucket is located (e.g., 'us-phoenix-1')",
default=None,
)
OCI_BUCKET_NAME: Optional[str] = Field(
description="OCI storage bucket name",
description="Name of the OCI Object Storage bucket to store and retrieve objects (e.g., 'my-oci-bucket')",
default=None,
)
OCI_ACCESS_KEY: Optional[str] = Field(
description="OCI storage access key",
description="Access key (also known as API key) for authenticating with OCI Object Storage",
default=None,
)
OCI_SECRET_KEY: Optional[str] = Field(
description="OCI storage secret key",
description="Secret key associated with the access key for authenticating with OCI Object Storage",
default=None,
)


@@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class TencentCloudCOSStorageConfig(BaseSettings):
"""
Tencent Cloud COS storage configs
Configuration settings for Tencent Cloud Object Storage (COS)
"""
TENCENT_COS_BUCKET_NAME: Optional[str] = Field(
description="Tencent Cloud COS bucket name",
description="Name of the Tencent Cloud COS bucket to store and retrieve objects",
default=None,
)
TENCENT_COS_REGION: Optional[str] = Field(
description="Tencent Cloud COS region",
description="Tencent Cloud region where the COS bucket is located (e.g., 'ap-guangzhou')",
default=None,
)
TENCENT_COS_SECRET_ID: Optional[str] = Field(
description="Tencent Cloud COS secret id",
description="SecretId for authenticating with Tencent Cloud COS (part of API credentials)",
default=None,
)
TENCENT_COS_SECRET_KEY: Optional[str] = Field(
description="Tencent Cloud COS secret key",
description="SecretKey for authenticating with Tencent Cloud COS (part of API credentials)",
default=None,
)
TENCENT_COS_SCHEME: Optional[str] = Field(
description="Tencent Cloud COS scheme",
description="Protocol scheme for COS requests: 'https' (recommended) or 'http'",
default=None,
)


@@ -0,0 +1,34 @@
from typing import Optional
from pydantic import BaseModel, Field
class VolcengineTOSStorageConfig(BaseModel):
"""
Configuration settings for Volcengine Tinder Object Storage (TOS)
"""
VOLCENGINE_TOS_BUCKET_NAME: Optional[str] = Field(
description="Name of the Volcengine TOS bucket to store and retrieve objects (e.g., 'my-tos-bucket')",
default=None,
)
VOLCENGINE_TOS_ACCESS_KEY: Optional[str] = Field(
description="Access Key ID for authenticating with Volcengine TOS",
default=None,
)
VOLCENGINE_TOS_SECRET_KEY: Optional[str] = Field(
description="Secret Access Key for authenticating with Volcengine TOS",
default=None,
)
VOLCENGINE_TOS_ENDPOINT: Optional[str] = Field(
description="URL of the Volcengine TOS endpoint (e.g., 'https://tos-cn-beijing.volces.com')",
default=None,
)
VOLCENGINE_TOS_REGION: Optional[str] = Field(
description="Volcengine region where the TOS bucket is located (e.g., 'cn-beijing')",
default=None,
)


@@ -5,33 +5,38 @@ from pydantic import BaseModel, Field
class AnalyticdbConfig(BaseModel):
"""
Configuration for connecting to AnalyticDB.
Configuration for connecting to Alibaba Cloud AnalyticDB for PostgreSQL.
Refer to the following documentation for details on obtaining credentials:
https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/getting-started/create-an-instance-instances-with-vector-engine-optimization-enabled
"""
ANALYTICDB_KEY_ID: Optional[str] = Field(
default=None, description="The Access Key ID provided by Alibaba Cloud for authentication."
default=None, description="The Access Key ID provided by Alibaba Cloud for API authentication."
)
ANALYTICDB_KEY_SECRET: Optional[str] = Field(
default=None, description="The Secret Access Key corresponding to the Access Key ID for secure access."
default=None, description="The Secret Access Key corresponding to the Access Key ID for secure API access."
)
ANALYTICDB_REGION_ID: Optional[str] = Field(
default=None, description="The region where the AnalyticDB instance is deployed (e.g., 'cn-hangzhou')."
default=None,
description="The region where the AnalyticDB instance is deployed (e.g., 'cn-hangzhou', 'ap-southeast-1').",
)
ANALYTICDB_INSTANCE_ID: Optional[str] = Field(
default=None,
description="The unique identifier of the AnalyticDB instance you want to connect to (e.g., 'gp-ab123456')..",
description="The unique identifier of the AnalyticDB instance you want to connect to.",
)
ANALYTICDB_ACCOUNT: Optional[str] = Field(
default=None, description="The account name used to log in to the AnalyticDB instance."
default=None,
description="The account name used to log in to the AnalyticDB instance"
" (usually the initial account created with the instance).",
)
ANALYTICDB_PASSWORD: Optional[str] = Field(
default=None, description="The password associated with the AnalyticDB account for authentication."
default=None, description="The password associated with the AnalyticDB account for database authentication."
)
ANALYTICDB_NAMESPACE: Optional[str] = Field(
default=None, description="The namespace within AnalyticDB for schema isolation."
default=None, description="The namespace within AnalyticDB for schema isolation (if using namespace feature)."
)
ANALYTICDB_NAMESPACE_PASSWORD: Optional[str] = Field(
default=None, description="The password for accessing the specified namespace within the AnalyticDB instance."
default=None,
description="The password for accessing the specified namespace within the AnalyticDB instance"
" (if namespace feature is enabled).",
)

View File

@ -6,35 +6,35 @@ from pydantic_settings import BaseSettings
class ChromaConfig(BaseSettings):
"""
Chroma configs
Configuration settings for Chroma vector database
"""
CHROMA_HOST: Optional[str] = Field(
description="Chroma host",
description="Hostname or IP address of the Chroma server (e.g., 'localhost' or '192.168.1.100')",
default=None,
)
CHROMA_PORT: PositiveInt = Field(
description="Chroma port",
description="Port number on which the Chroma server is listening (default is 8000)",
default=8000,
)
CHROMA_TENANT: Optional[str] = Field(
description="Chroma database",
description="Tenant identifier for multi-tenancy support in Chroma",
default=None,
)
CHROMA_DATABASE: Optional[str] = Field(
description="Chroma database",
description="Name of the Chroma database to connect to",
default=None,
)
CHROMA_AUTH_PROVIDER: Optional[str] = Field(
description="Chroma authentication provider",
description="Authentication provider for Chroma (e.g., 'basic', 'token', or a custom provider)",
default=None,
)
CHROMA_AUTH_CREDENTIALS: Optional[str] = Field(
description="Chroma authentication credentials",
description="Authentication credentials for Chroma (format depends on the auth provider)",
default=None,
)
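For orientation, a minimal sketch of how these settings could feed a client, assuming the chromadb package; build_chroma_client and the fallback tenant/database names are illustrative, not part of this diff:

# Illustrative only: wire ChromaConfig into an HTTP client.
import chromadb
from chromadb.config import Settings

def build_chroma_client(config):
    return chromadb.HttpClient(
        host=config.CHROMA_HOST,
        port=config.CHROMA_PORT,
        tenant=config.CHROMA_TENANT or "default_tenant",        # Chroma's default tenant name
        database=config.CHROMA_DATABASE or "default_database",  # Chroma's default database name
        settings=Settings(
            chroma_client_auth_provider=config.CHROMA_AUTH_PROVIDER,
            chroma_client_auth_credentials=config.CHROMA_AUTH_CREDENTIALS,
        ),
    )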

View File

@ -6,25 +6,25 @@ from pydantic_settings import BaseSettings
class ElasticsearchConfig(BaseSettings):
"""
Elasticsearch configs
Configuration settings for Elasticsearch
"""
ELASTICSEARCH_HOST: Optional[str] = Field(
description="Elasticsearch host",
description="Hostname or IP address of the Elasticsearch server (e.g., 'localhost' or '192.168.1.100')",
default="127.0.0.1",
)
ELASTICSEARCH_PORT: PositiveInt = Field(
description="Elasticsearch port",
description="Port number on which the Elasticsearch server is listening (default is 9200)",
default=9200,
)
ELASTICSEARCH_USERNAME: Optional[str] = Field(
description="Elasticsearch username",
description="Username for authenticating with Elasticsearch (default is 'elastic')",
default="elastic",
)
ELASTICSEARCH_PASSWORD: Optional[str] = Field(
description="Elasticsearch password",
description="Password for authenticating with Elasticsearch (default is 'elastic')",
default="elastic",
)
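A hedged sketch of client construction from these values, assuming the elasticsearch-py 8.x package (build_es_client itself is illustrative):

# Illustrative only: wire ElasticsearchConfig into a client.
from elasticsearch import Elasticsearch

def build_es_client(config):
    return Elasticsearch(
        hosts=[f"http://{config.ELASTICSEARCH_HOST}:{config.ELASTICSEARCH_PORT}"],
        basic_auth=(config.ELASTICSEARCH_USERNAME, config.ELASTICSEARCH_PASSWORD),
    )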

View File

@ -1,40 +1,35 @@
from typing import Optional
from pydantic import Field, PositiveInt
from pydantic import Field
from pydantic_settings import BaseSettings
class MilvusConfig(BaseSettings):
"""
Milvus configs
Configuration settings for Milvus vector database
"""
MILVUS_HOST: Optional[str] = Field(
description="Milvus host",
MILVUS_URI: Optional[str] = Field(
description="URI for connecting to the Milvus server (e.g., 'http://localhost:19530' or 'https://milvus-instance.example.com:19530')",
default="http://127.0.0.1:19530",
)
MILVUS_TOKEN: Optional[str] = Field(
description="Authentication token for Milvus, if token-based authentication is enabled",
default=None,
)
MILVUS_PORT: PositiveInt = Field(
description="Milvus RestFul API port",
default=9091,
)
MILVUS_USER: Optional[str] = Field(
description="Milvus user",
description="Username for authenticating with Milvus, if username/password authentication is enabled",
default=None,
)
MILVUS_PASSWORD: Optional[str] = Field(
description="Milvus password",
description="Password for authenticating with Milvus, if username/password authentication is enabled",
default=None,
)
MILVUS_SECURE: bool = Field(
description="whether to use SSL connection for Milvus",
default=False,
)
MILVUS_DATABASE: str = Field(
description="Milvus database, default to `default`",
description="Name of the Milvus database to connect to (default is 'default')",
default="default",
)
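This hunk consolidates MILVUS_HOST, MILVUS_PORT, and MILVUS_SECURE into a single MILVUS_URI (the scheme now carries the SSL choice) and adds token auth. A minimal sketch of consuming the new fields, assuming pymilvus's MilvusClient (the helper is illustrative):

# Illustrative only: the URI carries scheme, host, and port, so the old
# MILVUS_SECURE/MILVUS_PORT switches are no longer needed; auth is either
# a token or a user/password pair.
from pymilvus import MilvusClient

def build_milvus_client(config):
    return MilvusClient(
        uri=config.MILVUS_URI,
        token=config.MILVUS_TOKEN or "",
        user=config.MILVUS_USER or "",
        password=config.MILVUS_PASSWORD or "",
        db_name=config.MILVUS_DATABASE,
    )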

View File

@ -3,35 +3,35 @@ from pydantic import BaseModel, Field, PositiveInt
class MyScaleConfig(BaseModel):
"""
MyScale configs
Configuration settings for MyScale vector database
"""
MYSCALE_HOST: str = Field(
description="MyScale host",
description="Hostname or IP address of the MyScale server (e.g., 'localhost' or 'myscale.example.com')",
default="localhost",
)
MYSCALE_PORT: PositiveInt = Field(
description="MyScale port",
description="Port number on which the MyScale server is listening (default is 8123)",
default=8123,
)
MYSCALE_USER: str = Field(
description="MyScale user",
description="Username for authenticating with MyScale (default is 'default')",
default="default",
)
MYSCALE_PASSWORD: str = Field(
description="MyScale password",
description="Password for authenticating with MyScale (default is an empty string)",
default="",
)
MYSCALE_DATABASE: str = Field(
description="MyScale database name",
description="Name of the MyScale database to connect to (default is 'default')",
default="default",
)
MYSCALE_FTS_PARAMS: str = Field(
description="MyScale fts index parameters",
description="Additional parameters for MyScale Full Text Search index)",
default="",
)

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class OpenSearchConfig(BaseSettings):
"""
OpenSearch configs
Configuration settings for OpenSearch
"""
OPENSEARCH_HOST: Optional[str] = Field(
description="OpenSearch host",
description="Hostname or IP address of the OpenSearch server (e.g., 'localhost' or 'opensearch.example.com')",
default=None,
)
OPENSEARCH_PORT: PositiveInt = Field(
description="OpenSearch port",
description="Port number on which the OpenSearch server is listening (default is 9200)",
default=9200,
)
OPENSEARCH_USER: Optional[str] = Field(
description="OpenSearch user",
description="Username for authenticating with OpenSearch",
default=None,
)
OPENSEARCH_PASSWORD: Optional[str] = Field(
description="OpenSearch password",
description="Password for authenticating with OpenSearch",
default=None,
)
OPENSEARCH_SECURE: bool = Field(
description="whether to use SSL connection for OpenSearch",
description="Whether to use SSL/TLS encrypted connection for OpenSearch (True for HTTPS, False for HTTP)",
default=False,
)

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class OracleConfig(BaseSettings):
"""
ORACLE configs
Configuration settings for Oracle database
"""
ORACLE_HOST: Optional[str] = Field(
description="ORACLE host",
description="Hostname or IP address of the Oracle database server (e.g., 'localhost' or 'oracle.example.com')",
default=None,
)
ORACLE_PORT: Optional[PositiveInt] = Field(
description="ORACLE port",
description="Port number on which the Oracle database server is listening (default is 1521)",
default=1521,
)
ORACLE_USER: Optional[str] = Field(
description="ORACLE user",
description="Username for authenticating with the Oracle database",
default=None,
)
ORACLE_PASSWORD: Optional[str] = Field(
description="ORACLE password",
description="Password for authenticating with the Oracle database",
default=None,
)
ORACLE_DATABASE: Optional[str] = Field(
description="ORACLE database",
description="Name of the Oracle database or service to connect to (e.g., 'ORCL' or 'pdborcl')",
default=None,
)

View File

@ -6,30 +6,40 @@ from pydantic_settings import BaseSettings
class PGVectorConfig(BaseSettings):
"""
PGVector configs
Configuration settings for PGVector (PostgreSQL with vector extension)
"""
PGVECTOR_HOST: Optional[str] = Field(
description="PGVector host",
description="Hostname or IP address of the PostgreSQL server with PGVector extension (e.g., 'localhost')",
default=None,
)
PGVECTOR_PORT: Optional[PositiveInt] = Field(
description="PGVector port",
description="Port number on which the PostgreSQL server is listening (default is 5433)",
default=5433,
)
PGVECTOR_USER: Optional[str] = Field(
description="PGVector user",
description="Username for authenticating with the PostgreSQL database",
default=None,
)
PGVECTOR_PASSWORD: Optional[str] = Field(
description="PGVector password",
description="Password for authenticating with the PostgreSQL database",
default=None,
)
PGVECTOR_DATABASE: Optional[str] = Field(
description="PGVector database",
description="Name of the PostgreSQL database to connect to",
default=None,
)
PGVECTOR_MIN_CONNECTION: PositiveInt = Field(
description="Min connection of the PostgreSQL database",
default=1,
)
PGVECTOR_MAX_CONNECTION: PositiveInt = Field(
description="Max connection of the PostgreSQL database",
default=5,
)
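The two new settings bound a connection pool rather than a single connection. A sketch under the assumption that psycopg2 is the driver (build_pgvector_pool is illustrative):

# Illustrative only: MIN/MAX bound the pool size instead of opening one
# connection per request.
from psycopg2 import pool

def build_pgvector_pool(config):
    return pool.SimpleConnectionPool(
        config.PGVECTOR_MIN_CONNECTION,
        config.PGVECTOR_MAX_CONNECTION,
        host=config.PGVECTOR_HOST,
        port=config.PGVECTOR_PORT,
        user=config.PGVECTOR_USER,
        password=config.PGVECTOR_PASSWORD,
        dbname=config.PGVECTOR_DATABASE,
    )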

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class PGVectoRSConfig(BaseSettings):
"""
PGVectoRS configs
Configuration settings for PGVecto.RS (Rust-based vector extension for PostgreSQL)
"""
PGVECTO_RS_HOST: Optional[str] = Field(
description="PGVectoRS host",
description="Hostname or IP address of the PostgreSQL server with PGVecto.RS extension (e.g., 'localhost')",
default=None,
)
PGVECTO_RS_PORT: Optional[PositiveInt] = Field(
description="PGVectoRS port",
description="Port number on which the PostgreSQL server with PGVecto.RS is listening (default is 5431)",
default=5431,
)
PGVECTO_RS_USER: Optional[str] = Field(
description="PGVectoRS user",
description="Username for authenticating with the PostgreSQL database using PGVecto.RS",
default=None,
)
PGVECTO_RS_PASSWORD: Optional[str] = Field(
description="PGVectoRS password",
description="Password for authenticating with the PostgreSQL database using PGVecto.RS",
default=None,
)
PGVECTO_RS_DATABASE: Optional[str] = Field(
description="PGVectoRS database",
description="Name of the PostgreSQL database with PGVecto.RS extension to connect to",
default=None,
)

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class QdrantConfig(BaseSettings):
"""
Qdrant configs
Configuration settings for Qdrant vector database
"""
QDRANT_URL: Optional[str] = Field(
description="Qdrant url",
description="URL of the Qdrant server (e.g., 'http://localhost:6333' or 'https://qdrant.example.com')",
default=None,
)
QDRANT_API_KEY: Optional[str] = Field(
description="Qdrant api key",
description="API key for authenticating with the Qdrant server",
default=None,
)
QDRANT_CLIENT_TIMEOUT: NonNegativeInt = Field(
description="Qdrant client timeout in seconds",
description="Timeout in seconds for Qdrant client operations (default is 20 seconds)",
default=20,
)
QDRANT_GRPC_ENABLED: bool = Field(
description="whether enable grpc support for Qdrant connection",
description="Whether to enable gRPC support for Qdrant connection (True for gRPC, False for HTTP)",
default=False,
)
QDRANT_GRPC_PORT: PositiveInt = Field(
description="Qdrant grpc port",
description="Port number for gRPC connection to Qdrant server (default is 6334)",
default=6334,
)
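A minimal sketch of how the gRPC toggle and port might be consumed, assuming the qdrant-client package (build_qdrant_client is illustrative):

# Illustrative only: prefer_grpc routes traffic over the gRPC port when enabled.
from qdrant_client import QdrantClient

def build_qdrant_client(config):
    return QdrantClient(
        url=config.QDRANT_URL,
        api_key=config.QDRANT_API_KEY,
        timeout=config.QDRANT_CLIENT_TIMEOUT,
        prefer_grpc=config.QDRANT_GRPC_ENABLED,
        grpc_port=config.QDRANT_GRPC_PORT,
    )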

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class RelytConfig(BaseSettings):
"""
Relyt configs
Configuration settings for Relyt database
"""
RELYT_HOST: Optional[str] = Field(
description="Relyt host",
description="Hostname or IP address of the Relyt server (e.g., 'localhost' or 'relyt.example.com')",
default=None,
)
RELYT_PORT: PositiveInt = Field(
description="Relyt port",
description="Port number on which the Relyt server is listening (default is 9200)",
default=9200,
)
RELYT_USER: Optional[str] = Field(
description="Relyt user",
description="Username for authenticating with the Relyt database",
default=None,
)
RELYT_PASSWORD: Optional[str] = Field(
description="Relyt password",
description="Password for authenticating with the Relyt database",
default=None,
)
RELYT_DATABASE: Optional[str] = Field(
description="Relyt database",
description="Name of the Relyt database to connect to (default is 'default')",
default="default",
)

View File

@ -6,45 +6,45 @@ from pydantic_settings import BaseSettings
class TencentVectorDBConfig(BaseSettings):
"""
Tencent Vector configs
Configuration settings for Tencent Vector Database
"""
TENCENT_VECTOR_DB_URL: Optional[str] = Field(
description="Tencent Vector URL",
description="URL of the Tencent Vector Database service (e.g., 'https://vectordb.tencentcloudapi.com')",
default=None,
)
TENCENT_VECTOR_DB_API_KEY: Optional[str] = Field(
description="Tencent Vector API key",
description="API key for authenticating with the Tencent Vector Database service",
default=None,
)
TENCENT_VECTOR_DB_TIMEOUT: PositiveInt = Field(
description="Tencent Vector timeout in seconds",
description="Timeout in seconds for Tencent Vector Database operations (default is 30 seconds)",
default=30,
)
TENCENT_VECTOR_DB_USERNAME: Optional[str] = Field(
description="Tencent Vector username",
description="Username for authenticating with the Tencent Vector Database (if required)",
default=None,
)
TENCENT_VECTOR_DB_PASSWORD: Optional[str] = Field(
description="Tencent Vector password",
description="Password for authenticating with the Tencent Vector Database (if required)",
default=None,
)
TENCENT_VECTOR_DB_SHARD: PositiveInt = Field(
description="Tencent Vector sharding number",
description="Number of shards for the Tencent Vector Database (default is 1)",
default=1,
)
TENCENT_VECTOR_DB_REPLICAS: NonNegativeInt = Field(
description="Tencent Vector replicas",
description="Number of replicas for the Tencent Vector Database (default is 2)",
default=2,
)
TENCENT_VECTOR_DB_DATABASE: Optional[str] = Field(
description="Tencent Vector Database",
description="Name of the specific Tencent Vector Database to connect to",
default=None,
)

View File

@ -6,30 +6,30 @@ from pydantic_settings import BaseSettings
class TiDBVectorConfig(BaseSettings):
"""
TiDB Vector configs
Configuration settings for TiDB Vector database
"""
TIDB_VECTOR_HOST: Optional[str] = Field(
description="TiDB Vector host",
description="Hostname or IP address of the TiDB Vector server (e.g., 'localhost' or 'tidb.example.com')",
default=None,
)
TIDB_VECTOR_PORT: Optional[PositiveInt] = Field(
description="TiDB Vector port",
description="Port number on which the TiDB Vector server is listening (default is 4000)",
default=4000,
)
TIDB_VECTOR_USER: Optional[str] = Field(
description="TiDB Vector user",
description="Username for authenticating with the TiDB Vector database",
default=None,
)
TIDB_VECTOR_PASSWORD: Optional[str] = Field(
description="TiDB Vector password",
description="Password for authenticating with the TiDB Vector database",
default=None,
)
TIDB_VECTOR_DATABASE: Optional[str] = Field(
description="TiDB Vector database",
description="Name of the TiDB Vector database to connect to",
default=None,
)

View File

@ -6,25 +6,25 @@ from pydantic_settings import BaseSettings
class WeaviateConfig(BaseSettings):
"""
Weaviate configs
Configuration settings for Weaviate vector database
"""
WEAVIATE_ENDPOINT: Optional[str] = Field(
description="Weaviate endpoint URL",
description="URL of the Weaviate server (e.g., 'http://localhost:8080' or 'https://weaviate.example.com')",
default=None,
)
WEAVIATE_API_KEY: Optional[str] = Field(
description="Weaviate API key",
description="API key for authenticating with the Weaviate server",
default=None,
)
WEAVIATE_GRPC_ENABLED: bool = Field(
description="whether to enable gRPC for Weaviate connection",
description="Whether to enable gRPC for Weaviate connection (True for gRPC, False for HTTP)",
default=True,
)
WEAVIATE_BATCH_SIZE: PositiveInt = Field(
description="Weaviate batch size",
description="Number of objects to be processed in a single batch operation (default is 100)",
default=100,
)

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.7.2",
default="0.8.3",
)
COMMIT_SHA: str = Field(

View File

@ -1 +1,2 @@
HIDDEN_VALUE = "[__HIDDEN__]"
UUID_NIL = "00000000-0000-0000-0000-000000000000"

File diff suppressed because one or more lines are too long

View File

@ -37,7 +37,17 @@ from .auth import activate, data_source_bearer_auth, data_source_oauth, forgot_p
from .billing import billing
# Import datasets controllers
from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing, website
from .datasets import (
data_source,
datasets,
datasets_document,
datasets_segments,
external,
file,
hit_testing,
test_external,
website,
)
# Import explore controllers
from .explore import (

View File

@ -60,23 +60,15 @@ class InsertExploreAppListApi(Resource):
site = app.site
if not site:
desc = args["desc"] if args["desc"] else ""
copy_right = args["copyright"] if args["copyright"] else ""
privacy_policy = args["privacy_policy"] if args["privacy_policy"] else ""
custom_disclaimer = args["custom_disclaimer"] if args["custom_disclaimer"] else ""
desc = args["desc"] or ""
copy_right = args["copyright"] or ""
privacy_policy = args["privacy_policy"] or ""
custom_disclaimer = args["custom_disclaimer"] or ""
else:
desc = site.description if site.description else args["desc"] if args["desc"] else ""
copy_right = site.copyright if site.copyright else args["copyright"] if args["copyright"] else ""
privacy_policy = (
site.privacy_policy if site.privacy_policy else args["privacy_policy"] if args["privacy_policy"] else ""
)
custom_disclaimer = (
site.custom_disclaimer
if site.custom_disclaimer
else args["custom_disclaimer"]
if args["custom_disclaimer"]
else ""
)
desc = site.description or args["desc"] or ""
copy_right = site.copyright or args["copyright"] or ""
privacy_policy = site.privacy_policy or args["privacy_policy"] or ""
custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or ""
recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args["app_id"]).first()
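The rewritten assignments rely on `or` returning its first truthy operand, which is equivalent to the old `x if x else y` chains for these string-or-None fields. A tiny illustration of the equivalence and its one caveat:

# `x if x else y` and `x or y` agree when x is a string or None:
desc = None
assert (desc if desc else "") == (desc or "")

# Caveat: `or` also discards other falsy values (e.g., "" or 0), which is
# the desired behavior here, since empty strings should fall through too.
assert ("" or "fallback") == "fallback"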

View File

@ -57,7 +57,7 @@ class BaseApiKeyListResource(Resource):
def post(self, resource_id):
resource_id = str(resource_id)
_get_resource(resource_id, current_user.current_tenant_id, self.resource_model)
if not current_user.is_admin_or_owner:
if not current_user.is_editor:
raise Forbidden()
current_key_count = (

View File

@ -174,6 +174,7 @@ class AppApi(Resource):
parser.add_argument("icon", type=str, location="json")
parser.add_argument("icon_background", type=str, location="json")
parser.add_argument("max_active_requests", type=int, location="json")
parser.add_argument("use_icon_as_answer_icon", type=bool, location="json")
args = parser.parse_args()
app_service = AppService()

View File

@ -94,19 +94,15 @@ class ChatMessageTextApi(Resource):
message_id = args.get("message_id", None)
text = args.get("text", None)
if (
app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
and app_model.workflow
and app_model.workflow.features_dict
):
text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
voice = args.get("voice") or text_to_speech.get("voice")
else:
try:
voice = (
args.get("voice")
if args.get("voice")
else app_model.app_model_config.text_to_speech_dict.get("voice")
)
voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
except Exception:
voice = None
response = AudioService.transcript_tts(app_model=app_model, text=text, message_id=message_id, voice=voice)

View File

@ -109,6 +109,7 @@ class ChatMessageApi(Resource):
parser.add_argument("files", type=list, required=False, location="json")
parser.add_argument("model_config", type=dict, required=True, location="json")
parser.add_argument("conversation_id", type=uuid_value, location="json")
parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json")
parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")
args = parser.parse_args()

View File

@ -20,7 +20,7 @@ from fields.conversation_fields import (
conversation_pagination_fields,
conversation_with_summary_pagination_fields,
)
from libs.helper import datetime_string
from libs.helper import DatetimeString
from libs.login import login_required
from models.model import AppMode, Conversation, EndUser, Message, MessageAnnotation
@ -36,8 +36,8 @@ class CompletionConversationApi(Resource):
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument("keyword", type=str, location="args")
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument(
"annotation_status", type=str, choices=["annotated", "not_annotated", "all"], default="all", location="args"
)
@ -143,8 +143,8 @@ class ChatConversationApi(Resource):
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument("keyword", type=str, location="args")
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument(
"annotation_status", type=str, choices=["annotated", "not_annotated", "all"], default="all", location="args"
)
@ -201,7 +201,11 @@ class ChatConversationApi(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
query = query.where(Conversation.created_at >= start_datetime_utc)
match args["sort_by"]:
case "updated_at" | "-updated_at":
query = query.where(Conversation.updated_at >= start_datetime_utc)
case "created_at" | "-created_at" | _:
query = query.where(Conversation.created_at >= start_datetime_utc)
if args["end"]:
end_datetime = datetime.strptime(args["end"], "%Y-%m-%d %H:%M")
@ -210,7 +214,11 @@ class ChatConversationApi(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
query = query.where(Conversation.created_at < end_datetime_utc)
match args["sort_by"]:
case "updated_at" | "-updated_at":
query = query.where(Conversation.updated_at <= end_datetime_utc)
case "created_at" | "-created_at" | _:
query = query.where(Conversation.created_at <= end_datetime_utc)
if args["annotation_status"] == "annotated":
query = query.options(joinedload(Conversation.message_annotations)).join(
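datetime_string, previously a closure factory, becomes the callable class DatetimeString; reqparse accepts any callable as `type`, so both forms validate identically. The implementation is not shown in this compare; a plausible sketch, assuming it mirrors the old factory's behavior (the StrLen rename further down follows the same pattern):

# Plausible shape of DatetimeString (not shown in this compare): a small
# callable class usable anywhere reqparse accepts a `type=` callable.
from datetime import datetime

class DatetimeString:
    def __init__(self, fmt: str):
        self.fmt = fmt

    def __call__(self, value: str) -> str:
        # strptime raises ValueError on mismatch, which reqparse reports as a 400.
        datetime.strptime(value, self.fmt)
        return value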

View File

@ -105,8 +105,6 @@ class ChatMessageListApi(Resource):
if rest_count > 0:
has_more = True
history_messages = list(reversed(history_messages))
return InfiniteScrollPagination(data=history_messages, limit=args["limit"], has_more=has_more)

View File

@ -34,6 +34,7 @@ def parse_app_site_args():
)
parser.add_argument("prompt_public", type=bool, required=False, location="json")
parser.add_argument("show_workflow_steps", type=bool, required=False, location="json")
parser.add_argument("use_icon_as_answer_icon", type=bool, required=False, location="json")
return parser.parse_args()
@ -68,6 +69,7 @@ class AppSite(Resource):
"customize_token_strategy",
"prompt_public",
"show_workflow_steps",
"use_icon_as_answer_icon",
]:
value = args.get(attr_name)
if value is not None:

View File

@ -11,7 +11,7 @@ from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import datetime_string
from libs.helper import DatetimeString
from libs.login import login_required
from models.model import AppMode
@ -25,14 +25,17 @@ class DailyMessageStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(*) AS message_count
FROM messages where app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(*) AS message_count
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -45,7 +48,7 @@ class DailyMessageStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -55,10 +58,10 @@ class DailyMessageStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -79,14 +82,17 @@ class DailyConversationStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.conversation_id) AS conversation_count
FROM messages where app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(DISTINCT messages.conversation_id) AS conversation_count
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -99,7 +105,7 @@ class DailyConversationStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -109,10 +115,10 @@ class DailyConversationStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -133,14 +139,17 @@ class DailyTerminalsStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.from_end_user_id) AS terminal_count
FROM messages where app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(DISTINCT messages.from_end_user_id) AS terminal_count
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -153,7 +162,7 @@ class DailyTerminalsStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -163,10 +172,10 @@ class DailyTerminalsStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -187,16 +196,18 @@ class DailyTokenCostStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
(sum(messages.message_tokens) + sum(messages.answer_tokens)) as token_count,
sum(total_price) as total_price
FROM messages where app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
(SUM(messages.message_tokens) + SUM(messages.answer_tokens)) AS token_count,
SUM(total_price) AS total_price
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -209,7 +220,7 @@ class DailyTokenCostStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -219,10 +230,10 @@ class DailyTokenCostStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -245,16 +256,26 @@ class AverageSessionInteractionStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(subquery.message_count) AS interactions
FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
FROM conversations c
JOIN messages m ON c.id = m.conversation_id
WHERE c.override_model_configs IS NULL AND c.app_id = :app_id"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(subquery.message_count) AS interactions
FROM
(
SELECT
m.conversation_id,
COUNT(m.id) AS message_count
FROM
conversations c
JOIN
messages m
ON c.id = m.conversation_id
WHERE
c.override_model_configs IS NULL
AND c.app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -267,7 +288,7 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and c.created_at >= :start"
sql_query += " AND c.created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -277,14 +298,19 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and c.created_at < :end"
sql_query += " AND c.created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += """
GROUP BY m.conversation_id) subquery
LEFT JOIN conversations c on c.id=subquery.conversation_id
GROUP BY date
ORDER BY date"""
GROUP BY m.conversation_id
) subquery
LEFT JOIN
conversations c
ON c.id = subquery.conversation_id
GROUP BY
date
ORDER BY
date"""
response_data = []
@ -307,17 +333,21 @@ class UserSatisfactionRateStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count
FROM messages m
LEFT JOIN message_feedbacks mf on mf.message_id=m.id and mf.rating='like'
WHERE m.app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(m.id) AS message_count,
COUNT(mf.id) AS feedback_count
FROM
messages m
LEFT JOIN
message_feedbacks mf
ON mf.message_id=m.id AND mf.rating='like'
WHERE
m.app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -330,7 +360,7 @@ class UserSatisfactionRateStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and m.created_at >= :start"
sql_query += " AND m.created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -340,10 +370,10 @@ class UserSatisfactionRateStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and m.created_at < :end"
sql_query += " AND m.created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -369,16 +399,17 @@ class AverageResponseTimeStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(provider_response_latency) as latency
FROM messages
WHERE app_id = :app_id
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
AVG(provider_response_latency) AS latency
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -391,7 +422,7 @@ class AverageResponseTimeStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -401,10 +432,10 @@ class AverageResponseTimeStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -425,17 +456,20 @@ class TokensPerSecondStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
CASE
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
CASE
WHEN SUM(provider_response_latency) = 0 THEN 0
ELSE (SUM(answer_tokens) / SUM(provider_response_latency))
END as tokens_per_second
FROM messages
WHERE app_id = :app_id"""
FROM
messages
WHERE
app_id = :app_id"""
arg_dict = {"tz": account.timezone, "app_id": app_model.id}
timezone = pytz.timezone(account.timezone)
@ -448,7 +482,7 @@ WHERE app_id = :app_id"""
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -458,10 +492,10 @@ WHERE app_id = :app_id"""
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
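All of these statistics handlers (and the workflow statistics below) build the query by appending " AND ..." fragments while collecting values in arg_dict as named bind parameters, so user input is never interpolated into the SQL string. A sketch of the execution step, which this diff does not show (the exact call is an assumption):

# Assumed execution of the assembled query; :tz, :app_id, :start, and :end
# are bound from arg_dict by name, keeping the concatenation injection-safe.
from sqlalchemy import text
from extensions.ext_database import db

with db.engine.begin() as conn:
    rows = conn.execute(text(sql_query), arg_dict)
response_data = [dict(row._mapping) for row in rows]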

View File

@ -166,6 +166,8 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
parser.add_argument("query", type=str, required=True, location="json", default="")
parser.add_argument("files", type=list, location="json")
parser.add_argument("conversation_id", type=uuid_value, location="json")
parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json")
args = parser.parse_args()
try:
@ -465,6 +467,6 @@ api.add_resource(
api.add_resource(PublishedWorkflowApi, "/apps/<uuid:app_id>/workflows/publish")
api.add_resource(DefaultBlockConfigsApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs")
api.add_resource(
DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs" "/<string:block_type>"
DefaultBlockConfigApi, "/apps/<uuid:app_id>/workflows/default-workflow-block-configs/<string:block_type>"
)
api.add_resource(ConvertToWorkflowApi, "/apps/<uuid:app_id>/convert-to-workflow")

View File

@ -11,7 +11,7 @@ from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import datetime_string
from libs.helper import DatetimeString
from libs.login import login_required
from models.model import AppMode
from models.workflow import WorkflowRunTriggeredFrom
@ -26,16 +26,18 @@ class WorkflowDailyRunsStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(id) AS runs
FROM
workflow_runs
WHERE
app_id = :app_id
AND triggered_from = :triggered_from"""
arg_dict = {
"tz": account.timezone,
"app_id": app_model.id,
@ -52,7 +54,7 @@ class WorkflowDailyRunsStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -62,10 +64,10 @@ class WorkflowDailyRunsStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -86,16 +88,18 @@ class WorkflowDailyTerminalsStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
COUNT(DISTINCT workflow_runs.created_by) AS terminal_count
FROM
workflow_runs
WHERE
app_id = :app_id
AND triggered_from = :triggered_from"""
arg_dict = {
"tz": account.timezone,
"app_id": app_model.id,
@ -112,7 +116,7 @@ class WorkflowDailyTerminalsStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -122,10 +126,10 @@ class WorkflowDailyTerminalsStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -146,18 +150,18 @@ class WorkflowDailyTokenCostStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT
date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
SUM(workflow_runs.total_tokens) as token_count
FROM workflow_runs
WHERE app_id = :app_id
AND triggered_from = :triggered_from
"""
sql_query = """SELECT
DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
SUM(workflow_runs.total_tokens) AS token_count
FROM
workflow_runs
WHERE
app_id = :app_id
AND triggered_from = :triggered_from"""
arg_dict = {
"tz": account.timezone,
"app_id": app_model.id,
@ -174,7 +178,7 @@ class WorkflowDailyTokenCostStatistic(Resource):
start_datetime_timezone = timezone.localize(start_datetime)
start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at >= :start"
sql_query += " AND created_at >= :start"
arg_dict["start"] = start_datetime_utc
if args["end"]:
@ -184,10 +188,10 @@ class WorkflowDailyTokenCostStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query += " and created_at < :end"
sql_query += " AND created_at < :end"
arg_dict["end"] = end_datetime_utc
sql_query += " GROUP BY date order by date"
sql_query += " GROUP BY date ORDER BY date"
response_data = []
@ -213,27 +217,31 @@ class WorkflowAverageAppInteractionStatistic(Resource):
account = current_user
parser = reqparse.RequestParser()
parser.add_argument("start", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=datetime_string("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("start", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
args = parser.parse_args()
sql_query = """
SELECT
AVG(sub.interactions) as interactions,
sub.date
FROM
(SELECT
date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
c.created_by,
COUNT(c.id) AS interactions
FROM workflow_runs c
WHERE c.app_id = :app_id
AND c.triggered_from = :triggered_from
{{start}}
{{end}}
GROUP BY date, c.created_by) sub
GROUP BY sub.date
"""
sql_query = """SELECT
AVG(sub.interactions) AS interactions,
sub.date
FROM
(
SELECT
DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
c.created_by,
COUNT(c.id) AS interactions
FROM
workflow_runs c
WHERE
c.app_id = :app_id
AND c.triggered_from = :triggered_from
{{start}}
{{end}}
GROUP BY
date, c.created_by
) sub
GROUP BY
sub.date"""
arg_dict = {
"tz": account.timezone,
"app_id": app_model.id,
@ -262,7 +270,7 @@ class WorkflowAverageAppInteractionStatistic(Resource):
end_datetime_timezone = timezone.localize(end_datetime)
end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
sql_query = sql_query.replace("{{end}}", " and c.created_at < :end")
sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end")
arg_dict["end"] = end_datetime_utc
else:
sql_query = sql_query.replace("{{end}}", "")

View File

@ -8,7 +8,7 @@ from constants.languages import supported_language
from controllers.console import api
from controllers.console.error import AlreadyActivateError
from extensions.ext_database import db
from libs.helper import email, str_len, timezone
from libs.helper import StrLen, email, timezone
from libs.password import hash_password, valid_password
from models.account import AccountStatus
from services.account_service import RegisterService
@ -37,7 +37,7 @@ class ActivateApi(Resource):
parser.add_argument("workspace_id", type=str, required=False, nullable=True, location="json")
parser.add_argument("email", type=email, required=False, nullable=True, location="json")
parser.add_argument("token", type=str, required=True, nullable=False, location="json")
parser.add_argument("name", type=str_len(30), required=True, nullable=False, location="json")
parser.add_argument("name", type=StrLen(30), required=True, nullable=False, location="json")
parser.add_argument("password", type=valid_password, required=True, nullable=False, location="json")
parser.add_argument(
"interface_language", type=supported_language, required=True, nullable=False, location="json"

View File

@ -71,7 +71,7 @@ class OAuthCallback(Resource):
account = _generate_account(provider, user_info)
# Check account status
if account.status == AccountStatus.BANNED.value or account.status == AccountStatus.CLOSED.value:
if account.status in {AccountStatus.BANNED.value, AccountStatus.CLOSED.value}:
return {"error": "Account is banned or closed."}, 403
if account.status == AccountStatus.PENDING.value:
@ -101,7 +101,7 @@ def _generate_account(provider: str, user_info: OAuthUserInfo):
if not account:
# Create account
account_name = user_info.name if user_info.name else "Dify"
account_name = user_info.name or "Dify"
account = RegisterService.register(
email=user_info.email, name=account_name, password=None, open_id=user_info.id, provider=provider
)

View File

@ -18,7 +18,7 @@ from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrival_methods import RetrievalMethod
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
@ -49,7 +49,7 @@ class DatasetListApi(Resource):
page = request.args.get("page", default=1, type=int)
limit = request.args.get("limit", default=20, type=int)
ids = request.args.getlist("ids")
provider = request.args.get("provider", default="vendor")
# provider = request.args.get("provider", default="vendor")
search = request.args.get("keyword", default=None, type=str)
tag_ids = request.args.getlist("tag_ids")
@ -57,7 +57,7 @@ class DatasetListApi(Resource):
datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
else:
datasets, total = DatasetService.get_datasets(
page, limit, provider, current_user.current_tenant_id, current_user, search, tag_ids
page, limit, current_user.current_tenant_id, current_user, search, tag_ids
)
# check embedding setting
@ -110,6 +110,26 @@ class DatasetListApi(Resource):
nullable=True,
help="Invalid indexing technique.",
)
parser.add_argument(
"external_knowledge_api_id",
type=str,
nullable=True,
required=False,
)
parser.add_argument(
"provider",
type=str,
nullable=True,
choices=Dataset.PROVIDER_LIST,
required=False,
default="vendor",
)
parser.add_argument(
"external_knowledge_id",
type=str,
nullable=True,
required=False,
)
args = parser.parse_args()
# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
@ -122,6 +142,10 @@ class DatasetListApi(Resource):
name=args["name"],
indexing_technique=args["indexing_technique"],
account=current_user,
permission=DatasetPermissionEnum.ONLY_ME,
provider=args["provider"],
external_knowledge_api_id=args["external_knowledge_api_id"],
external_knowledge_id=args["external_knowledge_id"],
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()
@ -210,6 +234,33 @@ class DatasetApi(Resource):
)
parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
parser.add_argument("partial_member_list", type=list, location="json", help="Invalid parent user list.")
parser.add_argument(
"external_retrieval_model",
type=dict,
required=False,
nullable=True,
location="json",
help="Invalid external retrieval model.",
)
parser.add_argument(
"external_knowledge_id",
type=str,
required=False,
nullable=True,
location="json",
help="Invalid external knowledge id.",
)
parser.add_argument(
"external_knowledge_api_id",
type=str,
required=False,
nullable=True,
location="json",
help="Invalid external knowledge api id.",
)
args = parser.parse_args()
data = request.get_json()
@ -398,7 +449,7 @@ class DatasetIndexingEstimateApi(Resource):
)
except LLMBadRequestError:
raise ProviderNotInitializeError(
"No Embedding Model available. Please configure a valid provider " "in the Settings -> Model Provider."
"No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@ -549,12 +600,7 @@ class DatasetApiBaseUrlApi(Resource):
@login_required
@account_initialization_required
def get(self):
return {
"api_base_url": (
dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL else request.host_url.rstrip("/")
)
+ "/v1"
}
return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
class DatasetRetrievalSettingApi(Resource):

View File

@ -302,6 +302,8 @@ class DatasetInitApi(Resource):
"doc_language", type=str, default="English", required=False, nullable=False, location="json"
)
parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
parser.add_argument("embedding_model", type=str, required=False, nullable=True, location="json")
parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
args = parser.parse_args()
# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
@ -309,6 +311,8 @@ class DatasetInitApi(Resource):
raise Forbidden()
if args["indexing_technique"] == "high_quality":
if args["embedding_model"] is None or args["embedding_model_provider"] is None:
raise ValueError("embedding model and embedding model provider are required for high quality indexing.")
try:
model_manager = ModelManager()
model_manager.get_default_model_instance(
@ -350,7 +354,7 @@ class DocumentIndexingEstimateApi(DocumentResource):
document_id = str(document_id)
document = self.get_document(dataset_id, document_id)
if document.indexing_status in ["completed", "error"]:
if document.indexing_status in {"completed", "error"}:
raise DocumentAlreadyFinishedError()
data_process_rule = document.dataset_process_rule
@ -417,7 +421,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
info_list = []
extract_settings = []
for document in documents:
if document.indexing_status in ["completed", "error"]:
if document.indexing_status in {"completed", "error"}:
raise DocumentAlreadyFinishedError()
data_source_info = document.data_source_info_dict
# format document files info
@ -661,7 +665,7 @@ class DocumentProcessingApi(DocumentResource):
db.session.commit()
elif action == "resume":
if document.indexing_status not in ["paused", "error"]:
if document.indexing_status not in {"paused", "error"}:
raise InvalidActionError("Document not in paused or error state.")
document.paused_by = None

View File

@ -0,0 +1,282 @@
from flask import request
from flask_login import current_user
from flask_restful import Resource, marshal, reqparse
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.dataset_fields import dataset_detail_fields
from libs.login import login_required
from services.dataset_service import DatasetService
from services.external_knowledge_service import ExternalDatasetService
from services.hit_testing_service import HitTestingService
def _validate_name(name):
if not name or len(name) > 100:
raise ValueError("Name must be between 1 and 100 characters.")
return name
def _validate_description_length(description):
if description and len(description) > 400:
raise ValueError("Description cannot exceed 400 characters.")
return description
class ExternalApiTemplateListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
page = request.args.get("page", default=1, type=int)
limit = request.args.get("limit", default=20, type=int)
search = request.args.get("keyword", default=None, type=str)
external_knowledge_apis, total = ExternalDatasetService.get_external_knowledge_apis(
page, limit, current_user.current_tenant_id, search
)
response = {
"data": [item.to_dict() for item in external_knowledge_apis],
"has_more": len(external_knowledge_apis) == limit,
"limit": limit,
"total": total,
"page": page,
}
return response, 200
@setup_required
@login_required
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument(
"name",
nullable=False,
required=True,
help="Name is required. Name must be between 1 to 100 characters.",
type=_validate_name,
)
parser.add_argument(
"settings",
type=dict,
location="json",
nullable=False,
required=True,
)
args = parser.parse_args()
ExternalDatasetService.validate_api_list(args["settings"])
# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
if not current_user.is_dataset_editor:
raise Forbidden()
try:
external_knowledge_api = ExternalDatasetService.create_external_knowledge_api(
tenant_id=current_user.current_tenant_id, user_id=current_user.id, args=args
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()
return external_knowledge_api.to_dict(), 201
class ExternalApiTemplateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, external_knowledge_api_id):
external_knowledge_api_id = str(external_knowledge_api_id)
external_knowledge_api = ExternalDatasetService.get_external_knowledge_api(external_knowledge_api_id)
if external_knowledge_api is None:
raise NotFound("API template not found.")
return external_knowledge_api.to_dict(), 200
@setup_required
@login_required
@account_initialization_required
def patch(self, external_knowledge_api_id):
external_knowledge_api_id = str(external_knowledge_api_id)
parser = reqparse.RequestParser()
parser.add_argument(
"name",
nullable=False,
required=True,
help="type is required. Name must be between 1 to 100 characters.",
type=_validate_name,
)
parser.add_argument(
"settings",
type=dict,
location="json",
nullable=False,
required=True,
)
args = parser.parse_args()
ExternalDatasetService.validate_api_list(args["settings"])
external_knowledge_api = ExternalDatasetService.update_external_knowledge_api(
tenant_id=current_user.current_tenant_id,
user_id=current_user.id,
external_knowledge_api_id=external_knowledge_api_id,
args=args,
)
return external_knowledge_api.to_dict(), 200
@setup_required
@login_required
@account_initialization_required
def delete(self, external_knowledge_api_id):
external_knowledge_api_id = str(external_knowledge_api_id)
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor or current_user.is_dataset_operator:
raise Forbidden()
ExternalDatasetService.delete_external_knowledge_api(current_user.current_tenant_id, external_knowledge_api_id)
return {"result": "success"}, 200
class ExternalApiUseCheckApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, external_knowledge_api_id):
external_knowledge_api_id = str(external_knowledge_api_id)
external_knowledge_api_is_using, count = ExternalDatasetService.external_knowledge_api_use_check(
external_knowledge_api_id
)
return {"is_using": external_knowledge_api_is_using, "count": count}, 200
class ExternalDatasetInitApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument("external_knowledge_api_id", type=str, required=True, nullable=True, location="json")
# parser.add_argument('name', nullable=False, required=True,
# help='name is required. Name must be between 1 to 100 characters.',
# type=_validate_name)
# parser.add_argument('description', type=str, required=True, nullable=True, location='json')
parser.add_argument("data_source", type=dict, required=True, nullable=True, location="json")
parser.add_argument("process_parameter", type=dict, required=True, nullable=True, location="json")
args = parser.parse_args()
# The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
if not current_user.is_dataset_editor:
raise Forbidden()
# validate args
ExternalDatasetService.document_create_args_validate(
current_user.current_tenant_id, args["external_knowledge_api_id"], args["process_parameter"]
)
try:
dataset, documents, batch = ExternalDatasetService.init_external_dataset(
tenant_id=current_user.current_tenant_id,
user_id=current_user.id,
args=args,
)
except Exception as ex:
raise ProviderNotInitializeError(ex.description)
response = {"dataset": dataset, "documents": documents, "batch": batch}
return response
class ExternalDatasetCreateApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument("external_knowledge_api_id", type=str, required=True, nullable=False, location="json")
parser.add_argument("external_knowledge_id", type=str, required=True, nullable=False, location="json")
parser.add_argument(
"name",
nullable=False,
required=True,
help="name is required. Name must be between 1 to 100 characters.",
type=_validate_name,
)
parser.add_argument("description", type=str, required=False, nullable=True, location="json")
parser.add_argument("external_retrieval_model", type=dict, required=False, location="json")
args = parser.parse_args()
# The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
if not current_user.is_dataset_editor:
raise Forbidden()
try:
dataset = ExternalDatasetService.create_external_dataset(
tenant_id=current_user.current_tenant_id,
user_id=current_user.id,
args=args,
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()
return marshal(dataset, dataset_detail_fields), 201
class ExternalKnowledgeHitTestingApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
try:
DatasetService.check_dataset_permission(dataset, current_user)
except services.errors.account.NoPermissionError as e:
raise Forbidden(str(e))
parser = reqparse.RequestParser()
parser.add_argument("query", type=str, location="json")
parser.add_argument("external_retrieval_model", type=dict, required=False, location="json")
args = parser.parse_args()
HitTestingService.hit_testing_args_check(args)
try:
response = HitTestingService.external_retrieve(
dataset=dataset,
query=args["query"],
account=current_user,
external_retrieval_model=args["external_retrieval_model"],
)
return response
except Exception as e:
raise InternalServerError(str(e))
api.add_resource(ExternalKnowledgeHitTestingApi, "/datasets/<uuid:dataset_id>/external-hit-testing")
api.add_resource(ExternalDatasetCreateApi, "/datasets/external")
api.add_resource(ExternalApiTemplateListApi, "/datasets/external-knowledge-api")
api.add_resource(ExternalApiTemplateApi, "/datasets/external-knowledge-api/<uuid:external_knowledge_api_id>")
api.add_resource(ExternalApiUseCheckApi, "/datasets/external-knowledge-api/<uuid:external_knowledge_api_id>/use-check")
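
(For orientation, a sketch of how a console client could exercise the routes registered above. The base URL, bearer-token auth scheme, and the keys inside "settings" are illustrative assumptions, not part of this diff:)

import requests

BASE = "http://localhost:5001/console/api"  # assumed console API prefix
HEADERS = {"Authorization": "Bearer <console-session-token>"}  # assumed auth

# Create an external knowledge API template (name + settings are required by the parser).
resp = requests.post(
    f"{BASE}/datasets/external-knowledge-api",
    json={
        "name": "my-retrieval-service",
        "settings": {"endpoint": "https://example.com/retrieval", "api_key": "sk-..."},  # assumed keys
    },
    headers=HEADERS,
)
template = resp.json()

# Bind the template to a new external dataset.
requests.post(
    f"{BASE}/datasets/external",
    json={
        "external_knowledge_api_id": template["id"],
        "external_knowledge_id": "kb-123",
        "name": "contracts-kb",
    },
    headers=HEADERS,
)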

View File

@ -39,7 +39,7 @@ class FileApi(Resource):
@login_required
@account_initialization_required
@marshal_with(file_fields)
@cloud_edition_billing_resource_check(resource="documents")
@cloud_edition_billing_resource_check("documents")
def post(self):
# get file from request
file = request.files["file"]

View File

@ -47,6 +47,7 @@ class HitTestingApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument("query", type=str, location="json")
parser.add_argument("retrieval_model", type=dict, required=False, location="json")
parser.add_argument("external_retrieval_model", type=dict, required=False, location="json")
args = parser.parse_args()
HitTestingService.hit_testing_args_check(args)
@ -57,6 +58,7 @@ class HitTestingApi(Resource):
query=args["query"],
account=current_user,
retrieval_model=args["retrieval_model"],
external_retrieval_model=args["external_retrieval_model"],
limit=10,
)
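
(The new external_retrieval_model argument simply travels alongside the existing body; a hit-testing request could now look like the following, where the field values and the keys inside both retrieval dicts are illustrative assumptions:)

hit_testing_body = {
    "query": "termination clause",
    "retrieval_model": {"search_method": "semantic_search", "top_k": 5},  # assumed keys
    "external_retrieval_model": {"top_k": 5, "score_threshold": 0.5},  # assumed keys
}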

View File

@ -0,0 +1,33 @@
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from libs.login import login_required
from services.external_knowledge_service import ExternalDatasetService
class TestExternalApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("retrieval_setting", nullable=False, required=True, type=dict, location="json")
parser.add_argument(
"query",
nullable=False,
required=True,
type=str,
)
parser.add_argument(
"knowledge_id",
nullable=False,
required=True,
type=str,
)
args = parser.parse_args()
result = ExternalDatasetService.test_external_knowledge_retrieval(
args["retrieval_setting"], args["query"], args["knowledge_id"]
)
return result, 200
api.add_resource(TestExternalApi, "/retrieval")
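
(The parser above pins down the request contract for the new endpoint. A client sketch follows; the host, URL prefix, and the keys inside retrieval_setting are assumptions:)

import requests

body = {
    "retrieval_setting": {"top_k": 3, "score_threshold": 0.6},  # assumed keys
    "query": "What is the refund policy?",
    "knowledge_id": "kb-123",
}
resp = requests.post("http://localhost:5001/console/api/retrieval", json=body)  # assumed prefix
print(resp.status_code, resp.json())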

View File

@ -18,9 +18,7 @@ class NotSetupError(BaseHTTPException):
class NotInitValidateError(BaseHTTPException):
error_code = "not_init_validated"
description = (
"Init validation has not been completed yet. " "Please proceed with the init validation process first."
)
description = "Init validation has not been completed yet. Please proceed with the init validation process first."
code = 401

View File

@ -81,19 +81,15 @@ class ChatTextApi(InstalledAppResource):
message_id = args.get("message_id", None)
text = args.get("text", None)
if (
app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
and app_model.workflow
and app_model.workflow.features_dict
):
text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
voice = args.get("voice") or text_to_speech.get("voice")
else:
try:
voice = (
args.get("voice")
if args.get("voice")
else app_model.app_model_config.text_to_speech_dict.get("voice")
)
voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
except Exception:
voice = None
response = AudioService.transcript_tts(app_model=app_model, message_id=message_id, voice=voice, text=text)
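
(The `x if x else y` to `x or y` rewrites in this hunk are behavior-preserving, since both forms branch on the truthiness of the same value. One nuance worth noting in a sketch:)

voice_arg = ""            # an explicitly empty voice from the request
default_voice = "alloy"   # hypothetical fallback value

# Identical results, because both expressions test the truthiness of voice_arg:
assert (voice_arg if voice_arg else default_voice) == (voice_arg or default_voice)

# Note that both forms treat "" (and 0, [], {}) as "missing" and fall back to the default.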

View File

@ -92,7 +92,7 @@ class ChatApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -100,6 +100,7 @@ class ChatApi(InstalledAppResource):
parser.add_argument("query", type=str, required=True, location="json")
parser.add_argument("files", type=list, required=False, location="json")
parser.add_argument("conversation_id", type=uuid_value, location="json")
parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json")
parser.add_argument("retriever_from", type=str, required=False, default="explore_app", location="json")
args = parser.parse_args()
@ -140,7 +141,7 @@ class ChatStopApi(InstalledAppResource):
def post(self, installed_app, task_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.EXPLORE, current_user.id)

View File

@ -20,7 +20,7 @@ class ConversationListApi(InstalledAppResource):
def get(self, installed_app):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -50,7 +50,7 @@ class ConversationApi(InstalledAppResource):
def delete(self, installed_app, c_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -68,7 +68,7 @@ class ConversationRenameApi(InstalledAppResource):
def post(self, installed_app, c_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -90,7 +90,7 @@ class ConversationPinApi(InstalledAppResource):
def patch(self, installed_app, c_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -107,7 +107,7 @@ class ConversationUnPinApi(InstalledAppResource):
def patch(self, installed_app, c_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)

View File

@ -31,10 +31,11 @@ class InstalledAppsListApi(Resource):
"app_owner_tenant_id": installed_app.app_owner_tenant_id,
"is_pinned": installed_app.is_pinned,
"last_used_at": installed_app.last_used_at,
"editable": current_user.role in ["owner", "admin"],
"editable": current_user.role in {"owner", "admin"},
"uninstallable": current_tenant_id == installed_app.app_owner_tenant_id,
}
for installed_app in installed_apps
if installed_app.app is not None
]
installed_apps.sort(
key=lambda app: (

View File

@ -40,7 +40,7 @@ class MessageListApi(InstalledAppResource):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -51,7 +51,7 @@ class MessageListApi(InstalledAppResource):
try:
return MessageService.pagination_by_first_id(
app_model, current_user, args["conversation_id"], args["first_id"], args["limit"]
app_model, current_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
@ -125,7 +125,7 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
def get(self, installed_app, message_id):
app_model = installed_app.app
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
message_id = str(message_id)

View File

@ -43,7 +43,7 @@ class AppParameterApi(InstalledAppResource):
"""Retrieve app parameters."""
app_model = installed_app.app
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()

View File

@ -4,7 +4,7 @@ from flask import session
from flask_restful import Resource, reqparse
from configs import dify_config
from libs.helper import str_len
from libs.helper import StrLen
from models.model import DifySetup
from services.account_service import TenantService
@ -28,7 +28,7 @@ class InitValidateAPI(Resource):
raise AlreadySetupError()
parser = reqparse.RequestParser()
parser.add_argument("password", type=str_len(30), required=True, location="json")
parser.add_argument("password", type=StrLen(30), required=True, location="json")
input_password = parser.parse_args()["password"]
if input_password != os.environ.get("INIT_PASSWORD"):
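
(str_len was a validator factory function; StrLen is its class-based replacement in libs.helper. reqparse only requires that `type` be a callable which returns the parsed value or raises ValueError, so a minimal sketch of the class form — an assumption, not the shipped code — is:)

class StrLen:
    """Callable reqparse `type` that accepts strings up to a maximum length."""

    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, value: str) -> str:
        if not isinstance(value, str) or len(value) > self.max_length:
            raise ValueError(f"Must be a string of at most {self.max_length} characters.")
        return value

(Used exactly as in the hunk above: parser.add_argument("password", type=StrLen(30), required=True, location="json").)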

View File

@ -4,7 +4,7 @@ from flask import request
from flask_restful import Resource, reqparse
from configs import dify_config
from libs.helper import email, get_remote_ip, str_len
from libs.helper import StrLen, email, get_remote_ip
from libs.password import valid_password
from models.model import DifySetup
from services.account_service import RegisterService, TenantService
@ -40,7 +40,7 @@ class SetupApi(Resource):
parser = reqparse.RequestParser()
parser.add_argument("email", type=email, required=True, location="json")
parser.add_argument("name", type=str_len(30), required=True, location="json")
parser.add_argument("name", type=StrLen(30), required=True, location="json")
parser.add_argument("password", type=valid_password, required=True, location="json")
args = parser.parse_args()

View File

@ -13,7 +13,7 @@ from services.tag_service import TagService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 40:
if not name or len(name) < 1 or len(name) > 50:
raise ValueError("Name must be between 1 to 50 characters.")
return name

View File

@ -218,7 +218,7 @@ api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-provider
api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
api.add_resource(
ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/" "<string:icon_type>/<string:lang>"
ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
)
api.add_resource(

View File

@ -327,7 +327,7 @@ class ToolApiProviderPreviousTestApi(Resource):
return ApiToolManageService.test_api_tool_preview(
current_user.current_tenant_id,
args["provider_name"] if args["provider_name"] else "",
args["provider_name"] or "",
args["tool_name"],
args["credentials"],
args["parameters"],

View File

@ -194,7 +194,7 @@ class WebappLogoWorkspaceApi(Resource):
raise TooManyFilesError()
extension = file.filename.split(".")[-1]
if extension.lower() not in ["svg", "png"]:
if extension.lower() not in {"svg", "png"}:
raise UnsupportedFileTypeError()
try:

View File

@ -46,9 +46,7 @@ def only_edition_self_hosted(view):
return decorated
def cloud_edition_billing_resource_check(
resource: str, error_msg: str = "You have reached the limit of your subscription."
):
def cloud_edition_billing_resource_check(resource: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -60,22 +58,23 @@ def cloud_edition_billing_resource_check(
documents_upload_quota = features.documents_upload_quota
annotation_quota_limit = features.annotation_quota_limit
if resource == "members" and 0 < members.limit <= members.size:
abort(403, error_msg)
abort(403, "The number of members has reached the limit of your subscription.")
elif resource == "apps" and 0 < apps.limit <= apps.size:
abort(403, error_msg)
abort(403, "The number of apps has reached the limit of your subscription.")
elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
abort(403, error_msg)
abort(403, "The capacity of the vector space has reached the limit of your subscription.")
elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
# The api of file upload is used in the multiple places, so we need to check the source of the request from datasets
# The api of file upload is used in the multiple places,
# so we need to check the source of the request from datasets
source = request.args.get("source")
if source == "datasets":
abort(403, error_msg)
abort(403, "The number of documents has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
elif resource == "workspace_custom" and not features.can_replace_logo:
abort(403, error_msg)
abort(403, "The workspace custom feature has reached the limit of your subscription.")
elif resource == "annotation" and 0 < annotation_quota_limit.limit < annotation_quota_limit.size:
abort(403, error_msg)
abort(403, "The annotation quota has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
@ -86,10 +85,7 @@ def cloud_edition_billing_resource_check(
return interceptor
def cloud_edition_billing_knowledge_limit_check(
resource: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
):
def cloud_edition_billing_knowledge_limit_check(resource: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -97,7 +93,10 @@ def cloud_edition_billing_knowledge_limit_check(
if features.billing.enabled:
if resource == "add_segment":
if features.billing.subscription.plan == "sandbox":
abort(403, error_msg)
abort(
403,
"To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
)
else:
return view(*args, **kwargs)
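
(Both billing decorators drop the shared error_msg parameter in favor of per-resource messages inlined at each abort site. The overall shape — a factory capturing the resource name around a @wraps-preserving interceptor — looks like this sketch, with the quota lookup stubbed out as a hypothetical helper:)

from functools import wraps

from flask import abort


def members_limit_reached() -> bool:
    return False  # stand-in for the FeatureService quota lookup


def cloud_edition_billing_resource_check(resource: str):
    def interceptor(view):
        @wraps(view)  # keep the view's name/docstring intact for Flask's routing machinery
        def decorated(*args, **kwargs):
            if resource == "members" and members_limit_reached():
                abort(403, "The number of members has reached the limit of your subscription.")
            return view(*args, **kwargs)

        return decorated

    return interceptor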

View File

@ -42,7 +42,7 @@ class AppParameterApi(Resource):
@marshal_with(parameters_fields)
def get(self, app_model: App):
"""Retrieve app parameters."""
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()

View File

@ -79,19 +79,15 @@ class TextApi(Resource):
message_id = args.get("message_id", None)
text = args.get("text", None)
if (
app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
and app_model.workflow
and app_model.workflow.features_dict
):
text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
voice = args.get("voice") or text_to_speech.get("voice")
else:
try:
voice = (
args.get("voice")
if args.get("voice")
else app_model.app_model_config.text_to_speech_dict.get("voice")
)
voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
except Exception:
voice = None
response = AudioService.transcript_tts(

View File

@ -96,7 +96,7 @@ class ChatApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -144,7 +144,7 @@ class ChatStopApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
def post(self, app_model: App, end_user: EndUser, task_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

View File

@ -18,7 +18,7 @@ class ConversationApi(Resource):
@marshal_with(conversation_infinite_scroll_pagination_fields)
def get(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -52,7 +52,7 @@ class ConversationDetailApi(Resource):
@marshal_with(simple_conversation_fields)
def delete(self, app_model: App, end_user: EndUser, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -69,7 +69,7 @@ class ConversationRenameApi(Resource):
@marshal_with(simple_conversation_fields)
def post(self, app_model: App, end_user: EndUser, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)

View File

@ -54,6 +54,7 @@ class MessageListApi(Resource):
message_fields = {
"id": fields.String,
"conversation_id": fields.String,
"parent_message_id": fields.String,
"inputs": fields.Raw,
"query": fields.String,
"answer": fields.String(attribute="re_sign_file_url_answer"),
@ -76,7 +77,7 @@ class MessageListApi(Resource):
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, app_model: App, end_user: EndUser):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -117,7 +118,7 @@ class MessageSuggestedApi(Resource):
def get(self, app_model: App, end_user: EndUser, message_id):
message_id = str(message_id)
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
try:

View File

@ -1,6 +1,7 @@
import logging
from flask_restful import Resource, fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import InternalServerError
from controllers.service_api import api
@ -22,10 +23,12 @@ from core.errors.error import (
)
from core.model_runtime.errors.invoke import InvokeError
from extensions.ext_database import db
from fields.workflow_app_log_fields import workflow_app_log_pagination_fields
from libs import helper
from models.model import App, AppMode, EndUser
from models.workflow import WorkflowRun
from services.app_generate_service import AppGenerateService
from services.workflow_app_service import WorkflowAppService
logger = logging.getLogger(__name__)
@ -113,6 +116,30 @@ class WorkflowTaskStopApi(Resource):
return {"result": "success"}
class WorkflowAppLogApi(Resource):
@validate_app_token
@marshal_with(workflow_app_log_pagination_fields)
def get(self, app_model: App):
"""
Get workflow app logs
"""
parser = reqparse.RequestParser()
parser.add_argument("keyword", type=str, location="args")
parser.add_argument("status", type=str, choices=["succeeded", "failed", "stopped"], location="args")
parser.add_argument("page", type=int_range(1, 99999), default=1, location="args")
parser.add_argument("limit", type=int_range(1, 100), default=20, location="args")
args = parser.parse_args()
# get paginate workflow app logs
workflow_app_service = WorkflowAppService()
workflow_app_log_pagination = workflow_app_service.get_paginate_workflow_app_logs(
app_model=app_model, args=args
)
return workflow_app_log_pagination
api.add_resource(WorkflowRunApi, "/workflows/run")
api.add_resource(WorkflowRunDetailApi, "/workflows/run/<string:workflow_id>")
api.add_resource(WorkflowTaskStopApi, "/workflows/tasks/<string:task_id>/stop")
api.add_resource(WorkflowAppLogApi, "/workflows/logs")
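
(A sketch of calling the new log endpoint from a client. The host, service-API prefix, and token scheme are assumptions; the query parameters mirror the parser above:)

import requests

resp = requests.get(
    "http://localhost:5001/v1/workflows/logs",  # assumed service-api prefix
    headers={"Authorization": "Bearer app-<token>"},  # assumed app-token scheme
    params={"status": "failed", "page": 1, "limit": 20},
)
# "data" key assumed from workflow_app_log_pagination_fields
for log in resp.json().get("data", []):
    print(log.get("id"), log.get("created_at"))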

View File

@ -28,11 +28,11 @@ class DatasetListApi(DatasetApiResource):
page = request.args.get("page", default=1, type=int)
limit = request.args.get("limit", default=20, type=int)
provider = request.args.get("provider", default="vendor")
# provider = request.args.get("provider", default="vendor")
search = request.args.get("keyword", default=None, type=str)
tag_ids = request.args.getlist("tag_ids")
datasets, total = DatasetService.get_datasets(page, limit, provider, tenant_id, current_user, search, tag_ids)
datasets, total = DatasetService.get_datasets(page, limit, tenant_id, current_user, search, tag_ids)
# check embedding setting
provider_manager = ProviderManager()
configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)
@ -82,6 +82,26 @@ class DatasetListApi(DatasetApiResource):
required=False,
nullable=False,
)
parser.add_argument(
"external_knowledge_api_id",
type=str,
nullable=True,
required=False,
default="_validate_name",
)
parser.add_argument(
"provider",
type=str,
nullable=True,
required=False,
default="vendor",
)
parser.add_argument(
"external_knowledge_id",
type=str,
nullable=True,
required=False,
)
args = parser.parse_args()
try:
@ -91,6 +111,9 @@ class DatasetListApi(DatasetApiResource):
indexing_technique=args["indexing_technique"],
account=current_user,
permission=args["permission"],
provider=args["provider"],
external_knowledge_api_id=args["external_knowledge_api_id"],
external_knowledge_id=args["external_knowledge_id"],
)
except services.errors.dataset.DatasetNameDuplicateError:
raise DatasetNameDuplicateError()
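
(With the new arguments, a service-API dataset-creation body for an external provider could look like the following; all values are illustrative:)

create_body = {
    "name": "contracts-kb",
    "provider": "external",
    "external_knowledge_api_id": "<api-template-uuid>",
    "external_knowledge_id": "kb-123",
}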

View File

@ -36,6 +36,10 @@ class SegmentApi(DatasetApiResource):
document = DocumentService.get_document(dataset.id, document_id)
if not document:
raise NotFound("Document not found.")
if document.indexing_status != "completed":
raise NotFound("Document is not completed.")
if not document.enabled:
raise NotFound("Document is disabled.")
# check embedding model setting
if dataset.indexing_technique == "high_quality":
try:
@ -63,7 +67,7 @@ class SegmentApi(DatasetApiResource):
segments = SegmentService.multi_create_segment(args["segments"], document, dataset)
return {"data": marshal(segments, segment_fields), "doc_form": document.doc_form}, 200
else:
return {"error": "Segemtns is required"}, 400
return {"error": "Segments is required"}, 400
def get(self, tenant_id, dataset_id, document_id):
"""Create single segment."""

View File

@ -83,9 +83,7 @@ def validate_app_token(view: Optional[Callable] = None, *, fetch_user_arg: Optio
return decorator(view)
def cloud_edition_billing_resource_check(
resource: str, api_token_type: str, error_msg: str = "You have reached the limit of your subscription."
):
def cloud_edition_billing_resource_check(resource: str, api_token_type: str):
def interceptor(view):
def decorated(*args, **kwargs):
api_token = validate_and_get_api_token(api_token_type)
@ -98,13 +96,13 @@ def cloud_edition_billing_resource_check(
documents_upload_quota = features.documents_upload_quota
if resource == "members" and 0 < members.limit <= members.size:
raise Forbidden(error_msg)
raise Forbidden("The number of members has reached the limit of your subscription.")
elif resource == "apps" and 0 < apps.limit <= apps.size:
raise Forbidden(error_msg)
raise Forbidden("The number of apps has reached the limit of your subscription.")
elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
raise Forbidden(error_msg)
raise Forbidden("The capacity of the vector space has reached the limit of your subscription.")
elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
raise Forbidden(error_msg)
raise Forbidden("The number of documents has reached the limit of your subscription.")
else:
return view(*args, **kwargs)
@ -115,11 +113,7 @@ def cloud_edition_billing_resource_check(
return interceptor
def cloud_edition_billing_knowledge_limit_check(
resource: str,
api_token_type: str,
error_msg: str = "To unlock this feature and elevate your Dify experience, please upgrade to a paid plan.",
):
def cloud_edition_billing_knowledge_limit_check(resource: str, api_token_type: str):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
@ -128,7 +122,9 @@ def cloud_edition_billing_knowledge_limit_check(
if features.billing.enabled:
if resource == "add_segment":
if features.billing.subscription.plan == "sandbox":
raise Forbidden(error_msg)
raise Forbidden(
"To unlock this feature and elevate your Dify experience, please upgrade to a paid plan."
)
else:
return view(*args, **kwargs)

View File

@ -41,7 +41,7 @@ class AppParameterApi(WebApiResource):
@marshal_with(parameters_fields)
def get(self, app_model: App, end_user):
"""Retrieve app parameters."""
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
workflow = app_model.workflow
if workflow is None:
raise AppUnavailableError()

View File

@ -78,19 +78,15 @@ class TextApi(WebApiResource):
message_id = args.get("message_id", None)
text = args.get("text", None)
if (
app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]
app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
and app_model.workflow
and app_model.workflow.features_dict
):
text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
voice = args.get("voice") if args.get("voice") else text_to_speech.get("voice")
voice = args.get("voice") or text_to_speech.get("voice")
else:
try:
voice = (
args.get("voice")
if args.get("voice")
else app_model.app_model_config.text_to_speech_dict.get("voice")
)
voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
except Exception:
voice = None

View File

@ -87,7 +87,7 @@ class CompletionStopApi(WebApiResource):
class ChatApi(WebApiResource):
def post(self, app_model, end_user):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -96,6 +96,7 @@ class ChatApi(WebApiResource):
parser.add_argument("files", type=list, required=False, location="json")
parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
parser.add_argument("conversation_id", type=uuid_value, location="json")
parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json")
parser.add_argument("retriever_from", type=str, required=False, default="web_app", location="json")
args = parser.parse_args()
@ -136,7 +137,7 @@ class ChatApi(WebApiResource):
class ChatStopApi(WebApiResource):
def post(self, app_model, end_user, task_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.WEB_APP, end_user.id)

View File

@ -18,7 +18,7 @@ class ConversationListApi(WebApiResource):
@marshal_with(conversation_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -56,7 +56,7 @@ class ConversationListApi(WebApiResource):
class ConversationApi(WebApiResource):
def delete(self, app_model, end_user, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -73,7 +73,7 @@ class ConversationRenameApi(WebApiResource):
@marshal_with(simple_conversation_fields)
def post(self, app_model, end_user, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -92,7 +92,7 @@ class ConversationRenameApi(WebApiResource):
class ConversationPinApi(WebApiResource):
def patch(self, app_model, end_user, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)
@ -108,7 +108,7 @@ class ConversationPinApi(WebApiResource):
class ConversationUnPinApi(WebApiResource):
def patch(self, app_model, end_user, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
conversation_id = str(c_id)

View File

@ -57,6 +57,7 @@ class MessageListApi(WebApiResource):
message_fields = {
"id": fields.String,
"conversation_id": fields.String,
"parent_message_id": fields.String,
"inputs": fields.Raw,
"query": fields.String,
"answer": fields.String(attribute="re_sign_file_url_answer"),
@ -78,7 +79,7 @@ class MessageListApi(WebApiResource):
@marshal_with(message_infinite_scroll_pagination_fields)
def get(self, app_model, end_user):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
parser = reqparse.RequestParser()
@ -89,7 +90,7 @@ class MessageListApi(WebApiResource):
try:
return MessageService.pagination_by_first_id(
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"]
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
)
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
@ -160,7 +161,7 @@ class MessageMoreLikeThisApi(WebApiResource):
class MessageSuggestedQuestionApi(WebApiResource):
def get(self, app_model, end_user, message_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]:
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
raise NotChatAppError()
message_id = str(message_id)

View File

@ -39,6 +39,7 @@ class AppSiteApi(WebApiResource):
"default_language": fields.String,
"prompt_public": fields.Boolean,
"show_workflow_steps": fields.Boolean,
"use_icon_as_answer_icon": fields.Boolean,
}
app_fields = {

View File

@ -80,7 +80,8 @@ def _validate_web_sso_token(decoded, system_features, app_code):
if not source or source != "sso":
raise WebSSOAuthRequiredError()
# Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login
# Check if SSO is not enforced for web, and if the token source is SSO,
# raise an error and redirect to normal passport login
if not system_features.sso_enforced_for_web or not app_web_sso_enabled:
source = decoded.get("token_source")
if source and source == "sso":

View File

@ -1 +1 @@
import core.moderation.base
import core.moderation.base

View File

@ -1,6 +1,7 @@
import json
import logging
import uuid
from collections.abc import Mapping, Sequence
from datetime import datetime, timezone
from typing import Optional, Union, cast
@ -31,6 +32,7 @@ from core.model_runtime.entities.message_entities import (
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.utils.extract_thread_messages import extract_thread_messages
from core.tools.entities.tool_entities import (
ToolParameter,
ToolRuntimeVariablePool,
@ -45,22 +47,25 @@ from models.tools import ToolConversationVariables
logger = logging.getLogger(__name__)
class BaseAgentRunner(AppRunner):
def __init__(self, tenant_id: str,
application_generate_entity: AgentChatAppGenerateEntity,
conversation: Conversation,
app_config: AgentChatAppConfig,
model_config: ModelConfigWithCredentialsEntity,
config: AgentEntity,
queue_manager: AppQueueManager,
message: Message,
user_id: str,
memory: Optional[TokenBufferMemory] = None,
prompt_messages: Optional[list[PromptMessage]] = None,
variables_pool: Optional[ToolRuntimeVariablePool] = None,
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance = None
) -> None:
def __init__(
self,
tenant_id: str,
application_generate_entity: AgentChatAppGenerateEntity,
conversation: Conversation,
app_config: AgentChatAppConfig,
model_config: ModelConfigWithCredentialsEntity,
config: AgentEntity,
queue_manager: AppQueueManager,
message: Message,
user_id: str,
memory: Optional[TokenBufferMemory] = None,
prompt_messages: Optional[list[PromptMessage]] = None,
variables_pool: Optional[ToolRuntimeVariablePool] = None,
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance = None,
) -> None:
"""
Agent runner
:param tenant_id: tenant id
@ -88,9 +93,7 @@ class BaseAgentRunner(AppRunner):
self.message = message
self.user_id = user_id
self.memory = memory
self.history_prompt_messages = self.organize_agent_history(
prompt_messages=prompt_messages or []
)
self.history_prompt_messages = self.organize_agent_history(prompt_messages=prompt_messages or [])
self.variables_pool = variables_pool
self.db_variables_pool = db_variables
self.model_instance = model_instance
@ -111,12 +114,16 @@ class BaseAgentRunner(AppRunner):
retrieve_config=app_config.dataset.retrieve_config if app_config.dataset else None,
return_resource=app_config.additional_features.show_retrieve_source,
invoke_from=application_generate_entity.invoke_from,
hit_callback=hit_callback
hit_callback=hit_callback,
)
# get how many agent thoughts have been created
self.agent_thought_count = db.session.query(MessageAgentThought).filter(
MessageAgentThought.message_id == self.message.id,
).count()
self.agent_thought_count = (
db.session.query(MessageAgentThought)
.filter(
MessageAgentThought.message_id == self.message.id,
)
.count()
)
db.session.close()
# check if model supports stream tool call
@ -135,25 +142,26 @@ class BaseAgentRunner(AppRunner):
self.query = None
self._current_thoughts: list[PromptMessage] = []
def _repack_app_generate_entity(self, app_generate_entity: AgentChatAppGenerateEntity) \
-> AgentChatAppGenerateEntity:
def _repack_app_generate_entity(
self, app_generate_entity: AgentChatAppGenerateEntity
) -> AgentChatAppGenerateEntity:
"""
Repack app generate entity
"""
if app_generate_entity.app_config.prompt_template.simple_prompt_template is None:
app_generate_entity.app_config.prompt_template.simple_prompt_template = ''
app_generate_entity.app_config.prompt_template.simple_prompt_template = ""
return app_generate_entity
def _convert_tool_to_prompt_message_tool(self, tool: AgentToolEntity) -> tuple[PromptMessageTool, Tool]:
"""
convert tool to prompt message tool
convert tool to prompt message tool
"""
tool_entity = ToolManager.get_agent_tool_runtime(
tenant_id=self.tenant_id,
app_id=self.app_config.app_id,
agent_tool=tool,
invoke_from=self.application_generate_entity.invoke_from
invoke_from=self.application_generate_entity.invoke_from,
)
tool_entity.load_variables(self.variables_pool)
@ -164,7 +172,7 @@ class BaseAgentRunner(AppRunner):
"type": "object",
"properties": {},
"required": [],
}
},
)
parameters = tool_entity.get_all_runtime_parameters()
@ -177,19 +185,19 @@ class BaseAgentRunner(AppRunner):
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
message_tool.parameters['properties'][parameter.name] = {
message_tool.parameters["properties"][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
"description": parameter.llm_description or "",
}
if len(enum) > 0:
message_tool.parameters['properties'][parameter.name]['enum'] = enum
message_tool.parameters["properties"][parameter.name]["enum"] = enum
if parameter.required:
message_tool.parameters['required'].append(parameter.name)
message_tool.parameters["required"].append(parameter.name)
return message_tool, tool_entity
def _convert_dataset_retriever_tool_to_prompt_message_tool(self, tool: DatasetRetrieverTool) -> PromptMessageTool:
"""
convert dataset retriever tool to prompt message tool
@ -201,24 +209,24 @@ class BaseAgentRunner(AppRunner):
"type": "object",
"properties": {},
"required": [],
}
},
)
for parameter in tool.get_runtime_parameters():
parameter_type = 'string'
prompt_tool.parameters['properties'][parameter.name] = {
parameter_type = "string"
prompt_tool.parameters["properties"][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
"description": parameter.llm_description or "",
}
if parameter.required:
if parameter.name not in prompt_tool.parameters['required']:
prompt_tool.parameters['required'].append(parameter.name)
if parameter.name not in prompt_tool.parameters["required"]:
prompt_tool.parameters["required"].append(parameter.name)
return prompt_tool
def _init_prompt_tools(self) -> tuple[dict[str, Tool], list[PromptMessageTool]]:
def _init_prompt_tools(self) -> tuple[Mapping[str, Tool], Sequence[PromptMessageTool]]:
"""
Init tools
"""
@ -261,51 +269,51 @@ class BaseAgentRunner(AppRunner):
enum = []
if parameter.type == ToolParameter.ToolParameterType.SELECT:
enum = [option.value for option in parameter.options]
prompt_tool.parameters['properties'][parameter.name] = {
prompt_tool.parameters["properties"][parameter.name] = {
"type": parameter_type,
"description": parameter.llm_description or '',
"description": parameter.llm_description or "",
}
if len(enum) > 0:
prompt_tool.parameters['properties'][parameter.name]['enum'] = enum
prompt_tool.parameters["properties"][parameter.name]["enum"] = enum
if parameter.required:
if parameter.name not in prompt_tool.parameters['required']:
prompt_tool.parameters['required'].append(parameter.name)
if parameter.name not in prompt_tool.parameters["required"]:
prompt_tool.parameters["required"].append(parameter.name)
return prompt_tool
def create_agent_thought(self, message_id: str, message: str,
tool_name: str, tool_input: str, messages_ids: list[str]
) -> MessageAgentThought:
def create_agent_thought(
self, message_id: str, message: str, tool_name: str, tool_input: str, messages_ids: list[str]
) -> MessageAgentThought:
"""
Create agent thought
"""
thought = MessageAgentThought(
message_id=message_id,
message_chain_id=None,
thought='',
thought="",
tool=tool_name,
tool_labels_str='{}',
tool_meta_str='{}',
tool_labels_str="{}",
tool_meta_str="{}",
tool_input=tool_input,
message=message,
message_token=0,
message_unit_price=0,
message_price_unit=0,
message_files=json.dumps(messages_ids) if messages_ids else '',
answer='',
observation='',
message_files=json.dumps(messages_ids) if messages_ids else "",
answer="",
observation="",
answer_token=0,
answer_unit_price=0,
answer_price_unit=0,
tokens=0,
total_price=0,
position=self.agent_thought_count + 1,
currency='USD',
currency="USD",
latency=0,
created_by_role='account',
created_by_role="account",
created_by=self.user_id,
)
@ -318,22 +326,22 @@ class BaseAgentRunner(AppRunner):
return thought
def save_agent_thought(self,
agent_thought: MessageAgentThought,
tool_name: str,
tool_input: Union[str, dict],
thought: str,
observation: Union[str, dict],
tool_invoke_meta: Union[str, dict],
answer: str,
messages_ids: list[str],
llm_usage: LLMUsage = None) -> MessageAgentThought:
def save_agent_thought(
self,
agent_thought: MessageAgentThought,
tool_name: str,
tool_input: Union[str, dict],
thought: str,
observation: Union[str, dict],
tool_invoke_meta: Union[str, dict],
answer: str,
messages_ids: list[str],
llm_usage: LLMUsage = None,
) -> MessageAgentThought:
"""
Save agent thought
"""
agent_thought = db.session.query(MessageAgentThought).filter(
MessageAgentThought.id == agent_thought.id
).first()
agent_thought = db.session.query(MessageAgentThought).filter(MessageAgentThought.id == agent_thought.id).first()
if thought is not None:
agent_thought.thought = thought
@ -356,7 +364,7 @@ class BaseAgentRunner(AppRunner):
observation = json.dumps(observation, ensure_ascii=False)
except Exception as e:
observation = json.dumps(observation)
agent_thought.observation = observation
if answer is not None:
@ -364,7 +372,7 @@ class BaseAgentRunner(AppRunner):
if messages_ids is not None and len(messages_ids) > 0:
agent_thought.message_files = json.dumps(messages_ids)
if llm_usage:
agent_thought.message_token = llm_usage.prompt_tokens
agent_thought.message_price_unit = llm_usage.prompt_price_unit
@ -377,7 +385,7 @@ class BaseAgentRunner(AppRunner):
# check if tool labels is not empty
labels = agent_thought.tool_labels or {}
tools = agent_thought.tool.split(';') if agent_thought.tool else []
tools = agent_thought.tool.split(";") if agent_thought.tool else []
for tool in tools:
if not tool:
continue
@ -386,7 +394,7 @@ class BaseAgentRunner(AppRunner):
if tool_label:
labels[tool] = tool_label.to_dict()
else:
labels[tool] = {'en_US': tool, 'zh_Hans': tool}
labels[tool] = {"en_US": tool, "zh_Hans": tool}
agent_thought.tool_labels_str = json.dumps(labels)
@ -401,14 +409,18 @@ class BaseAgentRunner(AppRunner):
db.session.commit()
db.session.close()
def update_db_variables(self, tool_variables: ToolRuntimeVariablePool, db_variables: ToolConversationVariables):
"""
convert tool variables to db variables
"""
db_variables = db.session.query(ToolConversationVariables).filter(
ToolConversationVariables.conversation_id == self.message.conversation_id,
).first()
db_variables = (
db.session.query(ToolConversationVariables)
.filter(
ToolConversationVariables.conversation_id == self.message.conversation_id,
)
.first()
)
db_variables.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
db_variables.variables_str = json.dumps(jsonable_encoder(tool_variables.pool))
@ -425,9 +437,16 @@ class BaseAgentRunner(AppRunner):
if isinstance(prompt_message, SystemPromptMessage):
result.append(prompt_message)
messages: list[Message] = db.session.query(Message).filter(
Message.conversation_id == self.message.conversation_id,
).order_by(Message.created_at.asc()).all()
messages: list[Message] = (
db.session.query(Message)
.filter(
Message.conversation_id == self.message.conversation_id,
)
.order_by(Message.created_at.desc())
.all()
)
messages = list(reversed(extract_thread_messages(messages)))
for message in messages:
if message.id == self.message.id:
@ -439,13 +458,13 @@ class BaseAgentRunner(AppRunner):
for agent_thought in agent_thoughts:
tools = agent_thought.tool
if tools:
tools = tools.split(';')
tools = tools.split(";")
tool_calls: list[AssistantPromptMessage.ToolCall] = []
tool_call_response: list[ToolPromptMessage] = []
try:
tool_inputs = json.loads(agent_thought.tool_input)
except Exception as e:
tool_inputs = { tool: {} for tool in tools }
tool_inputs = {tool: {} for tool in tools}
try:
tool_responses = json.loads(agent_thought.observation)
except Exception as e:
@ -454,27 +473,33 @@ class BaseAgentRunner(AppRunner):
for tool in tools:
# generate a uuid for tool call
tool_call_id = str(uuid.uuid4())
tool_calls.append(AssistantPromptMessage.ToolCall(
id=tool_call_id,
type='function',
function=AssistantPromptMessage.ToolCall.ToolCallFunction(
name=tool,
arguments=json.dumps(tool_inputs.get(tool, {})),
tool_calls.append(
AssistantPromptMessage.ToolCall(
id=tool_call_id,
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(
name=tool,
arguments=json.dumps(tool_inputs.get(tool, {})),
),
)
))
tool_call_response.append(ToolPromptMessage(
content=tool_responses.get(tool, agent_thought.observation),
name=tool,
tool_call_id=tool_call_id,
))
)
tool_call_response.append(
ToolPromptMessage(
content=tool_responses.get(tool, agent_thought.observation),
name=tool,
tool_call_id=tool_call_id,
)
)
result.extend([
AssistantPromptMessage(
content=agent_thought.thought,
tool_calls=tool_calls,
),
*tool_call_response
])
result.extend(
[
AssistantPromptMessage(
content=agent_thought.thought,
tool_calls=tool_calls,
),
*tool_call_response,
]
)
if not tools:
result.append(AssistantPromptMessage(content=agent_thought.thought))
else:
@ -496,10 +521,7 @@ class BaseAgentRunner(AppRunner):
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
if file_extra_config:
file_objs = message_file_parser.transform_message_files(
files,
file_extra_config
)
file_objs = message_file_parser.transform_message_files(files, file_extra_config)
else:
file_objs = []
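
(extract_thread_messages is imported and called above but not shown in this diff. Given the newly added parent_message_id fields and the switch to newest-first ordering before the call, a plausible sketch — an assumption about core.prompt.utils, not the shipped implementation — walks the parent chain from the most recent message:)

def extract_thread_messages(messages):
    """messages arrive newest-first; keep only the active thread by
    following parent_message_id links upward from the latest message."""
    thread = []
    next_id = None  # unset until we anchor on the newest message
    for message in messages:
        if next_id is None or message.id == next_id:
            thread.append(message)
            next_id = message.parent_message_id
            if next_id is None:
                break  # reached the thread root
    return thread

(The caller above then applies list(reversed(...)) to restore chronological order for prompt assembly.)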

Some files were not shown because too many files have changed in this diff.