Mirror of https://github.com/langgenius/dify.git (synced 2026-01-19 19:55:06 +08:00)

Compare commits: `bug1` ... `codex/run-` (1279 commits)
.claude/skills/frontend-testing/CHECKLIST.md (new file, 205 lines)
@@ -0,0 +1,205 @@

# Test Generation Checklist

Use this checklist when generating or reviewing tests for Dify frontend components.

## Pre-Generation

- [ ] Read the component source code completely
- [ ] Identify component type (component, hook, utility, page)
- [ ] Run `pnpm analyze-component <path>` if available
- [ ] Note complexity score and features detected
- [ ] Check for existing tests in the same directory
- [ ] **Identify ALL files in the directory** that need testing (not just index)

## Testing Strategy

### ⚠️ Incremental Workflow (CRITICAL for Multi-File)

- [ ] **NEVER generate all tests at once** - process one file at a time
- [ ] Order files by complexity: utilities → hooks → simple → complex → integration
- [ ] Create a todo list to track progress before starting
- [ ] For EACH file: write → run test → verify pass → then next
- [ ] **DO NOT proceed** to next file until current one passes

### Path-Level Coverage

- [ ] **Test ALL files** in the assigned directory/path
- [ ] List all components, hooks, utilities that need coverage
- [ ] Decide: single spec file (integration) or multiple spec files (unit)

### Complexity Assessment

- [ ] Run `pnpm analyze-component <path>` for complexity score
- [ ] **Complexity > 50**: Consider refactoring before testing
- [ ] **500+ lines**: Consider splitting before testing
- [ ] **30-50 complexity**: Use multiple describe blocks, organized structure

### Integration vs Mocking

- [ ] **DO NOT mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
- [ ] Import real project components instead of mocking
- [ ] Only mock: API calls, complex context providers, third-party libs with side effects
- [ ] Prefer integration testing when using single spec file

## Required Test Sections

### All Components MUST Have

- [ ] **Rendering tests** - Component renders without crashing
- [ ] **Props tests** - Required props, optional props, default values
- [ ] **Edge cases** - null, undefined, empty values, boundaries

### Conditional Sections (Add When Feature Present)

| Feature | Add Tests For |
|---------|---------------|
| `useState` | Initial state, transitions, cleanup |
| `useEffect` | Execution, dependencies, cleanup |
| Event handlers | onClick, onChange, onSubmit, keyboard |
| API calls | Loading, success, error states |
| Routing | Navigation, params, query strings |
| `useCallback`/`useMemo` | Referential equality |
| Context | Provider values, consumer behavior |
| Forms | Validation, submission, error display |
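
For example, a minimal sketch for the `useState` row (the `Toggle` component and its `switch` role here are hypothetical):

```typescript
import { render, screen, fireEvent } from '@testing-library/react'
// `Toggle` is illustrative only; substitute the component under test.
import Toggle from './toggle'

describe('Toggle state', () => {
  it('should render the initial state', () => {
    render(<Toggle />)
    expect(screen.getByRole('switch')).not.toBeChecked()
  })

  it('should transition state when clicked', () => {
    render(<Toggle />)
    fireEvent.click(screen.getByRole('switch'))
    expect(screen.getByRole('switch')).toBeChecked()
  })
})
```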

## Code Quality Checklist

### Structure

- [ ] Uses `describe` blocks to group related tests
- [ ] Test names follow `should <behavior> when <condition>` pattern
- [ ] AAA pattern (Arrange-Act-Assert) is clear
- [ ] Comments explain complex test scenarios

### Mocks

- [ ] **DO NOT mock base components** (`@/app/components/base/*`)
- [ ] `jest.clearAllMocks()` in `beforeEach` (not `afterEach`)
- [ ] Shared mock state reset in `beforeEach`
- [ ] i18n uses shared mock (auto-loaded); only override locally for custom translations
- [ ] Router mocks match actual Next.js API
- [ ] Mocks reflect actual component conditional behavior
- [ ] Only mock: API services, complex context providers, third-party libs
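
A hedged sketch of a local i18n override (the key and return value are illustrative; the shared mock in `web/__mocks__/react-i18next.ts` remains the default):

```typescript
// Only for tests that need a specific translation value; the mock shape is an assumption.
jest.mock('react-i18next', () => ({
  useTranslation: () => ({
    t: (key: string) => (key === 'common.operation.save' ? 'Save changes' : key),
  }),
}))
```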

### Queries

- [ ] Prefer semantic queries (`getByRole`, `getByLabelText`)
- [ ] Use `queryBy*` for absence assertions
- [ ] Use `findBy*` for async elements
- [ ] `getByTestId` only as last resort
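
For instance, inside an async test body, one query family per intent:

```typescript
// Present now, absent now, and appearing asynchronously.
expect(screen.getByRole('button', { name: /save/i })).toBeInTheDocument()
expect(screen.queryByText(/error/i)).not.toBeInTheDocument()
expect(await screen.findByText(/saved/i)).toBeInTheDocument()
```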

### Async

- [ ] All async tests use `async/await`
- [ ] `waitFor` wraps async assertions
- [ ] Fake timers properly set up and torn down
- [ ] No floating promises
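
The standard setup/teardown pairing (see `guides/async-testing.md` for full examples):

```typescript
beforeEach(() => {
  jest.useFakeTimers()
})

afterEach(() => {
  jest.useRealTimers()
})
```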

### TypeScript

- [ ] No `any` types without justification
- [ ] Mock data uses actual types from source
- [ ] Factory functions have proper return types
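
A minimal sketch of a typed factory (the `Dataset` shape is illustrative — import the real type from its source module; `web/__mocks__/provider-context.ts` is the project's reference example):

```typescript
// Illustrative shape only; use the actual type from the source module.
type Dataset = {
  id: string
  name: string
  documentCount: number
}

const createMockDataset = (overrides: Partial<Dataset> = {}): Dataset => ({
  id: 'dataset-1',
  name: 'Test Dataset',
  documentCount: 0,
  ...overrides,
})
```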

## Coverage Goals (Per File)

For the current file being tested:

- [ ] 100% function coverage
- [ ] 100% statement coverage
- [ ] >95% branch coverage
- [ ] >95% line coverage

## Post-Generation (Per File)

**Run these checks after EACH test file, not just at the end:**

- [ ] Run `pnpm test -- path/to/file.spec.tsx` - **MUST PASS before next file**
- [ ] Fix any failures immediately
- [ ] Mark file as complete in todo list
- [ ] Only then proceed to next file

### After All Files Complete

- [ ] Run full directory test: `pnpm test -- path/to/directory/`
- [ ] Check coverage report: `pnpm test -- --coverage`
- [ ] Run `pnpm lint:fix` on all test files
- [ ] Run `pnpm type-check:tsgo`

## Common Issues to Watch

### False Positives

```typescript
// ❌ Mock doesn't match actual behavior
jest.mock('./Component', () => () => <div>Mocked</div>)

// ✅ Mock matches actual conditional logic
jest.mock('./Component', () => ({ isOpen }: any) =>
  isOpen ? <div>Content</div> : null
)
```

### State Leakage

```typescript
// ❌ Shared state not reset
let mockState = false
jest.mock('./useHook', () => () => mockState)

// ✅ Reset in beforeEach
beforeEach(() => {
  mockState = false
})
```

### Async Race Conditions

```typescript
// ❌ Not awaited
it('loads data', () => {
  render(<Component />)
  expect(screen.getByText('Data')).toBeInTheDocument()
})

// ✅ Properly awaited
it('loads data', async () => {
  render(<Component />)
  await waitFor(() => {
    expect(screen.getByText('Data')).toBeInTheDocument()
  })
})
```

### Missing Edge Cases

Always test these scenarios:

- `null` / `undefined` inputs
- Empty strings / arrays / objects
- Boundary values (0, -1, MAX_INT)
- Error states
- Loading states
- Disabled states
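
A table-driven sketch covering several of these at once (the `ItemList` component and its empty-state text are assumptions):

```typescript
// `ItemList` is hypothetical; adapt the fallback text to the real component.
describe('Edge Cases', () => {
  it.each([
    ['null', null],
    ['undefined', undefined],
    ['an empty array', []],
  ])('should show the empty state when items is %s', (_label, items) => {
    render(<ItemList items={items as any} />)
    expect(screen.getByText(/no items/i)).toBeInTheDocument()
  })
})
```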

## Quick Commands

```bash
# Run specific test
pnpm test -- path/to/file.spec.tsx

# Run with coverage
pnpm test -- --coverage path/to/file.spec.tsx

# Watch mode
pnpm test -- --watch path/to/file.spec.tsx

# Update snapshots (use sparingly)
pnpm test -- -u path/to/file.spec.tsx

# Analyze component
pnpm analyze-component path/to/component.tsx

# Review existing test
pnpm analyze-component path/to/component.tsx --review
```

.claude/skills/frontend-testing/SKILL.md (new file, 321 lines)
@@ -0,0 +1,321 @@

---
name: Dify Frontend Testing
description: Generate Jest + React Testing Library tests for Dify frontend components, hooks, and utilities. Triggers on testing, spec files, coverage, Jest, RTL, unit tests, integration tests, or write/review test requests.
---

# Dify Frontend Testing Skill

This skill enables Claude to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.

> **⚠️ Authoritative Source**: This skill is derived from `web/testing/testing.md`. When in doubt, always refer to that document as the canonical specification.

## When to Apply This Skill

Apply this skill when the user:

- Asks to **write tests** for a component, hook, or utility
- Asks to **review existing tests** for completeness
- Mentions **Jest**, **React Testing Library**, **RTL**, or **spec files**
- Requests **test coverage** improvement
- Uses `pnpm analyze-component` output as context
- Mentions **testing**, **unit tests**, or **integration tests** for frontend code
- Wants to understand **testing patterns** in the Dify codebase

**Do NOT apply** when:

- User is asking about backend/API tests (Python/pytest)
- User is asking about E2E tests (Playwright/Cypress)
- User is only asking conceptual questions without code context

## Quick Reference

### Tech Stack

| Tool | Version | Purpose |
|------|---------|---------|
| Jest | 29.7 | Test runner |
| React Testing Library | 16.0 | Component testing |
| happy-dom | - | Test environment |
| nock | 14.0 | HTTP mocking |
| TypeScript | 5.x | Type safety |

### Key Commands

```bash
# Run all tests
pnpm test

# Watch mode
pnpm test -- --watch

# Run specific file
pnpm test -- path/to/file.spec.tsx

# Generate coverage report
pnpm test -- --coverage

# Analyze component complexity
pnpm analyze-component <path>

# Review existing test
pnpm analyze-component <path> --review
```

### File Naming

- Test files: `ComponentName.spec.tsx` (same directory as component)
- Integration tests: `web/__tests__/` directory

## Test Structure Template

```typescript
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import Component from './index'

// ✅ Import real project components (DO NOT mock these)
// import Loading from '@/app/components/base/loading'
// import { ChildComponent } from './child-component'

// ✅ Mock external dependencies only
jest.mock('@/service/api')
jest.mock('next/navigation', () => ({
  useRouter: () => ({ push: jest.fn() }),
  usePathname: () => '/test',
}))

// Shared state for mocks (if needed)
let mockSharedState = false

describe('ComponentName', () => {
  beforeEach(() => {
    jest.clearAllMocks() // ✅ Reset mocks BEFORE each test
    mockSharedState = false // ✅ Reset shared state
  })

  // Rendering tests (REQUIRED)
  describe('Rendering', () => {
    it('should render without crashing', () => {
      // Arrange
      const props = { title: 'Test' }

      // Act
      render(<Component {...props} />)

      // Assert
      expect(screen.getByText('Test')).toBeInTheDocument()
    })
  })

  // Props tests (REQUIRED)
  describe('Props', () => {
    it('should apply custom className', () => {
      render(<Component className="custom" />)
      expect(screen.getByRole('button')).toHaveClass('custom')
    })
  })

  // User Interactions
  describe('User Interactions', () => {
    it('should handle click events', () => {
      const handleClick = jest.fn()
      render(<Component onClick={handleClick} />)

      fireEvent.click(screen.getByRole('button'))

      expect(handleClick).toHaveBeenCalledTimes(1)
    })
  })

  // Edge Cases (REQUIRED)
  describe('Edge Cases', () => {
    it('should handle null data', () => {
      render(<Component data={null} />)
      expect(screen.getByText(/no data/i)).toBeInTheDocument()
    })

    it('should handle empty array', () => {
      render(<Component items={[]} />)
      expect(screen.getByText(/empty/i)).toBeInTheDocument()
    })
  })
})
```

## Testing Workflow (CRITICAL)

### ⚠️ Incremental Approach Required

**NEVER generate all test files at once.** For complex components or multi-file directories:

1. **Analyze & Plan**: List all files, order by complexity (simple → complex)
1. **Process ONE at a time**: Write test → Run test → Fix if needed → Next
1. **Verify before proceeding**: Do NOT continue to next file until current passes

```
For each file:
┌────────────────────────────────────────┐
│ 1. Write test                          │
│ 2. Run: pnpm test -- <file>.spec.tsx   │
│ 3. PASS? → Mark complete, next file    │
│    FAIL? → Fix first, then continue    │
└────────────────────────────────────────┘
```

### Complexity-Based Order

Process in this order for multi-file testing:

1. 🟢 Utility functions (simplest)
1. 🟢 Custom hooks
1. 🟡 Simple components (presentational)
1. 🟡 Medium components (state, effects)
1. 🔴 Complex components (API, routing)
1. 🔴 Integration tests (index files - last)

### When to Refactor First

- **Complexity > 50**: Break into smaller pieces before testing
- **500+ lines**: Consider splitting before testing
- **Many dependencies**: Extract logic into hooks first

> 📖 See `guides/workflow.md` for complete workflow details and todo list format.

## Testing Strategy

### Path-Level Testing (Directory Testing)

When assigned to test a directory/path, test **ALL content** within that path:

- Test all components, hooks, utilities in the directory (not just `index` file)
- Use incremental approach: one file at a time, verify each before proceeding
- Goal: 100% coverage of ALL files in the directory

### Integration Testing First

**Prefer integration testing** when writing tests for a directory:

- ✅ **Import real project components** directly (including base components and siblings)
- ✅ **Only mock**: API services (`@/service/*`), `next/navigation`, complex context providers
- ❌ **DO NOT mock** base components (`@/app/components/base/*`)
- ❌ **DO NOT mock** sibling/child components in the same directory

> See [Test Structure Template](#test-structure-template) for correct import/mock patterns.

## Core Principles

### 1. AAA Pattern (Arrange-Act-Assert)

Every test should clearly separate the three phases, as in the sketch below:

- **Arrange**: Set up test data and render the component
- **Act**: Perform user actions
- **Assert**: Verify expected outcomes
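
A minimal sketch (the `SearchBar` component is hypothetical; any component with a clear three-phase test works):

```typescript
import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'

it('should call onSearch when the search button is clicked', async () => {
  // Arrange
  const user = userEvent.setup()
  const onSearch = jest.fn()
  render(<SearchBar onSearch={onSearch} />)

  // Act
  await user.type(screen.getByRole('textbox'), 'dify')
  await user.click(screen.getByRole('button', { name: /search/i }))

  // Assert
  expect(onSearch).toHaveBeenCalledWith('dify')
})
```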

### 2. Black-Box Testing

- Test observable behavior, not implementation details
- Use semantic queries (getByRole, getByLabelText)
- Avoid testing internal state directly
- **Prefer pattern matching over hardcoded strings** in assertions:

```typescript
// ❌ Avoid: hardcoded text assertions
expect(screen.getByText('Loading...')).toBeInTheDocument()

// ✅ Better: role-based queries
expect(screen.getByRole('status')).toBeInTheDocument()

// ✅ Better: pattern matching
expect(screen.getByText(/loading/i)).toBeInTheDocument()
```

### 3. Single Behavior Per Test

Each test verifies ONE user-observable behavior:

```typescript
// ✅ Good: One behavior
it('should disable button when loading', () => {
  render(<Button loading />)
  expect(screen.getByRole('button')).toBeDisabled()
})

// ❌ Bad: Multiple behaviors
it('should handle loading state', () => {
  render(<Button loading />)
  expect(screen.getByRole('button')).toBeDisabled()
  expect(screen.getByText('Loading...')).toBeInTheDocument()
  expect(screen.getByRole('button')).toHaveClass('loading')
})
```

### 4. Semantic Naming

Use `should <behavior> when <condition>`:

```typescript
it('should show error message when validation fails')
it('should call onSubmit when form is valid')
it('should disable input when isReadOnly is true')
```

## Required Test Scenarios

### Always Required (All Components)

1. **Rendering**: Component renders without crashing
1. **Props**: Required props, optional props, default values
1. **Edge Cases**: null, undefined, empty values, boundary conditions

### Conditional (When Present)

| Feature | Test Focus |
|---------|-----------|
| `useState` | Initial state, transitions, cleanup |
| `useEffect` | Execution, dependencies, cleanup |
| Event handlers | All onClick, onChange, onSubmit, keyboard |
| API calls | Loading, success, error states |
| Routing | Navigation, params, query strings |
| `useCallback`/`useMemo` | Referential equality |
| Context | Provider values, consumer behavior |
| Forms | Validation, submission, error display |

## Coverage Goals (Per File)

For each test file generated, aim for:

- ✅ **100%** function coverage
- ✅ **100%** statement coverage
- ✅ **>95%** branch coverage
- ✅ **>95%** line coverage

> **Note**: For multi-file directories, process one file at a time with full coverage each. See `guides/workflow.md`.

## Detailed Guides

For more detailed information, refer to:

- `guides/workflow.md` - **Incremental testing workflow** (MUST READ for multi-file testing)
- `guides/mocking.md` - Mock patterns and best practices
- `guides/async-testing.md` - Async operations and API calls
- `guides/domain-components.md` - Workflow, Dataset, Configuration testing
- `guides/common-patterns.md` - Frequently used testing patterns

## Authoritative References

### Primary Specification (MUST follow)

- **`web/testing/testing.md`** - The canonical testing specification. This skill is derived from this document.

### Reference Examples in Codebase

- `web/utils/classnames.spec.ts` - Utility function tests
- `web/app/components/base/button/index.spec.tsx` - Component tests
- `web/__mocks__/provider-context.ts` - Mock factory example

### Project Configuration

- `web/jest.config.ts` - Jest configuration
- `web/jest.setup.ts` - Test environment setup
- `web/testing/analyze-component.js` - Component analysis tool
- `web/__mocks__/react-i18next.ts` - Shared i18n mock (auto-loaded by Jest, no explicit mock needed; override locally only for custom translations)

.claude/skills/frontend-testing/guides/async-testing.md (new file, 345 lines)
@@ -0,0 +1,345 @@

# Async Testing Guide

## Core Async Patterns

### 1. waitFor - Wait for Condition

```typescript
import { render, screen, waitFor } from '@testing-library/react'

it('should load and display data', async () => {
  render(<DataComponent />)

  // Wait for element to appear
  await waitFor(() => {
    expect(screen.getByText('Loaded Data')).toBeInTheDocument()
  })
})

it('should hide loading spinner after load', async () => {
  render(<DataComponent />)

  // Wait for element to disappear
  await waitFor(() => {
    expect(screen.queryByText('Loading...')).not.toBeInTheDocument()
  })
})
```

### 2. findBy\* - Async Queries

```typescript
it('should show user name after fetch', async () => {
  render(<UserProfile />)

  // findBy returns a promise, auto-waits up to 1000ms
  const userName = await screen.findByText('John Doe')
  expect(userName).toBeInTheDocument()

  // findByRole with options
  const button = await screen.findByRole('button', { name: /submit/i })
  expect(button).toBeEnabled()
})
```

### 3. userEvent for Async Interactions

```typescript
import userEvent from '@testing-library/user-event'

it('should submit form', async () => {
  const user = userEvent.setup()
  const onSubmit = jest.fn()

  render(<Form onSubmit={onSubmit} />)

  // userEvent methods are async
  await user.type(screen.getByLabelText('Email'), 'test@example.com')
  await user.click(screen.getByRole('button', { name: /submit/i }))

  await waitFor(() => {
    expect(onSubmit).toHaveBeenCalledWith({ email: 'test@example.com' })
  })
})
```

## Fake Timers

### When to Use Fake Timers

- Testing components with `setTimeout`/`setInterval`
- Testing debounce/throttle behavior
- Testing animations or delayed transitions
- Testing polling or retry logic

### Basic Fake Timer Setup

```typescript
describe('Debounced Search', () => {
  beforeEach(() => {
    jest.useFakeTimers()
  })

  afterEach(() => {
    jest.useRealTimers()
  })

  it('should debounce search input', async () => {
    const onSearch = jest.fn()
    render(<SearchInput onSearch={onSearch} debounceMs={300} />)

    // Type in the input
    fireEvent.change(screen.getByRole('textbox'), { target: { value: 'query' } })

    // Search not called immediately
    expect(onSearch).not.toHaveBeenCalled()

    // Advance timers
    jest.advanceTimersByTime(300)

    // Now search is called
    expect(onSearch).toHaveBeenCalledWith('query')
  })
})
```

### Fake Timers with Async Code

```typescript
it('should retry on failure', async () => {
  jest.useFakeTimers()
  const fetchData = jest.fn()
    .mockRejectedValueOnce(new Error('Network error'))
    .mockResolvedValueOnce({ data: 'success' })

  render(<RetryComponent fetchData={fetchData} retryDelayMs={1000} />)

  // First call fails
  await waitFor(() => {
    expect(fetchData).toHaveBeenCalledTimes(1)
  })

  // Advance timer for retry
  jest.advanceTimersByTime(1000)

  // Second call succeeds
  await waitFor(() => {
    expect(fetchData).toHaveBeenCalledTimes(2)
    expect(screen.getByText('success')).toBeInTheDocument()
  })

  jest.useRealTimers()
})
```

### Common Fake Timer Utilities

```typescript
// Run all pending timers
jest.runAllTimers()

// Run only pending timers (not new ones created during execution)
jest.runOnlyPendingTimers()

// Advance by specific time
jest.advanceTimersByTime(1000)

// Get current fake time
jest.now()

// Clear all timers
jest.clearAllTimers()
```

## API Testing Patterns

### Loading → Success → Error States

```typescript
describe('DataFetcher', () => {
  beforeEach(() => {
    jest.clearAllMocks()
  })

  it('should show loading state', () => {
    mockedApi.fetchData.mockImplementation(() => new Promise(() => {})) // Never resolves

    render(<DataFetcher />)

    expect(screen.getByTestId('loading-spinner')).toBeInTheDocument()
  })

  it('should show data on success', async () => {
    mockedApi.fetchData.mockResolvedValue({ items: ['Item 1', 'Item 2'] })

    render(<DataFetcher />)

    // Use findBy* for multiple async elements (better error messages than waitFor with multiple assertions)
    const item1 = await screen.findByText('Item 1')
    const item2 = await screen.findByText('Item 2')
    expect(item1).toBeInTheDocument()
    expect(item2).toBeInTheDocument()

    expect(screen.queryByTestId('loading-spinner')).not.toBeInTheDocument()
  })

  it('should show error on failure', async () => {
    mockedApi.fetchData.mockRejectedValue(new Error('Failed to fetch'))

    render(<DataFetcher />)

    await waitFor(() => {
      expect(screen.getByText(/failed to fetch/i)).toBeInTheDocument()
    })
  })

  it('should retry on error', async () => {
    mockedApi.fetchData.mockRejectedValue(new Error('Network error'))

    render(<DataFetcher />)

    await waitFor(() => {
      expect(screen.getByRole('button', { name: /retry/i })).toBeInTheDocument()
    })

    mockedApi.fetchData.mockResolvedValue({ items: ['Item 1'] })
    fireEvent.click(screen.getByRole('button', { name: /retry/i }))

    await waitFor(() => {
      expect(screen.getByText('Item 1')).toBeInTheDocument()
    })
  })
})
```

### Testing Mutations

```typescript
it('should submit form and show success', async () => {
  const user = userEvent.setup()
  mockedApi.createItem.mockResolvedValue({ id: '1', name: 'New Item' })

  render(<CreateItemForm />)

  await user.type(screen.getByLabelText('Name'), 'New Item')
  await user.click(screen.getByRole('button', { name: /create/i }))

  // Button should be disabled during submission
  expect(screen.getByRole('button', { name: /creating/i })).toBeDisabled()

  await waitFor(() => {
    expect(screen.getByText(/created successfully/i)).toBeInTheDocument()
  })

  expect(mockedApi.createItem).toHaveBeenCalledWith({ name: 'New Item' })
})
```

## useEffect Testing

### Testing Effect Execution

```typescript
it('should fetch data on mount', async () => {
  const fetchData = jest.fn().mockResolvedValue({ data: 'test' })

  render(<ComponentWithEffect fetchData={fetchData} />)

  await waitFor(() => {
    expect(fetchData).toHaveBeenCalledTimes(1)
  })
})
```

### Testing Effect Dependencies

```typescript
it('should refetch when id changes', async () => {
  const fetchData = jest.fn().mockResolvedValue({ data: 'test' })

  const { rerender } = render(<ComponentWithEffect id="1" fetchData={fetchData} />)

  await waitFor(() => {
    expect(fetchData).toHaveBeenCalledWith('1')
  })

  rerender(<ComponentWithEffect id="2" fetchData={fetchData} />)

  await waitFor(() => {
    expect(fetchData).toHaveBeenCalledWith('2')
    expect(fetchData).toHaveBeenCalledTimes(2)
  })
})
```

### Testing Effect Cleanup

```typescript
it('should cleanup subscription on unmount', () => {
  const subscribe = jest.fn()
  const unsubscribe = jest.fn()
  subscribe.mockReturnValue(unsubscribe)

  const { unmount } = render(<SubscriptionComponent subscribe={subscribe} />)

  expect(subscribe).toHaveBeenCalledTimes(1)

  unmount()

  expect(unsubscribe).toHaveBeenCalledTimes(1)
})
```

## Common Async Pitfalls

### ❌ Don't: Forget to await

```typescript
// Bad - test may pass even if assertion fails
it('should load data', () => {
  render(<Component />)
  waitFor(() => {
    expect(screen.getByText('Data')).toBeInTheDocument()
  })
})

// Good - properly awaited
it('should load data', async () => {
  render(<Component />)
  await waitFor(() => {
    expect(screen.getByText('Data')).toBeInTheDocument()
  })
})
```

### ❌ Don't: Use multiple assertions in a single waitFor

```typescript
// Bad - if the first assertion fails, you won't learn about the second
await waitFor(() => {
  expect(screen.getByText('Title')).toBeInTheDocument()
  expect(screen.getByText('Description')).toBeInTheDocument()
})

// Good - separate waitFor or use findBy
const title = await screen.findByText('Title')
const description = await screen.findByText('Description')
expect(title).toBeInTheDocument()
expect(description).toBeInTheDocument()
```

### ❌ Don't: Mix fake timers with real async

```typescript
// Bad - fake timers don't work well with real Promises
jest.useFakeTimers()
await waitFor(() => {
  expect(screen.getByText('Data')).toBeInTheDocument()
}) // May timeout!

// Good - use runAllTimers or advanceTimersByTime
jest.useFakeTimers()
render(<Component />)
jest.runAllTimers()
expect(screen.getByText('Data')).toBeInTheDocument()
```

.claude/skills/frontend-testing/guides/common-patterns.md (new file, 449 lines)
@@ -0,0 +1,449 @@

# Common Testing Patterns

## Query Priority

Use queries in this order (most to least preferred):

```typescript
// 1. getByRole - Most recommended (accessibility)
screen.getByRole('button', { name: /submit/i })
screen.getByRole('textbox', { name: /email/i })
screen.getByRole('heading', { level: 1 })

// 2. getByLabelText - Form fields
screen.getByLabelText('Email address')
screen.getByLabelText(/password/i)

// 3. getByPlaceholderText - When no label
screen.getByPlaceholderText('Search...')

// 4. getByText - Non-interactive elements
screen.getByText('Welcome to Dify')
screen.getByText(/loading/i)

// 5. getByDisplayValue - Current input value
screen.getByDisplayValue('current value')

// 6. getByAltText - Images
screen.getByAltText('Company logo')

// 7. getByTitle - Tooltip elements
screen.getByTitle('Close')

// 8. getByTestId - Last resort only!
screen.getByTestId('custom-element')
```
## Event Handling Patterns
|
||||
|
||||
### Click Events
|
||||
|
||||
```typescript
|
||||
// Basic click
|
||||
fireEvent.click(screen.getByRole('button'))
|
||||
|
||||
// With userEvent (preferred for realistic interaction)
|
||||
const user = userEvent.setup()
|
||||
await user.click(screen.getByRole('button'))
|
||||
|
||||
// Double click
|
||||
await user.dblClick(screen.getByRole('button'))
|
||||
|
||||
// Right click
|
||||
await user.pointer({ keys: '[MouseRight]', target: screen.getByRole('button') })
|
||||
```
|
||||
|
||||
### Form Input
|
||||
|
||||
```typescript
|
||||
const user = userEvent.setup()
|
||||
|
||||
// Type in input
|
||||
await user.type(screen.getByRole('textbox'), 'Hello World')
|
||||
|
||||
// Clear and type
|
||||
await user.clear(screen.getByRole('textbox'))
|
||||
await user.type(screen.getByRole('textbox'), 'New value')
|
||||
|
||||
// Select option
|
||||
await user.selectOptions(screen.getByRole('combobox'), 'option-value')
|
||||
|
||||
// Check checkbox
|
||||
await user.click(screen.getByRole('checkbox'))
|
||||
|
||||
// Upload file
|
||||
const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
|
||||
await user.upload(screen.getByLabelText(/upload/i), file)
|
||||
```
|
||||
|
||||
### Keyboard Events
|
||||
|
||||
```typescript
|
||||
const user = userEvent.setup()
|
||||
|
||||
// Press Enter
|
||||
await user.keyboard('{Enter}')
|
||||
|
||||
// Press Escape
|
||||
await user.keyboard('{Escape}')
|
||||
|
||||
// Keyboard shortcut
|
||||
await user.keyboard('{Control>}a{/Control}') // Ctrl+A
|
||||
|
||||
// Tab navigation
|
||||
await user.tab()
|
||||
|
||||
// Arrow keys
|
||||
await user.keyboard('{ArrowDown}')
|
||||
await user.keyboard('{ArrowUp}')
|
||||
```

## Component State Testing

### Testing State Transitions

```typescript
describe('Counter', () => {
  it('should increment count', async () => {
    const user = userEvent.setup()
    render(<Counter initialCount={0} />)

    // Initial state
    expect(screen.getByText('Count: 0')).toBeInTheDocument()

    // Trigger transition
    await user.click(screen.getByRole('button', { name: /increment/i }))

    // New state
    expect(screen.getByText('Count: 1')).toBeInTheDocument()
  })
})
```

### Testing Controlled Components

```typescript
describe('ControlledInput', () => {
  it('should call onChange with new value', async () => {
    const user = userEvent.setup()
    const handleChange = jest.fn()

    render(<ControlledInput value="" onChange={handleChange} />)

    await user.type(screen.getByRole('textbox'), 'a')

    expect(handleChange).toHaveBeenCalledWith('a')
  })

  it('should display controlled value', () => {
    render(<ControlledInput value="controlled" onChange={jest.fn()} />)

    expect(screen.getByRole('textbox')).toHaveValue('controlled')
  })
})
```

## Conditional Rendering Testing

```typescript
describe('ConditionalComponent', () => {
  it('should show loading state', () => {
    render(<DataDisplay isLoading={true} data={null} />)

    expect(screen.getByText(/loading/i)).toBeInTheDocument()
    expect(screen.queryByTestId('data-content')).not.toBeInTheDocument()
  })

  it('should show error state', () => {
    render(<DataDisplay isLoading={false} data={null} error="Failed to load" />)

    expect(screen.getByText(/failed to load/i)).toBeInTheDocument()
  })

  it('should show data when loaded', () => {
    render(<DataDisplay isLoading={false} data={{ name: 'Test' }} />)

    expect(screen.getByText('Test')).toBeInTheDocument()
  })

  it('should show empty state when no data', () => {
    render(<DataDisplay isLoading={false} data={[]} />)

    expect(screen.getByText(/no data/i)).toBeInTheDocument()
  })
})
```

## List Rendering Testing

```typescript
describe('ItemList', () => {
  const items = [
    { id: '1', name: 'Item 1' },
    { id: '2', name: 'Item 2' },
    { id: '3', name: 'Item 3' },
  ]

  it('should render all items', () => {
    render(<ItemList items={items} />)

    expect(screen.getAllByRole('listitem')).toHaveLength(3)
    items.forEach(item => {
      expect(screen.getByText(item.name)).toBeInTheDocument()
    })
  })

  it('should handle item selection', async () => {
    const user = userEvent.setup()
    const onSelect = jest.fn()

    render(<ItemList items={items} onSelect={onSelect} />)

    await user.click(screen.getByText('Item 2'))

    expect(onSelect).toHaveBeenCalledWith(items[1])
  })

  it('should handle empty list', () => {
    render(<ItemList items={[]} />)

    expect(screen.getByText(/no items/i)).toBeInTheDocument()
  })
})
```

## Modal/Dialog Testing

```typescript
describe('Modal', () => {
  it('should not render when closed', () => {
    render(<Modal isOpen={false} onClose={jest.fn()} />)

    expect(screen.queryByRole('dialog')).not.toBeInTheDocument()
  })

  it('should render when open', () => {
    render(<Modal isOpen={true} onClose={jest.fn()} />)

    expect(screen.getByRole('dialog')).toBeInTheDocument()
  })

  it('should call onClose when clicking overlay', async () => {
    const user = userEvent.setup()
    const handleClose = jest.fn()

    render(<Modal isOpen={true} onClose={handleClose} />)

    await user.click(screen.getByTestId('modal-overlay'))

    expect(handleClose).toHaveBeenCalled()
  })

  it('should call onClose when pressing Escape', async () => {
    const user = userEvent.setup()
    const handleClose = jest.fn()

    render(<Modal isOpen={true} onClose={handleClose} />)

    await user.keyboard('{Escape}')

    expect(handleClose).toHaveBeenCalled()
  })

  it('should trap focus inside modal', async () => {
    const user = userEvent.setup()

    render(
      <Modal isOpen={true} onClose={jest.fn()}>
        <button>First</button>
        <button>Second</button>
      </Modal>
    )

    // Focus should cycle within modal
    await user.tab()
    expect(screen.getByText('First')).toHaveFocus()

    await user.tab()
    expect(screen.getByText('Second')).toHaveFocus()

    await user.tab()
    expect(screen.getByText('First')).toHaveFocus() // Cycles back
  })
})
```

## Form Testing

```typescript
describe('LoginForm', () => {
  it('should submit valid form', async () => {
    const user = userEvent.setup()
    const onSubmit = jest.fn()

    render(<LoginForm onSubmit={onSubmit} />)

    await user.type(screen.getByLabelText(/email/i), 'test@example.com')
    await user.type(screen.getByLabelText(/password/i), 'password123')
    await user.click(screen.getByRole('button', { name: /sign in/i }))

    expect(onSubmit).toHaveBeenCalledWith({
      email: 'test@example.com',
      password: 'password123',
    })
  })

  it('should show validation errors', async () => {
    const user = userEvent.setup()

    render(<LoginForm onSubmit={jest.fn()} />)

    // Submit empty form
    await user.click(screen.getByRole('button', { name: /sign in/i }))

    expect(screen.getByText(/email is required/i)).toBeInTheDocument()
    expect(screen.getByText(/password is required/i)).toBeInTheDocument()
  })

  it('should validate email format', async () => {
    const user = userEvent.setup()

    render(<LoginForm onSubmit={jest.fn()} />)

    await user.type(screen.getByLabelText(/email/i), 'invalid-email')
    await user.click(screen.getByRole('button', { name: /sign in/i }))

    expect(screen.getByText(/invalid email/i)).toBeInTheDocument()
  })

  it('should disable submit button while submitting', async () => {
    const user = userEvent.setup()
    const onSubmit = jest.fn(() => new Promise(resolve => setTimeout(resolve, 100)))

    render(<LoginForm onSubmit={onSubmit} />)

    await user.type(screen.getByLabelText(/email/i), 'test@example.com')
    await user.type(screen.getByLabelText(/password/i), 'password123')
    await user.click(screen.getByRole('button', { name: /sign in/i }))

    expect(screen.getByRole('button', { name: /signing in/i })).toBeDisabled()

    await waitFor(() => {
      expect(screen.getByRole('button', { name: /sign in/i })).toBeEnabled()
    })
  })
})
```

## Data-Driven Tests with test.each

```typescript
describe('StatusBadge', () => {
  test.each([
    ['success', 'bg-green-500'],
    ['warning', 'bg-yellow-500'],
    ['error', 'bg-red-500'],
    ['info', 'bg-blue-500'],
  ])('should apply correct class for %s status', (status, expectedClass) => {
    render(<StatusBadge status={status} />)

    expect(screen.getByTestId('status-badge')).toHaveClass(expectedClass)
  })

  test.each([
    { input: null, expected: 'Unknown' },
    { input: undefined, expected: 'Unknown' },
    { input: '', expected: 'Unknown' },
    { input: 'invalid', expected: 'Unknown' },
  ])('should show "Unknown" for invalid input: $input', ({ input, expected }) => {
    render(<StatusBadge status={input} />)

    expect(screen.getByText(expected)).toBeInTheDocument()
  })
})
```

## Debugging Tips

```typescript
// Print entire DOM
screen.debug()

// Print specific element
screen.debug(screen.getByRole('button'))

// Log testing playground URL
screen.logTestingPlaygroundURL()

// Pretty print DOM
import { prettyDOM } from '@testing-library/react'
console.log(prettyDOM(screen.getByRole('dialog')))

// Check available roles
import { getRoles } from '@testing-library/react'
console.log(getRoles(container))
```

## Common Mistakes to Avoid

### ❌ Don't Use Implementation Details

```typescript
// Bad - testing implementation
expect(component.state.isOpen).toBe(true)
expect(wrapper.find('.internal-class').length).toBe(1)

// Good - testing behavior
expect(screen.getByRole('dialog')).toBeInTheDocument()
```

### ❌ Don't Forget Cleanup

```typescript
// Bad - may leak state between tests
it('test 1', () => {
  render(<Component />)
})

// Good - cleanup is automatic with RTL, but reset mocks
beforeEach(() => {
  jest.clearAllMocks()
})
```

### ❌ Don't Use Exact String Matching (Prefer Black-Box Assertions)

```typescript
// ❌ Bad - hardcoded strings are brittle
expect(screen.getByText('Submit Form')).toBeInTheDocument()
expect(screen.getByText('Loading...')).toBeInTheDocument()

// ✅ Good - role-based queries (most semantic)
expect(screen.getByRole('button', { name: /submit/i })).toBeInTheDocument()
expect(screen.getByRole('status')).toBeInTheDocument()

// ✅ Good - pattern matching (flexible)
expect(screen.getByText(/submit/i)).toBeInTheDocument()
expect(screen.getByText(/loading/i)).toBeInTheDocument()

// ✅ Good - test behavior, not exact UI text
expect(screen.getByRole('button')).toBeDisabled()
expect(screen.getByRole('alert')).toBeInTheDocument()
```

**Why prefer black-box assertions?**

- Text content may change (i18n, copy updates)
- Role-based queries test accessibility
- Pattern matching is resilient to minor changes
- Tests focus on behavior, not implementation details

### ❌ Don't Assert on Absence Without Query

```typescript
// Bad - throws if not found
expect(screen.getByText('Error')).not.toBeInTheDocument() // Error!

// Good - use queryBy for absence assertions
expect(screen.queryByText('Error')).not.toBeInTheDocument()
```
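
For elements that disappear asynchronously, `waitForElementToBeRemoved` pairs naturally with `queryBy*` (a minimal sketch, assuming a loading indicator that is removed once data arrives):

```typescript
import { waitForElementToBeRemoved } from '@testing-library/react'

// Resolves once the matched element leaves the DOM; fails if it never does
await waitForElementToBeRemoved(() => screen.queryByText(/loading/i))
```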
.claude/skills/frontend-testing/guides/domain-components.md (new file, 523 lines)
@@ -0,0 +1,523 @@
# Domain-Specific Component Testing

This guide covers testing patterns for Dify's domain-specific components.

## Workflow Components (`workflow/`)

Workflow components handle node configuration, data flow, and graph operations.

### Key Test Areas

1. **Node Configuration**
1. **Data Validation**
1. **Variable Passing**
1. **Edge Connections**
1. **Error Handling**

### Example: Node Configuration Panel

```typescript
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import NodeConfigPanel from './node-config-panel'
import { createMockNode, createMockWorkflowContext } from '@/__mocks__/workflow'

// Mock workflow context
jest.mock('@/app/components/workflow/hooks', () => ({
  useWorkflowStore: () => mockWorkflowStore,
  useNodesInteractions: () => mockNodesInteractions,
}))

let mockWorkflowStore = {
  nodes: [],
  edges: [],
  updateNode: jest.fn(),
}

let mockNodesInteractions = {
  handleNodeSelect: jest.fn(),
  handleNodeDelete: jest.fn(),
}

describe('NodeConfigPanel', () => {
  beforeEach(() => {
    jest.clearAllMocks()
    mockWorkflowStore = {
      nodes: [],
      edges: [],
      updateNode: jest.fn(),
    }
  })

  describe('Node Configuration', () => {
    it('should render node type selector', () => {
      const node = createMockNode({ type: 'llm' })
      render(<NodeConfigPanel node={node} />)

      expect(screen.getByLabelText(/model/i)).toBeInTheDocument()
    })

    it('should update node config on change', async () => {
      const user = userEvent.setup()
      const node = createMockNode({ type: 'llm' })

      render(<NodeConfigPanel node={node} />)

      await user.selectOptions(screen.getByLabelText(/model/i), 'gpt-4')

      expect(mockWorkflowStore.updateNode).toHaveBeenCalledWith(
        node.id,
        expect.objectContaining({ model: 'gpt-4' })
      )
    })
  })

  describe('Data Validation', () => {
    it('should show error for invalid input', async () => {
      const user = userEvent.setup()
      const node = createMockNode({ type: 'code' })

      render(<NodeConfigPanel node={node} />)

      // Enter invalid code ("{{" escapes a literal "{" in userEvent.type)
      const codeInput = screen.getByLabelText(/code/i)
      await user.clear(codeInput)
      await user.type(codeInput, 'invalid syntax {{{{{{')

      await waitFor(() => {
        expect(screen.getByText(/syntax error/i)).toBeInTheDocument()
      })
    })

    it('should validate required fields', async () => {
      const node = createMockNode({ type: 'http', data: { url: '' } })

      render(<NodeConfigPanel node={node} />)

      fireEvent.click(screen.getByRole('button', { name: /save/i }))

      await waitFor(() => {
        expect(screen.getByText(/url is required/i)).toBeInTheDocument()
      })
    })
  })

  describe('Variable Passing', () => {
    it('should display available variables from upstream nodes', () => {
      const upstreamNode = createMockNode({
        id: 'node-1',
        type: 'start',
        data: { outputs: [{ name: 'user_input', type: 'string' }] },
      })
      const currentNode = createMockNode({
        id: 'node-2',
        type: 'llm',
      })

      mockWorkflowStore.nodes = [upstreamNode, currentNode]
      mockWorkflowStore.edges = [{ source: 'node-1', target: 'node-2' }]

      render(<NodeConfigPanel node={currentNode} />)

      // Variable selector should show upstream variables
      fireEvent.click(screen.getByRole('button', { name: /add variable/i }))

      expect(screen.getByText('user_input')).toBeInTheDocument()
    })

    it('should insert variable into prompt template', async () => {
      const user = userEvent.setup()
      const node = createMockNode({ type: 'llm' })

      render(<NodeConfigPanel node={node} />)

      // Click variable button
      await user.click(screen.getByRole('button', { name: /insert variable/i }))
      await user.click(screen.getByText('user_input'))

      const promptInput = screen.getByLabelText(/prompt/i)
      expect(promptInput).toHaveValue(expect.stringContaining('{{user_input}}'))
    })
  })
})
```

## Dataset Components (`dataset/`)

Dataset components handle file uploads, data display, and search/filter operations.

### Key Test Areas

1. **File Upload**
1. **File Type Validation**
1. **Pagination**
1. **Search & Filtering**
1. **Data Format Handling**

### Example: Document Uploader

```typescript
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import DocumentUploader from './document-uploader'

jest.mock('@/service/datasets', () => ({
  uploadDocument: jest.fn(),
  parseDocument: jest.fn(),
}))

import * as datasetService from '@/service/datasets'
const mockedService = datasetService as jest.Mocked<typeof datasetService>

describe('DocumentUploader', () => {
  beforeEach(() => {
    jest.clearAllMocks()
  })

  describe('File Upload', () => {
    it('should accept valid file types', async () => {
      const user = userEvent.setup()
      const onUpload = jest.fn()
      mockedService.uploadDocument.mockResolvedValue({ id: 'doc-1' })

      render(<DocumentUploader onUpload={onUpload} />)

      const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
      const input = screen.getByLabelText(/upload/i)

      await user.upload(input, file)

      await waitFor(() => {
        expect(mockedService.uploadDocument).toHaveBeenCalledWith(
          expect.any(FormData)
        )
      })
    })

    it('should reject invalid file types', async () => {
      const user = userEvent.setup()

      render(<DocumentUploader />)

      const file = new File(['content'], 'test.exe', { type: 'application/x-msdownload' })
      const input = screen.getByLabelText(/upload/i)

      await user.upload(input, file)

      expect(screen.getByText(/unsupported file type/i)).toBeInTheDocument()
      expect(mockedService.uploadDocument).not.toHaveBeenCalled()
    })

    it('should show upload progress', async () => {
      const user = userEvent.setup()

      // Mock upload with progress
      mockedService.uploadDocument.mockImplementation(() => {
        return new Promise((resolve) => {
          setTimeout(() => resolve({ id: 'doc-1' }), 100)
        })
      })

      render(<DocumentUploader />)

      const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
      await user.upload(screen.getByLabelText(/upload/i), file)

      expect(screen.getByRole('progressbar')).toBeInTheDocument()

      await waitFor(() => {
        expect(screen.queryByRole('progressbar')).not.toBeInTheDocument()
      })
    })
  })

  describe('Error Handling', () => {
    it('should handle upload failure', async () => {
      const user = userEvent.setup()
      mockedService.uploadDocument.mockRejectedValue(new Error('Upload failed'))

      render(<DocumentUploader />)

      const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
      await user.upload(screen.getByLabelText(/upload/i), file)

      await waitFor(() => {
        expect(screen.getByText(/upload failed/i)).toBeInTheDocument()
      })
    })

    it('should allow retry after failure', async () => {
      const user = userEvent.setup()
      mockedService.uploadDocument
        .mockRejectedValueOnce(new Error('Network error'))
        .mockResolvedValueOnce({ id: 'doc-1' })

      render(<DocumentUploader />)

      const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
      await user.upload(screen.getByLabelText(/upload/i), file)

      await waitFor(() => {
        expect(screen.getByRole('button', { name: /retry/i })).toBeInTheDocument()
      })

      await user.click(screen.getByRole('button', { name: /retry/i }))

      await waitFor(() => {
        expect(screen.getByText(/uploaded successfully/i)).toBeInTheDocument()
      })
    })
  })
})
```

### Example: Document List with Pagination

```typescript
describe('DocumentList', () => {
  describe('Pagination', () => {
    it('should load first page on mount', async () => {
      mockedService.getDocuments.mockResolvedValue({
        data: [{ id: '1', name: 'Doc 1' }],
        total: 50,
        page: 1,
        pageSize: 10,
      })

      render(<DocumentList datasetId="ds-1" />)

      await waitFor(() => {
        expect(screen.getByText('Doc 1')).toBeInTheDocument()
      })

      expect(mockedService.getDocuments).toHaveBeenCalledWith('ds-1', { page: 1 })
    })

    it('should navigate to next page', async () => {
      const user = userEvent.setup()
      mockedService.getDocuments.mockResolvedValue({
        data: [{ id: '1', name: 'Doc 1' }],
        total: 50,
        page: 1,
        pageSize: 10,
      })

      render(<DocumentList datasetId="ds-1" />)

      await waitFor(() => {
        expect(screen.getByText('Doc 1')).toBeInTheDocument()
      })

      mockedService.getDocuments.mockResolvedValue({
        data: [{ id: '11', name: 'Doc 11' }],
        total: 50,
        page: 2,
        pageSize: 10,
      })

      await user.click(screen.getByRole('button', { name: /next/i }))

      await waitFor(() => {
        expect(screen.getByText('Doc 11')).toBeInTheDocument()
      })
    })
  })

  describe('Search & Filtering', () => {
    it('should filter by search query', async () => {
      jest.useFakeTimers()
      // userEvent must be told to advance fake timers, or typing will hang
      const user = userEvent.setup({ advanceTimers: jest.advanceTimersByTime })

      render(<DocumentList datasetId="ds-1" />)

      await user.type(screen.getByPlaceholderText(/search/i), 'test query')

      // Debounce
      jest.advanceTimersByTime(300)

      await waitFor(() => {
        expect(mockedService.getDocuments).toHaveBeenCalledWith(
          'ds-1',
          expect.objectContaining({ search: 'test query' })
        )
      })

      jest.useRealTimers()
    })
  })
})
```

## Configuration Components (`app/configuration/`, `config/`)

Configuration components handle forms, validation, and data persistence.

### Key Test Areas

1. **Form Validation**
1. **Save/Reset**
1. **Required vs Optional Fields**
1. **Configuration Persistence**
1. **Error Feedback**

### Example: App Configuration Form

```typescript
import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import AppConfigForm from './app-config-form'

jest.mock('@/service/apps', () => ({
  updateAppConfig: jest.fn(),
  getAppConfig: jest.fn(),
}))

import * as appService from '@/service/apps'
const mockedService = appService as jest.Mocked<typeof appService>

describe('AppConfigForm', () => {
  const defaultConfig = {
    name: 'My App',
    description: '',
    icon: 'default',
    openingStatement: '',
  }

  beforeEach(() => {
    jest.clearAllMocks()
    mockedService.getAppConfig.mockResolvedValue(defaultConfig)
  })

  describe('Form Validation', () => {
    it('should require app name', async () => {
      const user = userEvent.setup()

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      // Clear name field
      await user.clear(screen.getByLabelText(/name/i))
      await user.click(screen.getByRole('button', { name: /save/i }))

      expect(screen.getByText(/name is required/i)).toBeInTheDocument()
      expect(mockedService.updateAppConfig).not.toHaveBeenCalled()
    })

    it('should validate name length', async () => {
      const user = userEvent.setup()

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toBeInTheDocument()
      })

      // Enter very long name
      await user.clear(screen.getByLabelText(/name/i))
      await user.type(screen.getByLabelText(/name/i), 'a'.repeat(101))

      expect(screen.getByText(/name must be less than 100 characters/i)).toBeInTheDocument()
    })

    it('should allow empty optional fields', async () => {
      const user = userEvent.setup()
      mockedService.updateAppConfig.mockResolvedValue({ success: true })

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      // Leave description empty (optional)
      await user.click(screen.getByRole('button', { name: /save/i }))

      await waitFor(() => {
        expect(mockedService.updateAppConfig).toHaveBeenCalled()
      })
    })
  })

  describe('Save/Reset Functionality', () => {
    it('should save configuration', async () => {
      const user = userEvent.setup()
      mockedService.updateAppConfig.mockResolvedValue({ success: true })

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      await user.clear(screen.getByLabelText(/name/i))
      await user.type(screen.getByLabelText(/name/i), 'Updated App')
      await user.click(screen.getByRole('button', { name: /save/i }))

      await waitFor(() => {
        expect(mockedService.updateAppConfig).toHaveBeenCalledWith(
          'app-1',
          expect.objectContaining({ name: 'Updated App' })
        )
      })

      expect(screen.getByText(/saved successfully/i)).toBeInTheDocument()
    })

    it('should reset to default values', async () => {
      const user = userEvent.setup()

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      // Make changes
      await user.clear(screen.getByLabelText(/name/i))
      await user.type(screen.getByLabelText(/name/i), 'Changed Name')

      // Reset
      await user.click(screen.getByRole('button', { name: /reset/i }))

      expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
    })

    it('should show unsaved changes warning', async () => {
      const user = userEvent.setup()

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      // Make changes
      await user.type(screen.getByLabelText(/name/i), ' Updated')

      expect(screen.getByText(/unsaved changes/i)).toBeInTheDocument()
    })
  })

  describe('Error Handling', () => {
    it('should show error on save failure', async () => {
      const user = userEvent.setup()
      mockedService.updateAppConfig.mockRejectedValue(new Error('Server error'))

      render(<AppConfigForm appId="app-1" />)

      await waitFor(() => {
        expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
      })

      await user.click(screen.getByRole('button', { name: /save/i }))

      await waitFor(() => {
        expect(screen.getByText(/failed to save/i)).toBeInTheDocument()
      })
    })
  })
})
```
.claude/skills/frontend-testing/guides/mocking.md (new file, 363 lines)
@@ -0,0 +1,363 @@
# Mocking Guide for Dify Frontend Tests

## ⚠️ Important: What NOT to Mock

### DO NOT Mock Base Components

**Never mock components from `@/app/components/base/`** such as:

- `Loading`, `Spinner`
- `Button`, `Input`, `Select`
- `Tooltip`, `Modal`, `Dropdown`
- `Icon`, `Badge`, `Tag`

**Why?**

- Base components will have their own dedicated tests
- Mocking them creates false positives (tests pass but real integration fails)
- Using real components tests actual integration behavior

```typescript
// ❌ WRONG: Don't mock base components
jest.mock('@/app/components/base/loading', () => () => <div>Loading</div>)
jest.mock('@/app/components/base/button', () => ({ children }: any) => <button>{children}</button>)

// ✅ CORRECT: Import and use real base components
import Loading from '@/app/components/base/loading'
import Button from '@/app/components/base/button'
// They will render normally in tests
```
### What TO Mock

Only mock these categories:

1. **API services** (`@/service/*`) - Network calls
1. **Complex context providers** - When setup is too difficult
1. **Third-party libraries with side effects** - `next/navigation`, external SDKs
1. **i18n** - Always mock to return keys

## Mock Placement

| Location | Purpose |
|----------|---------|
| `web/__mocks__/` | Reusable mocks shared across multiple test files |
| Test file | Test-specific mocks, inline with `jest.mock()` |
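
As a minimal sketch of the two placements (the `createMockDoc` factory and the `@/service/demo` module here are hypothetical, purely for illustration):

```typescript
// web/__mocks__/data-factories.ts - shared, imported by many test files
export const createMockDoc = (overrides: Record<string, unknown> = {}) => ({
  id: 'doc-1',
  name: 'Test Doc',
  ...overrides,
})

// some-component.spec.tsx - test-specific, declared inline next to the tests
jest.mock('@/service/demo', () => ({
  fetchDemo: jest.fn(),
}))
```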
## Essential Mocks

### 1. i18n (Auto-loaded via Shared Mock)

A shared mock is available at `web/__mocks__/react-i18next.ts` and is auto-loaded by Jest.
**No explicit mock needed** for most tests - it returns translation keys as-is.

For tests requiring custom translations, override the mock:

```typescript
jest.mock('react-i18next', () => ({
  useTranslation: () => ({
    t: (key: string) => {
      const translations: Record<string, string> = {
        'my.custom.key': 'Custom translation',
      }
      return translations[key] || key
    },
  }),
}))
```
### 2. Next.js Router

```typescript
const mockPush = jest.fn()
const mockReplace = jest.fn()

jest.mock('next/navigation', () => ({
  useRouter: () => ({
    push: mockPush,
    replace: mockReplace,
    back: jest.fn(),
    prefetch: jest.fn(),
  }),
  usePathname: () => '/current-path',
  useSearchParams: () => new URLSearchParams('?key=value'),
}))

describe('Component', () => {
  beforeEach(() => {
    jest.clearAllMocks()
  })

  it('should navigate on click', () => {
    render(<Component />)
    fireEvent.click(screen.getByRole('button'))
    expect(mockPush).toHaveBeenCalledWith('/expected-path')
  })
})
```
### 3. Portal Components (with Shared State)

```typescript
// ⚠️ Important: Use shared state for components that depend on each other
let mockPortalOpenState = false

jest.mock('@/app/components/base/portal-to-follow-elem', () => ({
  PortalToFollowElem: ({ children, open, ...props }: any) => {
    mockPortalOpenState = open || false // Update shared state
    return <div data-testid="portal" data-open={open}>{children}</div>
  },
  PortalToFollowElemContent: ({ children }: any) => {
    // ✅ Matches actual: returns null when portal is closed
    if (!mockPortalOpenState) return null
    return <div data-testid="portal-content">{children}</div>
  },
  PortalToFollowElemTrigger: ({ children }: any) => (
    <div data-testid="portal-trigger">{children}</div>
  ),
}))

describe('Component', () => {
  beforeEach(() => {
    jest.clearAllMocks()
    mockPortalOpenState = false // ✅ Reset shared state
  })
})
```
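
A usage sketch for the mock above (`Dropdown` is a hypothetical component that forwards its open state to `PortalToFollowElem`):

```typescript
it('should reveal portal content when opened', async () => {
  const user = userEvent.setup()
  render(<Dropdown />)

  // Closed: the content mock returns null, so nothing renders
  expect(screen.queryByTestId('portal-content')).not.toBeInTheDocument()

  // Opening re-renders PortalToFollowElem with open=true,
  // which updates the shared mockPortalOpenState
  await user.click(screen.getByRole('button', { name: /open/i }))
  expect(screen.getByTestId('portal-content')).toBeInTheDocument()
})
```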

### 4. API Service Mocks

```typescript
import * as api from '@/service/api'

jest.mock('@/service/api')

const mockedApi = api as jest.Mocked<typeof api>

describe('Component', () => {
  beforeEach(() => {
    jest.clearAllMocks()

    // Setup default mock implementation
    mockedApi.fetchData.mockResolvedValue({ data: [] })
  })

  it('should show data on success', async () => {
    mockedApi.fetchData.mockResolvedValue({ data: [{ id: 1 }] })

    render(<Component />)

    await waitFor(() => {
      expect(screen.getByText('1')).toBeInTheDocument()
    })
  })

  it('should show error on failure', async () => {
    mockedApi.fetchData.mockRejectedValue(new Error('Network error'))

    render(<Component />)

    await waitFor(() => {
      expect(screen.getByText(/error/i)).toBeInTheDocument()
    })
  })
})
```

### 5. HTTP Mocking with Nock

```typescript
import nock from 'nock'

const GITHUB_HOST = 'https://api.github.com'
const GITHUB_PATH = '/repos/owner/repo'

const mockGithubApi = (status: number, body: Record<string, unknown>, delayMs = 0) => {
  return nock(GITHUB_HOST)
    .get(GITHUB_PATH)
    .delay(delayMs)
    .reply(status, body)
}

describe('GithubComponent', () => {
  afterEach(() => {
    nock.cleanAll()
  })

  it('should display repo info', async () => {
    mockGithubApi(200, { name: 'dify', stars: 1000 })

    render(<GithubComponent />)

    await waitFor(() => {
      expect(screen.getByText('dify')).toBeInTheDocument()
    })
  })

  it('should handle API error', async () => {
    mockGithubApi(500, { message: 'Server error' })

    render(<GithubComponent />)

    await waitFor(() => {
      expect(screen.getByText(/error/i)).toBeInTheDocument()
    })
  })
})
```

### 6. Context Providers

```typescript
import { ProviderContext } from '@/context/provider-context'
import { createMockProviderContextValue, createMockPlan } from '@/__mocks__/provider-context'

describe('Component with Context', () => {
  it('should render for free plan', () => {
    const mockContext = createMockPlan('sandbox')

    render(
      <ProviderContext.Provider value={mockContext}>
        <Component />
      </ProviderContext.Provider>
    )

    expect(screen.getByText('Upgrade')).toBeInTheDocument()
  })

  it('should render for pro plan', () => {
    const mockContext = createMockPlan('professional')

    render(
      <ProviderContext.Provider value={mockContext}>
        <Component />
      </ProviderContext.Provider>
    )

    expect(screen.queryByText('Upgrade')).not.toBeInTheDocument()
  })
})
```

### 7. SWR / React Query

```typescript
// SWR
jest.mock('swr', () => ({
  __esModule: true,
  default: jest.fn(),
}))

import useSWR from 'swr'
const mockedUseSWR = useSWR as jest.Mock

describe('Component with SWR', () => {
  it('should show loading state', () => {
    mockedUseSWR.mockReturnValue({
      data: undefined,
      error: undefined,
      isLoading: true,
    })

    render(<Component />)
    expect(screen.getByText(/loading/i)).toBeInTheDocument()
  })
})

// React Query
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'

const createTestQueryClient = () => new QueryClient({
  defaultOptions: {
    queries: { retry: false },
    mutations: { retry: false },
  },
})

const renderWithQueryClient = (ui: React.ReactElement) => {
  const queryClient = createTestQueryClient()
  return render(
    <QueryClientProvider client={queryClient}>
      {ui}
    </QueryClientProvider>
  )
}
```
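
A usage sketch for `renderWithQueryClient` (assuming a hypothetical `Component` that loads data through React Query, with the service mocked as in section 4):

```typescript
it('should render items fetched via React Query', async () => {
  mockedApi.fetchData.mockResolvedValue({ items: ['Item 1'] })

  renderWithQueryClient(<Component />)

  await waitFor(() => {
    expect(screen.getByText('Item 1')).toBeInTheDocument()
  })
})
```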

## Mock Best Practices

### ✅ DO

1. **Use real base components** - Import from `@/app/components/base/` directly
1. **Use real project components** - Prefer importing over mocking
1. **Reset mocks in `beforeEach`**, not `afterEach`
1. **Match actual component behavior** in mocks (when mocking is necessary)
1. **Use factory functions** for complex mock data
1. **Import actual types** for type safety
1. **Reset shared mock state** in `beforeEach`

### ❌ DON'T

1. **Don't mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
1. Don't mock components you can import directly
1. Don't create overly simplified mocks that miss conditional logic
1. Don't forget to clean up nock after each test
1. Don't use `any` types in mocks without necessity

### Mock Decision Tree

```
Need to use a component in test?
│
├─ Is it from @/app/components/base/*?
│   └─ YES → Import real component, DO NOT mock
│
├─ Is it a project component?
│   └─ YES → Prefer importing real component
│            Only mock if setup is extremely complex
│
├─ Is it an API service (@/service/*)?
│   └─ YES → Mock it
│
├─ Is it a third-party lib with side effects?
│   └─ YES → Mock it (next/navigation, external SDKs)
│
└─ Is it i18n?
    └─ YES → Uses shared mock (auto-loaded). Override only for custom translations
```

## Factory Function Pattern

```typescript
// __mocks__/data-factories.ts
import type { User, Project } from '@/types'

export const createMockUser = (overrides: Partial<User> = {}): User => ({
  id: 'user-1',
  name: 'Test User',
  email: 'test@example.com',
  role: 'member',
  createdAt: new Date().toISOString(),
  ...overrides,
})

export const createMockProject = (overrides: Partial<Project> = {}): Project => ({
  id: 'project-1',
  name: 'Test Project',
  description: 'A test project',
  owner: createMockUser(),
  members: [],
  createdAt: new Date().toISOString(),
  ...overrides,
})

// Usage in tests
it('should display project owner', () => {
  const project = createMockProject({
    owner: createMockUser({ name: 'John Doe' }),
  })

  render(<ProjectCard project={project} />)
  expect(screen.getByText('John Doe')).toBeInTheDocument()
})
```
.claude/skills/frontend-testing/guides/workflow.md (new file, 269 lines)
@@ -0,0 +1,269 @@
# Testing Workflow Guide

This guide defines the workflow for generating tests, especially for complex components or directories with multiple files.

## Scope Clarification

This guide addresses **multi-file workflow** (how to process multiple test files). For coverage requirements within a single test file, see `web/testing/testing.md` § Coverage Goals.

| Scope | Rule |
|-------|------|
| **Single file** | Complete coverage in one generation (100% function, >95% branch) |
| **Multi-file directory** | Process one file at a time, verify each before proceeding |

## ⚠️ Critical Rule: Incremental Approach for Multi-File Testing

When testing a **directory with multiple files**, **NEVER generate all test files at once.** Use an incremental, verify-as-you-go approach.

### Why Incremental?

| Batch Approach (❌) | Incremental Approach (✅) |
|---------------------|---------------------------|
| Generate 5+ tests at once | Generate 1 test at a time |
| Run tests only at the end | Run test immediately after each file |
| Multiple failures compound | Single point of failure, easy to debug |
| Hard to identify root cause | Clear cause-effect relationship |
| Mock issues affect many files | Mock issues caught early |
| Messy git history | Clean, atomic commits possible |

## Single File Workflow

When testing a **single component, hook, or utility**:

```
1. Read source code completely
2. Run `pnpm analyze-component <path>` (if available)
3. Check complexity score and features detected
4. Write the test file
5. Run test: `pnpm test -- <file>.spec.tsx`
6. Fix any failures
7. Verify coverage meets goals (100% function, >95% branch)
```

## Directory/Multi-File Workflow (MUST FOLLOW)

When testing a **directory or multiple files**, follow this strict workflow:

### Step 1: Analyze and Plan

1. **List all files** that need tests in the directory
1. **Categorize by complexity**:
   - 🟢 **Simple**: Utility functions, simple hooks, presentational components
   - 🟡 **Medium**: Components with state, effects, or event handlers
   - 🔴 **Complex**: Components with API calls, routing, or many dependencies
1. **Order by dependency**: Test dependencies before dependents
1. **Create a todo list** to track progress

### Step 2: Determine Processing Order

Process files in this recommended order:

```
1. Utility functions (simplest, no React)
2. Custom hooks (isolated logic)
3. Simple presentational components (few/no props)
4. Medium complexity components (state, effects)
5. Complex components (API, routing, many deps)
6. Container/index components (integration tests - last)
```

**Rationale**:

- Simpler files help establish mock patterns
- Hooks used by components should be tested first
- Integration tests (index files) depend on child components working

### Step 3: Process Each File Incrementally

**For EACH file in the ordered list:**

```
┌─────────────────────────────────────────────┐
│ 1. Write test file                          │
│ 2. Run: pnpm test -- <file>.spec.tsx        │
│ 3. If FAIL → Fix immediately, re-run        │
│ 4. If PASS → Mark complete in todo list     │
│ 5. ONLY THEN proceed to next file           │
└─────────────────────────────────────────────┘
```

**DO NOT proceed to the next file until the current one passes.**

### Step 4: Final Verification

After all individual tests pass:

```bash
# Run all tests in the directory together
pnpm test -- path/to/directory/

# Check coverage
pnpm test -- --coverage path/to/directory/
```

## Component Complexity Guidelines

Use `pnpm analyze-component <path>` to assess complexity before testing.

### 🔴 Very Complex Components (Complexity > 50)

**Consider refactoring BEFORE testing:**

- Break component into smaller, testable pieces
- Extract complex logic into custom hooks
- Separate container and presentational layers

**If testing as-is:**

- Use integration tests for complex workflows
- Use `test.each()` for data-driven testing
- Multiple `describe` blocks for organization
- Consider testing major sections separately

### 🟡 Medium Complexity (Complexity 30-50)

- Group related tests in `describe` blocks
- Test integration scenarios between internal parts
- Focus on state transitions and side effects
- Use helper functions to reduce test complexity

### 🟢 Simple Components (Complexity < 30)

- Standard test structure
- Focus on props, rendering, and edge cases
- Usually straightforward to test

### 📏 Large Files (500+ lines)

Regardless of complexity score:

- **Strongly consider refactoring** before testing
- If testing as-is, test major sections separately
- Create helper functions for test setup
- May need multiple test files
## Todo List Format

When testing multiple files, use a todo list like this:

```
Testing: path/to/directory/

Ordered by complexity (simple → complex):

☐ utils/helper.ts           [utility, simple]
☐ hooks/use-custom-hook.ts  [hook, simple]
☐ empty-state.tsx           [component, simple]
☐ item-card.tsx             [component, medium]
☐ list.tsx                  [component, complex]
☐ index.tsx                 [integration]

Progress: 0/6 complete
```

Update status as you complete each:

- ☐ → ⏳ (in progress)
- ⏳ → ✅ (complete and verified)
- ⏳ → ❌ (blocked, needs attention)

## When to Stop and Verify

**Always run tests after:**

- Completing a test file
- Making changes to fix a failure
- Modifying shared mocks
- Updating test utilities or helpers

**Signs you should pause:**

- More than 2 consecutive test failures
- Mock-related errors appearing
- Unclear why a test is failing
- Test passing but coverage unexpectedly low

## Common Pitfalls to Avoid

### ❌ Don't: Generate Everything First

```
# BAD: Writing all files then testing
Write component-a.spec.tsx
Write component-b.spec.tsx
Write component-c.spec.tsx
Write component-d.spec.tsx
Run pnpm test  ← Multiple failures, hard to debug
```

### ✅ Do: Verify Each Step

```
# GOOD: Incremental with verification
Write component-a.spec.tsx
Run pnpm test -- component-a.spec.tsx ✅
Write component-b.spec.tsx
Run pnpm test -- component-b.spec.tsx ✅
...continue...
```

### ❌ Don't: Skip Verification for "Simple" Components

Even simple components can have:

- Import errors
- Missing mock setup
- Incorrect assumptions about props

**Always verify, regardless of perceived simplicity.**

### ❌ Don't: Continue When Tests Fail

Failing tests compound:

- A mock issue in file A affects files B, C, D
- Fixing A later requires revisiting all dependent tests
- Time wasted on debugging cascading failures

**Fix failures immediately before proceeding.**

## Integration with Claude's Todo Feature

When using Claude for multi-file testing:

1. **Ask Claude to create a todo list** before starting
1. **Request one file at a time** or ensure Claude processes incrementally
1. **Verify each test passes** before asking for the next
1. **Mark todos complete** as you progress

Example prompt:

```
Test all components in `path/to/directory/`.
First, analyze the directory and create a todo list ordered by complexity.
Then, process ONE file at a time, waiting for my confirmation that tests pass
before proceeding to the next.
```

## Summary Checklist

Before starting multi-file testing:

- [ ] Listed all files needing tests
- [ ] Ordered by complexity (simple → complex)
- [ ] Created todo list for tracking
- [ ] Understand dependencies between files

During testing:

- [ ] Processing ONE file at a time
- [ ] Running tests after EACH file
- [ ] Fixing failures BEFORE proceeding
- [ ] Updating todo list progress

After completion:

- [ ] All individual tests pass
- [ ] Full directory test run passes
- [ ] Coverage goals met
- [ ] Todo list shows all complete
@@ -0,0 +1,296 @@
/**
 * Test Template for React Components
 *
 * WHY THIS STRUCTURE?
 * - Organized sections make tests easy to navigate and maintain
 * - Mocks at top ensure consistent test isolation
 * - Factory functions reduce duplication and improve readability
 * - describe blocks group related scenarios for better debugging
 *
 * INSTRUCTIONS:
 * 1. Replace `ComponentName` with your component name
 * 2. Update import path
 * 3. Add/remove test sections based on component features (use analyze-component)
 * 4. Follow AAA pattern: Arrange → Act → Assert
 *
 * RUN FIRST: pnpm analyze-component <path> to identify required test scenarios
 */

import { render, screen, fireEvent, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
// import ComponentName from './index'

// ============================================================================
// Mocks
// ============================================================================
// WHY: Mocks must be hoisted to top of file (Jest requirement).
// They run BEFORE imports, so keep them before component imports.

// i18n (automatically mocked)
// WHY: Shared mock at web/__mocks__/react-i18next.ts is auto-loaded by Jest
// No explicit mock needed - it returns translation keys as-is
// Override only if custom translations are required:
// jest.mock('react-i18next', () => ({
//   useTranslation: () => ({
//     t: (key: string) => {
//       const customTranslations: Record<string, string> = {
//         'my.custom.key': 'Custom Translation',
//       }
//       return customTranslations[key] || key
//     },
//   }),
// }))

// Router (if component uses useRouter, usePathname, useSearchParams)
// WHY: Isolates tests from Next.js routing, enables testing navigation behavior
// const mockPush = jest.fn()
// jest.mock('next/navigation', () => ({
//   useRouter: () => ({ push: mockPush }),
//   usePathname: () => '/test-path',
// }))

// API services (if component fetches data)
// WHY: Prevents real network calls, enables testing all states (loading/success/error)
// jest.mock('@/service/api')
// import * as api from '@/service/api'
// const mockedApi = api as jest.Mocked<typeof api>

// Shared mock state (for portal/dropdown components)
// WHY: Portal components like PortalToFollowElem need shared state between
// parent and child mocks to correctly simulate open/close behavior
// let mockOpenState = false

// ============================================================================
// Test Data Factories
// ============================================================================
// WHY FACTORIES?
// - Avoid hard-coded test data scattered across tests
// - Easy to create variations with overrides
// - Type-safe when using actual types from source
// - Single source of truth for default test values

// const createMockProps = (overrides = {}) => ({
//   // Default props that make component render successfully
//   ...overrides,
// })

// const createMockItem = (overrides = {}) => ({
//   id: 'item-1',
//   name: 'Test Item',
//   ...overrides,
// })

// ============================================================================
// Test Helpers
// ============================================================================

// const renderComponent = (props = {}) => {
//   return render(<ComponentName {...createMockProps(props)} />)
// }

// ============================================================================
// Tests
// ============================================================================

describe('ComponentName', () => {
  // WHY beforeEach with clearAllMocks?
  // - Ensures each test starts with clean slate
  // - Prevents mock call history from leaking between tests
  // - MUST be beforeEach (not afterEach) to reset BEFORE assertions like toHaveBeenCalledTimes
  beforeEach(() => {
    jest.clearAllMocks()
    // Reset shared mock state if used (CRITICAL for portal/dropdown tests)
    // mockOpenState = false
  })

  // --------------------------------------------------------------------------
  // Rendering Tests (REQUIRED - Every component MUST have these)
  // --------------------------------------------------------------------------
  // WHY: Catches import errors, missing providers, and basic render issues
  describe('Rendering', () => {
    it('should render without crashing', () => {
      // Arrange - Setup data and mocks
      // const props = createMockProps()

      // Act - Render the component
      // render(<ComponentName {...props} />)

      // Assert - Verify expected output
      // Prefer getByRole for accessibility; it's what users "see"
      // expect(screen.getByRole('...')).toBeInTheDocument()
    })

    it('should render with default props', () => {
      // WHY: Verifies component works without optional props
      // render(<ComponentName />)
      // expect(screen.getByText('...')).toBeInTheDocument()
    })
  })

  // --------------------------------------------------------------------------
  // Props Tests (REQUIRED - Every component MUST test prop behavior)
  // --------------------------------------------------------------------------
  // WHY: Props are the component's API contract. Test them thoroughly.
  describe('Props', () => {
    it('should apply custom className', () => {
      // WHY: Common pattern in Dify - components should merge custom classes
      // render(<ComponentName className="custom-class" />)
      // expect(screen.getByTestId('component')).toHaveClass('custom-class')
    })

    it('should use default values for optional props', () => {
      // WHY: Verifies TypeScript defaults work at runtime
      // render(<ComponentName />)
      // expect(screen.getByRole('...')).toHaveAttribute('...', 'default-value')
    })
  })

  // --------------------------------------------------------------------------
  // User Interactions (if component has event handlers - on*, handle*)
  // --------------------------------------------------------------------------
  // WHY: Event handlers are core functionality. Test from user's perspective.
  describe('User Interactions', () => {
    it('should call onClick when clicked', async () => {
      // WHY userEvent over fireEvent?
      // - userEvent simulates real user behavior (focus, hover, then click)
      // - fireEvent is lower-level, doesn't trigger all browser events
      // const user = userEvent.setup()
      // const handleClick = jest.fn()
      // render(<ComponentName onClick={handleClick} />)
      //
      // await user.click(screen.getByRole('button'))
      //
      // expect(handleClick).toHaveBeenCalledTimes(1)
    })

    it('should call onChange when value changes', async () => {
      // const user = userEvent.setup()
      // const handleChange = jest.fn()
      // render(<ComponentName onChange={handleChange} />)
      //
      // await user.type(screen.getByRole('textbox'), 'new value')
      //
      // expect(handleChange).toHaveBeenCalled()
    })
  })

  // --------------------------------------------------------------------------
  // State Management (if component uses useState/useReducer)
  // --------------------------------------------------------------------------
  // WHY: Test state through observable UI changes, not internal state values
  describe('State Management', () => {
    it('should update state on interaction', async () => {
      // WHY test via UI, not state?
      // - State is implementation detail; UI is what users see
      // - If UI works correctly, state must be correct
      // const user = userEvent.setup()
      // render(<ComponentName />)
      //
      // // Initial state - verify what user sees
      // expect(screen.getByText('Initial')).toBeInTheDocument()
      //
      // // Trigger state change via user action
      // await user.click(screen.getByRole('button'))
      //
      // // New state - verify UI updated
      // expect(screen.getByText('Updated')).toBeInTheDocument()
    })
  })

  // --------------------------------------------------------------------------
  // Async Operations (if component fetches data - useSWR, useQuery, fetch)
  // --------------------------------------------------------------------------
  // WHY: Async operations have 3 states users experience: loading, success, error
  describe('Async Operations', () => {
    it('should show loading state', () => {
      // WHY never-resolving promise?
      // - Keeps component in loading state for assertion
      // - Alternative: use fake timers
      // mockedApi.fetchData.mockImplementation(() => new Promise(() => {}))
      // render(<ComponentName />)
      //
      // expect(screen.getByText(/loading/i)).toBeInTheDocument()
    })

    it('should show data on success', async () => {
      // WHY waitFor?
      // - Component updates asynchronously after fetch resolves
      // - waitFor retries assertion until it passes or times out
      // mockedApi.fetchData.mockResolvedValue({ items: ['Item 1'] })
      // render(<ComponentName />)
      //
      // await waitFor(() => {
      //   expect(screen.getByText('Item 1')).toBeInTheDocument()
      // })
    })

    it('should show error on failure', async () => {
      // mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
      // render(<ComponentName />)
      //
      // await waitFor(() => {
      //   expect(screen.getByText(/error/i)).toBeInTheDocument()
      // })
    })
  })

  // --------------------------------------------------------------------------
  // Edge Cases (REQUIRED - Every component MUST handle edge cases)
  // --------------------------------------------------------------------------
  // WHY: Real-world data is messy. Components must handle:
  // - Null/undefined from API failures or optional fields
  // - Empty arrays/strings from user clearing data
  // - Boundary values (0, MAX_INT, special characters)
  describe('Edge Cases', () => {
    it('should handle null value', () => {
      // WHY test null specifically?
      // - API might return null for missing data
      // - Prevents "Cannot read property of null" in production
      // render(<ComponentName value={null} />)
      // expect(screen.getByText(/no data/i)).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should handle undefined value', () => {
|
||||
// WHY test undefined separately from null?
|
||||
// - TypeScript treats them differently
|
||||
// - Optional props are undefined, not null
|
||||
// render(<ComponentName value={undefined} />)
|
||||
// expect(screen.getByText(/no data/i)).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should handle empty array', () => {
|
||||
// WHY: Empty state often needs special UI (e.g., "No items yet")
|
||||
// render(<ComponentName items={[]} />)
|
||||
// expect(screen.getByText(/empty/i)).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should handle empty string', () => {
|
||||
// WHY: Empty strings are truthy in JS but visually empty
|
||||
// render(<ComponentName text="" />)
|
||||
// expect(screen.getByText(/placeholder/i)).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Accessibility (optional but recommended for Dify's enterprise users)
|
||||
// --------------------------------------------------------------------------
|
||||
// WHY: Dify has enterprise customers who may require accessibility compliance
|
||||
describe('Accessibility', () => {
|
||||
it('should have accessible name', () => {
|
||||
// WHY getByRole with name?
|
||||
// - Tests that screen readers can identify the element
|
||||
// - Enforces proper labeling practices
|
||||
// render(<ComponentName label="Test Label" />)
|
||||
// expect(screen.getByRole('button', { name: /test label/i })).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should support keyboard navigation', async () => {
|
||||
// WHY: Some users can't use a mouse
|
||||
// const user = userEvent.setup()
|
||||
// render(<ComponentName />)
|
||||
//
|
||||
// await user.tab()
|
||||
// expect(screen.getByRole('button')).toHaveFocus()
|
||||
})
|
||||
})
|
||||
})
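For orientation, here is a minimal sketch of the template above filled in for a hypothetical `StatusBadge` component. The component name, its `label`/`onClick` props, the placeholder behavior, and the `./status-badge` import path are illustrative assumptions, not an existing Dify component; swap in your component's real API and queries.

import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
// Hypothetical component used only to illustrate the template sections above;
// it is assumed to render a button showing its label.
import StatusBadge from './status-badge'

describe('StatusBadge', () => {
  beforeEach(() => {
    jest.clearAllMocks()
  })

  // Rendering (REQUIRED)
  it('should render without crashing', () => {
    render(<StatusBadge label="Active" />)
    expect(screen.getByRole('button', { name: /active/i })).toBeInTheDocument()
  })

  // User Interactions
  it('should call onClick when clicked', async () => {
    const user = userEvent.setup()
    const handleClick = jest.fn()
    render(<StatusBadge label="Active" onClick={handleClick} />)
    await user.click(screen.getByRole('button', { name: /active/i }))
    expect(handleClick).toHaveBeenCalledTimes(1)
  })

  // Edge Cases (REQUIRED) - assumes the component falls back to placeholder text
  it('should fall back to a placeholder for an empty label', () => {
    render(<StatusBadge label="" />)
    expect(screen.getByText(/no status/i)).toBeInTheDocument()
  })
})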
.claude/skills/frontend-testing/templates/hook-test.template.ts (new file, 207 lines)
@@ -0,0 +1,207 @@
/**
 * Test Template for Custom Hooks
 *
 * Instructions:
 * 1. Replace `useHookName` with your hook name
 * 2. Update import path
 * 3. Add/remove test sections based on hook features
 */

import { renderHook, act, waitFor } from '@testing-library/react'
// import { useHookName } from './use-hook-name'

// ============================================================================
// Mocks
// ============================================================================

// API services (if hook fetches data)
// jest.mock('@/service/api')
// import * as api from '@/service/api'
// const mockedApi = api as jest.Mocked<typeof api>

// ============================================================================
// Test Helpers
// ============================================================================

// Wrapper for hooks that need context
// const createWrapper = (contextValue = {}) => {
//   return ({ children }: { children: React.ReactNode }) => (
//     <SomeContext.Provider value={contextValue}>
//       {children}
//     </SomeContext.Provider>
//   )
// }

// ============================================================================
// Tests
// ============================================================================

describe('useHookName', () => {
  beforeEach(() => {
    jest.clearAllMocks()
  })

  // --------------------------------------------------------------------------
  // Initial State
  // --------------------------------------------------------------------------
  describe('Initial State', () => {
    it('should return initial state', () => {
      // const { result } = renderHook(() => useHookName())
      //
      // expect(result.current.value).toBe(initialValue)
      // expect(result.current.isLoading).toBe(false)
    })

    it('should accept initial value from props', () => {
      // const { result } = renderHook(() => useHookName({ initialValue: 'custom' }))
      //
      // expect(result.current.value).toBe('custom')
    })
  })

  // --------------------------------------------------------------------------
  // State Updates
  // --------------------------------------------------------------------------
  describe('State Updates', () => {
    it('should update value when setValue is called', () => {
      // const { result } = renderHook(() => useHookName())
      //
      // act(() => {
      //   result.current.setValue('new value')
      // })
      //
      // expect(result.current.value).toBe('new value')
    })

    it('should reset to initial value', () => {
      // const { result } = renderHook(() => useHookName({ initialValue: 'initial' }))
      //
      // act(() => {
      //   result.current.setValue('changed')
      // })
      // expect(result.current.value).toBe('changed')
      //
      // act(() => {
      //   result.current.reset()
      // })
      // expect(result.current.value).toBe('initial')
    })
  })

  // --------------------------------------------------------------------------
  // Async Operations
  // --------------------------------------------------------------------------
  describe('Async Operations', () => {
    it('should fetch data on mount', async () => {
      // mockedApi.fetchData.mockResolvedValue({ data: 'test' })
      //
      // const { result } = renderHook(() => useHookName())
      //
      // // Initially loading
      // expect(result.current.isLoading).toBe(true)
      //
      // // Wait for data
      // await waitFor(() => {
      //   expect(result.current.isLoading).toBe(false)
      // })
      //
      // expect(result.current.data).toEqual({ data: 'test' })
    })

    it('should handle fetch error', async () => {
      // mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
      //
      // const { result } = renderHook(() => useHookName())
      //
      // await waitFor(() => {
      //   expect(result.current.error).toBeTruthy()
      // })
      //
      // expect(result.current.error?.message).toBe('Network error')
    })

    it('should refetch when dependency changes', async () => {
      // mockedApi.fetchData.mockResolvedValue({ data: 'test' })
      //
      // const { result, rerender } = renderHook(
      //   ({ id }) => useHookName(id),
      //   { initialProps: { id: '1' } }
      // )
      //
      // await waitFor(() => {
      //   expect(mockedApi.fetchData).toHaveBeenCalledWith('1')
      // })
      //
      // rerender({ id: '2' })
      //
      // await waitFor(() => {
      //   expect(mockedApi.fetchData).toHaveBeenCalledWith('2')
      // })
    })
  })

  // --------------------------------------------------------------------------
  // Side Effects
  // --------------------------------------------------------------------------
  describe('Side Effects', () => {
    it('should call callback when value changes', () => {
      // const callback = jest.fn()
      // const { result } = renderHook(() => useHookName({ onChange: callback }))
      //
      // act(() => {
      //   result.current.setValue('new value')
      // })
      //
      // expect(callback).toHaveBeenCalledWith('new value')
    })

    it('should cleanup on unmount', () => {
      // jest.spyOn(window, 'addEventListener')
      // jest.spyOn(window, 'removeEventListener')
      //
      // const { unmount } = renderHook(() => useHookName())
      //
      // expect(window.addEventListener).toHaveBeenCalled()
      //
      // unmount()
      //
      // expect(window.removeEventListener).toHaveBeenCalled()
    })
  })

  // --------------------------------------------------------------------------
  // Edge Cases
  // --------------------------------------------------------------------------
  describe('Edge Cases', () => {
    it('should handle null input', () => {
      // const { result } = renderHook(() => useHookName(null))
      //
      // expect(result.current.value).toBeNull()
    })

    it('should handle rapid updates', () => {
      // const { result } = renderHook(() => useHookName())
      //
      // act(() => {
      //   result.current.setValue('1')
      //   result.current.setValue('2')
      //   result.current.setValue('3')
      // })
      //
      // expect(result.current.value).toBe('3')
    })
  })

  // --------------------------------------------------------------------------
  // With Context (if hook uses context)
  // --------------------------------------------------------------------------
  describe('With Context', () => {
    it('should use context value', () => {
      // const wrapper = createWrapper({ someValue: 'context-value' })
      // const { result } = renderHook(() => useHookName(), { wrapper })
      //
      // expect(result.current.contextValue).toBe('context-value')
    })
  })
})
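As a concrete instance of the renderHook/act pattern above, here is a minimal sketch for a hypothetical `useToggle` hook. The hook, its `[value, toggle]` return shape, and the `./use-toggle` import path are assumptions for illustration, not an existing Dify hook.

import { renderHook, act } from '@testing-library/react'
// Hypothetical hook: assumed to return [value, toggle] and accept an initial value.
import { useToggle } from './use-toggle'

describe('useToggle', () => {
  it('should return the initial value', () => {
    const { result } = renderHook(() => useToggle(true))
    expect(result.current[0]).toBe(true)
  })

  it('should flip the value when toggle is called', () => {
    const { result } = renderHook(() => useToggle(false))

    // State updates must be wrapped in act() so React flushes them
    // before the assertion runs.
    act(() => {
      result.current[1]()
    })

    expect(result.current[0]).toBe(true)
  })
})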
@@ -0,0 +1,154 @@
/**
 * Test Template for Utility Functions
 *
 * Instructions:
 * 1. Replace `utilityFunction` with your function name
 * 2. Update import path
 * 3. Use test.each for data-driven tests
 */

// import { utilityFunction } from './utility'

// ============================================================================
// Tests
// ============================================================================

describe('utilityFunction', () => {
  // --------------------------------------------------------------------------
  // Basic Functionality
  // --------------------------------------------------------------------------
  describe('Basic Functionality', () => {
    it('should return expected result for valid input', () => {
      // expect(utilityFunction('input')).toBe('expected-output')
    })

    it('should handle multiple arguments', () => {
      // expect(utilityFunction('a', 'b', 'c')).toBe('abc')
    })
  })

  // --------------------------------------------------------------------------
  // Data-Driven Tests
  // --------------------------------------------------------------------------
  describe('Input/Output Mapping', () => {
    test.each([
      // [input, expected]
      ['input1', 'output1'],
      ['input2', 'output2'],
      ['input3', 'output3'],
    ])('should map input %s to output %s', (input, expected) => {
      // expect(utilityFunction(input)).toBe(expected)
    })
  })

  // --------------------------------------------------------------------------
  // Edge Cases
  // --------------------------------------------------------------------------
  describe('Edge Cases', () => {
    it('should handle empty string', () => {
      // expect(utilityFunction('')).toBe('')
    })

    it('should handle null', () => {
      // expect(utilityFunction(null)).toBe(null)
      // or
      // expect(() => utilityFunction(null)).toThrow()
    })

    it('should handle undefined', () => {
      // expect(utilityFunction(undefined)).toBe(undefined)
      // or
      // expect(() => utilityFunction(undefined)).toThrow()
    })

    it('should handle empty array', () => {
      // expect(utilityFunction([])).toEqual([])
    })

    it('should handle empty object', () => {
      // expect(utilityFunction({})).toEqual({})
    })
  })

  // --------------------------------------------------------------------------
  // Boundary Conditions
  // --------------------------------------------------------------------------
  describe('Boundary Conditions', () => {
    it('should handle minimum value', () => {
      // expect(utilityFunction(0)).toBe(0)
    })

    it('should handle maximum value', () => {
      // expect(utilityFunction(Number.MAX_SAFE_INTEGER)).toBe(...)
    })

    it('should handle negative numbers', () => {
      // expect(utilityFunction(-1)).toBe(...)
    })
  })

  // --------------------------------------------------------------------------
  // Type Coercion (if applicable)
  // --------------------------------------------------------------------------
  describe('Type Handling', () => {
    it('should handle numeric string', () => {
      // expect(utilityFunction('123')).toBe(123)
    })

    it('should handle boolean', () => {
      // expect(utilityFunction(true)).toBe(...)
    })
  })

  // --------------------------------------------------------------------------
  // Error Cases
  // --------------------------------------------------------------------------
  describe('Error Handling', () => {
    it('should throw for invalid input', () => {
      // expect(() => utilityFunction('invalid')).toThrow('Error message')
    })

    it('should throw with specific error type', () => {
      // expect(() => utilityFunction('invalid')).toThrow(ValidationError)
    })
  })

  // --------------------------------------------------------------------------
  // Complex Objects (if applicable)
  // --------------------------------------------------------------------------
  describe('Object Handling', () => {
    it('should preserve object structure', () => {
      // const input = { a: 1, b: 2 }
      // expect(utilityFunction(input)).toEqual({ a: 1, b: 2 })
    })

    it('should handle nested objects', () => {
      // const input = { nested: { deep: 'value' } }
      // expect(utilityFunction(input)).toEqual({ nested: { deep: 'transformed' } })
    })

    it('should not mutate input', () => {
      // const input = { a: 1 }
      // const inputCopy = { ...input }
      // utilityFunction(input)
      // expect(input).toEqual(inputCopy)
    })
  })

  // --------------------------------------------------------------------------
  // Array Handling (if applicable)
  // --------------------------------------------------------------------------
  describe('Array Handling', () => {
    it('should process all elements', () => {
      // expect(utilityFunction([1, 2, 3])).toEqual([2, 4, 6])
    })

    it('should handle single element array', () => {
      // expect(utilityFunction([1])).toEqual([2])
    })

    it('should preserve order', () => {
      // expect(utilityFunction(['c', 'a', 'b'])).toEqual(['c', 'a', 'b'])
    })
  })
})
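For example, the data-driven section might look like this for a hypothetical `truncate(text, max)` helper. The function, its ellipsis behavior, and the `./truncate` import path are assumptions for illustration, not an existing Dify utility.

// Hypothetical helper: assumed to shorten text to `max` characters,
// appending a Unicode ellipsis when it cuts anything off.
import { truncate } from './truncate'

describe('truncate', () => {
  test.each([
    // [input, max, expected]
    ['hello world', 5, 'hello…'],
    ['hi', 10, 'hi'],
    ['', 3, ''],
  ])('truncate(%s, %i) returns %s', (input, max, expected) => {
    expect(truncate(input, max)).toBe(expected)
  })

  it('should not mutate its input', () => {
    const input = 'hello world'
    truncate(input, 5)
    // Strings are immutable in JS; the assertion documents the contract.
    expect(input).toBe('hello world')
  })
})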
.coveragerc (new file, 5 lines)
@@ -0,0 +1,5 @@
[run]
omit =
    api/tests/*
    api/migrations/*
    api/core/rag/datasource/vdb/*
@@ -1,4 +1,4 @@
FROM mcr.microsoft.com/devcontainers/python:3.12
FROM mcr.microsoft.com/devcontainers/python:3.12-bookworm

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install libgmp-dev libmpfr-dev libmpc-dev
@@ -11,7 +11,7 @@
      "nodeGypDependencies": true,
      "version": "lts"
    },
    "ghcr.io/devcontainers-contrib/features/npm-package:1": {
    "ghcr.io/devcontainers-extra/features/npm-package:1": {
      "package": "typescript",
      "version": "latest"
    },
@@ -1,15 +1,15 @@
#!/bin/bash
WORKSPACE_ROOT=$(pwd)

corepack enable
cd web && pnpm install
pipx install uv

echo 'alias start-api="cd /workspaces/dify/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && uv run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc
echo 'alias start-web-prod="cd /workspaces/dify/web && pnpm build && pnpm start"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d"' >> ~/.bashrc
echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env down"' >> ~/.bashrc
echo "alias start-api=\"cd $WORKSPACE_ROOT/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug\"" >> ~/.bashrc
echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor\"" >> ~/.bashrc
echo "alias start-web=\"cd $WORKSPACE_ROOT/web && pnpm dev\"" >> ~/.bashrc
echo "alias start-web-prod=\"cd $WORKSPACE_ROOT/web && pnpm build && pnpm start\"" >> ~/.bashrc
echo "alias start-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d\"" >> ~/.bashrc
echo "alias stop-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env down\"" >> ~/.bashrc

source /home/vscode/.bashrc
@@ -29,7 +29,7 @@ trim_trailing_whitespace = false

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,tsx}]
[*.{js,jsx,ts,tsx,mjs}]
indent_style = space
indent_size = 2
.github/CODEOWNERS (new file, 234 lines, vendored)
@@ -0,0 +1,234 @@
# CODEOWNERS
# This file defines code ownership for the Dify project.
# Each line is a file pattern followed by one or more owners.
# Owners can be @username, @org/team-name, or email addresses.
# For more information, see: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners

* @crazywoola @laipz8200 @Yeuoly

# Backend (default owner, more specific rules below will override)
api/ @QuantumGhost

# Backend - MCP
api/core/mcp/ @Nov1c444
api/core/entities/mcp_provider.py @Nov1c444
api/services/tools/mcp_tools_manage_service.py @Nov1c444
api/controllers/mcp/ @Nov1c444
api/controllers/console/app/mcp_server.py @Nov1c444
api/tests/**/*mcp* @Nov1c444

# Backend - Workflow - Engine (Core graph execution engine)
api/core/workflow/graph_engine/ @laipz8200 @QuantumGhost
api/core/workflow/runtime/ @laipz8200 @QuantumGhost
api/core/workflow/graph/ @laipz8200 @QuantumGhost
api/core/workflow/graph_events/ @laipz8200 @QuantumGhost
api/core/workflow/node_events/ @laipz8200 @QuantumGhost
api/core/model_runtime/ @laipz8200 @QuantumGhost

# Backend - Workflow - Nodes (Agent, Iteration, Loop, LLM)
api/core/workflow/nodes/agent/ @Nov1c444
api/core/workflow/nodes/iteration/ @Nov1c444
api/core/workflow/nodes/loop/ @Nov1c444
api/core/workflow/nodes/llm/ @Nov1c444

# Backend - RAG (Retrieval Augmented Generation)
api/core/rag/ @JohnJyong
api/services/rag_pipeline/ @JohnJyong
api/services/dataset_service.py @JohnJyong
api/services/knowledge_service.py @JohnJyong
api/services/external_knowledge_service.py @JohnJyong
api/services/hit_testing_service.py @JohnJyong
api/services/metadata_service.py @JohnJyong
api/services/vector_service.py @JohnJyong
api/services/entities/knowledge_entities/ @JohnJyong
api/services/entities/external_knowledge_entities/ @JohnJyong
api/controllers/console/datasets/ @JohnJyong
api/controllers/service_api/dataset/ @JohnJyong
api/models/dataset.py @JohnJyong
api/tasks/rag_pipeline/ @JohnJyong
api/tasks/add_document_to_index_task.py @JohnJyong
api/tasks/batch_clean_document_task.py @JohnJyong
api/tasks/clean_document_task.py @JohnJyong
api/tasks/clean_notion_document_task.py @JohnJyong
api/tasks/document_indexing_task.py @JohnJyong
api/tasks/document_indexing_sync_task.py @JohnJyong
api/tasks/document_indexing_update_task.py @JohnJyong
api/tasks/duplicate_document_indexing_task.py @JohnJyong
api/tasks/recover_document_indexing_task.py @JohnJyong
api/tasks/remove_document_from_index_task.py @JohnJyong
api/tasks/retry_document_indexing_task.py @JohnJyong
api/tasks/sync_website_document_indexing_task.py @JohnJyong
api/tasks/batch_create_segment_to_index_task.py @JohnJyong
api/tasks/create_segment_to_index_task.py @JohnJyong
api/tasks/delete_segment_from_index_task.py @JohnJyong
api/tasks/disable_segment_from_index_task.py @JohnJyong
api/tasks/disable_segments_from_index_task.py @JohnJyong
api/tasks/enable_segment_to_index_task.py @JohnJyong
api/tasks/enable_segments_to_index_task.py @JohnJyong
api/tasks/clean_dataset_task.py @JohnJyong
api/tasks/deal_dataset_index_update_task.py @JohnJyong
api/tasks/deal_dataset_vector_index_task.py @JohnJyong

# Backend - Plugins
api/core/plugin/ @Mairuis @Yeuoly @Stream29
api/services/plugin/ @Mairuis @Yeuoly @Stream29
api/controllers/console/workspace/plugin.py @Mairuis @Yeuoly @Stream29
api/controllers/inner_api/plugin/ @Mairuis @Yeuoly @Stream29
api/tasks/process_tenant_plugin_autoupgrade_check_task.py @Mairuis @Yeuoly @Stream29

# Backend - Trigger/Schedule/Webhook
api/controllers/trigger/ @Mairuis @Yeuoly
api/controllers/console/app/workflow_trigger.py @Mairuis @Yeuoly
api/controllers/console/workspace/trigger_providers.py @Mairuis @Yeuoly
api/core/trigger/ @Mairuis @Yeuoly
api/core/app/layers/trigger_post_layer.py @Mairuis @Yeuoly
api/services/trigger/ @Mairuis @Yeuoly
api/models/trigger.py @Mairuis @Yeuoly
api/fields/workflow_trigger_fields.py @Mairuis @Yeuoly
api/repositories/workflow_trigger_log_repository.py @Mairuis @Yeuoly
api/repositories/sqlalchemy_workflow_trigger_log_repository.py @Mairuis @Yeuoly
api/libs/schedule_utils.py @Mairuis @Yeuoly
api/services/workflow/scheduler.py @Mairuis @Yeuoly
api/schedule/trigger_provider_refresh_task.py @Mairuis @Yeuoly
api/schedule/workflow_schedule_task.py @Mairuis @Yeuoly
api/tasks/trigger_processing_tasks.py @Mairuis @Yeuoly
api/tasks/trigger_subscription_refresh_tasks.py @Mairuis @Yeuoly
api/tasks/workflow_schedule_tasks.py @Mairuis @Yeuoly
api/tasks/workflow_cfs_scheduler/ @Mairuis @Yeuoly
api/events/event_handlers/sync_plugin_trigger_when_app_created.py @Mairuis @Yeuoly
api/events/event_handlers/update_app_triggers_when_app_published_workflow_updated.py @Mairuis @Yeuoly
api/events/event_handlers/sync_workflow_schedule_when_app_published.py @Mairuis @Yeuoly
api/events/event_handlers/sync_webhook_when_app_created.py @Mairuis @Yeuoly

# Backend - Async Workflow
api/services/async_workflow_service.py @Mairuis @Yeuoly
api/tasks/async_workflow_tasks.py @Mairuis @Yeuoly

# Backend - Billing
api/services/billing_service.py @hj24 @zyssyz123
api/controllers/console/billing/ @hj24 @zyssyz123

# Backend - Enterprise
api/configs/enterprise/ @GarfieldDai @GareArc
api/services/enterprise/ @GarfieldDai @GareArc
api/services/feature_service.py @GarfieldDai @GareArc
api/controllers/console/feature.py @GarfieldDai @GareArc
api/controllers/web/feature.py @GarfieldDai @GareArc

# Backend - Database Migrations
api/migrations/ @snakevash @laipz8200

# Frontend
web/ @iamjoel

# Frontend - App - Orchestration
web/app/components/workflow/ @iamjoel @zxhlyh
web/app/components/workflow-app/ @iamjoel @zxhlyh
web/app/components/app/configuration/ @iamjoel @zxhlyh
web/app/components/app/app-publisher/ @iamjoel @zxhlyh

# Frontend - WebApp - Chat
web/app/components/base/chat/ @iamjoel @zxhlyh

# Frontend - WebApp - Completion
web/app/components/share/text-generation/ @iamjoel @zxhlyh

# Frontend - App - List and Creation
web/app/components/apps/ @JzoNgKVO @iamjoel
web/app/components/app/create-app-dialog/ @JzoNgKVO @iamjoel
web/app/components/app/create-app-modal/ @JzoNgKVO @iamjoel
web/app/components/app/create-from-dsl-modal/ @JzoNgKVO @iamjoel

# Frontend - App - API Documentation
web/app/components/develop/ @JzoNgKVO @iamjoel

# Frontend - App - Logs and Annotations
web/app/components/app/workflow-log/ @JzoNgKVO @iamjoel
web/app/components/app/log/ @JzoNgKVO @iamjoel
web/app/components/app/log-annotation/ @JzoNgKVO @iamjoel
web/app/components/app/annotation/ @JzoNgKVO @iamjoel

# Frontend - App - Monitoring
web/app/(commonLayout)/app/(appDetailLayout)/\[appId\]/overview/ @JzoNgKVO @iamjoel
web/app/components/app/overview/ @JzoNgKVO @iamjoel

# Frontend - App - Settings
web/app/components/app-sidebar/ @JzoNgKVO @iamjoel

# Frontend - RAG - Hit Testing
web/app/components/datasets/hit-testing/ @JzoNgKVO @iamjoel

# Frontend - RAG - List and Creation
web/app/components/datasets/list/ @iamjoel @WTW0313
web/app/components/datasets/create/ @iamjoel @WTW0313
web/app/components/datasets/create-from-pipeline/ @iamjoel @WTW0313
web/app/components/datasets/external-knowledge-base/ @iamjoel @WTW0313

# Frontend - RAG - Orchestration (general rule first, specific rules below override)
web/app/components/rag-pipeline/ @iamjoel @WTW0313
web/app/components/rag-pipeline/components/rag-pipeline-main.tsx @iamjoel @zxhlyh
web/app/components/rag-pipeline/store/ @iamjoel @zxhlyh

# Frontend - RAG - Documents List
web/app/components/datasets/documents/list.tsx @iamjoel @WTW0313
web/app/components/datasets/documents/create-from-pipeline/ @iamjoel @WTW0313

# Frontend - RAG - Segments List
web/app/components/datasets/documents/detail/ @iamjoel @WTW0313

# Frontend - RAG - Settings
web/app/components/datasets/settings/ @iamjoel @WTW0313

# Frontend - Ecosystem - Plugins
web/app/components/plugins/ @iamjoel @zhsama

# Frontend - Ecosystem - Tools
web/app/components/tools/ @iamjoel @Yessenia-d

# Frontend - Ecosystem - MarketPlace
web/app/components/plugins/marketplace/ @iamjoel @Yessenia-d

# Frontend - Login and Registration
web/app/signin/ @douxc @iamjoel
web/app/signup/ @douxc @iamjoel
web/app/reset-password/ @douxc @iamjoel
web/app/install/ @douxc @iamjoel
web/app/init/ @douxc @iamjoel
web/app/forgot-password/ @douxc @iamjoel
web/app/account/ @douxc @iamjoel

# Frontend - Service Authentication
web/service/base.ts @douxc @iamjoel

# Frontend - WebApp Authentication and Access Control
web/app/(shareLayout)/components/ @douxc @iamjoel
web/app/(shareLayout)/webapp-signin/ @douxc @iamjoel
web/app/(shareLayout)/webapp-reset-password/ @douxc @iamjoel
web/app/components/app/app-access-control/ @douxc @iamjoel

# Frontend - Explore Page
web/app/components/explore/ @CodingOnStar @iamjoel

# Frontend - Personal Settings
web/app/components/header/account-setting/ @CodingOnStar @iamjoel
web/app/components/header/account-dropdown/ @CodingOnStar @iamjoel

# Frontend - Analytics
web/app/components/base/ga/ @CodingOnStar @iamjoel

# Frontend - Base Components
web/app/components/base/ @iamjoel @zxhlyh

# Frontend - Utils and Hooks
web/utils/classnames.ts @iamjoel @zxhlyh
web/utils/time.ts @iamjoel @zxhlyh
web/utils/format.ts @iamjoel @zxhlyh
web/utils/clipboard.ts @iamjoel @zxhlyh
web/hooks/use-document-title.ts @iamjoel @zxhlyh

# Frontend - Billing and Education
web/app/components/billing/ @iamjoel @zxhlyh
web/app/education-apply/ @iamjoel @zxhlyh

# Frontend - Workspace
web/app/components/header/account-dropdown/workplace-selector/ @iamjoel @zxhlyh
.github/ISSUE_TEMPLATE/config.yml (3 lines changed, vendored)
@@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: "\U0001F510 Security Vulnerabilities"
    url: "https://github.com/langgenius/dify/security/advisories/new"
    about: Report security vulnerabilities through GitHub Security Advisories to ensure responsible disclosure. 💡 Please do not report security vulnerabilities in public issues.
  - name: "\U0001F4A1 Model Providers & Plugins"
    url: "https://github.com/langgenius/dify-official-plugins/issues/new/choose"
    about: Report issues with official plugins or model providers; you will need to provide the plugin version and other relevant details.
.github/ISSUE_TEMPLATE/refactor.yml (14 lines changed, vendored)
@@ -1,8 +1,6 @@
name: "✨ Refactor"
|
||||
description: Refactor existing code for improved readability and maintainability.
|
||||
title: "[Chore/Refactor] "
|
||||
labels:
|
||||
- refactor
|
||||
name: "✨ Refactor or Chore"
|
||||
description: Refactor existing code or perform maintenance chores to improve readability and reliability.
|
||||
title: "[Refactor/Chore] "
|
||||
body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
@ -11,7 +9,7 @@ body:
|
||||
options:
|
||||
- label: I have read the [Contributing Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) and [Language Policy](https://github.com/langgenius/dify/issues/1542).
|
||||
required: true
|
||||
- label: This is only for refactoring, if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
|
||||
- label: This is only for refactors or chores; if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
|
||||
required: true
|
||||
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
|
||||
required: true
|
||||
@ -25,14 +23,14 @@ body:
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
placeholder: "Describe the refactor you are proposing."
|
||||
placeholder: "Describe the refactor or chore you are proposing."
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: motivation
|
||||
attributes:
|
||||
label: Motivation
|
||||
placeholder: "Explain why this refactor is necessary."
|
||||
placeholder: "Explain why this refactor or chore is necessary."
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
|
||||
.github/ISSUE_TEMPLATE/tracker.yml (deleted file, 13 lines, vendored)
@@ -1,13 +0,0 @@
name: "👾 Tracker"
|
||||
description: For inner usages, please do not use this template.
|
||||
title: "[Tracker] "
|
||||
labels:
|
||||
- tracker
|
||||
body:
|
||||
- type: textarea
|
||||
id: content
|
||||
attributes:
|
||||
label: Blockers
|
||||
placeholder: "- [ ] ..."
|
||||
validations:
|
||||
required: true
|
||||
.github/dependabot.yml (new file, 12 lines, vendored)
@@ -0,0 +1,12 @@
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/web"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2
  - package-ecosystem: "uv"
    directory: "/api"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2
.github/workflows/api-tests.yml (64 lines changed, vendored)
@@ -39,42 +39,15 @@ jobs:
      - name: Install dependencies
        run: uv sync --project api --dev

      - name: Run Unit tests
        run: |
          uv run --project api bash dev/pytest/pytest_unit_tests.sh
      - name: Run ty check
        run: |
          cd api
          uv add --dev ty
          uv run ty check || true
      - name: Run pyrefly check
        run: |
          cd api
          uv add --dev pyrefly
          uv run pyrefly check || true
      - name: Coverage Summary
        run: |
          set -x
          # Extract coverage percentage and create a summary
          TOTAL_COVERAGE=$(python -c 'import json; print(json.load(open("coverage.json"))["totals"]["percent_covered_display"])')

          # Create a detailed coverage summary
          echo "### Test Coverage Summary :test_tube:" >> $GITHUB_STEP_SUMMARY
          echo "Total Coverage: ${TOTAL_COVERAGE}%" >> $GITHUB_STEP_SUMMARY
          uv run --project api coverage report --format=markdown >> $GITHUB_STEP_SUMMARY

      - name: Run dify config tests
        run: uv run --project api dev/pytest/pytest_config_tests.py

      - name: MyPy Cache
        uses: actions/cache@v4
        with:
          path: api/.mypy_cache
          key: mypy-${{ matrix.python-version }}-${{ runner.os }}-${{ hashFiles('api/uv.lock') }}

      - name: Run MyPy Checks
        run: dev/mypy-check

      - name: Set up dotenvs
        run: |
          cp docker/.env.example docker/.env
@@ -89,7 +62,7 @@ jobs:
          compose-file: |
            docker/docker-compose.middleware.yaml
          services: |
            db
            db_postgres
            redis
            sandbox
            ssrf_proxy
@@ -98,11 +71,34 @@ jobs:
        run: |
          cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env

      - name: Run Workflow
        run: uv run --project api bash dev/pytest/pytest_workflow.sh
      - name: Run API Tests
        env:
          STORAGE_TYPE: opendal
          OPENDAL_SCHEME: fs
          OPENDAL_FS_ROOT: /tmp/dify-storage
        run: |
          uv run --project api pytest \
            --timeout "${PYTEST_TIMEOUT:-180}" \
            api/tests/integration_tests/workflow \
            api/tests/integration_tests/tools \
            api/tests/test_containers_integration_tests \
            api/tests/unit_tests

      - name: Run Tool
        run: uv run --project api bash dev/pytest/pytest_tools.sh
      - name: Coverage Summary
        run: |
          set -x
          # Extract coverage percentage and create a summary
          TOTAL_COVERAGE=$(python -c 'import json; print(json.load(open("coverage.json"))["totals"]["percent_covered_display"])')

      - name: Run TestContainers
        run: uv run --project api bash dev/pytest/pytest_testcontainers.sh
          # Create a detailed coverage summary
          echo "### Test Coverage Summary :test_tube:" >> $GITHUB_STEP_SUMMARY
          echo "Total Coverage: ${TOTAL_COVERAGE}%" >> $GITHUB_STEP_SUMMARY
          {
            echo ""
            echo "<details><summary>File-level coverage (click to expand)</summary>"
            echo ""
            echo '```'
            uv run --project api coverage report -m
            echo '```'
            echo "</details>"
          } >> $GITHUB_STEP_SUMMARY
.github/workflows/autofix.yml (77 lines changed, vendored)
@@ -2,6 +2,8 @@ name: autofix.ci
on:
  pull_request:
    branches: ["main"]
  push:
    branches: ["main"]
permissions:
  contents: read

@@ -11,23 +13,80 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      # Use uv to ensure we have the same ruff version in CI and locally.
      - uses: astral-sh/setup-uv@v6
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
          python-version: "3.11"

      - uses: astral-sh/setup-uv@v6

      - run: |
          cd api
          uv sync --dev
          # fmt first to avoid line too long
          uv run ruff format ..
          # Fix lint errors
          uv run ruff check --fix-only .
          uv run ruff check --fix .
          # Format code
          uv run ruff format .
          uv run ruff format ..

      - name: count migration progress
        run: |
          cd api
          ./cnt_base.sh

      - name: ast-grep
        run: |
          uvx --from ast-grep-cli sg --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all
          uvx --from ast-grep-cli sg --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all
          # ast-grep exits 1 if no matches are found; allow idempotent runs.
          uvx --from ast-grep-cli ast-grep --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all || true
          uvx --from ast-grep-cli ast-grep --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all || true
          uvx --from ast-grep-cli ast-grep -p '$A = db.Column($$$B)' -r '$A = mapped_column($$$B)' -l py --update-all || true
          uvx --from ast-grep-cli ast-grep -p '$A : $T = db.Column($$$B)' -r '$A : $T = mapped_column($$$B)' -l py --update-all || true
          # Convert Optional[T] to T | None (ignoring quoted types)
          cat > /tmp/optional-rule.yml << 'EOF'
          id: convert-optional-to-union
          language: python
          rule:
            kind: generic_type
            all:
              - has:
                  kind: identifier
                  pattern: Optional
              - has:
                  kind: type_parameter
                  has:
                    kind: type
                    pattern: $T
          fix: $T | None
          EOF
          uvx --from ast-grep-cli ast-grep scan . --inline-rules "$(cat /tmp/optional-rule.yml)" --update-all
          # Fix forward references that were incorrectly converted (Python doesn't support "Type" | None syntax)
          find . -name "*.py" -type f -exec sed -i.bak -E 's/"([^"]+)" \| None/Optional["\1"]/g; s/'"'"'([^'"'"']+)'"'"' \| None/Optional['"'"'\1'"'"']/g' {} \;
          find . -name "*.py.bak" -type f -delete

      # mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter.
      - name: mdformat
        run: |
          uvx mdformat .
          uvx --python 3.13 mdformat . --exclude ".claude/skills/**"

      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          package_json_file: web/package.json
          run_install: false

      - name: Setup NodeJS
        uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: pnpm
          cache-dependency-path: ./web/package.json

      - name: Web dependencies
        working-directory: ./web
        run: pnpm install --frozen-lockfile

      - name: oxlint
        working-directory: ./web
        run: pnpm exec oxlint --config .oxlintrc.json --fix .

      - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27
.github/workflows/build-push.yml
vendored
4
.github/workflows/build-push.yml
vendored
@ -4,10 +4,10 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "deploy/dev"
|
||||
- "deploy/enterprise"
|
||||
- "deploy/**"
|
||||
- "build/**"
|
||||
- "release/e-*"
|
||||
- "hotfix/**"
|
||||
tags:
|
||||
- "*"
|
||||
|
||||
|
||||
.github/workflows/db-migration-test.yml (61 lines changed, vendored)
@@ -8,7 +8,7 @@ concurrency:
  cancel-in-progress: true

jobs:
  db-migration-test:
  db-migration-test-postgres:
    runs-on: ubuntu-latest

    steps:
@@ -45,7 +45,7 @@ jobs:
          compose-file: |
            docker/docker-compose.middleware.yaml
          services: |
            db
            db_postgres
            redis

      - name: Prepare configs
@@ -57,3 +57,60 @@ jobs:
        env:
          DEBUG: true
        run: uv run --directory api flask upgrade-db

  db-migration-test-mysql:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          persist-credentials: false

      - name: Setup UV and Python
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          python-version: "3.12"
          cache-dependency-glob: api/uv.lock

      - name: Install dependencies
        run: uv sync --project api
      - name: Ensure offline migrations are supported
        run: |
          # upgrade
          uv run --directory api flask db upgrade 'base:head' --sql
          # downgrade
          uv run --directory api flask db downgrade 'head:base' --sql

      - name: Prepare middleware env for MySQL
        run: |
          cd docker
          cp middleware.env.example middleware.env
          sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env
          sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env
          sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env
          sed -i 's/DB_USERNAME=postgres/DB_USERNAME=mysql/' middleware.env

      - name: Set up Middlewares
        uses: hoverkraft-tech/compose-action@v2.0.2
        with:
          compose-file: |
            docker/docker-compose.middleware.yaml
          services: |
            db_mysql
            redis

      - name: Prepare configs for MySQL
        run: |
          cd api
          cp .env.example .env
          sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' .env
          sed -i 's/DB_PORT=5432/DB_PORT=3306/' .env
          sed -i 's/DB_USERNAME=postgres/DB_USERNAME=root/' .env

      - name: Run DB Migration
        env:
          DEBUG: true
        run: uv run --directory api flask upgrade-db
.github/workflows/deploy-dev.yml (3 lines changed, vendored)
@@ -12,7 +12,8 @@ jobs:
  deploy:
    runs-on: ubuntu-latest
    if: |
      github.event.workflow_run.conclusion == 'success'
      github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.head_branch == 'deploy/dev'
    steps:
      - name: Deploy to server
        uses: appleboy/ssh-action@v0.1.8
.github/workflows/deploy-enterprise.yml (28 lines changed, vendored)
@@ -19,11 +19,23 @@ jobs:
      github.event.workflow_run.head_branch == 'deploy/enterprise'

    steps:
      - name: Deploy to server
        uses: appleboy/ssh-action@v0.1.8
        with:
          host: ${{ secrets.ENTERPRISE_SSH_HOST }}
          username: ${{ secrets.ENTERPRISE_SSH_USER }}
          password: ${{ secrets.ENTERPRISE_SSH_PASSWORD }}
          script: |
            ${{ vars.ENTERPRISE_SSH_SCRIPT || secrets.ENTERPRISE_SSH_SCRIPT }}
      - name: trigger deployments
        env:
          DEV_ENV_ADDRS: ${{ vars.DEV_ENV_ADDRS }}
          DEPLOY_SECRET: ${{ secrets.DEPLOY_SECRET }}
        run: |
          IFS=',' read -ra ENDPOINTS <<< "${DEV_ENV_ADDRS:-}"
          BODY='{"project":"dify-api","tag":"deploy-enterprise"}'

          for ENDPOINT in "${ENDPOINTS[@]}"; do
            ENDPOINT="$(echo "$ENDPOINT" | xargs)"
            [ -z "$ENDPOINT" ] && continue

            API_SIGNATURE=$(printf '%s' "$BODY" | openssl dgst -sha256 -hmac "$DEPLOY_SECRET" | awk '{print "sha256="$2}')

            curl -sSf -X POST \
              -H "Content-Type: application/json" \
              -H "X-Hub-Signature-256: $API_SIGNATURE" \
              -d "$BODY" \
              "$ENDPOINT"
          done
@@ -1,4 +1,4 @@
name: Deploy RAG Dev
name: Deploy Trigger Dev

permissions:
  contents: read
@@ -7,7 +7,7 @@ on:
  workflow_run:
    workflows: ["Build and Push API & Web"]
    branches:
      - "deploy/rag-dev"
      - "deploy/trigger-dev"
    types:
      - completed

@@ -16,12 +16,12 @@ jobs:
    runs-on: ubuntu-latest
    if: |
      github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.head_branch == 'deploy/rag-dev'
      github.event.workflow_run.head_branch == 'deploy/trigger-dev'
    steps:
      - name: Deploy to server
        uses: appleboy/ssh-action@v0.1.8
        with:
          host: ${{ secrets.RAG_SSH_HOST }}
          host: ${{ secrets.TRIGGER_SSH_HOST }}
          username: ${{ secrets.SSH_USER }}
          key: ${{ secrets.SSH_PRIVATE_KEY }}
          script: |
.github/workflows/expose_service_ports.sh (3 lines changed, vendored)
@@ -1,6 +1,7 @@
#!/bin/bash

yq eval '.services.weaviate.ports += ["8080:8080"]' -i docker/docker-compose.yaml
yq eval '.services.weaviate.ports += ["50051:50051"]' -i docker/docker-compose.yaml
yq eval '.services.qdrant.ports += ["6333:6333"]' -i docker/docker-compose.yaml
yq eval '.services.chroma.ports += ["8000:8000"]' -i docker/docker-compose.yaml
yq eval '.services["milvus-standalone"].ports += ["19530:19530"]' -i docker/docker-compose.yaml
@@ -13,4 +14,4 @@ yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/tidb/docker-compose.ya
yq eval '.services.oceanbase.ports += ["2881:2881"]' -i docker/docker-compose.yaml
yq eval '.services.opengauss.ports += ["6600:6600"]' -i docker/docker-compose.yaml

echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase, opengauss"
echo "Ports exposed for sandbox, weaviate (HTTP 8080, gRPC 50051), tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase, opengauss"
.github/workflows/semantic-pull-request.yml (new file, 21 lines, vendored)
@@ -0,0 +1,21 @@
name: Semantic Pull Request

on:
  pull_request:
    types:
      - opened
      - edited
      - reopened
      - synchronize

jobs:
  lint:
    name: Validate PR title
    permissions:
      pull-requests: read
    runs-on: ubuntu-latest
    steps:
      - name: Check title
        uses: amannn/action-semantic-pull-request@v6.1.1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/style.yml (21 lines changed, vendored)
@@ -12,7 +12,6 @@ permissions:
  statuses: write
  contents: read


jobs:
  python-style:
    name: Python Style
@@ -44,6 +43,18 @@ jobs:
        if: steps.changed-files.outputs.any_changed == 'true'
        run: uv sync --project api --dev

      - name: Run Import Linter
        if: steps.changed-files.outputs.any_changed == 'true'
        run: uv run --directory api --dev lint-imports

      - name: Run Basedpyright Checks
        if: steps.changed-files.outputs.any_changed == 'true'
        run: dev/basedpyright-check

      - name: Run Mypy Type Checks
        if: steps.changed-files.outputs.any_changed == 'true'
        run: uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped .

      - name: Dotenv check
        if: steps.changed-files.outputs.any_changed == 'true'
        run: uv run --project api dotenv-linter ./api/.env.example ./web/.env.example
@@ -89,7 +100,13 @@ jobs:
      - name: Web style check
        if: steps.changed-files.outputs.any_changed == 'true'
        working-directory: ./web
        run: pnpm run lint
        run: |
          pnpm run lint

      - name: Web type check
        if: steps.changed-files.outputs.any_changed == 'true'
        working-directory: ./web
        run: pnpm run type-check:tsgo

  docker-compose-template:
    name: Docker Compose Template
@@ -20,22 +20,22 @@ jobs:
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 2
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Check for file changes in i18n/en-US
        id: check_files
        run: |
          recent_commit_sha=$(git rev-parse HEAD)
          second_recent_commit_sha=$(git rev-parse HEAD~1)
          changed_files=$(git diff --name-only $recent_commit_sha $second_recent_commit_sha -- 'i18n/en-US/*.ts')
          git fetch origin "${{ github.event.before }}" || true
          git fetch origin "${{ github.sha }}" || true
          changed_files=$(git diff --name-only "${{ github.event.before }}" "${{ github.sha }}" -- 'i18n/en-US/*.ts')
          echo "Changed files: $changed_files"
          if [ -n "$changed_files" ]; then
            echo "FILES_CHANGED=true" >> $GITHUB_ENV
            file_args=""
            for file in $changed_files; do
              filename=$(basename "$file" .ts)
              file_args="$file_args --file=$filename"
              file_args="$file_args --file $filename"
            done
            echo "FILE_ARGS=$file_args" >> $GITHUB_ENV
            echo "File arguments: $file_args"
@@ -67,12 +67,25 @@ jobs:
        working-directory: ./web
        run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }}

      - name: Generate i18n type definitions
        if: env.FILES_CHANGED == 'true'
        working-directory: ./web
        run: pnpm run gen:i18n-types

      - name: Create Pull Request
        if: env.FILES_CHANGED == 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: Update i18n files based on en-US changes
          title: 'chore: translate i18n files'
          body: This PR was automatically created to update i18n files based on changes in en-US locale.
          branch: chore/automated-i18n-updates
          commit-message: 'chore(i18n): update translations based on en-US changes'
          title: 'chore(i18n): translate i18n files and update type definitions'
          body: |
            This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale.

            **Triggered by:** ${{ github.sha }}

            **Changes included:**
            - Updated translation files for all locales
            - Regenerated TypeScript type definitions for type safety
          branch: chore/automated-i18n-updates-${{ github.sha }}
          delete-branch: true
.github/workflows/vdb-tests.yml
vendored
18
.github/workflows/vdb-tests.yml
vendored
@ -51,13 +51,13 @@ jobs:
|
||||
- name: Expose Service Ports
|
||||
run: sh .github/workflows/expose_service_ports.sh
|
||||
|
||||
- name: Set up Vector Store (TiDB)
|
||||
uses: hoverkraft-tech/compose-action@v2.0.2
|
||||
with:
|
||||
compose-file: docker/tidb/docker-compose.yaml
|
||||
services: |
|
||||
tidb
|
||||
tiflash
|
||||
# - name: Set up Vector Store (TiDB)
|
||||
# uses: hoverkraft-tech/compose-action@v2.0.2
|
||||
# with:
|
||||
# compose-file: docker/tidb/docker-compose.yaml
|
||||
# services: |
|
||||
# tidb
|
||||
# tiflash
|
||||
|
||||
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase, OceanBase)
|
||||
uses: hoverkraft-tech/compose-action@v2.0.2
|
||||
@ -83,8 +83,8 @@ jobs:
|
||||
ls -lah .
|
||||
cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
|
||||
|
||||
- name: Check VDB Ready (TiDB)
|
||||
run: uv run --project api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
|
||||
# - name: Check VDB Ready (TiDB)
|
||||
# run: uv run --project api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
|
||||
|
||||
- name: Test Vector Stores
|
||||
run: uv run --project api bash dev/pytest/pytest_vdb.sh
|
||||
|
||||
.github/workflows/web-tests.yml (5 lines changed, vendored)
@@ -47,6 +47,11 @@ jobs:
        working-directory: ./web
        run: pnpm install --frozen-lockfile

      - name: Check i18n types synchronization
        if: steps.changed-files.outputs.any_changed == 'true'
        working-directory: ./web
        run: pnpm run check:i18n-types

      - name: Run tests
        if: steps.changed-files.outputs.any_changed == 'true'
        working-directory: ./web
31
.gitignore
vendored
@@ -6,6 +6,9 @@ __pycache__/
# C extensions
*.so

# *db files
*.db

# Distribution / packaging
.Python
build/

@@ -97,6 +100,7 @@ __pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat-schedule.db
celerybeat.pid

# SageMath parsed files

@@ -123,10 +127,12 @@ venv.bak/
# mkdocs documentation
/site

# mypy
# type checking
.mypy_cache/
.dmypy.json
dmypy.json
pyrightconfig.json
!api/pyrightconfig.json

# Pyre type checker
.pyre/

@@ -180,7 +186,10 @@ docker/volumes/couchbase/*
docker/volumes/oceanbase/*
docker/volumes/plugin_daemon/*
docker/volumes/matrixone/*
docker/volumes/mysql/*
docker/volumes/seekdb/*
!docker/volumes/oceanbase/init.d
docker/volumes/iris/*

docker/nginx/conf.d/default.conf
docker/nginx/ssl/*

@@ -195,8 +204,8 @@ sdks/python-client/dify_client.egg-info
.vscode/*
!.vscode/launch.json.template
!.vscode/README.md
pyrightconfig.json
api/.vscode
web/.vscode
# vscode Code History Extension
.history

@@ -214,7 +223,25 @@ mise.toml
# Next.js build output
.next/

# PWA generated files
web/public/sw.js
web/public/sw.js.map
web/public/workbox-*.js
web/public/workbox-*.js.map
web/public/fallback-*.js

# AI Assistant
.roo/
api/.env.backup
/clickzetta

# Benchmark
scripts/stress-test/setup/config/
scripts/stress-test/reports/

# mcp
.playwright-mcp/
.serena/

# settings
*.local.json
9
.vscode/launch.json.template
vendored
@@ -8,8 +8,7 @@
"module": "flask",
"env": {
"FLASK_APP": "app.py",
"FLASK_ENV": "development",
"GEVENT_SUPPORT": "True"
"FLASK_ENV": "development"
},
"args": [
"run",

@@ -28,9 +27,7 @@
"type": "debugpy",
"request": "launch",
"module": "celery",
"env": {
"GEVENT_SUPPORT": "True"
},
"env": {},
"args": [
"-A",
"app.celery",

@@ -40,7 +37,7 @@
"-c",
"1",
"-Q",
"dataset,generation,mail,ops_trace",
"dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor",
"--loglevel",
"INFO"
],
54
AGENTS.md
Normal file
@@ -0,0 +1,54 @@
# AGENTS.md

## Project Overview

Dify is an open-source platform for developing LLM applications with an intuitive interface combining agentic AI workflows, RAG pipelines, agent capabilities, and model management.

The codebase is split into:

- **Backend API** (`/api`): Python Flask application organized with Domain-Driven Design
- **Frontend Web** (`/web`): Next.js 15 application using TypeScript and React 19
- **Docker deployment** (`/docker`): Containerized deployment configurations

## Backend Workflow

- Run backend CLI commands through `uv run --project api <command>`.
- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
- Integration tests are CI-only and are not expected to run in the local environment.

## Frontend Workflow

```bash
cd web
pnpm lint:fix
pnpm type-check:tsgo
pnpm test
```

## Testing & Quality Practices

- Follow TDD: red → green → refactor.
- Use `pytest` for backend tests with Arrange-Act-Assert structure, as sketched after this list.
- Enforce strong typing; avoid `Any` and prefer explicit type annotations.
- Write self-documenting code; only add comments that explain intent.
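A minimal sketch of that Arrange-Act-Assert layout; `WordCounter` is a hypothetical stand-in for whatever unit is under test:

```python
class WordCounter:
    """Hypothetical unit under test."""

    def count(self, text: str) -> int:
        return len(text.split())


def test_word_counter_counts_whitespace_separated_words() -> None:
    # Arrange: build the object and inputs under test.
    counter = WordCounter()
    text = "red green refactor"

    # Act: invoke exactly one behaviour.
    result = counter.count(text)

    # Assert: verify the observable outcome.
    assert result == 3
```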

## Language Style

- **Python**: Keep type hints on functions and attributes, and implement relevant special methods (e.g., `__repr__`, `__str__`).
- **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check:tsgo`, and avoid `any` types.

## General Practices

- Prefer editing existing files; add new documentation only when requested.
- Inject dependencies through constructors and preserve clean architecture boundaries.
- Handle errors with domain-specific exceptions at the correct layer.

## Project Conventions

- Backend architecture adheres to DDD and Clean Architecture principles.
- Async work runs through Celery with Redis as the broker.
- Frontend user-facing strings must use `web/i18n/en-US/`; avoid hardcoded text.
89
CLAUDE.md
@@ -1,89 +0,0 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

Dify is an open-source platform for developing LLM applications with an intuitive interface combining agentic AI workflows, RAG pipelines, agent capabilities, and model management.

The codebase consists of:

- **Backend API** (`/api`): Python Flask application with Domain-Driven Design architecture
- **Frontend Web** (`/web`): Next.js 15 application with TypeScript and React 19
- **Docker deployment** (`/docker`): Containerized deployment configurations

## Development Commands

### Backend (API)

All Python commands must be prefixed with `uv run --project api`:

```bash
# Start development servers
./dev/start-api # Start API server
./dev/start-worker # Start Celery worker

# Run tests
uv run --project api pytest # Run all tests
uv run --project api pytest tests/unit_tests/ # Unit tests only
uv run --project api pytest tests/integration_tests/ # Integration tests

# Code quality
./dev/reformat # Run all formatters and linters
uv run --project api ruff check --fix ./ # Fix linting issues
uv run --project api ruff format ./ # Format code
uv run --project api mypy . # Type checking
```

### Frontend (Web)

```bash
cd web
pnpm lint # Run ESLint
pnpm eslint-fix # Fix ESLint issues
pnpm test # Run Jest tests
```

## Testing Guidelines

### Backend Testing

- Use `pytest` for all backend tests
- Write tests first (TDD approach)
- Test structure: Arrange-Act-Assert

## Code Style Requirements

### Python

- Use type hints for all functions and class attributes
- No `Any` types unless absolutely necessary
- Implement special methods (`__repr__`, `__str__`) appropriately

### TypeScript/JavaScript

- Strict TypeScript configuration
- ESLint with Prettier integration
- Avoid `any` type

## Important Notes

- **Environment Variables**: Always use UV for Python commands: `uv run --project api <command>`
- **Comments**: Only write meaningful comments that explain "why", not "what"
- **File Creation**: Always prefer editing existing files over creating new ones
- **Documentation**: Don't create documentation files unless explicitly requested
- **Code Quality**: Always run `./dev/reformat` before committing backend changes

## Common Development Tasks

### Adding a New API Endpoint

1. Create controller in `/api/controllers/`
1. Add service logic in `/api/services/`
1. Update routes in controller's `__init__.py`
1. Write tests in `/api/tests/`

## Project-Specific Conventions

- All async tasks use Celery with Redis as broker
- **Internationalization**: Frontend supports multiple languages with English (`web/i18n/en-US/`) as the source. All user-facing text must use i18n keys, no hardcoded strings. Edit corresponding module files in `en-US/` directory for translations.
@@ -77,6 +77,8 @@ How we prioritize:

For setting up the frontend service, please refer to our comprehensive [guide](https://github.com/langgenius/dify/blob/main/web/README.md) in the `web/README.md` file. This document provides detailed instructions to help you set up the frontend environment properly.

**Testing**: All React components must have comprehensive test coverage. See [web/testing/testing.md](https://github.com/langgenius/dify/blob/main/web/testing/testing.md) for the canonical frontend testing guidelines and follow every requirement described there.

#### Backend

For setting up the backend service, kindly refer to our detailed [instructions](https://github.com/langgenius/dify/blob/main/api/README.md) in the `api/README.md` file. This document contains step-by-step guidance to help you get the backend up and running smoothly.
96
Makefile
@@ -4,6 +4,77 @@ WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web
API_IMAGE=$(DOCKER_REGISTRY)/dify-api
VERSION=latest

# Default target - show help
.DEFAULT_GOAL := help

# Backend Development Environment Setup
.PHONY: dev-setup prepare-docker prepare-web prepare-api

# Dev setup target
dev-setup: prepare-docker prepare-web prepare-api
@echo "✅ Backend development environment setup complete!"

# Step 1: Prepare Docker middleware
prepare-docker:
@echo "🐳 Setting up Docker middleware..."
@cp -n docker/middleware.env.example docker/middleware.env 2>/dev/null || echo "Docker middleware.env already exists"
@cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev up -d
@echo "✅ Docker middleware started"

# Step 2: Prepare web environment
prepare-web:
@echo "🌐 Setting up web environment..."
@cp -n web/.env.example web/.env 2>/dev/null || echo "Web .env already exists"
@cd web && pnpm install
@echo "✅ Web environment prepared (not started)"

# Step 3: Prepare API environment
prepare-api:
@echo "🔧 Setting up API environment..."
@cp -n api/.env.example api/.env 2>/dev/null || echo "API .env already exists"
@cd api && uv sync --dev
@cd api && uv run flask db upgrade
@echo "✅ API environment prepared (not started)"

# Clean dev environment
dev-clean:
@echo "⚠️ Stopping Docker containers..."
@cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev down
@echo "🗑️ Removing volumes..."
@rm -rf docker/volumes/db
@rm -rf docker/volumes/redis
@rm -rf docker/volumes/plugin_daemon
@rm -rf docker/volumes/weaviate
@rm -rf api/storage
@echo "✅ Cleanup complete"

# Backend Code Quality Commands
format:
@echo "🎨 Running ruff format..."
@uv run --project api --dev ruff format ./api
@echo "✅ Code formatting complete"

check:
@echo "🔍 Running ruff check..."
@uv run --project api --dev ruff check ./api
@echo "✅ Code check complete"

lint:
@echo "🔧 Running ruff format, check with fixes, and import linter..."
@uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
@uv run --directory api --dev lint-imports
@echo "✅ Linting complete"

type-check:
@echo "📝 Running type check with basedpyright..."
@uv run --directory api --dev basedpyright
@echo "✅ Type check complete"

test:
@echo "🧪 Running backend unit tests..."
@uv run --project api --dev dev/pytest/pytest_unit_tests.sh
@echo "✅ Tests complete"

# Build Docker images
build-web:
@echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..."

@@ -39,5 +110,28 @@ build-push-web: build-web push-web
build-push-all: build-all push-all
@echo "All Docker images have been built and pushed."

# Help target
help:
@echo "Development Setup Targets:"
@echo " make dev-setup - Run all setup steps for backend dev environment"
@echo " make prepare-docker - Set up Docker middleware"
@echo " make prepare-web - Set up web environment"
@echo " make prepare-api - Set up API environment"
@echo " make dev-clean - Stop Docker middleware containers"
@echo ""
@echo "Backend Code Quality:"
@echo " make format - Format code with ruff"
@echo " make check - Check code with ruff"
@echo " make lint - Format and fix code with ruff"
@echo " make type-check - Run type checking with basedpyright"
@echo " make test - Run backend unit tests"
@echo ""
@echo "Docker Build Targets:"
@echo " make build-web - Build web Docker image"
@echo " make build-api - Build API Docker image"
@echo " make build-all - Build all Docker images"
@echo " make push-all - Push all Docker images"
@echo " make build-push-all - Build and push all Docker images"

# Phony targets
.PHONY: build-web build-api push-web push-api build-all push-all build-push-all
.PHONY: build-web build-api push-web push-api build-all push-all build-push-all dev-setup prepare-docker prepare-web prepare-api dev-clean help format check lint type-check test
63
README.md
@@ -36,22 +36,28 @@
<img alt="Issues closed" src="https://img.shields.io/github/issues-search?query=repo%3Alanggenius%2Fdify%20is%3Aclosed&label=issues%20closed&labelColor=%20%237d89b0&color=%20%235d6b98"></a>
<a href="https://github.com/langgenius/dify/discussions/" target="_blank">
<img alt="Discussion posts" src="https://img.shields.io/github/discussions/langgenius/dify?labelColor=%20%239b8afb&color=%20%237a5af8"></a>
<a href="https://insights.linuxfoundation.org/project/langgenius-dify" target="_blank">
<img alt="LFX Health Score" src="https://insights.linuxfoundation.org/api/badge/health-score?project=langgenius-dify"></a>
<a href="https://insights.linuxfoundation.org/project/langgenius-dify" target="_blank">
<img alt="LFX Contributors" src="https://insights.linuxfoundation.org/api/badge/contributors?project=langgenius-dify"></a>
<a href="https://insights.linuxfoundation.org/project/langgenius-dify" target="_blank">
<img alt="LFX Active Contributors" src="https://insights.linuxfoundation.org/api/badge/active-contributors?project=langgenius-dify"></a>
</p>

<p align="center">
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_TW.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_DE.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="./docs/zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="./docs/zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./docs/ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./docs/es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./docs/fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./docs/tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./docs/ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./docs/ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./docs/tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./docs/vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./docs/de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="./docs/bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify is an open-source platform for developing LLM applications. Its intuitive interface combines agentic AI workflows, RAG pipelines, agent capabilities, model management, observability features, and more—allowing you to quickly move from prototype to production.

@@ -63,7 +69,7 @@ Dify is an open-source platform for developing LLM applications. Its intuitive i
> - CPU >= 2 Core
> - RAM >= 4 GiB

</br>
<br/>

The easiest way to start the Dify server is through [Docker Compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:

@@ -109,15 +115,15 @@ All of Dify's offerings come with corresponding APIs, so you could effortlessly

## Using Dify

- **Cloud </br>**
- **Cloud <br/>**
We host a [Dify Cloud](https://dify.ai) service for anyone to try with zero setup. It provides all the capabilities of the self-deployed version, and includes 200 free GPT-4 calls in the sandbox plan.

- **Self-hosting Dify Community Edition</br>**
- **Self-hosting Dify Community Edition<br/>**
Quickly get Dify running in your environment with this [starter guide](#quick-start).
Use our [documentation](https://docs.dify.ai) for further references and more in-depth instructions.

- **Dify for enterprise / organizations</br>**
We provide additional enterprise-centric features. [Log your questions for us through this chatbot](https://udify.app/chat/22L1zSxg6yW1cWQg) or [send us an email](mailto:business@dify.ai?subject=%5BGitHub%5DBusiness%20License%20Inquiry) to discuss enterprise needs. </br>
- **Dify for enterprise / organizations<br/>**
We provide additional enterprise-centric features. [Send us an email](mailto:business@dify.ai?subject=%5BGitHub%5DBusiness%20License%20Inquiry) to discuss your enterprise needs. <br/>

> For startups and small businesses using AWS, check out [Dify Premium on AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) and deploy it to your own AWS VPC with one click. It's an affordable AMI offering with the option to create apps with custom logo and branding.

@@ -129,8 +135,31 @@ Star Dify on GitHub and be instantly notified of new releases.

## Advanced Setup

### Custom configurations

If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).

#### Customizing Suggested Questions

You can now customize the "Suggested Questions After Answer" feature to better fit your use case. For example, to generate longer, more technical questions:

```bash
# In your .env file
SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=512
SUGGESTED_QUESTIONS_TEMPERATURE=0.3
```

See the [Suggested Questions Configuration Guide](docs/suggested-questions-configuration.md) for detailed examples and usage instructions.

### Metrics Monitoring with Grafana

Import the dashboard into Grafana, using Dify's PostgreSQL database as the data source, to monitor metrics at the granularity of apps, tenants, messages, and more.

- [Grafana Dashboard by @bowenliang123](https://github.com/bowenliang123/dify-grafana-dashboard)

### Deployment with Kubernetes

If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.

- [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
152
api/.env.example
@@ -27,6 +27,9 @@ FILES_URL=http://localhost:5001
# Example: INTERNAL_FILES_URL=http://api:5001
INTERNAL_FILES_URL=http://127.0.0.1:5001

# TRIGGER URL
TRIGGER_URL=http://localhost:5001

# The time in seconds after the signature is rejected
FILES_ACCESS_TIMEOUT=300

@@ -69,13 +72,18 @@ REDIS_CLUSTERS_PASSWORD=
# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
CELERY_BACKEND=redis
# PostgreSQL database configuration

# Database configuration
DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=localhost
DB_PORT=5432
DB_DATABASE=dify

SQLALCHEMY_POOL_PRE_PING=true
SQLALCHEMY_POOL_TIMEOUT=30

# Storage configuration
# use for store upload files, private keys...
# storage type: opendal, s3, aliyun-oss, azure-blob, baidu-obs, google-storage, huawei-obs, oci-storage, tencent-cos, volcengine-tos, supabase

@@ -154,9 +162,11 @@ SUPABASE_URL=your-server-url
# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://localhost:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site’s top-level domain (e.g., `example.com`). Leading dots are optional.
COOKIE_DOMAIN=

# Vector database configuration
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
VECTOR_STORE=weaviate
# Prefix used to create collection name in vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index

@@ -166,6 +176,18 @@ WEAVIATE_ENDPOINT=http://localhost:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_GRPC_ENABLED=false
WEAVIATE_BATCH_SIZE=100
WEAVIATE_TOKENIZATION=word

# OceanBase Vector configuration
OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
OCEANBASE_FULLTEXT_PARSER=ik
SEEKDB_MEMORY_LIMIT=2G

# Qdrant configuration, use `http://localhost:6333` for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=http://localhost:6333

@@ -302,6 +324,8 @@ BAIDU_VECTOR_DB_API_KEY=dify
BAIDU_VECTOR_DB_DATABASE=dify
BAIDU_VECTOR_DB_SHARD=1
BAIDU_VECTOR_DB_REPLICAS=3
BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER
BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE

# Upstash configuration
UPSTASH_VECTOR_URL=your-server-url

@@ -327,17 +351,17 @@ MATRIXONE_DATABASE=dify
LINDORM_URL=http://ld-*******************-proxy-search-pub.lindorm.aliyuncs.com:30070
LINDORM_USERNAME=admin
LINDORM_PASSWORD=admin
USING_UGC_INDEX=False
LINDORM_USING_UGC=True
LINDORM_QUERY_TIMEOUT=1

# OceanBase Vector configuration
OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G
OCEANBASE_ENABLE_HYBRID_SEARCH=false
# AlibabaCloud MySQL Vector configuration
ALIBABACLOUD_MYSQL_HOST=127.0.0.1
ALIBABACLOUD_MYSQL_PORT=3306
ALIBABACLOUD_MYSQL_USER=root
ALIBABACLOUD_MYSQL_PASSWORD=root
ALIBABACLOUD_MYSQL_DATABASE=dify
ALIBABACLOUD_MYSQL_MAX_CONNECTION=5
ALIBABACLOUD_MYSQL_HNSW_M=6

# openGauss configuration
OPENGAUSS_HOST=127.0.0.1

@@ -355,6 +379,12 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50

# Comma-separated list of file extensions blocked from upload for security reasons.
# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll).
# Empty by default to allow all file types.
# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll
UPLOAD_FILE_EXTENSION_BLACKLIST=

# Model configuration
MULTIMODAL_SEND_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512

@@ -404,6 +434,9 @@ SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
SSRF_POOL_MAX_CONNECTIONS=100
SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20
SSRF_POOL_KEEPALIVE_EXPIRY=5.0

BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database

@@ -414,10 +447,17 @@ WORKFLOW_FILE_UPLOAD_LIMIT=10
# CODE EXECUTION CONFIGURATION
CODE_EXECUTION_ENDPOINT=http://127.0.0.1:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_EXECUTION_SSL_VERIFY=True
CODE_EXECUTION_POOL_MAX_CONNECTIONS=100
CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20
CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_STRING_LENGTH=80000
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
CODE_MAX_STRING_LENGTH=400000
TEMPLATE_TRANSFORM_MAX_LENGTH=400000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000

@@ -434,6 +474,9 @@ HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
HTTP_REQUEST_NODE_SSL_VERIFY=True

# Webhook request configuration
WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760

# Respect X-* headers to redirect clients
RESPECT_XFORWARD_HEADERS_ENABLED=false

@@ -457,9 +500,18 @@ INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
MAX_VARIABLE_SIZE=204800

# GraphEngine Worker Pool Configuration
# Minimum number of workers per GraphEngine instance (default: 1)
GRAPH_ENGINE_MIN_WORKERS=1
# Maximum number of workers per GraphEngine instance (default: 10)
GRAPH_ENGINE_MAX_WORKERS=10
# Queue depth threshold that triggers worker scale up (default: 3)
GRAPH_ENGINE_SCALE_UP_THRESHOLD=3
# Seconds of idle time before scaling down workers (default: 5.0)
GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0

# Workflow storage configuration
# Options: rdbms, hybrid
# rdbms: Use only the relational database (default)

@@ -480,7 +532,7 @@ API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node
API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository
# Workflow log cleanup configuration
# Enable automatic cleanup of workflow run logs to manage database size
WORKFLOW_LOG_CLEANUP_ENABLED=true
WORKFLOW_LOG_CLEANUP_ENABLED=false
# Number of days to retain workflow run logs (default: 30 days)
WORKFLOW_LOG_RETENTION_DAYS=30
# Batch size for workflow log cleanup operations (default: 100)

@@ -488,8 +540,28 @@ WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100

# App configuration
APP_MAX_EXECUTION_TIME=1200
APP_DEFAULT_ACTIVE_REQUESTS=0
APP_MAX_ACTIVE_REQUESTS=0

# Aliyun SLS Logstore Configuration
# Aliyun Access Key ID
ALIYUN_SLS_ACCESS_KEY_ID=
# Aliyun Access Key Secret
ALIYUN_SLS_ACCESS_KEY_SECRET=
# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com)
ALIYUN_SLS_ENDPOINT=
# Aliyun SLS Region (e.g., cn-hangzhou)
ALIYUN_SLS_REGION=
# Aliyun SLS Project Name
ALIYUN_SLS_PROJECT_NAME=
# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage)
ALIYUN_SLS_LOGSTORE_TTL=365
# Enable dual-write to both SLS LogStore and SQL database (default: false)
LOGSTORE_DUAL_WRITE_ENABLED=false
# Enable dual-read fallback to SQL database when LogStore returns no results (default: true)
# Useful for migration scenarios where historical data exists only in SQL database
LOGSTORE_DUAL_READ_ENABLED=true

# Celery beat configuration
CELERY_BEAT_SCHEDULER_TIME=1

@@ -502,6 +574,12 @@ ENABLE_CLEAN_MESSAGES=false
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
ENABLE_DATASETS_QUEUE_MONITOR=false
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true
# Interval time in minutes for polling scheduled workflows (default: 1 min)
WORKFLOW_SCHEDULE_POLLER_INTERVAL=1
WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100
# Maximum number of scheduled workflows to dispatch per tick (0 for unlimited)
WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0

# Position configuration
POSITION_TOOL_PINS=

@@ -529,6 +607,7 @@ ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}

# Reset password token expiry minutes
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5
CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5
OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5

@@ -568,3 +647,46 @@ QUEUE_MONITOR_INTERVAL=30
# Swagger UI configuration
SWAGGER_UI_ENABLED=true
SWAGGER_UI_PATH=/swagger-ui.html

# Whether to encrypt dataset IDs when exporting DSL files (default: true)
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true

# Suggested Questions After Answer Configuration
# These environment variables allow customization of the suggested questions feature
#
# Custom prompt for generating suggested questions (optional)
# If not set, uses the default prompt that generates 3 questions under 20 characters each
# Example: "Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: [\"question1\",\"question2\",\"question3\",\"question4\",\"question5\"]"
# SUGGESTED_QUESTIONS_PROMPT=

# Maximum number of tokens for suggested questions generation (default: 256)
# Adjust this value for longer questions or more questions
# SUGGESTED_QUESTIONS_MAX_TOKENS=256

# Temperature for suggested questions generation (default: 0.0)
# Higher values (0.5-1.0) produce more creative questions, lower values (0.0-0.3) produce more focused questions
# SUGGESTED_QUESTIONS_TEMPERATURE=0

# Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1

# Maximum number of segments for dataset segments API (0 for unlimited)
DATASET_MAX_SEGMENTS_PER_REQUEST=0

# Multimodal knowledgebase limit
SINGLE_CHUNK_ATTACHMENT_LIMIT=10
ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2
ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60
IMAGE_FILE_BATCH_LIMIT=10

# Maximum allowed CSV file size for annotation import in megabytes
ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2
# Maximum number of annotation records allowed in a single import
ANNOTATION_IMPORT_MAX_RECORDS=10000
# Minimum number of annotation records required in a single import
ANNOTATION_IMPORT_MIN_RECORDS=1
ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5
ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20
# Maximum number of concurrent annotation import tasks per tenant
ANNOTATION_IMPORT_MAX_CONCURRENT=5
106
api/.importlinter
Normal file
@@ -0,0 +1,106 @@
[importlinter]
root_packages =
core
configs
controllers
models
tasks
services

[importlinter:contract:workflow]
name = Workflow
type = layers
layers =
graph_engine
graph_events
graph
nodes
node_events
runtime
entities
containers =
core.workflow
ignore_imports =
core.workflow.nodes.base.node -> core.workflow.graph_events
core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_events
core.workflow.nodes.loop.loop_node -> core.workflow.graph_events

core.workflow.nodes.node_factory -> core.workflow.graph
core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_engine
core.workflow.nodes.iteration.iteration_node -> core.workflow.graph
core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_engine.command_channels
core.workflow.nodes.loop.loop_node -> core.workflow.graph_engine
core.workflow.nodes.loop.loop_node -> core.workflow.graph
core.workflow.nodes.loop.loop_node -> core.workflow.graph_engine.command_channels

[importlinter:contract:rsc]
name = RSC
type = layers
layers =
graph_engine
response_coordinator
containers =
core.workflow.graph_engine

[importlinter:contract:worker]
name = Worker
type = layers
layers =
graph_engine
worker
containers =
core.workflow.graph_engine

[importlinter:contract:graph-engine-architecture]
name = Graph Engine Architecture
type = layers
layers =
graph_engine
orchestration
command_processing
event_management
error_handler
graph_traversal
graph_state_manager
worker_management
domain
containers =
core.workflow.graph_engine

[importlinter:contract:domain-isolation]
name = Domain Model Isolation
type = forbidden
source_modules =
core.workflow.graph_engine.domain
forbidden_modules =
core.workflow.graph_engine.worker_management
core.workflow.graph_engine.command_channels
core.workflow.graph_engine.layers
core.workflow.graph_engine.protocols

[importlinter:contract:worker-management]
name = Worker Management
type = forbidden
source_modules =
core.workflow.graph_engine.worker_management
forbidden_modules =
core.workflow.graph_engine.orchestration
core.workflow.graph_engine.command_processing
core.workflow.graph_engine.event_management

[importlinter:contract:graph-traversal-components]
name = Graph Traversal Components
type = layers
layers =
edge_processor
skip_propagator
containers =
core.workflow.graph_engine.graph_traversal

[importlinter:contract:command-channels]
name = Command Channels Independence
type = independence
modules =
core.workflow.graph_engine.command_channels.in_memory_channel
core.workflow.graph_engine.command_channels.redis_channel
@@ -5,7 +5,7 @@ line-length = 120
quote-style = "double"

[lint]
preview = false
preview = true
select = [
"B", # flake8-bugbear rules
"C4", # flake8-comprehensions

@@ -30,21 +30,26 @@ select = [
"RUF022", # unsorted-dunder-all
"S506", # unsafe-yaml-load
"SIM", # flake8-simplify rules
"T201", # print-found
"TRY400", # error-instead-of-exception
"TRY401", # verbose-log-message
"UP", # pyupgrade rules
"W191", # tab-indentation
"W605", # invalid-escape-sequence
"G001", # don't use str format to logging messages
"G003", # don't use + in logging messages
"G004", # don't use f-strings to format logging messages
"UP042", # use StrEnum,
"S110", # disallow the try-except-pass pattern.

# security related linting rules
# RCE protection (sort of)
"S102", # exec-builtin, disallow use of `exec`
"S307", # suspicious-eval-usage, disallow use of `eval` and `ast.literal_eval`
"S301", # suspicious-pickle-usage, disallow use of `pickle` and its wrappers.
"S302", # suspicious-marshal-usage, disallow use of `marshal` module
"S311", # suspicious-non-cryptographic-random-usage
"G001", # don't use str format to logging messages
"G003", # don't use + in logging messages
"G004", # don't use f-strings to format logging messages
"S311", # suspicious-non-cryptographic-random-usage,

]

ignore = [

@@ -64,6 +69,7 @@ ignore = [
"B006", # mutable-argument-default
"B007", # unused-loop-control-variable
"B026", # star-arg-unpacking-after-keyword-arg
"B901", # allow return in yield
"B903", # class-as-data-structure
"B904", # raise-without-from-inside-except
"B905", # zip-without-explicit-strict

@@ -78,7 +84,6 @@ ignore = [
"SIM113", # enumerate-for-loop
"SIM117", # multiple-with-statements
"SIM210", # if-expr-with-true-false
"UP038", # deprecated and not recommended by Ruff, https://docs.astral.sh/ruff/rules/non-pep604-isinstance/
]

[lint.per-file-ignores]

@@ -89,11 +94,16 @@ ignore = [
"configs/*" = [
"N802", # invalid-function-name
]
"core/model_runtime/callbacks/base_callback.py" = ["T201"]
"core/workflow/callbacks/workflow_logging_callback.py" = ["T201"]
"libs/gmpy2_pkcs10aep_cipher.py" = [
"N803", # invalid-argument-name
]
"tests/*" = [
"F811", # redefined-while-unused
"T201", # allow print in tests,
"S110", # allow ignoring exceptions in tests code (currently)

]

[lint.pyflakes]
2
api/.vscode/launch.json.example
vendored
@@ -54,7 +54,7 @@
"--loglevel",
"DEBUG",
"-Q",
"dataset,generation,mail,ops_trace,app_deletion"
"dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor"
]
}
]
62
api/AGENTS.md
Normal file
@@ -0,0 +1,62 @@
# Agent Skill Index

Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.

______________________________________________________________________

## Platform Foundations

- **[Infrastructure Overview](agent_skills/infra.md)**\
When to read this:

- You need to understand where a feature belongs in the architecture.
- You’re wiring storage, Redis, vector stores, or OTEL.
- You’re about to add CLI commands or async jobs.\
What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.

- **[Coding Style](agent_skills/coding_style.md)**\
When to read this:

- You’re writing or reviewing backend code and need the authoritative checklist.
- You’re unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
- You want the exact lint/type/test commands used in PRs.\
Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.

______________________________________________________________________

## Plugin & Extension Development

- **[Plugin Systems](agent_skills/plugin.md)**\
When to read this:

- You’re building or debugging a marketplace plugin.
- You need to know how manifests, providers, daemons, and migrations fit together.\
What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.

- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
When to read this:

- You must integrate OAuth for a plugin or datasource.
- You’re handling credential encryption or refresh flows.\
Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.

______________________________________________________________________

## Workflow Entry & Execution

- **[Trigger Concepts](agent_skills/trigger.md)**\
When to read this:

- You’re debugging why a workflow didn’t start.
- You’re adding a new trigger type or hook.
- You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.

______________________________________________________________________

## Additional Notes for Agents

- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
- When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
- If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
- Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
- When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.
@@ -15,7 +15,11 @@ FROM base AS packages
# RUN sed -i 's@deb.debian.org@mirrors.aliyun.com@g' /etc/apt/sources.list.d/debian.sources

RUN apt-get update \
&& apt-get install -y --no-install-recommends gcc g++ libc-dev libffi-dev libgmp-dev libmpfr-dev libmpc-dev
&& apt-get install -y --no-install-recommends \
# basic environment
g++ \
# for building gmpy2
libmpfr-dev libmpc-dev

# Install Python dependencies
COPY pyproject.toml uv.lock ./

@@ -44,14 +48,22 @@ ENV PYTHONIOENCODING=utf-8

WORKDIR /app/api

# Create non-root user
ARG dify_uid=1001
RUN groupadd -r -g ${dify_uid} dify && \
useradd -r -u ${dify_uid} -g ${dify_uid} -s /bin/bash dify && \
chown -R dify:dify /app

RUN \
apt-get update \
# Install dependencies
&& apt-get install -y --no-install-recommends \
# basic environment
curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
curl nodejs \
# for gmpy2 \
libgmp-dev libmpfr-dev libmpc-dev \
# For Security
expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
expat libldap-2.5-0=2.5.13+dfsg-5 perl libsqlite3-0=3.40.1-2+deb12u2 zlib1g=1:1.2.13.dfsg-1 \
# install fonts to support the use of tools like pypdfium2
fonts-noto-cjk \
# install a package to improve the accuracy of guessing mime type and file extension

@@ -63,24 +75,29 @@ RUN \

# Copy Python environment and packages
ENV VIRTUAL_ENV=/app/api/.venv
COPY --from=packages ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --from=packages --chown=dify:dify ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

# Download nltk data
RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')"
RUN mkdir -p /usr/local/share/nltk_data && NLTK_DATA=/usr/local/share/nltk_data python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger'); nltk.download('stopwords')" \
&& chmod -R 755 /usr/local/share/nltk_data

ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache

RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')"
RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')" \
&& chown -R dify:dify ${TIKTOKEN_CACHE_DIR}

# Copy source code
COPY . /app/api/
COPY --chown=dify:dify . /app/api/

# Prepare entrypoint script
COPY --chown=dify:dify --chmod=755 docker/entrypoint.sh /entrypoint.sh

# Copy entrypoint
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}
ENV NLTK_DATA=/usr/local/share/nltk_data

USER dify

ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
@@ -15,8 +15,8 @@

```bash
cd ../docker
cp middleware.env.example middleware.env
# change the profile to other vector database if you are not using weaviate
docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d
# change the profile to mysql if you are not using postgres, change the profile to other vector database if you are not using weaviate
docker compose -f docker-compose.middleware.yaml --profile postgresql --profile weaviate -p dify up -d
cd ../api
```

@@ -26,6 +26,10 @@
cp .env.example .env
```

> [!IMPORTANT]
>
> When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site’s top-level domain (e.g., `example.com`). The frontend and backend must be under the same top-level domain in order to share authentication cookies.

1. Generate a `SECRET_KEY` in the `.env` file.

bash for Linux

@@ -80,10 +84,10 @@
1. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service.

```bash
uv run celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation
uv run celery -A app.celery worker -P threads -c 2 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor
```

Addition, if you want to debug the celery scheduled tasks, you can use the following command in another terminal:
Additionally, if you want to debug the celery scheduled tasks, you can run the following command in another terminal to start the beat service:

```bash
uv run celery -A app.celery beat

@@ -108,5 +112,5 @@ uv run celery -A app.celery beat
../dev/reformat # Run all formatters and linters
uv run ruff check --fix ./ # Fix linting issues
uv run ruff format ./ # Format code
uv run mypy . # Type checking
uv run basedpyright . # Type checking
```
115
api/agent_skills/coding_style.md
Normal file
@@ -0,0 +1,115 @@
## Linter

- Always follow `.ruff.toml`.
- Run `uv run ruff check --fix --unsafe-fixes`.
- Keep each line under 100 characters (including spaces).

## Code Style

- `snake_case` for variables and functions.
- `PascalCase` for classes.
- `UPPER_CASE` for constants.

## Rules

- Use Pydantic v2 standard.
- Use `uv` for package management.
- Do not override dunder methods like `__init__`, `__iadd__`, etc.
- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
- Prefer simple functions over classes for lightweight helpers.
- Keep files below 800 lines; split when necessary.
- Keep code readable—no clever hacks.
- Never use `print`; log with `logger = logging.getLogger(__name__)`.

## Guiding Principles

- Mirror the project’s layered architecture: controller → service → core/domain.
- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
- Optimise for observability: deterministic control flow, clear logging, actionable errors.

## SQLAlchemy Patterns

- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.

- Open sessions with context managers:

```python
from sqlalchemy import select
from sqlalchemy.orm import Session

with Session(db.engine, expire_on_commit=False) as session:
    stmt = select(Workflow).where(
        Workflow.id == workflow_id,
        Workflow.tenant_id == tenant_id,
    )
    workflow = session.execute(stmt).scalar_one_or_none()
```

- Use SQLAlchemy expressions; avoid raw SQL unless necessary.

- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.

- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).

## Storage & External IO

- Access storage via `extensions.ext_storage.storage`.
- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
- Background tasks that touch storage must be idempotent and log the relevant object identifiers.

## Pydantic Usage

- Define DTOs with Pydantic v2 models and forbid extras by default.

- Use `@field_validator` / `@model_validator` for domain rules.

- Example:

```python
from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator

class TriggerConfig(BaseModel):
    endpoint: HttpUrl
    secret: str

    model_config = ConfigDict(extra="forbid")

    @field_validator("secret")
    def ensure_secret_prefix(cls, value: str) -> str:
        if not value.startswith("dify_"):
            raise ValueError("secret must start with dify_")
        return value
```

## Generics & Protocols

- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces), as sketched after this list.
- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
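A compact sketch of both ideas together — a `Protocol` as the behavioural contract and a generic in-memory implementation; the names are illustrative, not an existing module:

```python
from typing import Generic, Protocol, TypeVar

T = TypeVar("T")


class Cache(Protocol[T]):
    """Behavioural contract: anything with get/set satisfies it structurally."""

    def get(self, key: str) -> T | None: ...

    def set(self, key: str, value: T) -> None: ...


class InMemoryCache(Generic[T]):
    """Generic implementation; no inheritance from Cache is required."""

    def __init__(self) -> None:
        self._data: dict[str, T] = {}

    def get(self, key: str) -> T | None:
        return self._data.get(key)

    def set(self, key: str, value: T) -> None:
        self._data[key] = value


def warm_up(cache: Cache[str]) -> None:
    # Callers depend on the protocol, not on a concrete cache class.
    cache.set("greeting", "hello")
```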

## Error Handling & Logging

- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
- Declare `logger = logging.getLogger(__name__)` at module top.
- Include tenant/app/workflow identifiers in log context.
- Log retryable events at `warning`, terminal failures at `error`, as sketched below.
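A short sketch of those logging rules; `refresh_provider` is a hypothetical helper, shown only to give the handler something to wrap:

```python
import logging

logger = logging.getLogger(__name__)


def refresh_provider(provider: str) -> None:
    """Hypothetical stand-in for a real provider refresh call."""


def sync_provider(tenant_id: str, provider: str) -> None:
    try:
        refresh_provider(provider)
    except TimeoutError:
        # Retryable event: warning level, tenant/provider identifiers in context.
        logger.warning("provider refresh timed out, tenant=%s provider=%s", tenant_id, provider)
        raise
    except Exception:
        # Terminal failure: error level with the traceback attached.
        logger.exception("provider refresh failed, tenant=%s provider=%s", tenant_id, provider)
        raise
```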
|
||||
|
||||
## Tooling & Checks

- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
- Type checks: `uv run --directory api --dev basedpyright`.
- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
- Run all of the above before submitting your work.
## Controllers & Services

- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
- Document non-obvious behaviour with concise comments.
## Miscellaneous

- Use `configs.dify_config` for configuration—never read environment variables directly.
- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
- Keep experimental scripts under `dev/`; do not ship them in production builds.
api/agent_skills/infra.md (new file, 96 lines)
@@ -0,0 +1,96 @@
## Configuration

- Import `configs.dify_config` for every runtime toggle. Do not read environment variables directly; see the sketch below.
- Add new settings to the proper mixin inside `configs/` (deployment, feature, middleware, etc.) so they load through `DifyConfig`.
- Remote overrides come from the optional providers in `configs/remote_settings_sources`; keep the in-code defaults safe when a value is missing.
- Examples: logging pulls its targets from `extensions/ext_logging.py`, and model provider URLs are assembled in `services/entities/model_provider_entities.py`.
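A minimal illustration, using settings that appear elsewhere in this codebase (`EDITION`, `STORAGE_TYPE`):

```python
from configs import dify_config

# Read settings through the typed config object, never os.environ.
if dify_config.EDITION == "SELF_HOSTED":
    backend = dify_config.STORAGE_TYPE
```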
## Dependencies

- Runtime dependencies live in `[project].dependencies` inside `pyproject.toml`. Optional clients go into the `storage`, `tools`, or `vdb` groups under `[dependency-groups]`.
- Always pin versions and keep the list alphabetised. Shared tooling (lint, typing, pytest) belongs in the `dev` group.
- When code needs a new package, explain why in the PR and run `uv lock` so the lockfile stays current.
## Storage & Files

- Use `extensions.ext_storage.storage` for all blob IO; it already respects the configured backend.
- Convert files for workflows with helpers in `core/file/file_manager.py`; they handle signed URLs and multimodal payloads.
- When writing controller logic, delegate upload quotas and metadata to `services/file_service.py` instead of touching storage directly.
- All outbound HTTP fetches (webhooks, remote files) must go through the SSRF-safe client in `core/helper/ssrf_proxy.py`; it wraps `httpx` with the allow/deny rules configured for the platform.
## Redis & Shared State

- Access Redis through `extensions.ext_redis.redis_client`. For locking, reuse `redis_client.lock`; see the sketch below.
- Prefer higher-level helpers when available: rate limits use `libs.helper.RateLimiter`, provider metadata uses the caches in `core/helper/provider_cache.py`.
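A minimal locking sketch (the lock name and the helper it guards are illustrative):

```python
from extensions.ext_redis import redis_client


def refresh_provider_metadata(tenant_id: str) -> None:
    ...  # placeholder body for illustration


def sync_providers(tenant_id: str) -> None:
    # Serialise the tenant-scoped critical section via the shared client's lock helper.
    with redis_client.lock(f"provider_sync:{tenant_id}", timeout=30):
        refresh_provider_metadata(tenant_id)
```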
## Models

- SQLAlchemy models sit in `models/` and inherit from the shared declarative `Base` defined in `models/base.py` (metadata configured via `models/engine.py`).
- `models/__init__.py` exposes grouped aggregates: account/tenant models, app and conversation tables, datasets, providers, workflow runs, triggers, etc. Import from there to avoid deep path churn.
- Follow the DDD boundary: persistence objects live in `models/`, repositories under `repositories/` translate them into domain entities, and services consume those repositories.
- When adding a table, create the model class, register it in `models/__init__.py`, wire a repository if needed, and generate an Alembic migration as described below. A minimal model sketch follows.
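A minimal sketch of a new tenant-scoped model (table and column names are illustrative):

```python
from sqlalchemy.orm import Mapped, mapped_column

from models.base import Base


class ExampleItem(Base):
    __tablename__ = "example_items"

    id: Mapped[str] = mapped_column(primary_key=True)
    tenant_id: Mapped[str] = mapped_column(index=True)  # always scope by tenant
    name: Mapped[str] = mapped_column()
```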
## Vector Stores

- Vector client implementations live in `core/rag/datasource/vdb/<provider>`, with a common factory in `core/rag/datasource/vdb/vector_factory.py` and enums in `core/rag/datasource/vdb/vector_type.py`.
- Retrieval pipelines call these providers through `core/rag/datasource/retrieval_service.py` and dataset ingestion flows in `services/dataset_service.py`.
- The CLI helper `flask vdb-migrate` orchestrates bulk migrations using routines in `commands.py`; reuse that pattern when adding new backend transitions.
- To add another store, mirror the provider layout, register it with the factory, and include any schema changes in Alembic migrations.
## Observability & OTEL

- OpenTelemetry settings live under the observability mixin in `configs/observability`. Toggle exporters and sampling via `dify_config`, not ad-hoc env reads.
- HTTP, Celery, Redis, SQLAlchemy, and httpx instrumentation is initialised in `extensions/ext_app_metrics.py` and `extensions/ext_request_logging.py`; reuse these hooks when adding new workers or entrypoints.
- When creating background tasks or external calls, propagate tracing context with helpers in the existing instrumented clients (e.g. use the shared `httpx` session from `core/helper/http_client_pooling.py`).
- If you add a new external integration, ensure spans and metrics are emitted by wiring the appropriate OTEL instrumentation package in `pyproject.toml` and configuring it in `extensions/`.
## Ops Integrations

- Langfuse support and other tracing bridges live under `core/ops/opik_trace`. Config toggles sit in `configs/observability`, while exporters are initialised in the OTEL extensions mentioned above.
- External monitoring services should follow this pattern: keep client code in `core/ops`, expose switches via `dify_config`, and hook initialisation in `extensions/ext_app_metrics.py` or sibling modules.
- Before instrumenting new code paths, check whether existing context helpers (e.g. `extensions/ext_request_logging.py`) already capture the necessary metadata.
## Controllers, Services, Core

- Controllers only parse HTTP input and call a service method. Keep business rules in `services/`.
- Services enforce tenant rules, quotas, and orchestration, then call into `core/` engines (workflow execution, tools, LLMs).
- When adding a new endpoint, search for an existing service to extend before introducing a new layer. Example: workflow APIs pipe through `services/workflow_service.py` into `core/workflow`. A sketch of the boundary follows.
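A minimal sketch of the boundary (function names and payloads are illustrative, not real endpoints):

```python
from pydantic import BaseModel


class CreateItemPayload(BaseModel):
    name: str


def create_item_service(tenant_id: str, name: str) -> dict:
    # The service layer enforces tenant rules/quotas, then persists via core/ or SQLAlchemy.
    return {"id": "generated-id", "tenant_id": tenant_id, "name": name}


def create_item_controller(tenant_id: str, raw_body: dict) -> dict:
    payload = CreateItemPayload.model_validate(raw_body)  # controller: parse input only
    item = create_item_service(tenant_id, payload.name)  # delegate business rules
    return {"id": item["id"], "name": item["name"]}  # serialise the response
```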
## Plugins, Tools, Providers

- In Dify a plugin is a tenant-installable bundle that declares one or more providers (tool, model, datasource, trigger, endpoint, agent strategy) plus its resource needs and version metadata. The manifest (`core/plugin/entities/plugin.py`) mirrors what you see in the marketplace documentation.
- Installation, upgrades, and migrations are orchestrated by `services/plugin/plugin_service.py` together with helpers such as `services/plugin/plugin_migration.py`.
- Runtime loading happens through the implementations under `core/plugin/impl/*` (tool/model/datasource/trigger/endpoint/agent). These modules normalise plugin providers so that downstream systems (`core/tools/tool_manager.py`, `services/model_provider_service.py`, `services/trigger/*`) can treat builtin and plugin capabilities the same way.
- For remote execution, plugin daemons (`core/plugin/entities/plugin_daemon.py`, `core/plugin/impl/plugin.py`) manage lifecycle hooks, credential forwarding, and background workers that keep plugin processes in sync with the main application.
- Acquire tool implementations through `core/tools/tool_manager.py`; it resolves builtin, plugin, and workflow-as-tool providers uniformly, injecting the right context (tenant, credentials, runtime config).
- To add a new plugin capability, extend the relevant `core/plugin/entities` schema and register the implementation in the matching `core/plugin/impl` module rather than importing the provider directly.
## Async Workloads

See `agent_skills/trigger.md` for more detailed documentation.

- Enqueue background work through `services/async_workflow_service.py`. It routes jobs to the tiered Celery queues defined in `tasks/`.
- Workers boot from `celery_entrypoint.py` and execute functions in `tasks/workflow_execution_tasks.py`, `tasks/trigger_processing_tasks.py`, etc.
- Scheduled workflows poll from `schedule/workflow_schedule_tasks.py`. Follow the same pattern if you need new periodic jobs. A task-shape sketch follows.
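The shape of such a task, using Celery's standard API (the task and queue names here are illustrative; the real tiers live in `tasks/`):

```python
from celery import shared_task


@shared_task
def run_example_job(tenant_id: str, workflow_id: str) -> None:
    ...  # placeholder body; real tasks live under tasks/


# The enqueueing service picks the queue for the tenant's tier at call time:
run_example_job.apply_async(args=("tenant-1", "workflow-1"), queue="professional")
```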
## Database & Migrations

- SQLAlchemy models live under `models/` and map directly to migration files in `migrations/versions`.
- Generate migrations with `uv run --project api flask db revision --autogenerate -m "<summary>"`, then review the diff; never hand-edit the database outside Alembic.
- Apply migrations locally using `uv run --project api flask db upgrade`; production deploys expect the same history.
- If you add tenant-scoped data, confirm the upgrade includes tenant filters or defaults consistent with the service logic touching those tables. The sketch below shows the shape of a revision.
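The shape of a reviewed revision (table and revision identifiers are illustrative):

```python
# migrations/versions/xxxx_add_example_items.py (illustrative)
import sqlalchemy as sa
from alembic import op


def upgrade():
    op.create_table(
        "example_items",
        sa.Column("id", sa.String(36), primary_key=True),
        sa.Column("tenant_id", sa.String(36), nullable=False),  # tenant-scoped, per the note above
        sa.Column("name", sa.String(255), nullable=False),
    )
    op.create_index("ix_example_items_tenant_id", "example_items", ["tenant_id"])


def downgrade():
    op.drop_index("ix_example_items_tenant_id", table_name="example_items")
    op.drop_table("example_items")
```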
## CLI Commands

- Maintenance commands from `commands.py` are registered on the Flask CLI. Run them via `uv run --project api flask <command>`.
- Use the built-in `db` commands from Flask-Migrate for schema operations (`flask db upgrade`, `flask db stamp`, etc.). Only fall back to custom helpers if you need their extra behaviour.
- Custom entries such as `flask reset-password`, `flask reset-email`, and `flask vdb-migrate` handle self-hosted account recovery and vector database migrations.
- Before adding a new command, check whether an existing service can be reused, and ensure the command guards edition-specific behaviour (many enforce `SELF_HOSTED`). Document any additions in the PR. A command-shape sketch follows.
- Ruff helpers are run directly with `uv`: `uv run --project api --dev ruff format ./api` for formatting and `uv run --project api --dev ruff check ./api` (add `--fix` if you want automatic fixes).
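The shape of a custom command, mirroring the guards used by the existing ones (the command name is illustrative):

```python
import click

from configs import dify_config


@click.command("example-maintenance", help="Illustrative maintenance command.")
def example_maintenance():
    if dify_config.EDITION != "SELF_HOSTED":  # guard edition-specific behaviour
        click.echo(click.style("This command is only for SELF_HOSTED installations.", fg="red"))
        return
    click.echo(click.style("Done.", fg="green"))
```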
## When You Add Features

- Check for an existing helper or service before writing a new util.
- Uphold tenancy: every service method should receive the tenant ID from controller wrappers such as `controllers/console/wraps.py`.
- Update or create tests alongside behaviour changes (`tests/unit_tests` for fast coverage, `tests/integration_tests` when touching orchestrations).
- Run `uv run --project api --dev ruff check ./api`, `uv run --directory api --dev basedpyright`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh` before submitting changes.
api/agent_skills/plugin.md (new file, 1 line)
@@ -0,0 +1 @@
// TBD
api/agent_skills/plugin_oauth.md (new file, 1 line)
@@ -0,0 +1 @@
// TBD
api/agent_skills/trigger.md (new file, 53 lines)
@@ -0,0 +1,53 @@
## Overview

Trigger is the collective name for the nodes we call `Start` nodes. The `Start` concept is the same as `RootNode` in the workflow engine (`core/workflow/graph_engine`): a `Start` node is the entry point of a workflow, and every workflow run always begins from one.
## Trigger nodes

- `UserInput`
- `Trigger Webhook`
- `Trigger Schedule`
- `Trigger Plugin`
### UserInput

Before the `Trigger` concept was introduced, this was the node we called `Start`. To avoid confusion it has been renamed to `UserInput`; it has a strong relationship with the `ServiceAPI` in `controllers/service_api/app`.

1. The `UserInput` node declares a list of arguments that must be provided by the user; these are ultimately converted into variables in the workflow variable pool.
1. The `ServiceAPI` accepts those arguments and passes them through to the `UserInput` node.
1. For the detailed implementation, refer to `core/workflow/nodes/start`.
### Trigger Webhook

Inside the Webhook node, Dify provides a UI panel that lets users define an HTTP manifest (`WebhookData` in `core/workflow/nodes/trigger_webhook/entities.py`). Dify also generates a random webhook id for each `Trigger Webhook` node; the implementation lives in `core/trigger/utils/endpoint.py`. `webhook-debug` is the debug mode for webhooks; you can find it in `controllers/trigger/webhook.py`.

Finally, requests to the `webhook` endpoint are converted into variables in the workflow variable pool during workflow execution.
### Trigger Schedule

The `Trigger Schedule` node lets users define a schedule that triggers the workflow; the detailed manifest is in `core/workflow/nodes/trigger_schedule/entities.py`. A poller and an executor handle millions of schedules; see `docker/entrypoint.sh` and `schedule/workflow_schedule_task.py` for reference.

To achieve this, a `WorkflowSchedulePlan` model was introduced in `models/trigger.py`, and `events/event_handlers/sync_workflow_schedule_when_app_published.py` syncs workflow schedule plans when an app is published.
### Trigger Plugin

The `Trigger Plugin` node lets users define their own distributed trigger plugin: whenever a request is received, Dify forwards it to the plugin and waits for the parsed variables.

1. Requests are saved to storage by `services/trigger/trigger_request_service.py`, referenced by `services/trigger/trigger_service.py`.`TriggerService`.`process_endpoint`.
1. Plugins accept those requests and parse variables from them; see `core/plugin/impl/trigger.py` for details.

Dify also introduces a `subscription` concept: an endpoint address from Dify is bound to a third-party webhook service such as `Github`, `Slack`, `Linear`, `GoogleDrive`, or `Gmail`. Once a subscription is created, Dify continually receives requests from the platform and handles them one by one.
## Worker Pool / Async Task

Every event that triggers a new workflow run is handled asynchronously; the unified entry point is `services/async_workflow_service.py`.`AsyncWorkflowService`.`trigger_workflow_async`.

The underlying infrastructure is `celery`, already configured in `docker/entrypoint.sh`. The consumers live in `tasks/async_workflow_tasks.py`, and three queues handle the different user tiers: `PROFESSIONAL_QUEUE`, `TEAM_QUEUE`, and `SANDBOX_QUEUE`.
## Debug Strategy

Dify divides users into two groups: builders and end users.

Builders are the users who create workflows, and at this stage debugging a workflow is a critical part of the development process. As the start nodes of workflows, trigger nodes can `listen` to events from `WebhookDebug`, `Schedule`, and `Plugin`; the debugging process is created in `controllers/console/app/workflow.py`.`DraftWorkflowTriggerNodeApi`.

A polling process can be seen as a series of single `poll` operations. Each `poll` operation fetches events cached in `Redis` and returns `None` if no event is found. In more detail: `core/trigger/debug/event_bus.py` handles the polling process, and `core/trigger/debug/event_selectors.py` selects the event poller based on the trigger type. A sketch of one `poll` operation follows.
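A hedged sketch of a single `poll` operation against Redis-cached debug events (the key shape is illustrative; the real logic lives in `core/trigger/debug/event_bus.py`):

```python
import json

from extensions.ext_redis import redis_client


def poll_debug_event(node_id: str) -> dict | None:
    raw = redis_client.lpop(f"trigger_debug:{node_id}")
    if raw is None:
        return None  # no event cached yet; the caller polls again
    return json.loads(raw)
```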
api/app.py (26 changed lines)
@@ -1,8 +1,7 @@
import os
import sys


def is_db_command():
def is_db_command() -> bool:
if len(sys.argv) > 1 and sys.argv[0].endswith("flask") and sys.argv[1] == "db":
return True
return False
@@ -14,23 +13,12 @@ if is_db_command():

app = create_migrations_app()
else:
# It seems that JetBrains Python debugger does not work well with gevent,
# so we need to disable gevent in debug mode.
# If you are using debugpy and set GEVENT_SUPPORT=True, you can debug with gevent.
if (flask_debug := os.environ.get("FLASK_DEBUG", "0")) and flask_debug.lower() in {"false", "0", "no"}:
from gevent import monkey

# gevent
monkey.patch_all()

from grpc.experimental import gevent as grpc_gevent # type: ignore

# grpc gevent
grpc_gevent.init_gevent()

import psycogreen.gevent # type: ignore

psycogreen.gevent.patch_psycopg()
# Gunicorn and Celery handle monkey patching automatically in production by
# specifying the `gevent` worker class. Manual monkey patching is not required here.
#
# See `api/docker/entrypoint.sh` (lines 33 and 47) for details.
#
# For third-party library patching, refer to `gunicorn.conf.py` and `celery_entrypoint.py`.

from app_factory import create_app


@@ -1,6 +1,8 @@
import logging
import time

from opentelemetry.trace import get_current_span

from configs import dify_config
from contexts.wrapper import RecyclableContextVar
from dify_app import DifyApp
@@ -18,6 +20,7 @@ def create_flask_app_with_configs() -> DifyApp:
"""
dify_app = DifyApp(__name__)
dify_app.config.from_mapping(dify_config.model_dump())
dify_app.config["RESTX_INCLUDE_ALL_MODELS"] = True

# add before request hook
@dify_app.before_request
@@ -25,6 +28,26 @@ def create_flask_app_with_configs() -> DifyApp:
# add an unique identifier to each request
RecyclableContextVar.increment_thread_recycles()

# add after request hook for injecting X-Trace-Id header from OpenTelemetry span context
@dify_app.after_request
def add_trace_id_header(response):
try:
span = get_current_span()
ctx = span.get_span_context() if span else None
if ctx and ctx.is_valid:
trace_id_hex = format(ctx.trace_id, "032x")
# Avoid duplicates if some middleware added it
if "X-Trace-Id" not in response.headers:
response.headers["X-Trace-Id"] = trace_id_hex
except Exception:
# Never break the response due to tracing header injection
logger.warning("Failed to add trace ID to response header", exc_info=True)
return response

# Capture the decorator's return value to avoid pyright reportUnusedFunction
_ = before_request
_ = add_trace_id_header

return dify_app


@@ -47,10 +70,12 @@ def initialize_extensions(app: DifyApp):
ext_commands,
ext_compress,
ext_database,
ext_forward_refs,
ext_hosting_provider,
ext_import_modules,
ext_logging,
ext_login,
ext_logstore,
ext_mail,
ext_migrate,
ext_orjson,
@@ -59,6 +84,7 @@ def initialize_extensions(app: DifyApp):
ext_redis,
ext_request_logging,
ext_sentry,
ext_session_factory,
ext_set_secretkey,
ext_storage,
ext_timezone,
@@ -71,6 +97,7 @@ def initialize_extensions(app: DifyApp):
ext_warnings,
ext_import_modules,
ext_orjson,
ext_forward_refs,
ext_set_secretkey,
ext_compress,
ext_code_based_extension,
@@ -79,6 +106,7 @@ def initialize_extensions(app: DifyApp):
ext_migrate,
ext_redis,
ext_storage,
ext_logstore, # Initialize logstore after storage, before celery
ext_celery,
ext_login,
ext_mail,
@@ -89,6 +117,7 @@ def initialize_extensions(app: DifyApp):
ext_commands,
ext_otel,
ext_request_logging,
ext_session_factory,
]
for ext in extensions:
short_name = ext.__name__.split(".")[-1]

api/celery_entrypoint.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import psycogreen.gevent as pscycogreen_gevent # type: ignore
from grpc.experimental import gevent as grpc_gevent # type: ignore

# grpc gevent
grpc_gevent.init_gevent()
print("gRPC patched with gevent.", flush=True) # noqa: T201
pscycogreen_gevent.patch_psycopg()
print("psycopg2 patched with gevent.", flush=True) # noqa: T201


from app import app, celery

__all__ = ["app", "celery"]
@@ -1,11 +0,0 @@
from tests.integration_tests.utils.parent_class import ParentClass


class ChildClass(ParentClass):
"""Test child class for module import helper tests"""

def __init__(self, name):
super().__init__(name)

def get_name(self):
return f"Child: {self.name}"
api/cnt_base.sh (new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash
set -euxo pipefail

for pattern in "Base" "TypeBase"; do
printf "%s " "$pattern"
grep "($pattern):" -r --include='*.py' --exclude-dir=".venv" --exclude-dir="tests" . | wc -l
done
api/commands.py (774 changed lines)
@@ -2,7 +2,7 @@ import base64
import json
import logging
import secrets
from typing import Any, Optional
from typing import Any

import click
import sqlalchemy as sa
@@ -10,10 +10,13 @@ from flask import current_app
from pydantic import TypeAdapter
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker

from configs import dify_config
from constants.languages import languages
from core.plugin.entities.plugin import ToolProviderID
from core.helper import encrypter
from core.plugin.entities.plugin_daemon import CredentialType
from core.plugin.impl.plugin import PluginInstaller
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.index_processor.constant.built_in_field import BuiltInField
@@ -23,19 +26,25 @@ from events.app_event import app_was_created
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from extensions.storage.opendal_storage import OpenDALStorage
from extensions.storage.storage_type import StorageType
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
from models import Tenant
from models.dataset import Dataset, DatasetCollectionBinding, DatasetMetadata, DatasetMetadataBinding, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation, UploadFile
from models.oauth import DatasourceOauthParamConfig, DatasourceProvider
from models.provider import Provider, ProviderModel
from models.provider_ids import DatasourceProviderID, ToolProviderID
from models.source import DataSourceApiKeyAuthBinding, DataSourceOauthBinding
from models.tools import ToolOAuthSystemClient
from services.account_service import AccountService, RegisterService, TenantService
from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpiredLogs
from services.plugin.data_migration import PluginDataMigration
from services.plugin.plugin_migration import PluginMigration
from services.plugin.plugin_service import PluginService
from tasks.remove_app_and_related_data_task import delete_draft_variables_batch

logger = logging.getLogger(__name__)
@@ -53,31 +62,30 @@ def reset_password(email, new_password, password_confirm):
if str(new_password).strip() != str(password_confirm).strip():
click.echo(click.style("Passwords do not match.", fg="red"))
return
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
account = session.query(Account).where(Account.email == email).one_or_none()

account = db.session.query(Account).where(Account.email == email).one_or_none()
if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return

if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return
try:
valid_password(new_password)
except:
click.echo(click.style(f"Invalid password. Must match {password_pattern}", fg="red"))
return

try:
valid_password(new_password)
except:
click.echo(click.style(f"Invalid password. Must match {password_pattern}", fg="red"))
return
# generate password salt
salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode()

# generate password salt
salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode()

# encrypt password with salt
password_hashed = hash_password(new_password, salt)
base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed
account.password_salt = base64_salt
db.session.commit()
AccountService.reset_login_error_rate_limit(email)
click.echo(click.style("Password reset successfully.", fg="green"))
# encrypt password with salt
password_hashed = hash_password(new_password, salt)
base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed
account.password_salt = base64_salt
AccountService.reset_login_error_rate_limit(email)
click.echo(click.style("Password reset successfully.", fg="green"))


@click.command("reset-email", help="Reset the account email.")
@@ -92,22 +100,21 @@ def reset_email(email, new_email, email_confirm):
if str(new_email).strip() != str(email_confirm).strip():
click.echo(click.style("New emails do not match.", fg="red"))
return
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
account = session.query(Account).where(Account.email == email).one_or_none()

account = db.session.query(Account).where(Account.email == email).one_or_none()
if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return

if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return
try:
email_validate(new_email)
except:
click.echo(click.style(f"Invalid email: {new_email}", fg="red"))
return

try:
email_validate(new_email)
except:
click.echo(click.style(f"Invalid email: {new_email}", fg="red"))
return

account.email = new_email
db.session.commit()
click.echo(click.style("Email updated successfully.", fg="green"))
account.email = new_email
click.echo(click.style("Email updated successfully.", fg="green"))


@click.command(
@@ -131,25 +138,24 @@ def reset_encrypt_key_pair():
if dify_config.EDITION != "SELF_HOSTED":
click.echo(click.style("This command is only for SELF_HOSTED installations.", fg="red"))
return
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
tenants = session.query(Tenant).all()
for tenant in tenants:
if not tenant:
click.echo(click.style("No workspaces found. Run /install first.", fg="red"))
return

tenants = db.session.query(Tenant).all()
for tenant in tenants:
if not tenant:
click.echo(click.style("No workspaces found. Run /install first.", fg="red"))
return
tenant.encrypt_public_key = generate_key_pair(tenant.id)

tenant.encrypt_public_key = generate_key_pair(tenant.id)
session.query(Provider).where(Provider.provider_type == "custom", Provider.tenant_id == tenant.id).delete()
session.query(ProviderModel).where(ProviderModel.tenant_id == tenant.id).delete()

db.session.query(Provider).where(Provider.provider_type == "custom", Provider.tenant_id == tenant.id).delete()
db.session.query(ProviderModel).where(ProviderModel.tenant_id == tenant.id).delete()
db.session.commit()

click.echo(
click.style(
f"Congratulations! The asymmetric key pair of workspace {tenant.id} has been reset.",
fg="green",
click.echo(
click.style(
f"Congratulations! The asymmetric key pair of workspace {tenant.id} has been reset.",
fg="green",
)
)
)


@click.command("vdb-migrate", help="Migrate vector db.")
@@ -174,14 +180,15 @@ def migrate_annotation_vector_database():
try:
# get apps info
per_page = 50
apps = (
db.session.query(App)
.where(App.status == "normal")
.order_by(App.created_at.desc())
.limit(per_page)
.offset((page - 1) * per_page)
.all()
)
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
apps = (
session.query(App)
.where(App.status == "normal")
.order_by(App.created_at.desc())
.limit(per_page)
.offset((page - 1) * per_page)
.all()
)
if not apps:
break
except SQLAlchemyError:
@@ -195,24 +202,27 @@ def migrate_annotation_vector_database():
)
try:
click.echo(f"Creating app annotation index: {app.id}")
app_annotation_setting = (
db.session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app.id).first()
)
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
app_annotation_setting = (
session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app.id).first()
)

if not app_annotation_setting:
skipped_count = skipped_count + 1
click.echo(f"App annotation setting disabled: {app.id}")
continue
# get dataset_collection_binding info
dataset_collection_binding = (
db.session.query(DatasetCollectionBinding)
.where(DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id)
.first()
)
if not dataset_collection_binding:
click.echo(f"App annotation collection binding not found: {app.id}")
continue
annotations = db.session.query(MessageAnnotation).where(MessageAnnotation.app_id == app.id).all()
if not app_annotation_setting:
skipped_count = skipped_count + 1
click.echo(f"App annotation setting disabled: {app.id}")
continue
# get dataset_collection_binding info
dataset_collection_binding = (
session.query(DatasetCollectionBinding)
.where(DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id)
.first()
)
if not dataset_collection_binding:
click.echo(f"App annotation collection binding not found: {app.id}")
continue
annotations = session.scalars(
select(MessageAnnotation).where(MessageAnnotation.app_id == app.id)
).all()
dataset = Dataset(
id=app.id,
tenant_id=app.tenant_id,
@@ -311,6 +321,8 @@ def migrate_knowledge_vector_database():
)

datasets = db.paginate(select=stmt, page=page, per_page=50, max_per_page=50, error_out=False)
if not datasets.items:
break
except SQLAlchemyError:
raise

@@ -367,29 +379,25 @@ def migrate_knowledge_vector_database():
)
raise e

dataset_documents = (
db.session.query(DatasetDocument)
.where(
dataset_documents = db.session.scalars(
select(DatasetDocument).where(
DatasetDocument.dataset_id == dataset.id,
DatasetDocument.indexing_status == "completed",
DatasetDocument.enabled == True,
DatasetDocument.archived == False,
)
.all()
)
).all()

documents = []
segments_count = 0
for dataset_document in dataset_documents:
segments = (
db.session.query(DocumentSegment)
.where(
segments = db.session.scalars(
select(DocumentSegment).where(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.status == "completed",
DocumentSegment.enabled == True,
)
.all()
)
).all()

for segment in segments:
document = Document(
@@ -479,12 +487,12 @@ def convert_to_agent_apps():
click.echo(f"Converting app: {app.id}")

try:
app.mode = AppMode.AGENT_CHAT.value
app.mode = AppMode.AGENT_CHAT
db.session.commit()

# update conversation mode to agent
db.session.query(Conversation).where(Conversation.app_id == app.id).update(
{Conversation.mode: AppMode.AGENT_CHAT.value}
{Conversation.mode: AppMode.AGENT_CHAT}
)

db.session.commit()
@@ -511,7 +519,7 @@ def add_qdrant_index(field: str):
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType

from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig
from core.rag.datasource.vdb.qdrant.qdrant_vector import PathQdrantParams, QdrantConfig

for binding in bindings:
if dify_config.QDRANT_URL is None:
@@ -525,7 +533,21 @@ def add_qdrant_index(field: str):
prefer_grpc=dify_config.QDRANT_GRPC_ENABLED,
)
try:
client = qdrant_client.QdrantClient(**qdrant_config.to_qdrant_params())
params = qdrant_config.to_qdrant_params()
# Check the type before using
if isinstance(params, PathQdrantParams):
# PathQdrantParams case
client = qdrant_client.QdrantClient(path=params.path)
else:
# UrlQdrantParams case - params is UrlQdrantParams
client = qdrant_client.QdrantClient(
url=params.url,
api_key=params.api_key,
timeout=int(params.timeout),
verify=params.verify,
grpc_port=params.grpc_port,
prefer_grpc=params.prefer_grpc,
)
# create payload index
client.create_payload_index(binding.collection_name, field, field_schema=PayloadSchemaType.KEYWORD)
create_count += 1
@@ -571,7 +593,7 @@ def old_metadata_migration():
for document in documents:
if document.doc_metadata:
doc_metadata = document.doc_metadata
for key, value in doc_metadata.items():
for key in doc_metadata:
for field in BuiltInField:
if field.value == key:
break
@@ -627,7 +649,7 @@ def old_metadata_migration():
@click.option("--email", prompt=True, help="Tenant account email.")
@click.option("--name", prompt=True, help="Workspace name.")
@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, language: Optional[str] = None, name: Optional[str] = None):
def create_tenant(email: str, language: str | None = None, name: str | None = None):
"""
Create tenant account
"""
@@ -719,18 +741,18 @@ where sites.id is null limit 1000"""
try:
app = db.session.query(App).where(App.id == app_id).first()
if not app:
print(f"App {app_id} not found")
logger.info("App %s not found", app_id)
continue

tenant = app.tenant
if tenant:
accounts = tenant.get_accounts()
if not accounts:
print(f"Fix failed for app {app.id}")
logger.info("Fix failed for app %s", app.id)
continue

account = accounts[0]
print(f"Fixing missing site for app {app.id}")
logger.info("Fixing missing site for app %s", app.id)
app_was_created.send(app, account=account)
except Exception:
failed_app_ids.append(app_id)
@@ -941,7 +963,7 @@ def clear_orphaned_file_records(force: bool):
click.echo(click.style("- Deleting orphaned message_files records", fg="white"))
query = "DELETE FROM message_files WHERE id IN :ids"
with db.engine.begin() as conn:
conn.execute(sa.text(query), {"ids": tuple([record["id"] for record in orphaned_message_files])})
conn.execute(sa.text(query), {"ids": tuple(record["id"] for record in orphaned_message_files)})
click.echo(
click.style(f"Removed {len(orphaned_message_files)} orphaned message_files records.", fg="green")
)
@@ -1117,6 +1139,7 @@ def remove_orphaned_files_on_storage(force: bool):
click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
except Exception as e:
click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
return

all_files_on_storage = []
for storage_path in storage_paths:
@@ -1207,6 +1230,55 @@ def setup_system_tool_oauth_client(provider, client_params):
click.echo(click.style(f"OAuth client params setup successfully. id: {oauth_client.id}", fg="green"))


@click.command("setup-system-trigger-oauth-client", help="Setup system trigger oauth client.")
@click.option("--provider", prompt=True, help="Provider name")
@click.option("--client-params", prompt=True, help="Client Params")
def setup_system_trigger_oauth_client(provider, client_params):
"""
Setup system trigger oauth client
"""
from models.provider_ids import TriggerProviderID
from models.trigger import TriggerOAuthSystemClient

provider_id = TriggerProviderID(provider)
provider_name = provider_id.provider_name
plugin_id = provider_id.plugin_id

try:
# json validate
click.echo(click.style(f"Validating client params: {client_params}", fg="yellow"))
client_params_dict = TypeAdapter(dict[str, Any]).validate_json(client_params)
click.echo(click.style("Client params validated successfully.", fg="green"))

click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow"))
click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow"))
oauth_client_params = encrypt_system_oauth_params(client_params_dict)
click.echo(click.style("Client params encrypted successfully.", fg="green"))
except Exception as e:
click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red"))
return

deleted_count = (
db.session.query(TriggerOAuthSystemClient)
.filter_by(
provider=provider_name,
plugin_id=plugin_id,
)
.delete()
)
if deleted_count > 0:
click.echo(click.style(f"Deleted {deleted_count} existing oauth client params.", fg="yellow"))

oauth_client = TriggerOAuthSystemClient(
provider=provider_name,
plugin_id=plugin_id,
encrypted_oauth_params=oauth_client_params,
)
db.session.add(oauth_client)
db.session.commit()
click.echo(click.style(f"OAuth client params setup successfully. id: {oauth_client.id}", fg="green"))


def _find_orphaned_draft_variables(batch_size: int = 1000) -> list[str]:
"""
Find draft variables that reference non-existent apps.
@@ -1233,15 +1305,17 @@ def _find_orphaned_draft_variables(batch_size: int = 1000) -> list[str]:

def _count_orphaned_draft_variables() -> dict[str, Any]:
"""
Count orphaned draft variables by app.
Count orphaned draft variables by app, including associated file counts.

Returns:
Dictionary with statistics about orphaned variables
Dictionary with statistics about orphaned variables and files
"""
query = """
# Count orphaned variables by app
variables_query = """
SELECT
wdv.app_id,
COUNT(*) as variable_count
COUNT(*) as variable_count,
COUNT(wdv.file_id) as file_count
FROM workflow_draft_variables AS wdv
WHERE NOT EXISTS(
SELECT 1 FROM apps WHERE apps.id = wdv.app_id
@@ -1251,14 +1325,21 @@ def _count_orphaned_draft_variables() -> dict[str, Any]:
"""

with db.engine.connect() as conn:
result = conn.execute(sa.text(query))
orphaned_by_app = {row[0]: row[1] for row in result}
result = conn.execute(sa.text(variables_query))
orphaned_by_app = {}
total_files = 0

total_orphaned = sum(orphaned_by_app.values())
for row in result:
app_id, variable_count, file_count = row
orphaned_by_app[app_id] = {"variables": variable_count, "files": file_count}
total_files += file_count

total_orphaned = sum(app_data["variables"] for app_data in orphaned_by_app.values())
app_count = len(orphaned_by_app)

return {
"total_orphaned_variables": total_orphaned,
"total_orphaned_files": total_files,
"orphaned_app_count": app_count,
"orphaned_by_app": orphaned_by_app,
}
@@ -1287,6 +1368,7 @@ def cleanup_orphaned_draft_variables(
stats = _count_orphaned_draft_variables()

logger.info("Found %s orphaned draft variables", stats["total_orphaned_variables"])
logger.info("Found %s associated offload files", stats["total_orphaned_files"])
logger.info("Across %s non-existent apps", stats["orphaned_app_count"])

if stats["total_orphaned_variables"] == 0:
@@ -1295,10 +1377,10 @@ def cleanup_orphaned_draft_variables(

if dry_run:
logger.info("DRY RUN: Would delete the following:")
for app_id, count in sorted(stats["orphaned_by_app"].items(), key=lambda x: x[1], reverse=True)[
for app_id, data in sorted(stats["orphaned_by_app"].items(), key=lambda x: x[1]["variables"], reverse=True)[
:10
]: # Show top 10
logger.info(" App %s: %s variables", app_id, count)
logger.info(" App %s: %s variables, %s files", app_id, data["variables"], data["files"])
if len(stats["orphaned_by_app"]) > 10:
logger.info(" ... and %s more apps", len(stats["orphaned_by_app"]) - 10)
return
@@ -1307,7 +1389,8 @@ def cleanup_orphaned_draft_variables(
if not force:
click.confirm(
f"Are you sure you want to delete {stats['total_orphaned_variables']} "
f"orphaned draft variables from {stats['orphaned_app_count']} apps?",
f"orphaned draft variables and {stats['total_orphaned_files']} associated files "
f"from {stats['orphaned_app_count']} apps?",
abort=True,
)

@@ -1340,3 +1423,480 @@ def cleanup_orphaned_draft_variables(
continue

logger.info("Cleanup completed. Total deleted: %s variables across %s apps", total_deleted, processed_apps)


@click.command("setup-datasource-oauth-client", help="Setup datasource oauth client.")
@click.option("--provider", prompt=True, help="Provider name")
@click.option("--client-params", prompt=True, help="Client Params")
def setup_datasource_oauth_client(provider, client_params):
"""
Setup datasource oauth client
"""
provider_id = DatasourceProviderID(provider)
provider_name = provider_id.provider_name
plugin_id = provider_id.plugin_id

try:
# json validate
click.echo(click.style(f"Validating client params: {client_params}", fg="yellow"))
client_params_dict = TypeAdapter(dict[str, Any]).validate_json(client_params)
click.echo(click.style("Client params validated successfully.", fg="green"))
except Exception as e:
click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red"))
return

click.echo(click.style(f"Ready to delete existing oauth client params: {provider_name}", fg="yellow"))
deleted_count = (
db.session.query(DatasourceOauthParamConfig)
.filter_by(
provider=provider_name,
plugin_id=plugin_id,
)
.delete()
)
if deleted_count > 0:
click.echo(click.style(f"Deleted {deleted_count} existing oauth client params.", fg="yellow"))

click.echo(click.style(f"Ready to setup datasource oauth client: {provider_name}", fg="yellow"))
oauth_client = DatasourceOauthParamConfig(
provider=provider_name,
plugin_id=plugin_id,
system_credentials=client_params_dict,
)
db.session.add(oauth_client)
db.session.commit()
click.echo(click.style(f"provider: {provider_name}", fg="green"))
click.echo(click.style(f"plugin_id: {plugin_id}", fg="green"))
click.echo(click.style(f"params: {json.dumps(client_params_dict, indent=2, ensure_ascii=False)}", fg="green"))
click.echo(click.style(f"Datasource oauth client setup successfully. id: {oauth_client.id}", fg="green"))


@click.command("transform-datasource-credentials", help="Transform datasource credentials.")
@click.option(
"--environment", prompt=True, help="the environment to transform datasource credentials", default="online"
)
def transform_datasource_credentials(environment: str):
"""
Transform datasource credentials
"""
try:
installer_manager = PluginInstaller()
plugin_migration = PluginMigration()

notion_plugin_id = "langgenius/notion_datasource"
firecrawl_plugin_id = "langgenius/firecrawl_datasource"
jina_plugin_id = "langgenius/jina_datasource"
if environment == "online":
notion_plugin_unique_identifier = plugin_migration._fetch_plugin_unique_identifier(notion_plugin_id) # pyright: ignore[reportPrivateUsage]
firecrawl_plugin_unique_identifier = plugin_migration._fetch_plugin_unique_identifier(firecrawl_plugin_id) # pyright: ignore[reportPrivateUsage]
jina_plugin_unique_identifier = plugin_migration._fetch_plugin_unique_identifier(jina_plugin_id) # pyright: ignore[reportPrivateUsage]
else:
notion_plugin_unique_identifier = None
firecrawl_plugin_unique_identifier = None
jina_plugin_unique_identifier = None
oauth_credential_type = CredentialType.OAUTH2
api_key_credential_type = CredentialType.API_KEY

# deal notion credentials
deal_notion_count = 0
notion_credentials = db.session.query(DataSourceOauthBinding).filter_by(provider="notion").all()
if notion_credentials:
notion_credentials_tenant_mapping: dict[str, list[DataSourceOauthBinding]] = {}
for notion_credential in notion_credentials:
tenant_id = notion_credential.tenant_id
if tenant_id not in notion_credentials_tenant_mapping:
notion_credentials_tenant_mapping[tenant_id] = []
notion_credentials_tenant_mapping[tenant_id].append(notion_credential)
for tenant_id, notion_tenant_credentials in notion_credentials_tenant_mapping.items():
tenant = db.session.query(Tenant).filter_by(id=tenant_id).first()
if not tenant:
continue
try:
# check notion plugin is installed
installed_plugins = installer_manager.list_plugins(tenant_id)
installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins]
if notion_plugin_id not in installed_plugins_ids:
if notion_plugin_unique_identifier:
# install notion plugin
PluginService.install_from_marketplace_pkg(tenant_id, [notion_plugin_unique_identifier])
auth_count = 0
for notion_tenant_credential in notion_tenant_credentials:
auth_count += 1
# get credential oauth params
access_token = notion_tenant_credential.access_token
# notion info
notion_info = notion_tenant_credential.source_info
workspace_id = notion_info.get("workspace_id")
workspace_name = notion_info.get("workspace_name")
workspace_icon = notion_info.get("workspace_icon")
new_credentials = {
"integration_secret": encrypter.encrypt_token(tenant_id, access_token),
"workspace_id": workspace_id,
"workspace_name": workspace_name,
"workspace_icon": workspace_icon,
}
datasource_provider = DatasourceProvider(
provider="notion_datasource",
tenant_id=tenant_id,
plugin_id=notion_plugin_id,
auth_type=oauth_credential_type.value,
encrypted_credentials=new_credentials,
name=f"Auth {auth_count}",
avatar_url=workspace_icon or "default",
is_default=False,
)
db.session.add(datasource_provider)
deal_notion_count += 1
except Exception as e:
click.echo(
click.style(
f"Error transforming notion credentials: {str(e)}, tenant_id: {tenant_id}", fg="red"
)
)
continue
db.session.commit()
# deal firecrawl credentials
deal_firecrawl_count = 0
firecrawl_credentials = db.session.query(DataSourceApiKeyAuthBinding).filter_by(provider="firecrawl").all()
if firecrawl_credentials:
firecrawl_credentials_tenant_mapping: dict[str, list[DataSourceApiKeyAuthBinding]] = {}
for firecrawl_credential in firecrawl_credentials:
tenant_id = firecrawl_credential.tenant_id
if tenant_id not in firecrawl_credentials_tenant_mapping:
firecrawl_credentials_tenant_mapping[tenant_id] = []
firecrawl_credentials_tenant_mapping[tenant_id].append(firecrawl_credential)
for tenant_id, firecrawl_tenant_credentials in firecrawl_credentials_tenant_mapping.items():
tenant = db.session.query(Tenant).filter_by(id=tenant_id).first()
if not tenant:
continue
try:
# check firecrawl plugin is installed
installed_plugins = installer_manager.list_plugins(tenant_id)
installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins]
if firecrawl_plugin_id not in installed_plugins_ids:
if firecrawl_plugin_unique_identifier:
# install firecrawl plugin
PluginService.install_from_marketplace_pkg(tenant_id, [firecrawl_plugin_unique_identifier])

auth_count = 0
for firecrawl_tenant_credential in firecrawl_tenant_credentials:
auth_count += 1
if not firecrawl_tenant_credential.credentials:
click.echo(
click.style(
f"Skipping firecrawl credential for tenant {tenant_id} due to missing credentials.",
fg="yellow",
)
)
continue
# get credential api key
credentials_json = json.loads(firecrawl_tenant_credential.credentials)
api_key = credentials_json.get("config", {}).get("api_key")
base_url = credentials_json.get("config", {}).get("base_url")
new_credentials = {
"firecrawl_api_key": api_key,
"base_url": base_url,
}
datasource_provider = DatasourceProvider(
provider="firecrawl",
tenant_id=tenant_id,
plugin_id=firecrawl_plugin_id,
auth_type=api_key_credential_type.value,
encrypted_credentials=new_credentials,
name=f"Auth {auth_count}",
avatar_url="default",
is_default=False,
)
db.session.add(datasource_provider)
deal_firecrawl_count += 1
except Exception as e:
click.echo(
click.style(
f"Error transforming firecrawl credentials: {str(e)}, tenant_id: {tenant_id}", fg="red"
)
)
continue
db.session.commit()
# deal jina credentials
deal_jina_count = 0
jina_credentials = db.session.query(DataSourceApiKeyAuthBinding).filter_by(provider="jinareader").all()
if jina_credentials:
jina_credentials_tenant_mapping: dict[str, list[DataSourceApiKeyAuthBinding]] = {}
for jina_credential in jina_credentials:
tenant_id = jina_credential.tenant_id
if tenant_id not in jina_credentials_tenant_mapping:
jina_credentials_tenant_mapping[tenant_id] = []
jina_credentials_tenant_mapping[tenant_id].append(jina_credential)
for tenant_id, jina_tenant_credentials in jina_credentials_tenant_mapping.items():
tenant = db.session.query(Tenant).filter_by(id=tenant_id).first()
if not tenant:
continue
try:
# check jina plugin is installed
installed_plugins = installer_manager.list_plugins(tenant_id)
installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins]
if jina_plugin_id not in installed_plugins_ids:
if jina_plugin_unique_identifier:
# install jina plugin
logger.debug("Installing Jina plugin %s", jina_plugin_unique_identifier)
PluginService.install_from_marketplace_pkg(tenant_id, [jina_plugin_unique_identifier])

auth_count = 0
for jina_tenant_credential in jina_tenant_credentials:
auth_count += 1
if not jina_tenant_credential.credentials:
click.echo(
click.style(
f"Skipping jina credential for tenant {tenant_id} due to missing credentials.",
fg="yellow",
)
)
continue
# get credential api key
credentials_json = json.loads(jina_tenant_credential.credentials)
api_key = credentials_json.get("config", {}).get("api_key")
new_credentials = {
"integration_secret": api_key,
}
datasource_provider = DatasourceProvider(
provider="jinareader",
tenant_id=tenant_id,
plugin_id=jina_plugin_id,
auth_type=api_key_credential_type.value,
encrypted_credentials=new_credentials,
name=f"Auth {auth_count}",
avatar_url="default",
is_default=False,
)
db.session.add(datasource_provider)
deal_jina_count += 1
except Exception as e:
click.echo(
click.style(f"Error transforming jina credentials: {str(e)}, tenant_id: {tenant_id}", fg="red")
)
continue
db.session.commit()
except Exception as e:
click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red"))
return
click.echo(click.style(f"Transforming notion successfully. deal_notion_count: {deal_notion_count}", fg="green"))
click.echo(
click.style(f"Transforming firecrawl successfully. deal_firecrawl_count: {deal_firecrawl_count}", fg="green")
)
click.echo(click.style(f"Transforming jina successfully. deal_jina_count: {deal_jina_count}", fg="green"))

|
||||
@click.option(
|
||||
"--input_file", prompt=True, help="The file to store the extracted unique identifiers.", default="plugins.jsonl"
|
||||
)
|
||||
@click.option(
|
||||
"--output_file", prompt=True, help="The file to store the installed plugins.", default="installed_plugins.jsonl"
|
||||
)
|
||||
@click.option("--workers", prompt=True, help="The number of workers to install plugins.", default=100)
|
||||
def install_rag_pipeline_plugins(input_file, output_file, workers):
|
||||
"""
|
||||
Install rag pipeline plugins
|
||||
"""
|
||||
click.echo(click.style("Installing rag pipeline plugins", fg="yellow"))
|
||||
plugin_migration = PluginMigration()
|
||||
plugin_migration.install_rag_pipeline_plugins(
|
||||
input_file,
|
||||
output_file,
|
||||
workers,
|
||||
)
|
||||
click.echo(click.style("Installing rag pipeline plugins successfully", fg="green"))
|
||||
|
||||
|
||||
@click.command(
    "migrate-oss",
    help="Migrate files from Local or OpenDAL source to a cloud OSS storage (destination must NOT be local/opendal).",
)
@click.option(
    "--path",
    "paths",
    multiple=True,
    help="Storage path prefixes to migrate (repeatable). Defaults: privkeys, upload_files, image_files,"
    " tools, website_files, keyword_files, ops_trace",
)
@click.option(
    "--source",
    type=click.Choice(["local", "opendal"], case_sensitive=False),
    default="opendal",
    show_default=True,
    help="Source storage type to read from",
)
@click.option("--overwrite", is_flag=True, default=False, help="Overwrite destination if file already exists")
@click.option("--dry-run", is_flag=True, default=False, help="Show what would be migrated without uploading")
@click.option("-f", "--force", is_flag=True, help="Skip confirmation and run without prompts")
@click.option(
    "--update-db/--no-update-db",
    default=True,
    help="Update upload_files.storage_type from source type to current storage after migration",
)
def migrate_oss(
    paths: tuple[str, ...],
    source: str,
    overwrite: bool,
    dry_run: bool,
    force: bool,
    update_db: bool,
):
    """
    Copy all files under selected prefixes from a source storage
    (Local filesystem or OpenDAL-backed) into the currently configured
    destination storage backend, then optionally update DB records.

    Expected usage: set STORAGE_TYPE (and its credentials) to your target backend.
    """
    # Ensure target storage is not local/opendal
    if dify_config.STORAGE_TYPE in (StorageType.LOCAL, StorageType.OPENDAL):
        click.echo(
            click.style(
                "Target STORAGE_TYPE must be a cloud OSS (not 'local' or 'opendal').\n"
                "Please set STORAGE_TYPE to one of: s3, aliyun-oss, azure-blob, google-storage, tencent-cos, \n"
                "volcengine-tos, supabase, oci-storage, huawei-obs, baidu-obs, clickzetta-volume.",
                fg="red",
            )
        )
        return

    # Default paths if none specified
    default_paths = ("privkeys", "upload_files", "image_files", "tools", "website_files", "keyword_files", "ops_trace")
    path_list = list(paths) if paths else list(default_paths)
    is_source_local = source.lower() == "local"

    click.echo(click.style("Preparing migration to target storage.", fg="yellow"))
    click.echo(click.style(f"Target storage type: {dify_config.STORAGE_TYPE}", fg="white"))
    if is_source_local:
        src_root = dify_config.STORAGE_LOCAL_PATH
        click.echo(click.style(f"Source: local fs, root: {src_root}", fg="white"))
    else:
        click.echo(click.style(f"Source: opendal scheme={dify_config.OPENDAL_SCHEME}", fg="white"))
    click.echo(click.style(f"Paths to migrate: {', '.join(path_list)}", fg="white"))
    click.echo("")

    if not force:
        click.confirm("Proceed with migration?", abort=True)

    # Instantiate source storage
    try:
        if is_source_local:
            src_root = dify_config.STORAGE_LOCAL_PATH
            source_storage = OpenDALStorage(scheme="fs", root=src_root)
        else:
            source_storage = OpenDALStorage(scheme=dify_config.OPENDAL_SCHEME)
    except Exception as e:
        click.echo(click.style(f"Failed to initialize source storage: {str(e)}", fg="red"))
        return

    total_files = 0
    copied_files = 0
    skipped_files = 0
    errored_files = 0
    copied_upload_file_keys: list[str] = []

    for prefix in path_list:
        click.echo(click.style(f"Scanning source path: {prefix}", fg="white"))
        try:
            keys = source_storage.scan(path=prefix, files=True, directories=False)
        except FileNotFoundError:
            click.echo(click.style(f" -> Skipping missing path: {prefix}", fg="yellow"))
            continue
        except NotImplementedError:
            click.echo(click.style(" -> Source storage does not support scanning.", fg="red"))
            return
        except Exception as e:
            click.echo(click.style(f" -> Error scanning '{prefix}': {str(e)}", fg="red"))
            continue

        click.echo(click.style(f"Found {len(keys)} files under {prefix}", fg="white"))

        for key in keys:
            total_files += 1

            # check destination existence
            if not overwrite:
                try:
                    if storage.exists(key):
                        skipped_files += 1
                        continue
                except Exception as e:
                    # existence check failures should not block migration attempt
                    # but should be surfaced to user as a warning for visibility
                    click.echo(
                        click.style(
                            f" -> Warning: failed target existence check for {key}: {str(e)}",
                            fg="yellow",
                        )
                    )

            if dry_run:
                copied_files += 1
                continue

            # read from source and write to destination
            try:
                data = source_storage.load_once(key)
            except FileNotFoundError:
                errored_files += 1
                click.echo(click.style(f" -> Missing on source: {key}", fg="yellow"))
                continue
            except Exception as e:
                errored_files += 1
                click.echo(click.style(f" -> Error reading {key}: {str(e)}", fg="red"))
                continue

            try:
                storage.save(key, data)
                copied_files += 1
                if prefix == "upload_files":
                    copied_upload_file_keys.append(key)
            except Exception as e:
                errored_files += 1
                click.echo(click.style(f" -> Error writing {key} to target: {str(e)}", fg="red"))
                continue

    click.echo("")
    click.echo(click.style("Migration summary:", fg="yellow"))
    click.echo(click.style(f" Total: {total_files}", fg="white"))
    click.echo(click.style(f" Copied: {copied_files}", fg="green"))
    click.echo(click.style(f" Skipped: {skipped_files}", fg="white"))
    if errored_files:
        click.echo(click.style(f" Errors: {errored_files}", fg="red"))

    if dry_run:
        click.echo(click.style("Dry-run complete. No changes were made.", fg="green"))
        return

    if errored_files:
        click.echo(
            click.style(
                "Some files failed to migrate. Review errors above before updating DB records.",
                fg="yellow",
            )
        )
        if update_db and not force:
            if not click.confirm("Proceed to update DB storage_type despite errors?", default=False):
                update_db = False

    # Optionally update DB records for upload_files.storage_type (only for successfully copied upload_files)
    if update_db:
        if not copied_upload_file_keys:
            click.echo(click.style("No upload_files copied. Skipping DB storage_type update.", fg="yellow"))
        else:
            try:
                source_storage_type = StorageType.LOCAL if is_source_local else StorageType.OPENDAL
                updated = (
                    db.session.query(UploadFile)
                    .where(
                        UploadFile.storage_type == source_storage_type,
                        UploadFile.key.in_(copied_upload_file_keys),
                    )
                    .update({UploadFile.storage_type: dify_config.STORAGE_TYPE}, synchronize_session=False)
                )
                db.session.commit()
                click.echo(click.style(f"Updated storage_type for {updated} upload_files records.", fg="green"))
            except Exception as e:
                db.session.rollback()
                click.echo(click.style(f"Failed to update DB storage_type: {str(e)}", fg="red"))

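# Editor's sketch (not part of the source): a safe first pass over migrate-oss
# using --dry-run, driven through Click's CliRunner. The module name "commands"
# is an assumption; no files are written in dry-run mode.
#
#     from click.testing import CliRunner
#     from commands import migrate_oss
#
#     runner = CliRunner()
#     result = runner.invoke(
#         migrate_oss,
#         ["--path", "upload_files", "--source", "opendal", "--dry-run", "--force"],
#     )
#     print(result.output)  # prints the scan and summary report only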
@@ -1,3 +1,3 @@
 from .app_config import DifyConfig

-dify_config = DifyConfig()
+dify_config = DifyConfig()  # type: ignore
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,28 +7,28 @@ class NotionConfig(BaseSettings):
     Configuration settings for Notion integration
     """

-    NOTION_CLIENT_ID: Optional[str] = Field(
+    NOTION_CLIENT_ID: str | None = Field(
         description="Client ID for Notion API authentication. Required for OAuth 2.0 flow.",
         default=None,
     )

-    NOTION_CLIENT_SECRET: Optional[str] = Field(
+    NOTION_CLIENT_SECRET: str | None = Field(
         description="Client secret for Notion API authentication. Required for OAuth 2.0 flow.",
         default=None,
     )

-    NOTION_INTEGRATION_TYPE: Optional[str] = Field(
+    NOTION_INTEGRATION_TYPE: str | None = Field(
         description="Type of Notion integration."
         " Set to 'internal' for internal integrations, or None for public integrations.",
         default=None,
     )

-    NOTION_INTERNAL_SECRET: Optional[str] = Field(
+    NOTION_INTERNAL_SECRET: str | None = Field(
         description="Secret key for internal Notion integrations. Required when NOTION_INTEGRATION_TYPE is 'internal'.",
         default=None,
     )

-    NOTION_INTEGRATION_TOKEN: Optional[str] = Field(
+    NOTION_INTEGRATION_TOKEN: str | None = Field(
         description="Integration token for Notion API access. Used for direct API calls without OAuth flow.",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, NonNegativeFloat
 from pydantic_settings import BaseSettings

@@ -9,7 +7,7 @@ class SentryConfig(BaseSettings):
     Configuration settings for Sentry error tracking and performance monitoring
     """

-    SENTRY_DSN: Optional[str] = Field(
+    SENTRY_DSN: str | None = Field(
         description="Sentry Data Source Name (DSN)."
         " This is the unique identifier of your Sentry project, used to send events to the correct project.",
         default=None,
@@ -1,4 +1,5 @@
-from typing import Literal, Optional
+from enum import StrEnum
+from typing import Literal

 from pydantic import (
     AliasChoices,
@@ -31,6 +32,12 @@ class SecurityConfig(BaseSettings):
         description="Duration in minutes for which a password reset token remains valid",
         default=5,
     )

+    EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES: PositiveInt = Field(
+        description="Duration in minutes for which an email register token remains valid",
+        default=5,
+    )
+
     CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: PositiveInt = Field(
         description="Duration in minutes for which a change email token remains valid",
         default=5,
@@ -51,7 +58,7 @@ class SecurityConfig(BaseSettings):
         default=False,
     )

-    ADMIN_API_KEY: Optional[str] = Field(
+    ADMIN_API_KEY: str | None = Field(
         description="Admin API key for authentication",
         default=None,
     )
@@ -66,14 +73,14 @@ class AppExecutionConfig(BaseSettings):
         description="Maximum allowed execution time for the application in seconds",
         default=1200,
     )
     APP_DEFAULT_ACTIVE_REQUESTS: NonNegativeInt = Field(
         description="Default number of concurrent active requests per app (0 for unlimited)",
         default=0,
     )
     APP_MAX_ACTIVE_REQUESTS: NonNegativeInt = Field(
         description="Maximum number of concurrent active requests per app (0 for unlimited)",
         default=0,
     )
     APP_DAILY_RATE_LIMIT: NonNegativeInt = Field(
         description="Maximum number of requests per app per day",
         default=5000,
     )


 class CodeExecutionSandboxConfig(BaseSettings):
@@ -91,21 +98,36 @@ class CodeExecutionSandboxConfig(BaseSettings):
         default="dify-sandbox",
     )

-    CODE_EXECUTION_CONNECT_TIMEOUT: Optional[float] = Field(
+    CODE_EXECUTION_CONNECT_TIMEOUT: float | None = Field(
         description="Connection timeout in seconds for code execution requests",
         default=10.0,
     )

-    CODE_EXECUTION_READ_TIMEOUT: Optional[float] = Field(
+    CODE_EXECUTION_READ_TIMEOUT: float | None = Field(
         description="Read timeout in seconds for code execution requests",
         default=60.0,
     )

-    CODE_EXECUTION_WRITE_TIMEOUT: Optional[float] = Field(
+    CODE_EXECUTION_WRITE_TIMEOUT: float | None = Field(
         description="Write timeout in seconds for code execution requests",
         default=10.0,
     )

+    CODE_EXECUTION_POOL_MAX_CONNECTIONS: PositiveInt = Field(
+        description="Maximum number of concurrent connections for the code execution HTTP client",
+        default=100,
+    )
+
+    CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS: PositiveInt = Field(
+        description="Maximum number of persistent keep-alive connections for the code execution HTTP client",
+        default=20,
+    )
+
+    CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY: PositiveFloat | None = Field(
+        description="Keep-alive expiry in seconds for idle connections (set to None to disable)",
+        default=5.0,
+    )
+
     CODE_MAX_NUMBER: PositiveInt = Field(
         description="Maximum allowed numeric value in code execution",
         default=9223372036854775807,
@@ -128,7 +150,7 @@ class CodeExecutionSandboxConfig(BaseSettings):

     CODE_MAX_STRING_LENGTH: PositiveInt = Field(
         description="Maximum allowed length for strings in code execution",
-        default=80000,
+        default=400_000,
     )

     CODE_MAX_STRING_ARRAY_LENGTH: PositiveInt = Field(
@@ -146,6 +168,38 @@ class CodeExecutionSandboxConfig(BaseSettings):
         default=1000,
     )

+    CODE_EXECUTION_SSL_VERIFY: bool = Field(
+        description="Enable or disable SSL verification for code execution requests",
+        default=True,
+    )
+
+
+class TriggerConfig(BaseSettings):
+    """
+    Configuration for trigger
+    """
+
+    WEBHOOK_REQUEST_BODY_MAX_SIZE: PositiveInt = Field(
+        description="Maximum allowed size for webhook request bodies in bytes",
+        default=10485760,
+    )
+
+
+class AsyncWorkflowConfig(BaseSettings):
+    """
+    Configuration for async workflow
+    """
+
+    ASYNC_WORKFLOW_SCHEDULER_GRANULARITY: int = Field(
+        description="Granularity in seconds for the async workflow scheduler. "
+        "A few users can block the queue with time-consuming tasks; to avoid this, "
+        "workflows can be suspended when needed. A time-based checker runs every "
+        "granularity seconds, checks the workflow queue, and suspends workflows accordingly.",
+        default=120,
+        ge=1,
+    )
+
+
 class PluginConfig(BaseSettings):
     """
@@ -162,6 +216,11 @@ class PluginConfig(BaseSettings):
         default="plugin-api-key",
     )

+    PLUGIN_DAEMON_TIMEOUT: PositiveFloat | None = Field(
+        description="Timeout in seconds for requests to the plugin daemon (set to None to disable)",
+        default=300.0,
+    )
+
     INNER_API_KEY_FOR_PLUGIN: str = Field(description="Inner api key for plugin", default="inner-api-key")

     PLUGIN_REMOTE_INSTALL_HOST: str = Field(
@@ -231,6 +290,8 @@ class EndpointConfig(BaseSettings):
         description="Template url for endpoint plugin", default="http://localhost:5002/e/{hook_id}"
     )

+    TRIGGER_URL: str = Field(description="Template url for triggers", default="http://localhost:5001")
+

 class FileAccessConfig(BaseSettings):
     """
@@ -299,12 +360,93 @@ class FileUploadConfig(BaseSettings):
         default=10,
     )

+    IMAGE_FILE_BATCH_LIMIT: PositiveInt = Field(
+        description="Maximum number of files allowed in an image batch upload operation",
+        default=10,
+    )
+
+    SINGLE_CHUNK_ATTACHMENT_LIMIT: PositiveInt = Field(
+        description="Maximum number of files allowed in a single chunk attachment",
+        default=10,
+    )
+
+    ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: NonNegativeInt = Field(
+        description="Maximum allowed image file size for attachments in megabytes",
+        default=2,
+    )
+
+    ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: NonNegativeInt = Field(
+        description="Timeout for downloading image attachments in seconds",
+        default=60,
+    )
+
+    # Annotation Import Security Configurations
+    ANNOTATION_IMPORT_FILE_SIZE_LIMIT: NonNegativeInt = Field(
+        description="Maximum allowed CSV file size for annotation import in megabytes",
+        default=2,
+    )
+
+    ANNOTATION_IMPORT_MAX_RECORDS: PositiveInt = Field(
+        description="Maximum number of annotation records allowed in a single import",
+        default=10000,
+    )
+
+    ANNOTATION_IMPORT_MIN_RECORDS: PositiveInt = Field(
+        description="Minimum number of annotation records required in a single import",
+        default=1,
+    )
+
+    ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: PositiveInt = Field(
+        description="Maximum number of annotation import requests per minute per tenant",
+        default=5,
+    )
+
+    ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: PositiveInt = Field(
+        description="Maximum number of annotation import requests per hour per tenant",
+        default=20,
+    )
+
+    ANNOTATION_IMPORT_MAX_CONCURRENT: PositiveInt = Field(
+        description="Maximum number of concurrent annotation import tasks per tenant",
+        default=2,
+    )
+
+    inner_UPLOAD_FILE_EXTENSION_BLACKLIST: str = Field(
+        description=(
+            "Comma-separated list of file extensions that are blocked from upload. "
+            "Extensions should be lowercase without dots (e.g., 'exe,bat,sh,dll'). "
+            "Empty by default to allow all file types."
+        ),
+        validation_alias=AliasChoices("UPLOAD_FILE_EXTENSION_BLACKLIST"),
+        default="",
+    )
+
+    @computed_field  # type: ignore[misc]
+    @property
+    def UPLOAD_FILE_EXTENSION_BLACKLIST(self) -> set[str]:
+        """
+        Parse and return the blacklist as a set of lowercase extensions.
+        Returns an empty set if no blacklist is configured.
+        """
+        if not self.inner_UPLOAD_FILE_EXTENSION_BLACKLIST:
+            return set()
+        return {
+            ext.strip().lower().strip(".")
+            for ext in self.inner_UPLOAD_FILE_EXTENSION_BLACKLIST.split(",")
+            if ext.strip()
+        }
+
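# Editor's illustration (not part of the diff): the normalization above
# lowercases entries, trims whitespace, strips leading/trailing dots, and
# drops empty items, so a messy env value still yields a clean blacklist set.
#
#     raw = "EXE, .bat , sh,,DLL."  # hypothetical UPLOAD_FILE_EXTENSION_BLACKLIST value
#     blacklist = {ext.strip().lower().strip(".") for ext in raw.split(",") if ext.strip()}
#     assert blacklist == {"exe", "bat", "sh", "dll"}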

 class HttpConfig(BaseSettings):
     """
     HTTP-related configurations for the application
     """

     COOKIE_DOMAIN: str = Field(
         description="Explicit cookie domain for console/service cookies when sharing across subdomains",
         default="",
     )

     API_COMPRESSION_ENABLED: bool = Field(
         description="Enable or disable gzip compression for HTTP responses",
         default=False,
@@ -335,11 +477,11 @@ class HttpConfig(BaseSettings):
     )

     HTTP_REQUEST_MAX_READ_TIMEOUT: int = Field(
-        ge=1, description="Maximum read timeout in seconds for HTTP requests", default=60
+        ge=1, description="Maximum read timeout in seconds for HTTP requests", default=600
     )

     HTTP_REQUEST_MAX_WRITE_TIMEOUT: int = Field(
-        ge=1, description="Maximum write timeout in seconds for HTTP requests", default=20
+        ge=1, description="Maximum write timeout in seconds for HTTP requests", default=600
     )

     HTTP_REQUEST_NODE_MAX_BINARY_SIZE: PositiveInt = Field(
@@ -362,17 +504,17 @@ class HttpConfig(BaseSettings):
         default=3,
     )

-    SSRF_PROXY_ALL_URL: Optional[str] = Field(
+    SSRF_PROXY_ALL_URL: str | None = Field(
         description="Proxy URL for HTTP or HTTPS requests to prevent Server-Side Request Forgery (SSRF)",
         default=None,
     )

-    SSRF_PROXY_HTTP_URL: Optional[str] = Field(
+    SSRF_PROXY_HTTP_URL: str | None = Field(
         description="Proxy URL for HTTP requests to prevent Server-Side Request Forgery (SSRF)",
         default=None,
     )

-    SSRF_PROXY_HTTPS_URL: Optional[str] = Field(
+    SSRF_PROXY_HTTPS_URL: str | None = Field(
         description="Proxy URL for HTTPS requests to prevent Server-Side Request Forgery (SSRF)",
         default=None,
     )
@@ -397,6 +539,21 @@ class HttpConfig(BaseSettings):
         default=5,
     )

+    SSRF_POOL_MAX_CONNECTIONS: PositiveInt = Field(
+        description="Maximum number of concurrent connections for the SSRF HTTP client",
+        default=100,
+    )
+
+    SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS: PositiveInt = Field(
+        description="Maximum number of persistent keep-alive connections for the SSRF HTTP client",
+        default=20,
+    )
+
+    SSRF_POOL_KEEPALIVE_EXPIRY: PositiveFloat | None = Field(
+        description="Keep-alive expiry in seconds for idle SSRF connections (set to None to disable)",
+        default=5.0,
+    )
+
     RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
         description="Enable handling of X-Forwarded-For, X-Forwarded-Proto, and X-Forwarded-Port headers"
         " when the app is behind a single trusted reverse proxy.",
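# Editor's sketch (not in the diff): the SSRF proxy knobs above are ordinary
# env-driven pydantic settings; the proxy URL below is a hypothetical value.
#
#     import os
#
#     os.environ["SSRF_PROXY_ALL_URL"] = "http://ssrf-proxy:3128"
#     cfg = HttpConfig()
#     assert cfg.SSRF_PROXY_ALL_URL == "http://ssrf-proxy:3128"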
@@ -414,7 +571,7 @@ class InnerAPIConfig(BaseSettings):
         default=False,
     )

-    INNER_API_KEY: Optional[str] = Field(
+    INNER_API_KEY: str | None = Field(
         description="API key for accessing the internal API",
         default=None,
     )
@@ -430,7 +587,7 @@ class LoggingConfig(BaseSettings):
         default="INFO",
     )

-    LOG_FILE: Optional[str] = Field(
+    LOG_FILE: str | None = Field(
         description="File path for log output.",
         default=None,
     )
@@ -447,15 +604,18 @@ class LoggingConfig(BaseSettings):

     LOG_FORMAT: str = Field(
         description="Format string for log messages",
-        default="%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] [%(filename)s:%(lineno)d] - %(message)s",
+        default=(
+            "%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] "
+            "[%(filename)s:%(lineno)d] %(trace_id)s - %(message)s"
+        ),
     )

-    LOG_DATEFORMAT: Optional[str] = Field(
+    LOG_DATEFORMAT: str | None = Field(
         description="Date format string for log timestamps",
         default=None,
     )

-    LOG_TZ: Optional[str] = Field(
+    LOG_TZ: str | None = Field(
         description="Timezone for log timestamps (e.g., 'America/New_York')",
         default="UTC",
     )
@@ -499,6 +659,22 @@ class UpdateConfig(BaseSettings):
     )


+class WorkflowVariableTruncationConfig(BaseSettings):
+    WORKFLOW_VARIABLE_TRUNCATION_MAX_SIZE: PositiveInt = Field(
+        # 1000 KiB
+        1024_000,
+        description="Maximum size for a variable to trigger final truncation.",
+    )
+    WORKFLOW_VARIABLE_TRUNCATION_STRING_LENGTH: PositiveInt = Field(
+        100000,
+        description="Maximum length for a string to trigger truncation, measured in number of characters.",
+    )
+    WORKFLOW_VARIABLE_TRUNCATION_ARRAY_LENGTH: PositiveInt = Field(
+        1000,
+        description="Maximum length for an array to trigger truncation.",
+    )
+
+
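# Editor's sketch (hypothetical helper, not engine code): how a caller might
# consult the thresholds above before truncating a workflow variable.
#
#     def should_truncate_string(value: str, cfg: WorkflowVariableTruncationConfig) -> bool:
#         # Strings longer than the configured character limit are truncation candidates.
#         return len(value) > cfg.WORKFLOW_VARIABLE_TRUNCATION_STRING_LENGTH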
 class WorkflowConfig(BaseSettings):
     """
     Configuration for workflow execution
@@ -519,16 +695,38 @@ class WorkflowConfig(BaseSettings):
         default=5,
     )

     WORKFLOW_PARALLEL_DEPTH_LIMIT: PositiveInt = Field(
         description="Maximum allowed depth for nested parallel executions",
         default=3,
     )

     MAX_VARIABLE_SIZE: PositiveInt = Field(
         description="Maximum size in bytes for a single variable in workflows. Defaults to 200 KB.",
         default=200 * 1024,
     )

     TEMPLATE_TRANSFORM_MAX_LENGTH: PositiveInt = Field(
         description="Maximum number of characters allowed in Template Transform node output",
         default=400_000,
     )

+    # GraphEngine Worker Pool Configuration
+    GRAPH_ENGINE_MIN_WORKERS: PositiveInt = Field(
+        description="Minimum number of workers per GraphEngine instance",
+        default=1,
+    )
+
+    GRAPH_ENGINE_MAX_WORKERS: PositiveInt = Field(
+        description="Maximum number of workers per GraphEngine instance",
+        default=10,
+    )
+
+    GRAPH_ENGINE_SCALE_UP_THRESHOLD: PositiveInt = Field(
+        description="Queue depth threshold that triggers worker scale up",
+        default=3,
+    )
+
+    GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME: float = Field(
+        description="Seconds of idle time before scaling down workers",
+        default=5.0,
+        ge=0.1,
+    )
+

 class WorkflowNodeExecutionConfig(BaseSettings):
     """
@@ -589,22 +787,22 @@ class AuthConfig(BaseSettings):
         default="/console/api/oauth/authorize",
     )

-    GITHUB_CLIENT_ID: Optional[str] = Field(
+    GITHUB_CLIENT_ID: str | None = Field(
         description="GitHub OAuth client ID",
         default=None,
     )

-    GITHUB_CLIENT_SECRET: Optional[str] = Field(
+    GITHUB_CLIENT_SECRET: str | None = Field(
         description="GitHub OAuth client secret",
         default=None,
     )

-    GOOGLE_CLIENT_ID: Optional[str] = Field(
+    GOOGLE_CLIENT_ID: str | None = Field(
         description="Google OAuth client ID",
         default=None,
     )

-    GOOGLE_CLIENT_SECRET: Optional[str] = Field(
+    GOOGLE_CLIENT_SECRET: str | None = Field(
         description="Google OAuth client secret",
         default=None,
     )
@@ -639,6 +837,11 @@ class AuthConfig(BaseSettings):
         default=86400,
     )

+    EMAIL_REGISTER_LOCKOUT_DURATION: PositiveInt = Field(
+        description="Time (in seconds) a user must wait before retrying email register after exceeding the rate limit.",
+        default=86400,
+    )
+

 class ModerationConfig(BaseSettings):
     """
@@ -662,47 +865,71 @@ class ToolConfig(BaseSettings):
     )


+class TemplateMode(StrEnum):
+    # unsafe mode allows flexible operations in templates, but may cause security vulnerabilities
+    UNSAFE = "unsafe"
+
+    # sandbox mode restricts some unsafe operations like accessing __class__.
+    # however, it is still not 100% safe; for example, CPU exploitation can happen.
+    SANDBOX = "sandbox"
+
+    # templating is disabled
+    DISABLED = "disabled"
+
+
 class MailConfig(BaseSettings):
     """
     Configuration for email services
     """

-    MAIL_TYPE: Optional[str] = Field(
+    MAIL_TEMPLATING_MODE: TemplateMode = Field(
+        description="Template mode for email services",
+        default=TemplateMode.SANDBOX,
+    )
+
+    MAIL_TEMPLATING_TIMEOUT: int = Field(
+        description="""
+        Timeout for email templating in seconds. Used to prevent infinite loops in malicious templates.
+        Only available in sandbox mode.""",
+        default=3,
+    )
+
+    MAIL_TYPE: str | None = Field(
         description="Email service provider type ('smtp', 'resend', or 'sendgrid'), defaults to None.",
         default=None,
     )

-    MAIL_DEFAULT_SEND_FROM: Optional[str] = Field(
+    MAIL_DEFAULT_SEND_FROM: str | None = Field(
         description="Default email address to use as the sender",
         default=None,
     )

-    RESEND_API_KEY: Optional[str] = Field(
+    RESEND_API_KEY: str | None = Field(
         description="API key for Resend email service",
         default=None,
     )

-    RESEND_API_URL: Optional[str] = Field(
+    RESEND_API_URL: str | None = Field(
         description="API URL for Resend email service",
         default=None,
     )

-    SMTP_SERVER: Optional[str] = Field(
+    SMTP_SERVER: str | None = Field(
         description="SMTP server hostname",
         default=None,
     )

-    SMTP_PORT: Optional[int] = Field(
+    SMTP_PORT: int | None = Field(
         description="SMTP server port number",
         default=465,
     )

-    SMTP_USERNAME: Optional[str] = Field(
+    SMTP_USERNAME: str | None = Field(
         description="Username for SMTP authentication",
         default=None,
     )

-    SMTP_PASSWORD: Optional[str] = Field(
+    SMTP_PASSWORD: str | None = Field(
         description="Password for SMTP authentication",
         default=None,
     )
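# Editor's sketch (not in the diff): MAIL_TEMPLATING_MODE is an ordinary
# pydantic-settings field, so the mode can be driven from the environment;
# the value below is illustrative.
#
#     import os
#
#     os.environ["MAIL_TEMPLATING_MODE"] = "disabled"
#     cfg = MailConfig()
#     assert cfg.MAIL_TEMPLATING_MODE is TemplateMode.DISABLED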
@@ -722,7 +949,7 @@ class MailConfig(BaseSettings):
         default=50,
     )

-    SENDGRID_API_KEY: Optional[str] = Field(
+    SENDGRID_API_KEY: str | None = Field(
         description="API key for SendGrid service",
         default=None,
     )
@@ -745,17 +972,17 @@ class RagEtlConfig(BaseSettings):
         default="database",
     )

-    UNSTRUCTURED_API_URL: Optional[str] = Field(
+    UNSTRUCTURED_API_URL: str | None = Field(
         description="API URL for Unstructured.io service",
         default=None,
     )

-    UNSTRUCTURED_API_KEY: Optional[str] = Field(
+    UNSTRUCTURED_API_KEY: str | None = Field(
         description="API key for Unstructured.io service",
         default="",
     )

-    SCARF_NO_ANALYTICS: Optional[str] = Field(
+    SCARF_NO_ANALYTICS: str | None = Field(
         description="Whether to disable Scarf analytics in the Unstructured library.",
         default="false",
     )
@@ -796,6 +1023,16 @@ class DataSetConfig(BaseSettings):
         default=30,
     )

+    DSL_EXPORT_ENCRYPT_DATASET_ID: bool = Field(
+        description="Enable or disable dataset ID encryption when exporting DSL files",
+        default=True,
+    )
+
+    DATASET_MAX_SEGMENTS_PER_REQUEST: NonNegativeInt = Field(
+        description="Maximum number of segments for dataset segments API (0 for unlimited)",
+        default=0,
+    )
+

 class WorkspaceConfig(BaseSettings):
     """
@@ -871,6 +1108,44 @@ class CeleryScheduleTasksConfig(BaseSettings):
         description="Enable check upgradable plugin task",
         default=True,
     )
+    ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK: bool = Field(
+        description="Enable workflow schedule poller task",
+        default=True,
+    )
+    WORKFLOW_SCHEDULE_POLLER_INTERVAL: int = Field(
+        description="Workflow schedule poller interval in minutes",
+        default=1,
+    )
+    WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: int = Field(
+        description="Maximum number of schedules to process in each poll batch",
+        default=100,
+    )
+    WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: int = Field(
+        description="Maximum schedules to dispatch per tick (0=unlimited, circuit breaker)",
+        default=0,
+    )
+
+    # Trigger provider refresh (simple version)
+    ENABLE_TRIGGER_PROVIDER_REFRESH_TASK: bool = Field(
+        description="Enable trigger provider refresh poller",
+        default=True,
+    )
+    TRIGGER_PROVIDER_REFRESH_INTERVAL: int = Field(
+        description="Trigger provider refresh poller interval in minutes",
+        default=1,
+    )
+    TRIGGER_PROVIDER_REFRESH_BATCH_SIZE: int = Field(
+        description="Max trigger subscriptions to process per tick",
+        default=200,
+    )
+    TRIGGER_PROVIDER_CREDENTIAL_THRESHOLD_SECONDS: int = Field(
+        description="Proactive credential refresh threshold in seconds",
+        default=60 * 60,
+    )
+    TRIGGER_PROVIDER_SUBSCRIPTION_THRESHOLD_SECONDS: int = Field(
+        description="Proactive subscription refresh threshold in seconds",
+        default=60 * 60,
+    )


 class PositionConfig(BaseSettings):
@@ -969,7 +1244,7 @@ class AccountConfig(BaseSettings):


 class WorkflowLogConfig(BaseSettings):
-    WORKFLOW_LOG_CLEANUP_ENABLED: bool = Field(default=True, description="Enable workflow run log cleanup")
+    WORKFLOW_LOG_CLEANUP_ENABLED: bool = Field(default=False, description="Enable workflow run log cleanup")
     WORKFLOW_LOG_RETENTION_DAYS: int = Field(default=30, description="Retention days for workflow run logs")
     WORKFLOW_LOG_CLEANUP_BATCH_SIZE: int = Field(
         default=100, description="Batch size for workflow run log cleanup operations"
@@ -988,12 +1263,21 @@ class SwaggerUIConfig(BaseSettings):
     )


+class TenantIsolatedTaskQueueConfig(BaseSettings):
+    TENANT_ISOLATED_TASK_CONCURRENCY: int = Field(
+        description="Number of tasks allowed to be delivered concurrently from isolated queue per tenant",
+        default=1,
+    )
+
+
 class FeatureConfig(
     # place the configs in alphabet order
     AppExecutionConfig,
     AuthConfig,  # Changed from OAuthConfig to AuthConfig
     BillingConfig,
     CodeExecutionSandboxConfig,
+    TriggerConfig,
+    AsyncWorkflowConfig,
     PluginConfig,
     MarketplaceConfig,
     DataSetConfig,
@@ -1012,6 +1296,7 @@ class FeatureConfig(
     RagEtlConfig,
     RepositoryConfig,
     SecurityConfig,
+    TenantIsolatedTaskQueueConfig,
     ToolConfig,
     UpdateConfig,
     WorkflowConfig,
@@ -1025,5 +1310,6 @@ class FeatureConfig(
     CeleryBeatConfig,
     CeleryScheduleTasksConfig,
     WorkflowLogConfig,
+    WorkflowVariableTruncationConfig,
 ):
     pass
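# Editor's sketch (illustrative): FeatureConfig flattens every mixin above
# into one settings object, so fields from different mixins live side by side.
#
#     cfg = FeatureConfig()
#     print(cfg.APP_MAX_EXECUTION_TIME)       # from AppExecutionConfig
#     print(cfg.WORKFLOW_LOG_RETENTION_DAYS)  # from WorkflowLogConfig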
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, NonNegativeInt
 from pydantic_settings import BaseSettings

@@ -40,17 +38,17 @@ class HostedOpenAiConfig(BaseSettings):
     Configuration for hosted OpenAI service
     """

-    HOSTED_OPENAI_API_KEY: Optional[str] = Field(
+    HOSTED_OPENAI_API_KEY: str | None = Field(
         description="API key for hosted OpenAI service",
         default=None,
     )

-    HOSTED_OPENAI_API_BASE: Optional[str] = Field(
+    HOSTED_OPENAI_API_BASE: str | None = Field(
         description="Base URL for hosted OpenAI API",
         default=None,
     )

-    HOSTED_OPENAI_API_ORGANIZATION: Optional[str] = Field(
+    HOSTED_OPENAI_API_ORGANIZATION: str | None = Field(
         description="Organization ID for hosted OpenAI service",
         default=None,
     )
@@ -110,12 +108,12 @@ class HostedAzureOpenAiConfig(BaseSettings):
         default=False,
     )

-    HOSTED_AZURE_OPENAI_API_KEY: Optional[str] = Field(
+    HOSTED_AZURE_OPENAI_API_KEY: str | None = Field(
         description="API key for hosted Azure OpenAI service",
         default=None,
     )

-    HOSTED_AZURE_OPENAI_API_BASE: Optional[str] = Field(
+    HOSTED_AZURE_OPENAI_API_BASE: str | None = Field(
         description="Base URL for hosted Azure OpenAI API",
         default=None,
     )
@@ -131,12 +129,12 @@ class HostedAnthropicConfig(BaseSettings):
     Configuration for hosted Anthropic service
     """

-    HOSTED_ANTHROPIC_API_BASE: Optional[str] = Field(
+    HOSTED_ANTHROPIC_API_BASE: str | None = Field(
         description="Base URL for hosted Anthropic API",
         default=None,
     )

-    HOSTED_ANTHROPIC_API_KEY: Optional[str] = Field(
+    HOSTED_ANTHROPIC_API_KEY: str | None = Field(
         description="API key for hosted Anthropic service",
         default=None,
     )
@@ -222,11 +220,28 @@ class HostedFetchAppTemplateConfig(BaseSettings):
     )


+class HostedFetchPipelineTemplateConfig(BaseSettings):
+    """
+    Configuration for fetching pipeline templates
+    """
+
+    HOSTED_FETCH_PIPELINE_TEMPLATES_MODE: str = Field(
+        description="Mode for fetching pipeline templates: remote, db, or builtin. Defaults to remote.",
+        default="remote",
+    )
+
+    HOSTED_FETCH_PIPELINE_TEMPLATES_REMOTE_DOMAIN: str = Field(
+        description="Domain for fetching remote pipeline templates",
+        default="https://tmpl.dify.ai",
+    )
+
+
 class HostedServiceConfig(
     # place the configs in alphabet order
     HostedAnthropicConfig,
     HostedAzureOpenAiConfig,
     HostedFetchAppTemplateConfig,
+    HostedFetchPipelineTemplateConfig,
     HostedMinmaxConfig,
     HostedOpenAiConfig,
     HostedSparkConfig,
@@ -1,5 +1,5 @@
 import os
-from typing import Any, Literal, Optional
+from typing import Any, Literal
 from urllib.parse import parse_qsl, quote_plus

 from pydantic import Field, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, computed_field
@@ -18,6 +18,7 @@ from .storage.opendal_storage_config import OpenDALStorageConfig
 from .storage.supabase_storage_config import SupabaseStorageConfig
 from .storage.tencent_cos_storage_config import TencentCloudCOSStorageConfig
 from .storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig
+from .vdb.alibabacloud_mysql_config import AlibabaCloudMySQLConfig
 from .vdb.analyticdb_config import AnalyticdbConfig
 from .vdb.baidu_vector_config import BaiduVectorDBConfig
 from .vdb.chroma_config import ChromaConfig
@@ -25,6 +26,7 @@ from .vdb.clickzetta_config import ClickzettaConfig
 from .vdb.couchbase_config import CouchbaseConfig
 from .vdb.elasticsearch_config import ElasticsearchConfig
 from .vdb.huawei_cloud_config import HuaweiCloudConfig
+from .vdb.iris_config import IrisVectorConfig
 from .vdb.lindorm_config import LindormConfig
 from .vdb.matrixone_config import MatrixoneConfig
 from .vdb.milvus_config import MilvusConfig
@@ -78,18 +80,18 @@ class StorageConfig(BaseSettings):


 class VectorStoreConfig(BaseSettings):
-    VECTOR_STORE: Optional[str] = Field(
+    VECTOR_STORE: str | None = Field(
         description="Type of vector store to use for efficient similarity search."
         " Set to None if not using a vector store.",
         default=None,
     )

-    VECTOR_STORE_WHITELIST_ENABLE: Optional[bool] = Field(
+    VECTOR_STORE_WHITELIST_ENABLE: bool | None = Field(
         description="Enable whitelist for vector store.",
         default=False,
     )

-    VECTOR_INDEX_NAME_PREFIX: Optional[str] = Field(
+    VECTOR_INDEX_NAME_PREFIX: str | None = Field(
         description="Prefix used to create collection name in vector database",
         default="Vector_index",
     )
@@ -104,6 +106,12 @@ class KeywordStoreConfig(BaseSettings):


 class DatabaseConfig(BaseSettings):
+    # Database type selector
+    DB_TYPE: Literal["postgresql", "mysql", "oceanbase", "seekdb"] = Field(
+        description="Database type to use. OceanBase is MySQL-compatible.",
+        default="postgresql",
+    )
+
     DB_HOST: str = Field(
         description="Hostname or IP address of the database server.",
         default="localhost",
@@ -139,12 +147,12 @@ class DatabaseConfig(BaseSettings):
         default="",
     )

-    SQLALCHEMY_DATABASE_URI_SCHEME: str = Field(
-        description="Database URI scheme for SQLAlchemy connection.",
-        default="postgresql",
-    )
+    @computed_field  # type: ignore[prop-decorator]
+    @property
+    def SQLALCHEMY_DATABASE_URI_SCHEME(self) -> str:
+        return "postgresql" if self.DB_TYPE == "postgresql" else "mysql+pymysql"

-    @computed_field  # type: ignore[misc]
+    @computed_field  # type: ignore[prop-decorator]
     @property
     def SQLALCHEMY_DATABASE_URI(self) -> str:
         db_extras = (
@@ -187,26 +195,31 @@ class DatabaseConfig(BaseSettings):
         default=False,
     )

+    SQLALCHEMY_POOL_TIMEOUT: NonNegativeInt = Field(
+        description="Number of seconds to wait for a connection from the pool before raising a timeout error.",
+        default=30,
+    )
+
     RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field(
         description="Number of processes for the retrieval service, defaults to CPU cores.",
         default=os.cpu_count() or 1,
     )

-    @computed_field  # type: ignore[misc]
+    @computed_field  # type: ignore[prop-decorator]
     @property
     def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
         # Parse DB_EXTRAS for 'options'
         db_extras_dict = dict(parse_qsl(self.DB_EXTRAS))
         options = db_extras_dict.get("options", "")
-        # Always include timezone
-        timezone_opt = "-c timezone=UTC"
-        if options:
-            # Merge user options and timezone
-            merged_options = f"{options} {timezone_opt}"
-        else:
-            merged_options = timezone_opt
-
-        connect_args = {"options": merged_options}
+        connect_args = {}
+        # Use the dynamic SQLALCHEMY_DATABASE_URI_SCHEME property
+        if self.SQLALCHEMY_DATABASE_URI_SCHEME.startswith("postgresql"):
+            timezone_opt = "-c timezone=UTC"
+            if options:
+                merged_options = f"{options} {timezone_opt}"
+            else:
+                merged_options = timezone_opt
+            connect_args = {"options": merged_options}

         return {
             "pool_size": self.SQLALCHEMY_POOL_SIZE,
@@ -216,6 +229,7 @@ class DatabaseConfig(BaseSettings):
             "connect_args": connect_args,
             "pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO,
             "pool_reset_on_return": None,
+            "pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT,
         }

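# Editor's worked example (values hypothetical): with DB_TYPE="postgresql" and
# DB_EXTRAS="options=-c search_path=myschema", the branch above merges the user
# options with the forced UTC timezone flag.
#
#     from urllib.parse import parse_qsl
#
#     options = dict(parse_qsl("options=-c search_path=myschema")).get("options", "")
#     merged = f"{options} -c timezone=UTC" if options else "-c timezone=UTC"
#     assert merged == "-c search_path=myschema -c timezone=UTC"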
@@ -225,26 +239,26 @@ class CeleryConfig(DatabaseConfig):
         default="redis",
     )

-    CELERY_BROKER_URL: Optional[str] = Field(
+    CELERY_BROKER_URL: str | None = Field(
         description="URL of the message broker for Celery tasks.",
         default=None,
     )

-    CELERY_USE_SENTINEL: Optional[bool] = Field(
+    CELERY_USE_SENTINEL: bool | None = Field(
         description="Whether to use Redis Sentinel for high availability.",
         default=False,
     )

-    CELERY_SENTINEL_MASTER_NAME: Optional[str] = Field(
+    CELERY_SENTINEL_MASTER_NAME: str | None = Field(
         description="Name of the Redis Sentinel master.",
         default=None,
     )

-    CELERY_SENTINEL_PASSWORD: Optional[str] = Field(
+    CELERY_SENTINEL_PASSWORD: str | None = Field(
         description="Password of the Redis Sentinel master.",
         default=None,
     )
-    CELERY_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
+    CELERY_SENTINEL_SOCKET_TIMEOUT: PositiveFloat | None = Field(
         description="Timeout for Redis Sentinel socket operations in seconds.",
         default=0.1,
     )
@@ -268,12 +282,12 @@ class InternalTestConfig(BaseSettings):
     Configuration settings for Internal Test
     """

-    AWS_SECRET_ACCESS_KEY: Optional[str] = Field(
+    AWS_SECRET_ACCESS_KEY: str | None = Field(
         description="Internal test AWS secret access key",
         default=None,
     )

-    AWS_ACCESS_KEY_ID: Optional[str] = Field(
+    AWS_ACCESS_KEY_ID: str | None = Field(
         description="Internal test AWS access key ID",
         default=None,
     )
@@ -284,15 +298,15 @@ class DatasetQueueMonitorConfig(BaseSettings):
     Configuration settings for Dataset Queue Monitor
     """

-    QUEUE_MONITOR_THRESHOLD: Optional[NonNegativeInt] = Field(
+    QUEUE_MONITOR_THRESHOLD: NonNegativeInt | None = Field(
         description="Threshold for dataset queue monitor",
         default=200,
     )
-    QUEUE_MONITOR_ALERT_EMAILS: Optional[str] = Field(
+    QUEUE_MONITOR_ALERT_EMAILS: str | None = Field(
         description="Emails for dataset queue monitor alert, separated by commas",
         default=None,
     )
-    QUEUE_MONITOR_INTERVAL: Optional[NonNegativeFloat] = Field(
+    QUEUE_MONITOR_INTERVAL: NonNegativeFloat | None = Field(
         description="Interval for dataset queue monitor in minutes",
         default=30,
     )
@@ -300,8 +314,7 @@ class DatasetQueueMonitorConfig(BaseSettings):

 class MiddlewareConfig(
     # place the configs in alphabet order
-    CeleryConfig,
-    DatabaseConfig,
+    CeleryConfig,  # Note: CeleryConfig already inherits from DatabaseConfig
     KeywordStoreConfig,
     RedisConfig,
     # configs of storage and storage providers
@@ -324,7 +337,9 @@ class MiddlewareConfig(
     ChromaConfig,
     ClickzettaConfig,
     HuaweiCloudConfig,
+    IrisVectorConfig,
     MilvusConfig,
+    AlibabaCloudMySQLConfig,
     MyScaleConfig,
     OpenSearchConfig,
     OracleConfig,
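# Editor's note as code (illustrative): dropping the explicit DatabaseConfig
# entry is safe because CeleryConfig already carries it in its MRO.
#
#     assert issubclass(CeleryConfig, DatabaseConfig)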
api/configs/middleware/cache/redis_config.py
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, NonNegativeInt, PositiveFloat, PositiveInt
 from pydantic_settings import BaseSettings

@@ -19,12 +17,12 @@ class RedisConfig(BaseSettings):
         default=6379,
     )

-    REDIS_USERNAME: Optional[str] = Field(
+    REDIS_USERNAME: str | None = Field(
         description="Username for Redis authentication (if required)",
         default=None,
     )

-    REDIS_PASSWORD: Optional[str] = Field(
+    REDIS_PASSWORD: str | None = Field(
         description="Password for Redis authentication (if required)",
         default=None,
     )
@@ -44,47 +42,47 @@ class RedisConfig(BaseSettings):
         default="CERT_NONE",
     )

-    REDIS_SSL_CA_CERTS: Optional[str] = Field(
+    REDIS_SSL_CA_CERTS: str | None = Field(
         description="Path to the CA certificate file for SSL verification",
         default=None,
     )

-    REDIS_SSL_CERTFILE: Optional[str] = Field(
+    REDIS_SSL_CERTFILE: str | None = Field(
         description="Path to the client certificate file for SSL authentication",
         default=None,
     )

-    REDIS_SSL_KEYFILE: Optional[str] = Field(
+    REDIS_SSL_KEYFILE: str | None = Field(
         description="Path to the client private key file for SSL authentication",
         default=None,
     )

-    REDIS_USE_SENTINEL: Optional[bool] = Field(
+    REDIS_USE_SENTINEL: bool | None = Field(
         description="Enable Redis Sentinel mode for high availability",
         default=False,
     )

-    REDIS_SENTINELS: Optional[str] = Field(
+    REDIS_SENTINELS: str | None = Field(
         description="Comma-separated list of Redis Sentinel nodes (host:port)",
         default=None,
     )

-    REDIS_SENTINEL_SERVICE_NAME: Optional[str] = Field(
+    REDIS_SENTINEL_SERVICE_NAME: str | None = Field(
         description="Name of the Redis Sentinel service to monitor",
         default=None,
     )

-    REDIS_SENTINEL_USERNAME: Optional[str] = Field(
+    REDIS_SENTINEL_USERNAME: str | None = Field(
         description="Username for Redis Sentinel authentication (if required)",
         default=None,
     )

-    REDIS_SENTINEL_PASSWORD: Optional[str] = Field(
+    REDIS_SENTINEL_PASSWORD: str | None = Field(
         description="Password for Redis Sentinel authentication (if required)",
         default=None,
     )

-    REDIS_SENTINEL_SOCKET_TIMEOUT: Optional[PositiveFloat] = Field(
+    REDIS_SENTINEL_SOCKET_TIMEOUT: PositiveFloat | None = Field(
         description="Socket timeout in seconds for Redis Sentinel connections",
         default=0.1,
     )
@@ -94,12 +92,12 @@ class RedisConfig(BaseSettings):
         default=False,
     )

-    REDIS_CLUSTERS: Optional[str] = Field(
+    REDIS_CLUSTERS: str | None = Field(
         description="Comma-separated list of Redis Clusters nodes (host:port)",
         default=None,
     )

-    REDIS_CLUSTERS_PASSWORD: Optional[str] = Field(
+    REDIS_CLUSTERS_PASSWORD: str | None = Field(
         description="Password for Redis Clusters authentication (if required)",
         default=None,
     )
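# Editor's sketch (hypothetical parse, not library code): how the comma-separated
# REDIS_SENTINELS value above can be split into (host, port) pairs.
#
#     raw = "sentinel-0:26379,sentinel-1:26379"
#     nodes = [tuple(item.split(":", 1)) for item in raw.split(",") if item]
#     assert nodes == [("sentinel-0", "26379"), ("sentinel-1", "26379")]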
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,37 +7,37 @@ class AliyunOSSStorageConfig(BaseSettings):
     Configuration settings for Aliyun Object Storage Service (OSS)
     """

-    ALIYUN_OSS_BUCKET_NAME: Optional[str] = Field(
+    ALIYUN_OSS_BUCKET_NAME: str | None = Field(
         description="Name of the Aliyun OSS bucket to store and retrieve objects",
         default=None,
     )

-    ALIYUN_OSS_ACCESS_KEY: Optional[str] = Field(
+    ALIYUN_OSS_ACCESS_KEY: str | None = Field(
         description="Access key ID for authenticating with Aliyun OSS",
         default=None,
     )

-    ALIYUN_OSS_SECRET_KEY: Optional[str] = Field(
+    ALIYUN_OSS_SECRET_KEY: str | None = Field(
         description="Secret access key for authenticating with Aliyun OSS",
         default=None,
     )

-    ALIYUN_OSS_ENDPOINT: Optional[str] = Field(
+    ALIYUN_OSS_ENDPOINT: str | None = Field(
         description="URL of the Aliyun OSS endpoint for your chosen region",
         default=None,
     )

-    ALIYUN_OSS_REGION: Optional[str] = Field(
+    ALIYUN_OSS_REGION: str | None = Field(
         description="Aliyun OSS region where your bucket is located (e.g., 'oss-cn-hangzhou')",
         default=None,
     )

-    ALIYUN_OSS_AUTH_VERSION: Optional[str] = Field(
+    ALIYUN_OSS_AUTH_VERSION: str | None = Field(
         description="Version of the authentication protocol to use with Aliyun OSS (e.g., 'v4')",
         default=None,
     )

-    ALIYUN_OSS_PATH: Optional[str] = Field(
+    ALIYUN_OSS_PATH: str | None = Field(
         description="Base path within the bucket to store objects (e.g., 'my-app-data/')",
         default=None,
     )
@@ -1,4 +1,4 @@
-from typing import Literal, Optional
+from typing import Literal

 from pydantic import Field
 from pydantic_settings import BaseSettings
@@ -9,27 +9,27 @@ class S3StorageConfig(BaseSettings):
     Configuration settings for S3-compatible object storage
     """

-    S3_ENDPOINT: Optional[str] = Field(
+    S3_ENDPOINT: str | None = Field(
         description="URL of the S3-compatible storage endpoint (e.g., 'https://s3.amazonaws.com')",
         default=None,
     )

-    S3_REGION: Optional[str] = Field(
+    S3_REGION: str | None = Field(
         description="Region where the S3 bucket is located (e.g., 'us-east-1')",
         default=None,
     )

-    S3_BUCKET_NAME: Optional[str] = Field(
+    S3_BUCKET_NAME: str | None = Field(
         description="Name of the S3 bucket to store and retrieve objects",
         default=None,
     )

-    S3_ACCESS_KEY: Optional[str] = Field(
+    S3_ACCESS_KEY: str | None = Field(
         description="Access key ID for authenticating with the S3 service",
         default=None,
     )

-    S3_SECRET_KEY: Optional[str] = Field(
+    S3_SECRET_KEY: str | None = Field(
         description="Secret access key for authenticating with the S3 service",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,22 +7,22 @@ class AzureBlobStorageConfig(BaseSettings):
     Configuration settings for Azure Blob Storage
     """

-    AZURE_BLOB_ACCOUNT_NAME: Optional[str] = Field(
+    AZURE_BLOB_ACCOUNT_NAME: str | None = Field(
         description="Name of the Azure Storage account (e.g., 'mystorageaccount')",
         default=None,
     )

-    AZURE_BLOB_ACCOUNT_KEY: Optional[str] = Field(
+    AZURE_BLOB_ACCOUNT_KEY: str | None = Field(
         description="Access key for authenticating with the Azure Storage account",
         default=None,
     )

-    AZURE_BLOB_CONTAINER_NAME: Optional[str] = Field(
+    AZURE_BLOB_CONTAINER_NAME: str | None = Field(
         description="Name of the Azure Blob container to store and retrieve objects",
         default=None,
     )

-    AZURE_BLOB_ACCOUNT_URL: Optional[str] = Field(
+    AZURE_BLOB_ACCOUNT_URL: str | None = Field(
         description="URL of the Azure Blob storage endpoint (e.g., 'https://mystorageaccount.blob.core.windows.net')",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,22 +7,22 @@ class BaiduOBSStorageConfig(BaseSettings):
     Configuration settings for Baidu Object Storage Service (OBS)
     """

-    BAIDU_OBS_BUCKET_NAME: Optional[str] = Field(
+    BAIDU_OBS_BUCKET_NAME: str | None = Field(
         description="Name of the Baidu OBS bucket to store and retrieve objects (e.g., 'my-obs-bucket')",
         default=None,
     )

-    BAIDU_OBS_ACCESS_KEY: Optional[str] = Field(
+    BAIDU_OBS_ACCESS_KEY: str | None = Field(
         description="Access Key ID for authenticating with Baidu OBS",
         default=None,
     )

-    BAIDU_OBS_SECRET_KEY: Optional[str] = Field(
+    BAIDU_OBS_SECRET_KEY: str | None = Field(
         description="Secret Access Key for authenticating with Baidu OBS",
         default=None,
     )

-    BAIDU_OBS_ENDPOINT: Optional[str] = Field(
+    BAIDU_OBS_ENDPOINT: str | None = Field(
         description="URL of the Baidu OBS endpoint for your chosen region (e.g., 'https://.bj.bcebos.com')",
         default=None,
     )
@@ -1,7 +1,5 @@
 """ClickZetta Volume Storage Configuration"""

-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,17 +7,17 @@ from pydantic_settings import BaseSettings
 class ClickZettaVolumeStorageConfig(BaseSettings):
     """Configuration for ClickZetta Volume storage."""

-    CLICKZETTA_VOLUME_USERNAME: Optional[str] = Field(
+    CLICKZETTA_VOLUME_USERNAME: str | None = Field(
         description="Username for ClickZetta Volume authentication",
         default=None,
     )

-    CLICKZETTA_VOLUME_PASSWORD: Optional[str] = Field(
+    CLICKZETTA_VOLUME_PASSWORD: str | None = Field(
         description="Password for ClickZetta Volume authentication",
         default=None,
     )

-    CLICKZETTA_VOLUME_INSTANCE: Optional[str] = Field(
+    CLICKZETTA_VOLUME_INSTANCE: str | None = Field(
         description="ClickZetta instance identifier",
         default=None,
     )
@@ -49,7 +47,7 @@ class ClickZettaVolumeStorageConfig(BaseSettings):
         default="user",
     )

-    CLICKZETTA_VOLUME_NAME: Optional[str] = Field(
+    CLICKZETTA_VOLUME_NAME: str | None = Field(
         description="ClickZetta volume name for external volumes",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,12 +7,12 @@ class GoogleCloudStorageConfig(BaseSettings):
     Configuration settings for Google Cloud Storage
     """

-    GOOGLE_STORAGE_BUCKET_NAME: Optional[str] = Field(
+    GOOGLE_STORAGE_BUCKET_NAME: str | None = Field(
         description="Name of the Google Cloud Storage bucket to store and retrieve objects (e.g., 'my-gcs-bucket')",
         default=None,
     )

-    GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: Optional[str] = Field(
+    GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: str | None = Field(
         description="Base64-encoded JSON key file for Google Cloud service account authentication",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings

@@ -9,22 +7,22 @@ class HuaweiCloudOBSStorageConfig(BaseSettings):
     Configuration settings for Huawei Cloud Object Storage Service (OBS)
     """

-    HUAWEI_OBS_BUCKET_NAME: Optional[str] = Field(
+    HUAWEI_OBS_BUCKET_NAME: str | None = Field(
         description="Name of the Huawei Cloud OBS bucket to store and retrieve objects (e.g., 'my-obs-bucket')",
         default=None,
     )

-    HUAWEI_OBS_ACCESS_KEY: Optional[str] = Field(
+    HUAWEI_OBS_ACCESS_KEY: str | None = Field(
         description="Access Key ID for authenticating with Huawei Cloud OBS",
         default=None,
     )

-    HUAWEI_OBS_SECRET_KEY: Optional[str] = Field(
+    HUAWEI_OBS_SECRET_KEY: str | None = Field(
         description="Secret Access Key for authenticating with Huawei Cloud OBS",
         default=None,
     )

-    HUAWEI_OBS_SERVER: Optional[str] = Field(
+    HUAWEI_OBS_SERVER: str | None = Field(
         description="Endpoint URL for Huawei Cloud OBS (e.g., 'https://obs.cn-north-4.myhuaweicloud.com')",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class OCIStorageConfig(BaseSettings):
     Configuration settings for Oracle Cloud Infrastructure (OCI) Object Storage
     """
 
-    OCI_ENDPOINT: Optional[str] = Field(
+    OCI_ENDPOINT: str | None = Field(
         description="URL of the OCI Object Storage endpoint (e.g., 'https://objectstorage.us-phoenix-1.oraclecloud.com')",
         default=None,
     )
 
-    OCI_REGION: Optional[str] = Field(
+    OCI_REGION: str | None = Field(
         description="OCI region where the bucket is located (e.g., 'us-phoenix-1')",
         default=None,
     )
 
-    OCI_BUCKET_NAME: Optional[str] = Field(
+    OCI_BUCKET_NAME: str | None = Field(
         description="Name of the OCI Object Storage bucket to store and retrieve objects (e.g., 'my-oci-bucket')",
         default=None,
     )
 
-    OCI_ACCESS_KEY: Optional[str] = Field(
+    OCI_ACCESS_KEY: str | None = Field(
         description="Access key (also known as API key) for authenticating with OCI Object Storage",
         default=None,
     )
 
-    OCI_SECRET_KEY: Optional[str] = Field(
+    OCI_SECRET_KEY: str | None = Field(
         description="Secret key associated with the access key for authenticating with OCI Object Storage",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,17 +7,17 @@ class SupabaseStorageConfig(BaseSettings):
     Configuration settings for Supabase Object Storage Service
     """
 
-    SUPABASE_BUCKET_NAME: Optional[str] = Field(
+    SUPABASE_BUCKET_NAME: str | None = Field(
         description="Name of the Supabase bucket to store and retrieve objects (e.g., 'dify-bucket')",
         default=None,
     )
 
-    SUPABASE_API_KEY: Optional[str] = Field(
+    SUPABASE_API_KEY: str | None = Field(
         description="API KEY for authenticating with Supabase",
         default=None,
     )
 
-    SUPABASE_URL: Optional[str] = Field(
+    SUPABASE_URL: str | None = Field(
         description="URL of the Supabase",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class TencentCloudCOSStorageConfig(BaseSettings):
     Configuration settings for Tencent Cloud Object Storage (COS)
     """
 
-    TENCENT_COS_BUCKET_NAME: Optional[str] = Field(
+    TENCENT_COS_BUCKET_NAME: str | None = Field(
         description="Name of the Tencent Cloud COS bucket to store and retrieve objects",
         default=None,
     )
 
-    TENCENT_COS_REGION: Optional[str] = Field(
+    TENCENT_COS_REGION: str | None = Field(
         description="Tencent Cloud region where the COS bucket is located (e.g., 'ap-guangzhou')",
         default=None,
     )
 
-    TENCENT_COS_SECRET_ID: Optional[str] = Field(
+    TENCENT_COS_SECRET_ID: str | None = Field(
         description="SecretId for authenticating with Tencent Cloud COS (part of API credentials)",
         default=None,
     )
 
-    TENCENT_COS_SECRET_KEY: Optional[str] = Field(
+    TENCENT_COS_SECRET_KEY: str | None = Field(
         description="SecretKey for authenticating with Tencent Cloud COS (part of API credentials)",
         default=None,
     )
 
-    TENCENT_COS_SCHEME: Optional[str] = Field(
+    TENCENT_COS_SCHEME: str | None = Field(
         description="Protocol scheme for COS requests: 'https' (recommended) or 'http'",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class VolcengineTOSStorageConfig(BaseSettings):
     Configuration settings for Volcengine Tinder Object Storage (TOS)
     """
 
-    VOLCENGINE_TOS_BUCKET_NAME: Optional[str] = Field(
+    VOLCENGINE_TOS_BUCKET_NAME: str | None = Field(
         description="Name of the Volcengine TOS bucket to store and retrieve objects (e.g., 'my-tos-bucket')",
         default=None,
     )
 
-    VOLCENGINE_TOS_ACCESS_KEY: Optional[str] = Field(
+    VOLCENGINE_TOS_ACCESS_KEY: str | None = Field(
         description="Access Key ID for authenticating with Volcengine TOS",
         default=None,
     )
 
-    VOLCENGINE_TOS_SECRET_KEY: Optional[str] = Field(
+    VOLCENGINE_TOS_SECRET_KEY: str | None = Field(
         description="Secret Access Key for authenticating with Volcengine TOS",
         default=None,
     )
 
-    VOLCENGINE_TOS_ENDPOINT: Optional[str] = Field(
+    VOLCENGINE_TOS_ENDPOINT: str | None = Field(
         description="URL of the Volcengine TOS endpoint (e.g., 'https://tos-cn-beijing.volces.com')",
         default=None,
     )
 
-    VOLCENGINE_TOS_REGION: Optional[str] = Field(
+    VOLCENGINE_TOS_REGION: str | None = Field(
         description="Volcengine region where the TOS bucket is located (e.g., 'cn-beijing')",
         default=None,
     )
api/configs/middleware/vdb/alibabacloud_mysql_config.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+from pydantic import Field, PositiveInt
+from pydantic_settings import BaseSettings
+
+
+class AlibabaCloudMySQLConfig(BaseSettings):
+    """
+    Configuration settings for AlibabaCloud MySQL vector database
+    """
+
+    ALIBABACLOUD_MYSQL_HOST: str = Field(
+        description="Hostname or IP address of the AlibabaCloud MySQL server (e.g., 'localhost' or 'mysql.aliyun.com')",
+        default="localhost",
+    )
+
+    ALIBABACLOUD_MYSQL_PORT: PositiveInt = Field(
+        description="Port number on which the AlibabaCloud MySQL server is listening (default is 3306)",
+        default=3306,
+    )
+
+    ALIBABACLOUD_MYSQL_USER: str = Field(
+        description="Username for authenticating with AlibabaCloud MySQL (default is 'root')",
+        default="root",
+    )
+
+    ALIBABACLOUD_MYSQL_PASSWORD: str = Field(
+        description="Password for authenticating with AlibabaCloud MySQL (default is an empty string)",
+        default="",
+    )
+
+    ALIBABACLOUD_MYSQL_DATABASE: str = Field(
+        description="Name of the AlibabaCloud MySQL database to connect to (default is 'dify')",
+        default="dify",
+    )
+
+    ALIBABACLOUD_MYSQL_MAX_CONNECTION: PositiveInt = Field(
+        description="Maximum number of connections in the connection pool",
+        default=5,
+    )
+
+    ALIBABACLOUD_MYSQL_CHARSET: str = Field(
+        description="Character set for AlibabaCloud MySQL connection (default is 'utf8mb4')",
+        default="utf8mb4",
+    )
+
+    ALIBABACLOUD_MYSQL_DISTANCE_FUNCTION: str = Field(
+        description="Distance function used for vector similarity search in AlibabaCloud MySQL "
+        "(e.g., 'cosine', 'euclidean')",
+        default="cosine",
+    )
+
+    ALIBABACLOUD_MYSQL_HNSW_M: PositiveInt = Field(
+        description="Maximum number of connections per layer for HNSW vector index (default is 6, range: 3-200)",
+        default=6,
+    )
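Since AlibabaCloudMySQLConfig subclasses BaseSettings, each field is populated from the environment variable of the same name, with strings coerced to the annotated type. A hedged usage sketch, assuming the class above is importable; the values are illustrative:

import os

# Illustrative values; any unset variable falls back to its Field default.
os.environ["ALIBABACLOUD_MYSQL_HOST"] = "mysql.internal"
os.environ["ALIBABACLOUD_MYSQL_PORT"] = "3307"

config = AlibabaCloudMySQLConfig()
print(config.ALIBABACLOUD_MYSQL_HOST)     # -> 'mysql.internal'
print(config.ALIBABACLOUD_MYSQL_PORT)     # -> 3307, coerced to PositiveInt
print(config.ALIBABACLOUD_MYSQL_CHARSET)  # -> 'utf8mb4' (default)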
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -11,37 +9,37 @@ class AnalyticdbConfig(BaseSettings):
     https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/getting-started/create-an-instance-instances-with-vector-engine-optimization-enabled
     """
 
-    ANALYTICDB_KEY_ID: Optional[str] = Field(
+    ANALYTICDB_KEY_ID: str | None = Field(
         default=None, description="The Access Key ID provided by Alibaba Cloud for API authentication."
     )
-    ANALYTICDB_KEY_SECRET: Optional[str] = Field(
+    ANALYTICDB_KEY_SECRET: str | None = Field(
         default=None, description="The Secret Access Key corresponding to the Access Key ID for secure API access."
     )
-    ANALYTICDB_REGION_ID: Optional[str] = Field(
+    ANALYTICDB_REGION_ID: str | None = Field(
         default=None,
         description="The region where the AnalyticDB instance is deployed (e.g., 'cn-hangzhou', 'ap-southeast-1').",
     )
-    ANALYTICDB_INSTANCE_ID: Optional[str] = Field(
+    ANALYTICDB_INSTANCE_ID: str | None = Field(
         default=None,
         description="The unique identifier of the AnalyticDB instance you want to connect to.",
     )
-    ANALYTICDB_ACCOUNT: Optional[str] = Field(
+    ANALYTICDB_ACCOUNT: str | None = Field(
         default=None,
         description="The account name used to log in to the AnalyticDB instance"
         " (usually the initial account created with the instance).",
     )
-    ANALYTICDB_PASSWORD: Optional[str] = Field(
+    ANALYTICDB_PASSWORD: str | None = Field(
         default=None, description="The password associated with the AnalyticDB account for database authentication."
     )
-    ANALYTICDB_NAMESPACE: Optional[str] = Field(
+    ANALYTICDB_NAMESPACE: str | None = Field(
         default=None, description="The namespace within AnalyticDB for schema isolation (if using namespace feature)."
     )
-    ANALYTICDB_NAMESPACE_PASSWORD: Optional[str] = Field(
+    ANALYTICDB_NAMESPACE_PASSWORD: str | None = Field(
         default=None,
         description="The password for accessing the specified namespace within the AnalyticDB instance"
         " (if namespace feature is enabled).",
     )
-    ANALYTICDB_HOST: Optional[str] = Field(
+    ANALYTICDB_HOST: str | None = Field(
         default=None, description="The host of the AnalyticDB instance you want to connect to."
     )
     ANALYTICDB_PORT: PositiveInt = Field(
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, NonNegativeInt, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class BaiduVectorDBConfig(BaseSettings):
     Configuration settings for Baidu Vector Database
     """
 
-    BAIDU_VECTOR_DB_ENDPOINT: Optional[str] = Field(
+    BAIDU_VECTOR_DB_ENDPOINT: str | None = Field(
         description="URL of the Baidu Vector Database service (e.g., 'http://vdb.bj.baidubce.com')",
         default=None,
     )
@@ -19,17 +17,17 @@ class BaiduVectorDBConfig(BaseSettings):
         default=30000,
     )
 
-    BAIDU_VECTOR_DB_ACCOUNT: Optional[str] = Field(
+    BAIDU_VECTOR_DB_ACCOUNT: str | None = Field(
         description="Account for authenticating with the Baidu Vector Database",
         default=None,
     )
 
-    BAIDU_VECTOR_DB_API_KEY: Optional[str] = Field(
+    BAIDU_VECTOR_DB_API_KEY: str | None = Field(
         description="API key for authenticating with the Baidu Vector Database service",
         default=None,
     )
 
-    BAIDU_VECTOR_DB_DATABASE: Optional[str] = Field(
+    BAIDU_VECTOR_DB_DATABASE: str | None = Field(
         description="Name of the specific Baidu Vector Database to connect to",
         default=None,
     )
@@ -43,3 +41,13 @@ class BaiduVectorDBConfig(BaseSettings):
         description="Number of replicas for the Baidu Vector Database (default is 3)",
         default=3,
     )
+
+    BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: str = Field(
+        description="Analyzer type for inverted index in Baidu Vector Database (default is DEFAULT_ANALYZER)",
+        default="DEFAULT_ANALYZER",
+    )
+
+    BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: str = Field(
+        description="Parser mode for inverted index in Baidu Vector Database (default is COARSE_MODE)",
+        default="COARSE_MODE",
+    )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class ChromaConfig(BaseSettings):
     Configuration settings for Chroma vector database
     """
 
-    CHROMA_HOST: Optional[str] = Field(
+    CHROMA_HOST: str | None = Field(
         description="Hostname or IP address of the Chroma server (e.g., 'localhost' or '192.168.1.100')",
         default=None,
     )
@@ -19,22 +17,22 @@ class ChromaConfig(BaseSettings):
         default=8000,
     )
 
-    CHROMA_TENANT: Optional[str] = Field(
+    CHROMA_TENANT: str | None = Field(
         description="Tenant identifier for multi-tenancy support in Chroma",
         default=None,
     )
 
-    CHROMA_DATABASE: Optional[str] = Field(
+    CHROMA_DATABASE: str | None = Field(
         description="Name of the Chroma database to connect to",
         default=None,
     )
 
-    CHROMA_AUTH_PROVIDER: Optional[str] = Field(
+    CHROMA_AUTH_PROVIDER: str | None = Field(
         description="Authentication provider for Chroma (e.g., 'basic', 'token', or a custom provider)",
         default=None,
     )
 
-    CHROMA_AUTH_CREDENTIALS: Optional[str] = Field(
+    CHROMA_AUTH_CREDENTIALS: str | None = Field(
         description="Authentication credentials for Chroma (format depends on the auth provider)",
         default=None,
     )
@@ -1,69 +1,68 @@
-from typing import Optional
-
-from pydantic import BaseModel, Field
+from pydantic import Field
 from pydantic_settings import BaseSettings
 
 
-class ClickzettaConfig(BaseModel):
+class ClickzettaConfig(BaseSettings):
     """
     Clickzetta Lakehouse vector database configuration
     """
 
-    CLICKZETTA_USERNAME: Optional[str] = Field(
+    CLICKZETTA_USERNAME: str | None = Field(
         description="Username for authenticating with Clickzetta Lakehouse",
         default=None,
     )
 
-    CLICKZETTA_PASSWORD: Optional[str] = Field(
+    CLICKZETTA_PASSWORD: str | None = Field(
         description="Password for authenticating with Clickzetta Lakehouse",
         default=None,
     )
 
-    CLICKZETTA_INSTANCE: Optional[str] = Field(
+    CLICKZETTA_INSTANCE: str | None = Field(
         description="Clickzetta Lakehouse instance ID",
         default=None,
     )
 
-    CLICKZETTA_SERVICE: Optional[str] = Field(
+    CLICKZETTA_SERVICE: str | None = Field(
         description="Clickzetta API service endpoint (e.g., 'api.clickzetta.com')",
         default="api.clickzetta.com",
     )
 
-    CLICKZETTA_WORKSPACE: Optional[str] = Field(
+    CLICKZETTA_WORKSPACE: str | None = Field(
         description="Clickzetta workspace name",
         default="default",
     )
 
-    CLICKZETTA_VCLUSTER: Optional[str] = Field(
+    CLICKZETTA_VCLUSTER: str | None = Field(
         description="Clickzetta virtual cluster name",
         default="default_ap",
     )
 
-    CLICKZETTA_SCHEMA: Optional[str] = Field(
+    CLICKZETTA_SCHEMA: str | None = Field(
         description="Database schema name in Clickzetta",
         default="public",
     )
 
-    CLICKZETTA_BATCH_SIZE: Optional[int] = Field(
+    CLICKZETTA_BATCH_SIZE: int | None = Field(
         description="Batch size for bulk insert operations",
         default=100,
     )
 
-    CLICKZETTA_ENABLE_INVERTED_INDEX: Optional[bool] = Field(
+    CLICKZETTA_ENABLE_INVERTED_INDEX: bool | None = Field(
         description="Enable inverted index for full-text search capabilities",
         default=True,
     )
 
-    CLICKZETTA_ANALYZER_TYPE: Optional[str] = Field(
+    CLICKZETTA_ANALYZER_TYPE: str | None = Field(
         description="Analyzer type for full-text search: keyword, english, chinese, unicode",
         default="chinese",
     )
 
-    CLICKZETTA_ANALYZER_MODE: Optional[str] = Field(
+    CLICKZETTA_ANALYZER_MODE: str | None = Field(
         description="Analyzer mode for tokenization: max_word (fine-grained) or smart (intelligent)",
         default="smart",
     )
 
-    CLICKZETTA_VECTOR_DISTANCE_FUNCTION: Optional[str] = Field(
+    CLICKZETTA_VECTOR_DISTANCE_FUNCTION: str | None = Field(
         description="Distance function for vector similarity: l2_distance or cosine_distance",
         default="cosine_distance",
     )
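Beyond the annotation cleanup, this hunk changes the base class from BaseModel to BaseSettings, which is what makes the CLICKZETTA_* fields load from environment variables at all. A minimal sketch of the behavioral difference (class names and the GREETING field are illustrative):

import os

from pydantic import BaseModel
from pydantic_settings import BaseSettings

os.environ["GREETING"] = "hello from env"


class PlainModel(BaseModel):
    GREETING: str = "default"


class EnvSettings(BaseSettings):
    GREETING: str = "default"


print(PlainModel().GREETING)   # -> 'default' (BaseModel ignores the environment)
print(EnvSettings().GREETING)  # -> 'hello from env' (BaseSettings reads env vars)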
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class CouchbaseConfig(BaseSettings):
     Couchbase configs
     """
 
-    COUCHBASE_CONNECTION_STRING: Optional[str] = Field(
+    COUCHBASE_CONNECTION_STRING: str | None = Field(
         description="COUCHBASE connection string",
         default=None,
     )
 
-    COUCHBASE_USER: Optional[str] = Field(
+    COUCHBASE_USER: str | None = Field(
         description="COUCHBASE user",
         default=None,
     )
 
-    COUCHBASE_PASSWORD: Optional[str] = Field(
+    COUCHBASE_PASSWORD: str | None = Field(
         description="COUCHBASE password",
         default=None,
     )
 
-    COUCHBASE_BUCKET_NAME: Optional[str] = Field(
+    COUCHBASE_BUCKET_NAME: str | None = Field(
         description="COUCHBASE bucket name",
         default=None,
     )
 
-    COUCHBASE_SCOPE_NAME: Optional[str] = Field(
+    COUCHBASE_SCOPE_NAME: str | None = Field(
         description="COUCHBASE scope name",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt, model_validator
 from pydantic_settings import BaseSettings
 
@@ -10,7 +8,7 @@ class ElasticsearchConfig(BaseSettings):
     Can load from environment variables or .env files.
     """
 
-    ELASTICSEARCH_HOST: Optional[str] = Field(
+    ELASTICSEARCH_HOST: str | None = Field(
         description="Hostname or IP address of the Elasticsearch server (e.g., 'localhost' or '192.168.1.100')",
         default="127.0.0.1",
     )
@@ -20,30 +18,28 @@ class ElasticsearchConfig(BaseSettings):
         default=9200,
     )
 
-    ELASTICSEARCH_USERNAME: Optional[str] = Field(
+    ELASTICSEARCH_USERNAME: str | None = Field(
         description="Username for authenticating with Elasticsearch (default is 'elastic')",
         default="elastic",
     )
 
-    ELASTICSEARCH_PASSWORD: Optional[str] = Field(
+    ELASTICSEARCH_PASSWORD: str | None = Field(
         description="Password for authenticating with Elasticsearch (default is 'elastic')",
         default="elastic",
     )
 
     # Elastic Cloud (optional)
-    ELASTICSEARCH_USE_CLOUD: Optional[bool] = Field(
+    ELASTICSEARCH_USE_CLOUD: bool | None = Field(
         description="Set to True to use Elastic Cloud instead of self-hosted Elasticsearch", default=False
     )
-    ELASTICSEARCH_CLOUD_URL: Optional[str] = Field(
+    ELASTICSEARCH_CLOUD_URL: str | None = Field(
         description="Full URL for Elastic Cloud deployment (e.g., 'https://example.es.region.aws.found.io:443')",
         default=None,
     )
-    ELASTICSEARCH_API_KEY: Optional[str] = Field(
-        description="API key for authenticating with Elastic Cloud", default=None
-    )
+    ELASTICSEARCH_API_KEY: str | None = Field(description="API key for authenticating with Elastic Cloud", default=None)
 
     # Common options
-    ELASTICSEARCH_CA_CERTS: Optional[str] = Field(
+    ELASTICSEARCH_CA_CERTS: str | None = Field(
         description="Path to CA certificate file for SSL verification", default=None
     )
     ELASTICSEARCH_VERIFY_CERTS: bool = Field(
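This config supports two connection modes: Elastic Cloud (URL plus API key) and self-hosted (host/port plus basic auth). A hedged sketch of how a caller might resolve the target from these fields; resolve_es_target is illustrative, not a function in the diff:

def resolve_es_target(cfg) -> tuple[str, tuple[str, str] | str]:
    """Return (url, credentials) for either Elastic Cloud or self-hosted mode."""
    if cfg.ELASTICSEARCH_USE_CLOUD and cfg.ELASTICSEARCH_CLOUD_URL:
        # Cloud deployments authenticate with an API key
        return cfg.ELASTICSEARCH_CLOUD_URL, cfg.ELASTICSEARCH_API_KEY or ""
    # Self-hosted deployments use host/port plus basic auth
    url = f"http://{cfg.ELASTICSEARCH_HOST}:{cfg.ELASTICSEARCH_PORT}"
    return url, (cfg.ELASTICSEARCH_USERNAME or "", cfg.ELASTICSEARCH_PASSWORD or "")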
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,17 +7,17 @@ class HuaweiCloudConfig(BaseSettings):
     Configuration settings for Huawei cloud search service
     """
 
-    HUAWEI_CLOUD_HOSTS: Optional[str] = Field(
+    HUAWEI_CLOUD_HOSTS: str | None = Field(
         description="Hostname or IP address of the Huawei cloud search service instance",
         default=None,
     )
 
-    HUAWEI_CLOUD_USER: Optional[str] = Field(
+    HUAWEI_CLOUD_USER: str | None = Field(
         description="Username for authenticating with Huawei cloud search service",
         default=None,
     )
 
-    HUAWEI_CLOUD_PASSWORD: Optional[str] = Field(
+    HUAWEI_CLOUD_PASSWORD: str | None = Field(
         description="Password for authenticating with Huawei cloud search service",
         default=None,
     )
api/configs/middleware/vdb/iris_config.py (new file, 91 lines)
@@ -0,0 +1,91 @@
+"""Configuration for InterSystems IRIS vector database."""
+
+from pydantic import Field, PositiveInt, model_validator
+from pydantic_settings import BaseSettings
+
+
+class IrisVectorConfig(BaseSettings):
+    """Configuration settings for IRIS vector database connection and pooling."""
+
+    IRIS_HOST: str | None = Field(
+        description="Hostname or IP address of the IRIS server.",
+        default="localhost",
+    )
+
+    IRIS_SUPER_SERVER_PORT: PositiveInt | None = Field(
+        description="Port number for IRIS connection.",
+        default=1972,
+    )
+
+    IRIS_USER: str | None = Field(
+        description="Username for IRIS authentication.",
+        default="_SYSTEM",
+    )
+
+    IRIS_PASSWORD: str | None = Field(
+        description="Password for IRIS authentication.",
+        default="Dify@1234",
+    )
+
+    IRIS_SCHEMA: str | None = Field(
+        description="Schema name for IRIS tables.",
+        default="dify",
+    )
+
+    IRIS_DATABASE: str | None = Field(
+        description="Database namespace for IRIS connection.",
+        default="USER",
+    )
+
+    IRIS_CONNECTION_URL: str | None = Field(
+        description="Full connection URL for IRIS (overrides individual fields if provided).",
+        default=None,
+    )
+
+    IRIS_MIN_CONNECTION: PositiveInt = Field(
+        description="Minimum number of connections in the pool.",
+        default=1,
+    )
+
+    IRIS_MAX_CONNECTION: PositiveInt = Field(
+        description="Maximum number of connections in the pool.",
+        default=3,
+    )
+
+    IRIS_TEXT_INDEX: bool = Field(
+        description="Enable full-text search index using %iFind.Index.Basic.",
+        default=True,
+    )
+
+    IRIS_TEXT_INDEX_LANGUAGE: str = Field(
+        description="Language for full-text search index (e.g., 'en', 'ja', 'zh', 'de').",
+        default="en",
+    )
+
+    @model_validator(mode="before")
+    @classmethod
+    def validate_config(cls, values: dict) -> dict:
+        """Validate IRIS configuration values.
+
+        Args:
+            values: Configuration dictionary
+
+        Returns:
+            Validated configuration dictionary
+
+        Raises:
+            ValueError: If required fields are missing or pool settings are invalid
+        """
+        # Only validate required fields if IRIS is being used as the vector store
+        # This allows the config to be loaded even when IRIS is not in use
+
+        # vector_store = os.environ.get("VECTOR_STORE", "")
+        # We rely on Pydantic defaults for required fields if they are missing from env.
+        # Strict existence check is removed to allow defaults to work.
+
+        min_conn = values.get("IRIS_MIN_CONNECTION", 1)
+        max_conn = values.get("IRIS_MAX_CONNECTION", 3)
+        if min_conn > max_conn:
+            raise ValueError("IRIS_MIN_CONNECTION must be less than or equal to IRIS_MAX_CONNECTION")
+
+        return values
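The model validator enforces that the pool's minimum never exceeds its maximum. A small check of that behavior, assuming the class above is importable (in pydantic v2, the ValidationError raised here is a ValueError subclass, so the except clause catches it):

# Illustrative values chosen to violate the pool-size constraint.
try:
    IrisVectorConfig(IRIS_MIN_CONNECTION=5, IRIS_MAX_CONNECTION=3)
except ValueError as exc:
    print(exc)  # mentions: IRIS_MIN_CONNECTION must be less than or equal to IRIS_MAX_CONNECTION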
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class LindormConfig(BaseSettings):
     Lindorm configs
     """
 
-    LINDORM_URL: Optional[str] = Field(
+    LINDORM_URL: str | None = Field(
         description="Lindorm url",
         default=None,
     )
-    LINDORM_USERNAME: Optional[str] = Field(
+    LINDORM_USERNAME: str | None = Field(
         description="Lindorm user",
         default=None,
     )
-    LINDORM_PASSWORD: Optional[str] = Field(
+    LINDORM_PASSWORD: str | None = Field(
         description="Lindorm password",
         default=None,
     )
-    DEFAULT_INDEX_TYPE: Optional[str] = Field(
+    LINDORM_INDEX_TYPE: str | None = Field(
         description="Lindorm Vector Index Type, hnsw or flat is available in dify",
         default="hnsw",
    )
-    DEFAULT_DISTANCE_TYPE: Optional[str] = Field(
+    LINDORM_DISTANCE_TYPE: str | None = Field(
         description="Vector Distance Type, support l2, cosinesimil, innerproduct", default="l2"
     )
-    USING_UGC_INDEX: Optional[bool] = Field(
-        description="Using UGC index will store the same type of Index in a single index but can retrieve separately.",
-        default=False,
+    LINDORM_USING_UGC: bool | None = Field(
+        description="Using UGC index will store indexes with the same IndexType/Dimension in a single big index.",
+        default=True,
     )
-    LINDORM_QUERY_TIMEOUT: Optional[float] = Field(description="The lindorm search request timeout (s)", default=2.0)
+    LINDORM_QUERY_TIMEOUT: float | None = Field(description="The lindorm search request timeout (s)", default=2.0)
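Note that this hunk does more than the type migration: it renames the env vars (DEFAULT_INDEX_TYPE becomes LINDORM_INDEX_TYPE, DEFAULT_DISTANCE_TYPE becomes LINDORM_DISTANCE_TYPE, USING_UGC_INDEX becomes LINDORM_USING_UGC) and flips the UGC default from False to True. A hedged migration sketch, assuming LindormConfig is importable; the values are illustrative:

import os

os.environ.pop("DEFAULT_INDEX_TYPE", None)  # old name, no longer read
os.environ["LINDORM_INDEX_TYPE"] = "flat"   # new name

cfg = LindormConfig()
print(cfg.LINDORM_INDEX_TYPE)  # -> 'flat'
print(cfg.LINDORM_USING_UGC)   # -> True unless LINDORM_USING_UGC is explicitly set false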
@@ -1,7 +1,8 @@
-from pydantic import BaseModel, Field
+from pydantic import Field
+from pydantic_settings import BaseSettings
 
 
-class MatrixoneConfig(BaseModel):
+class MatrixoneConfig(BaseSettings):
     """Matrixone vector database configuration."""
 
     MATRIXONE_HOST: str = Field(default="localhost", description="Host address of the Matrixone server")
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,22 +7,22 @@ class MilvusConfig(BaseSettings):
     Configuration settings for Milvus vector database
     """
 
-    MILVUS_URI: Optional[str] = Field(
+    MILVUS_URI: str | None = Field(
         description="URI for connecting to the Milvus server (e.g., 'http://localhost:19530' or 'https://milvus-instance.example.com:19530')",
         default="http://127.0.0.1:19530",
     )
 
-    MILVUS_TOKEN: Optional[str] = Field(
+    MILVUS_TOKEN: str | None = Field(
         description="Authentication token for Milvus, if token-based authentication is enabled",
         default=None,
     )
 
-    MILVUS_USER: Optional[str] = Field(
+    MILVUS_USER: str | None = Field(
         description="Username for authenticating with Milvus, if username/password authentication is enabled",
         default=None,
     )
 
-    MILVUS_PASSWORD: Optional[str] = Field(
+    MILVUS_PASSWORD: str | None = Field(
         description="Password for authenticating with Milvus, if username/password authentication is enabled",
         default=None,
     )
@@ -40,7 +38,7 @@ class MilvusConfig(BaseSettings):
         default=True,
     )
 
-    MILVUS_ANALYZER_PARAMS: Optional[str] = Field(
+    MILVUS_ANALYZER_PARAMS: str | None = Field(
         description='Milvus text analyzer parameters, e.g., {"type": "chinese"} for Chinese segmentation support.',
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,27 +7,27 @@ class OceanBaseVectorConfig(BaseSettings):
     Configuration settings for OceanBase Vector database
     """
 
-    OCEANBASE_VECTOR_HOST: Optional[str] = Field(
+    OCEANBASE_VECTOR_HOST: str | None = Field(
         description="Hostname or IP address of the OceanBase Vector server (e.g. 'localhost')",
         default=None,
     )
 
-    OCEANBASE_VECTOR_PORT: Optional[PositiveInt] = Field(
+    OCEANBASE_VECTOR_PORT: PositiveInt | None = Field(
         description="Port number on which the OceanBase Vector server is listening (default is 2881)",
         default=2881,
     )
 
-    OCEANBASE_VECTOR_USER: Optional[str] = Field(
+    OCEANBASE_VECTOR_USER: str | None = Field(
         description="Username for authenticating with the OceanBase Vector database",
         default=None,
     )
 
-    OCEANBASE_VECTOR_PASSWORD: Optional[str] = Field(
+    OCEANBASE_VECTOR_PASSWORD: str | None = Field(
         description="Password for authenticating with the OceanBase Vector database",
         default=None,
     )
 
-    OCEANBASE_VECTOR_DATABASE: Optional[str] = Field(
+    OCEANBASE_VECTOR_DATABASE: str | None = Field(
         description="Name of the OceanBase Vector database to connect to",
         default=None,
     )
@@ -39,3 +37,15 @@ class OceanBaseVectorConfig(BaseSettings):
         "with older versions",
         default=False,
     )
+
+    OCEANBASE_FULLTEXT_PARSER: str | None = Field(
+        description=(
+            "Fulltext parser to use for text indexing. "
+            "Built-in options: 'ngram' (N-gram tokenizer for English/numbers), "
+            "'beng' (Basic English tokenizer), 'space' (Space-based tokenizer), "
+            "'ngram2' (Improved N-gram tokenizer), 'ik' (Chinese tokenizer). "
+            "External plugins (require installation): 'japanese_ftparser' (Japanese tokenizer), "
+            "'thai_ftparser' (Thai tokenizer). Default is 'ik'"
+        ),
+        default="ik",
+    )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class OpenGaussConfig(BaseSettings):
     Configuration settings for OpenGauss
     """
 
-    OPENGAUSS_HOST: Optional[str] = Field(
+    OPENGAUSS_HOST: str | None = Field(
         description="Hostname or IP address of the OpenGauss server(e.g., 'localhost')",
         default=None,
     )
@@ -19,17 +17,17 @@ class OpenGaussConfig(BaseSettings):
         default=6600,
     )
 
-    OPENGAUSS_USER: Optional[str] = Field(
+    OPENGAUSS_USER: str | None = Field(
         description="Username for authenticating with the OpenGauss database",
         default=None,
     )
 
-    OPENGAUSS_PASSWORD: Optional[str] = Field(
+    OPENGAUSS_PASSWORD: str | None = Field(
         description="Password for authenticating with the OpenGauss database",
         default=None,
     )
 
-    OPENGAUSS_DATABASE: Optional[str] = Field(
+    OPENGAUSS_DATABASE: str | None = Field(
         description="Name of the OpenGauss database to connect to",
         default=None,
     )
@@ -1,24 +1,25 @@
-import enum
-from typing import Literal, Optional
+from enum import StrEnum
+from typing import Literal
 
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
 
+class AuthMethod(StrEnum):
+    """
+    Authentication method for OpenSearch
+    """
+
+    BASIC = "basic"
+    AWS_MANAGED_IAM = "aws_managed_iam"
+
+
 class OpenSearchConfig(BaseSettings):
     """
     Configuration settings for OpenSearch
     """
 
-    class AuthMethod(enum.StrEnum):
-        """
-        Authentication method for OpenSearch
-        """
-
-        BASIC = "basic"
-        AWS_MANAGED_IAM = "aws_managed_iam"
-
-    OPENSEARCH_HOST: Optional[str] = Field(
+    OPENSEARCH_HOST: str | None = Field(
         description="Hostname or IP address of the OpenSearch server (e.g., 'localhost' or 'opensearch.example.com')",
         default=None,
     )
@@ -43,21 +44,21 @@ class OpenSearchConfig(BaseSettings):
         default=AuthMethod.BASIC,
     )
 
-    OPENSEARCH_USER: Optional[str] = Field(
+    OPENSEARCH_USER: str | None = Field(
         description="Username for authenticating with OpenSearch",
         default=None,
     )
 
-    OPENSEARCH_PASSWORD: Optional[str] = Field(
+    OPENSEARCH_PASSWORD: str | None = Field(
         description="Password for authenticating with OpenSearch",
         default=None,
     )
 
-    OPENSEARCH_AWS_REGION: Optional[str] = Field(
+    OPENSEARCH_AWS_REGION: str | None = Field(
         description="AWS region for OpenSearch (e.g. 'us-west-2')",
         default=None,
     )
 
-    OPENSEARCH_AWS_SERVICE: Optional[Literal["es", "aoss"]] = Field(
+    OPENSEARCH_AWS_SERVICE: Literal["es", "aoss"] | None = Field(
         description="AWS service for OpenSearch (e.g. 'aoss' for OpenSearch Serverless)", default=None
     )
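The nested enum moves to module level, so callers can reference AuthMethod directly instead of OpenSearchConfig.AuthMethod. Because it is a StrEnum, members compare equal to their string values, which keeps env-var-driven comparisons simple. A small sketch (this class mirrors the one in the diff, redeclared here only so the snippet is self-contained):

from enum import StrEnum


class AuthMethod(StrEnum):
    BASIC = "basic"
    AWS_MANAGED_IAM = "aws_managed_iam"


assert AuthMethod.BASIC == "basic"
assert AuthMethod("aws_managed_iam") is AuthMethod.AWS_MANAGED_IAM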
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,33 +7,33 @@ class OracleConfig(BaseSettings):
     Configuration settings for Oracle database
     """
 
-    ORACLE_USER: Optional[str] = Field(
+    ORACLE_USER: str | None = Field(
         description="Username for authenticating with the Oracle database",
         default=None,
     )
 
-    ORACLE_PASSWORD: Optional[str] = Field(
+    ORACLE_PASSWORD: str | None = Field(
         description="Password for authenticating with the Oracle database",
         default=None,
     )
 
-    ORACLE_DSN: Optional[str] = Field(
+    ORACLE_DSN: str | None = Field(
         description="Oracle database connection string. For traditional database, use format 'host:port/service_name'. "
         "For autonomous database, use the service name from tnsnames.ora in the wallet",
         default=None,
     )
 
-    ORACLE_CONFIG_DIR: Optional[str] = Field(
+    ORACLE_CONFIG_DIR: str | None = Field(
         description="Directory containing the tnsnames.ora configuration file. Only used in thin mode connection",
         default=None,
     )
 
-    ORACLE_WALLET_LOCATION: Optional[str] = Field(
+    ORACLE_WALLET_LOCATION: str | None = Field(
         description="Oracle wallet directory path containing the wallet files for secure connection",
         default=None,
     )
 
-    ORACLE_WALLET_PASSWORD: Optional[str] = Field(
+    ORACLE_WALLET_PASSWORD: str | None = Field(
         description="Password to decrypt the Oracle wallet, if it is encrypted",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class PGVectorConfig(BaseSettings):
     Configuration settings for PGVector (PostgreSQL with vector extension)
     """
 
-    PGVECTOR_HOST: Optional[str] = Field(
+    PGVECTOR_HOST: str | None = Field(
         description="Hostname or IP address of the PostgreSQL server with PGVector extension (e.g., 'localhost')",
         default=None,
     )
@@ -19,17 +17,17 @@ class PGVectorConfig(BaseSettings):
         default=5433,
     )
 
-    PGVECTOR_USER: Optional[str] = Field(
+    PGVECTOR_USER: str | None = Field(
         description="Username for authenticating with the PostgreSQL database",
         default=None,
     )
 
-    PGVECTOR_PASSWORD: Optional[str] = Field(
+    PGVECTOR_PASSWORD: str | None = Field(
         description="Password for authenticating with the PostgreSQL database",
         default=None,
     )
 
-    PGVECTOR_DATABASE: Optional[str] = Field(
+    PGVECTOR_DATABASE: str | None = Field(
         description="Name of the PostgreSQL database to connect to",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class PGVectoRSConfig(BaseSettings):
     Configuration settings for PGVecto.RS (Rust-based vector extension for PostgreSQL)
     """
 
-    PGVECTO_RS_HOST: Optional[str] = Field(
+    PGVECTO_RS_HOST: str | None = Field(
         description="Hostname or IP address of the PostgreSQL server with PGVecto.RS extension (e.g., 'localhost')",
         default=None,
     )
@@ -19,17 +17,17 @@ class PGVectoRSConfig(BaseSettings):
         default=5431,
     )
 
-    PGVECTO_RS_USER: Optional[str] = Field(
+    PGVECTO_RS_USER: str | None = Field(
         description="Username for authenticating with the PostgreSQL database using PGVecto.RS",
         default=None,
     )
 
-    PGVECTO_RS_PASSWORD: Optional[str] = Field(
+    PGVECTO_RS_PASSWORD: str | None = Field(
         description="Password for authenticating with the PostgreSQL database using PGVecto.RS",
         default=None,
     )
 
-    PGVECTO_RS_DATABASE: Optional[str] = Field(
+    PGVECTO_RS_DATABASE: str | None = Field(
         description="Name of the PostgreSQL database with PGVecto.RS extension to connect to",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, NonNegativeInt, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,12 +7,12 @@ class QdrantConfig(BaseSettings):
     Configuration settings for Qdrant vector database
     """
 
-    QDRANT_URL: Optional[str] = Field(
+    QDRANT_URL: str | None = Field(
         description="URL of the Qdrant server (e.g., 'http://localhost:6333' or 'https://qdrant.example.com')",
         default=None,
     )
 
-    QDRANT_API_KEY: Optional[str] = Field(
+    QDRANT_API_KEY: str | None = Field(
         description="API key for authenticating with the Qdrant server",
         default=None,
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field, PositiveInt
 from pydantic_settings import BaseSettings
 
@@ -9,7 +7,7 @@ class RelytConfig(BaseSettings):
     Configuration settings for Relyt database
     """
 
-    RELYT_HOST: Optional[str] = Field(
+    RELYT_HOST: str | None = Field(
         description="Hostname or IP address of the Relyt server (e.g., 'localhost' or 'relyt.example.com')",
         default=None,
     )
@@ -19,17 +17,17 @@ class RelytConfig(BaseSettings):
         default=9200,
     )
 
-    RELYT_USER: Optional[str] = Field(
+    RELYT_USER: str | None = Field(
         description="Username for authenticating with the Relyt database",
         default=None,
     )
 
-    RELYT_PASSWORD: Optional[str] = Field(
+    RELYT_PASSWORD: str | None = Field(
         description="Password for authenticating with the Relyt database",
         default=None,
     )
 
-    RELYT_DATABASE: Optional[str] = Field(
+    RELYT_DATABASE: str | None = Field(
         description="Name of the Relyt database to connect to (default is 'default')",
         default="default",
     )
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import Field
 from pydantic_settings import BaseSettings
 
@@ -9,22 +7,22 @@ class TableStoreConfig(BaseSettings):
     Configuration settings for TableStore.
     """
 
-    TABLESTORE_ENDPOINT: Optional[str] = Field(
+    TABLESTORE_ENDPOINT: str | None = Field(
         description="Endpoint address of the TableStore server (e.g. 'https://instance-name.cn-hangzhou.ots.aliyuncs.com')",
         default=None,
     )
 
-    TABLESTORE_INSTANCE_NAME: Optional[str] = Field(
+    TABLESTORE_INSTANCE_NAME: str | None = Field(
         description="Instance name to access TableStore server (eg. 'instance-name')",
         default=None,
     )
 
-    TABLESTORE_ACCESS_KEY_ID: Optional[str] = Field(
+    TABLESTORE_ACCESS_KEY_ID: str | None = Field(
         description="AccessKey id for the instance name",
         default=None,
     )
 
-    TABLESTORE_ACCESS_KEY_SECRET: Optional[str] = Field(
+    TABLESTORE_ACCESS_KEY_SECRET: str | None = Field(
         description="AccessKey secret for the instance name",
         default=None,
     )
Some files were not shown because too many files have changed in this diff.