Compare commits
1543 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b56f9d349c | |||
| 9da9db069c | |||
| 11a362939f | |||
| 49ab488f88 | |||
| 5cce8a4ceb | |||
| b147c66c1b | |||
| 423683825a | |||
| f364450dfc | |||
| b2e318c5c7 | |||
| 52b263b99c | |||
| a9b9631c5c | |||
| 2cb869dfd2 | |||
| 52835103f1 | |||
| ba386a8d7a | |||
| c719e36f91 | |||
| 735fe9352a | |||
| 4513e4f700 | |||
| 37e593aeb0 | |||
| 60fb557f14 | |||
| b7b5a108c2 | |||
| 0bf901eff7 | |||
| 8408af8695 | |||
| 29c9e50f4e | |||
| 8cf28d71b8 | |||
| 709cdfc604 | |||
| eaa2a90a56 | |||
| f1e836c725 | |||
| ff2080a39e | |||
| 1366f582b1 | |||
| 03583f3424 | |||
| fcaacd5397 | |||
| 4b46b7a811 | |||
| e55c37ddab | |||
| 0448ff460b | |||
| 28771e5757 | |||
| da17cace4f | |||
| 87d7a5822e | |||
| e8b5a4109a | |||
| bae0facba3 | |||
| ecdcb4a836 | |||
| 066a2f8fa6 | |||
| 629207791e | |||
| b411e40d6b | |||
| b1b9fb55d9 | |||
| 666fea3d26 | |||
| 7112471a80 | |||
| 50bb76f968 | |||
| c78517d410 | |||
| 57b5d367f2 | |||
| 41ef4b6495 | |||
| 0c559778bb | |||
| 5a2a7ca4db | |||
| 8faebbc7fc | |||
| 8d68b8a03c | |||
| 97659c41f2 | |||
| b624596685 | |||
| be28fbc7b8 | |||
| 778059279b | |||
| 5bc46ff1ba | |||
| 47231fc5fb | |||
| f1a954cbcb | |||
| c620262d3d | |||
| 63402bb556 | |||
| c869b3996f | |||
| 0e5bccc20b | |||
| 872f53d35a | |||
| 109c968baa | |||
| 3d6975b369 | |||
| f2542f22fe | |||
| 3421025074 | |||
| 43f49b15e8 | |||
| 093d223799 | |||
| 853404a3ce | |||
| 30f9378dec | |||
| e083825f55 | |||
| 22ca6ba6ee | |||
| 04a82583d1 | |||
| ad4646f027 | |||
| bfd27460cc | |||
| ee1ff8ba75 | |||
| 885b1efac6 | |||
| e63fe7ec65 | |||
| dc92b1b087 | |||
| edf4141ad6 | |||
| dd4f1395ca | |||
| 84dcf34e2c | |||
| b679e1db98 | |||
| be183c0323 | |||
| 3df1195ae5 | |||
| cd41bddd1e | |||
| 87874caf95 | |||
| b83c2852b8 | |||
| 5e39eff403 | |||
| 6c77cd8780 | |||
| 669cba3240 | |||
| d4f3fb01fc | |||
| 65e4aef2a1 | |||
| d13396127c | |||
| 990d2e0074 | |||
| 61abf80197 | |||
| 956e8c5750 | |||
| bfc3ea767a | |||
| 499577c2a9 | |||
| 143284b2f3 | |||
| 7410b95db2 | |||
| 86b5c9d88e | |||
| a1e47bc287 | |||
| efc23316e9 | |||
| a680d3e138 | |||
| cc29bea81b | |||
| af0d7ba45e | |||
| 17d0970244 | |||
| 45b32046cd | |||
| a101b88114 | |||
| d9ccdc4fce | |||
| 7a989a58a0 | |||
| 82f694e473 | |||
| 9a155c81a7 | |||
| 9b888fa9b3 | |||
| ef6c213471 | |||
| 90ea57b99b | |||
| 330cb39daf | |||
| b87a8400e3 | |||
| 3b226ed672 | |||
| 07881195f2 | |||
| 08a5d35f34 | |||
| 561ce1e206 | |||
| 7cb46db945 | |||
| fe82477a6b | |||
| b8e56a40b2 | |||
| 3ff93d7342 | |||
| e43de21e43 | |||
| 31061416bc | |||
| a8af6cb3b4 | |||
| fe0a62118d | |||
| cc5271ef2b | |||
| d35b5a8905 | |||
| 64c96e89c5 | |||
| 9c4fcbd050 | |||
| b34e2711a7 | |||
| 14f07626e0 | |||
| 8c5ac5c2d9 | |||
| 4b6e53223b | |||
| a505cebf9b | |||
| 141d74f187 | |||
| 4c5b7595b4 | |||
| e613ae55bb | |||
| 5594106351 | |||
| c5031a5a97 | |||
| 473f9df65a | |||
| 0c26014812 | |||
| 06a3822965 | |||
| 2d1409a672 | |||
| 1d3ab76cc4 | |||
| c00b5fd4bb | |||
| 000273a8de | |||
| 3ba8c2a139 | |||
| c0219b38d1 | |||
| 40f95272be | |||
| 15e2eae69a | |||
| 743c86e506 | |||
| 4605cc2035 | |||
| a259981f16 | |||
| bbbb2d0908 | |||
| e80de15cc6 | |||
| b283ab291a | |||
| c24015ae17 | |||
| df5364d758 | |||
| 52c10cd45d | |||
| e8f1e3473c | |||
| 37cf324c52 | |||
| 5691071ac6 | |||
| 6faaff10ee | |||
| 4796e982ab | |||
| 22869d99a5 | |||
| 7d2d4e8866 | |||
| 4fe2652c6c | |||
| fcb5aa77fb | |||
| 411e42384e | |||
| 1c2f61e2a5 | |||
| 23efccbe39 | |||
| 77ffe7d634 | |||
| 60b871e0ae | |||
| 44d5b5e1c9 | |||
| 03651a30ea | |||
| db80fa2eb0 | |||
| 6f90c6918f | |||
| 437bf7ec95 | |||
| 50d5a73f84 | |||
| ec183d0d9a | |||
| ae28a110ab | |||
| a6637b29e6 | |||
| 1a50b9a04a | |||
| 585f137cf0 | |||
| 1449905fe5 | |||
| 622dc0a815 | |||
| 86b353511a | |||
| a3a0ae523f | |||
| fa807a56fb | |||
| 254d717a4a | |||
| 86e6bdaf4d | |||
| 2841601ad5 | |||
| 39c2d8b2a7 | |||
| 683452a9be | |||
| ebae5a302f | |||
| ba7b2ef9f0 | |||
| f3946a2310 | |||
| a4f694c345 | |||
| 2c532e8b79 | |||
| 5c1932f702 | |||
| ccdcccd44c | |||
| 3864f58c22 | |||
| c36827d1de | |||
| e2cc36a37f | |||
| cf6102f91b | |||
| dd6815b90f | |||
| 95026804e9 | |||
| 9ab5a2ea73 | |||
| a5cdd05c25 | |||
| 31676f6201 | |||
| c97f7a2a13 | |||
| be54c34ecb | |||
| 79597c7960 | |||
| 70a30d6e26 | |||
| b97fd470a5 | |||
| 4d7fd60305 | |||
| da38dc73ad | |||
| e89adf6633 | |||
| fa2bcfc60d | |||
| ed1d431a1f | |||
| 67442cf054 | |||
| a7186b6072 | |||
| 517574c40c | |||
| 5e6f21a9ff | |||
| 54aa278df0 | |||
| 2f9fb74a42 | |||
| b82632547c | |||
| e3b50ad3e1 | |||
| b139507ae6 | |||
| feb0845103 | |||
| f041812939 | |||
| 2b57e74330 | |||
| b671fa7a9c | |||
| 691669749e | |||
| 10a72bfd0f | |||
| 43f81b76af | |||
| 68bbfee8ea | |||
| ec8caf64b8 | |||
| bcacca6599 | |||
| 4fdd2456d4 | |||
| 3f6e8a8707 | |||
| 4845831f93 | |||
| 919575f312 | |||
| 0cd73e406d | |||
| 807ec1f6f7 | |||
| e2ac9b45e8 | |||
| b15ed13807 | |||
| f9d3941d9d | |||
| 34c379babb | |||
| 7b5111c955 | |||
| a3964b3fcd | |||
| 3856637cd2 | |||
| 2871975d1e | |||
| d5dd17644d | |||
| 5e5b4f0a49 | |||
| e5b15bed7d | |||
| 2e4a6928c3 | |||
| 1aa77f6cda | |||
| d0c4b5c763 | |||
| 361e10d09c | |||
| 95cfbe30ed | |||
| 87b8bafaea | |||
| 1a703e623a | |||
| 1ebefca029 | |||
| ffff26e165 | |||
| 61df7bf42c | |||
| c5fb42ff10 | |||
| cbc33cd7ae | |||
| 645c10a3c3 | |||
| 54293a66b3 | |||
| 01b3caa38c | |||
| 64642e1d1b | |||
| 546cdd0d91 | |||
| a83f4a48d0 | |||
| 99d3e68d59 | |||
| 01189e99fc | |||
| f493cb5846 | |||
| e9814b4a4d | |||
| 3b12aaf2ab | |||
| 7e20278489 | |||
| 3d73d5a687 | |||
| fa3a472c6b | |||
| 5f38301861 | |||
| 4d5632912a | |||
| a74034a012 | |||
| 3f64c72c24 | |||
| 68c45ce791 | |||
| e8cb6d6d34 | |||
| a2f2f72aaf | |||
| 7bb9609827 | |||
| 82107f4b6c | |||
| ee49ca4abf | |||
| 48548f0896 | |||
| 981e97ee76 | |||
| a568aa70fd | |||
| 218adcb29b | |||
| 6c55bcfdd8 | |||
| 8d04ee3e01 | |||
| 6781ef5bd1 | |||
| 7e94b64ae7 | |||
| 64a142723a | |||
| 50f6c38c84 | |||
| 9fb4c32c6a | |||
| 280ed5d706 | |||
| 2518ff3568 | |||
| c65ce8a42c | |||
| e5986d0034 | |||
| b2bb12fd2c | |||
| 7f30353fb9 | |||
| 235bccced5 | |||
| 67e6b9e495 | |||
| ea42911530 | |||
| 6823c5a7ec | |||
| d1272d296a | |||
| d2a571a868 | |||
| d120e54284 | |||
| 3a6af38582 | |||
| e157d811cb | |||
| 56a4e67009 | |||
| 5b93765802 | |||
| acea1d33f9 | |||
| 528a61718d | |||
| c5a75a1fb2 | |||
| 3790a0f8b4 | |||
| 779afe5d62 | |||
| 26b5658d70 | |||
| c568a69452 | |||
| 13ad53eef7 | |||
| b14758baff | |||
| b5c3fc0a08 | |||
| b29f8d0f2b | |||
| 5699875e30 | |||
| 3081e419e1 | |||
| a7b38a6940 | |||
| 1f04165a33 | |||
| 36db898d01 | |||
| 38a1ff42e5 | |||
| a4a2841c05 | |||
| 0ece204393 | |||
| 6344d74ae3 | |||
| faec0d9d15 | |||
| e14a2eb94b | |||
| cb3dc28e6e | |||
| 38dc65180b | |||
| 2405706643 | |||
| 5371cd1468 | |||
| 7978395083 | |||
| d0a944fcaa | |||
| 537384e9b5 | |||
| e650d8c55c | |||
| 9663215bb4 | |||
| b2537052ef | |||
| 2e51908bec | |||
| a8edbd8622 | |||
| 06d0852e74 | |||
| 03066ea29a | |||
| e66c9a82a2 | |||
| e86e6cf24f | |||
| 7e8238abc0 | |||
| 6448c8f1a8 | |||
| 3b6688253f | |||
| 45e7cd085a | |||
| 7c9cf84316 | |||
| 580775b47c | |||
| eab26a1e01 | |||
| 1910856c6c | |||
| 4b3e715291 | |||
| 6ca5a24a7f | |||
| 0d6b02090e | |||
| 5b487f651a | |||
| e7a364d610 | |||
| 161be3ff33 | |||
| 1fc0e52dc3 | |||
| 7bf4ca1837 | |||
| 59c3b26655 | |||
| 8296fe32cb | |||
| 4a15699669 | |||
| 24b990d899 | |||
| ca9a257eec | |||
| 6d4bb59865 | |||
| 9b75abfbe6 | |||
| c4f95f14cb | |||
| 9c74014443 | |||
| a25cb9e07a | |||
| 2cd7c94653 | |||
| 9648eba5bb | |||
| 18495c44aa | |||
| 4f22354aae | |||
| 9d00b8d4a8 | |||
| aeacd0a7d3 | |||
| 526700f2de | |||
| a25a9450a6 | |||
| f8a825e9b9 | |||
| dbddd762a9 | |||
| e715b77307 | |||
| d1388ff446 | |||
| 38e8a830c9 | |||
| 7605c2e8fb | |||
| 16bc44948e | |||
| e4f85c1e08 | |||
| 222110e153 | |||
| a40004f9cc | |||
| 0ba49518e9 | |||
| 14eb1a7fd8 | |||
| 23a8124c51 | |||
| f5af9dc4e2 | |||
| d8e4e34b74 | |||
| d98fdf4b2e | |||
| 4e5f17e907 | |||
| 38cdaeb191 | |||
| 956eb77369 | |||
| 465986e397 | |||
| 23a5583a7f | |||
| aec7efc3af | |||
| 30353f1a83 | |||
| 6f4bf55d5e | |||
| 2e77920943 | |||
| 4a813aec42 | |||
| 4304ec63bb | |||
| 6e89e69bba | |||
| 9a4282208e | |||
| 238fc0799e | |||
| 5b95a0fcb6 | |||
| 1946ac415a | |||
| 2186317676 | |||
| 5ab1037094 | |||
| f6d7ff1084 | |||
| 3e242d0bad | |||
| c491fbeabc | |||
| e654e8ec8a | |||
| 48e9e51f4f | |||
| 5a2172dc56 | |||
| 07535ec3ec | |||
| b8dd466988 | |||
| 01a92476e6 | |||
| 3928a7e359 | |||
| f9f614a474 | |||
| e30a5939d0 | |||
| 4b2f3fecb5 | |||
| ccfc119e45 | |||
| 11adf11957 | |||
| 38b5018bab | |||
| 404c284440 | |||
| e0655008b3 | |||
| 22f2392fca | |||
| 136c5ec653 | |||
| faddb4900f | |||
| 6ca7d5ec27 | |||
| 6f679a9e78 | |||
| b0eeaa6679 | |||
| d22e1dc018 | |||
| 3a0799ec18 | |||
| 4e163b2888 | |||
| 86da2eed3a | |||
| e7ed01b35f | |||
| 4d303caa62 | |||
| 3f55c98a3f | |||
| 2723e1049e | |||
| f11eb7d69b | |||
| 73b49c1038 | |||
| 4bec68713a | |||
| 8b90cd6ba1 | |||
| ac72bf34dd | |||
| a282cb7a84 | |||
| b52f916af6 | |||
| ec7810f08e | |||
| 904682b856 | |||
| 92fd5bc3e1 | |||
| d75c6d6538 | |||
| a30beded1c | |||
| df7bbb28d5 | |||
| dc40f16161 | |||
| 1abfb7e965 | |||
| d2d75787d2 | |||
| 5c57e17b77 | |||
| deaa85c40e | |||
| 49d92c7022 | |||
| d842d45b2b | |||
| 684ced5a41 | |||
| afb0897553 | |||
| 8a5c4306f5 | |||
| 01e24967b6 | |||
| 08adffd2fe | |||
| 0842c5718f | |||
| 5452e9cb10 | |||
| 232ff28cc7 | |||
| 81ed2bd91e | |||
| 305d660e39 | |||
| a716c72d37 | |||
| 302150b4f5 | |||
| 3fa03d4e1e | |||
| e014d6e646 | |||
| c2a49b7b1a | |||
| 265fa9e47a | |||
| d31cbda7b6 | |||
| b64dc7749c | |||
| b9e2be5c21 | |||
| 839a33de49 | |||
| 4dfe2bfdd7 | |||
| 1678803566 | |||
| d7e929e0a8 | |||
| 94e8e23eef | |||
| dbf93c0152 | |||
| 9224f792f0 | |||
| f6ed972926 | |||
| 0c75a63184 | |||
| 30cf7a50bb | |||
| e452ef3940 | |||
| cd5a69b9eb | |||
| 74c11ef226 | |||
| 662882d2f0 | |||
| de0c87c801 | |||
| 41aaa4184f | |||
| 451602e58d | |||
| 581f5c0356 | |||
| e5f6f112db | |||
| b3cef944b2 | |||
| 6edb6067f3 | |||
| b2c659c0a6 | |||
| 807ea52ba7 | |||
| 3ac9f571f5 | |||
| d95a612548 | |||
| 19303a546e | |||
| 4d117fd0af | |||
| 2bf84fc705 | |||
| 70692ee770 | |||
| 6370e150dd | |||
| b14e39815b | |||
| 6aaf9433a5 | |||
| 46014397d8 | |||
| 93d1c30d4d | |||
| 6300859c80 | |||
| 2892d3b755 | |||
| 25012f3839 | |||
| 3dfc1184ca | |||
| 53d1b04cc2 | |||
| b67465b91d | |||
| cba65fc51a | |||
| 75b16c72aa | |||
| 577e2bc987 | |||
| adb58af17b | |||
| dd11de3a50 | |||
| fc7543fa25 | |||
| a44ea7c12a | |||
| e734763002 | |||
| 37af08bacf | |||
| 616db5bf1c | |||
| 9ad9c382f4 | |||
| 1d090aa7a3 | |||
| 61e9029be4 | |||
| 5de92e9788 | |||
| 85ca2a3e45 | |||
| 07e2e3f72a | |||
| 3cf00626a2 | |||
| e289915dcb | |||
| 06dec32965 | |||
| 86317dd185 | |||
| 473da56abf | |||
| 162ab14517 | |||
| 40d2e0d1ad | |||
| b6c5069ef7 | |||
| 7273d561f5 | |||
| 78126aea0b | |||
| 7892eee207 | |||
| 72a9f40f3f | |||
| 495d51113c | |||
| 0abeec9cae | |||
| ea3c21f270 | |||
| 23fe6f4dee | |||
| 34ea2acd75 | |||
| ea64886469 | |||
| 023aaf7dff | |||
| 2f412804e2 | |||
| d6ffd389da | |||
| be0b17329a | |||
| b4edad86d6 | |||
| 9d1552a54e | |||
| 47ebfcc60a | |||
| beecf32dff | |||
| 57b2e4a4f1 | |||
| 48817d849f | |||
| d9f2d17040 | |||
| cd98afdd7b | |||
| dac6885fb0 | |||
| fcd180891b | |||
| d5d541c7f7 | |||
| a868c0656e | |||
| cd466ebdd4 | |||
| 15e89b78de | |||
| 66006ba48d | |||
| 18e9c62087 | |||
| 34f89e5936 | |||
| e1f3b9d8c1 | |||
| d3278f4886 | |||
| 77f0b75d2f | |||
| 012e6dd8a2 | |||
| 199b3d4709 | |||
| 7890989cbb | |||
| 945cc2ac54 | |||
| c30acbbf90 | |||
| 881025cc9e | |||
| 4cc210c5ab | |||
| 2d5316a334 | |||
| 9cfa177af0 | |||
| fe44355d25 | |||
| 487df27008 | |||
| 1965916f7a | |||
| 8948eded09 | |||
| 2f59cb5a0a | |||
| f505c8224e | |||
| 2154e898bc | |||
| f9e80f995d | |||
| a5c1915772 | |||
| 6edf3d6427 | |||
| 944c1fd148 | |||
| 20281737b1 | |||
| 0555c4216e | |||
| 5b6684ca19 | |||
| c4ac923b4c | |||
| cc022a68f4 | |||
| 1571379304 | |||
| 858562ed53 | |||
| b8724425d3 | |||
| b9ce0faee2 | |||
| 1e0e2752bf | |||
| 7a488b93d0 | |||
| d67b83e671 | |||
| a100be9dce | |||
| 4d39ea887e | |||
| 7638b5b3e3 | |||
| 600cee118d | |||
| c2c56d0263 | |||
| ac578d188e | |||
| 53dfd48f59 | |||
| f5701fa9ad | |||
| 84174c560d | |||
| 9bf525ee7a | |||
| 4b69d4b1bb | |||
| bbcccd6e98 | |||
| c0bcb41175 | |||
| b5fef788da | |||
| 42f5965d8a | |||
| d904d91252 | |||
| 4d81a4bf68 | |||
| 9abe3fa662 | |||
| 9d8f1b00f7 | |||
| bb6d2b1b74 | |||
| 096a230b70 | |||
| 8ef01d37a9 | |||
| cb9148f6cd | |||
| c4fa53ec8b | |||
| 1e2df406ee | |||
| 1dabfbe1da | |||
| 771bbfeac5 | |||
| 3694786112 | |||
| e477d7d186 | |||
| fbf3c83019 | |||
| 8a51a26819 | |||
| 1838f52e19 | |||
| e9eb248f2f | |||
| 77581eda59 | |||
| 7f3e423bbe | |||
| 706e3cbebd | |||
| 2effffd8e2 | |||
| 2908878988 | |||
| 01a26a9011 | |||
| 23e1fbf7b9 | |||
| ed2e877bb6 | |||
| 3663082a01 | |||
| 9645d57c05 | |||
| f6fbd75320 | |||
| 20a6c9d35d | |||
| 0a90a40569 | |||
| ef079f4e94 | |||
| b589ebec23 | |||
| 8c58ba8ac0 | |||
| a19d223434 | |||
| fc06419549 | |||
| 032fcf2a47 | |||
| 5b5bc3114a | |||
| 81e267d421 | |||
| 4fc92d59f3 | |||
| a0f347b10f | |||
| 5debf523b0 | |||
| 1c8aadafd1 | |||
| d33f252404 | |||
| 4da56acdcc | |||
| 493802a605 | |||
| 22b0ae9d51 | |||
| 134a54b32f | |||
| 7f6fbb0021 | |||
| ec8bae9827 | |||
| a4d916af13 | |||
| 32f096fa3f | |||
| c692093921 | |||
| 1a6d0826b5 | |||
| f2c5e38724 | |||
| bf33fe7f55 | |||
| ff9d6a75c7 | |||
| 4c6690f5f0 | |||
| 67b9381825 | |||
| 4b53d4bf6b | |||
| ed85b72bf5 | |||
| 17fda89ae9 | |||
| 1987bcbea3 | |||
| d019dda4f7 | |||
| dc9255bc5f | |||
| b0e8758b63 | |||
| 6d65e30dd5 | |||
| b70f8db037 | |||
| 4d833a4fb9 | |||
| a6563211af | |||
| 8d66b0e4a8 | |||
| 555c1dde3d | |||
| b04bca37a5 | |||
| 168e588ac7 | |||
| 7158e50ee2 | |||
| bd0fadbe5f | |||
| d1c638ab7a | |||
| 51f65d7b14 | |||
| a16d00d673 | |||
| 4d0daddad4 | |||
| d246b9e8bf | |||
| ef3d4e506d | |||
| 462b37b0bb | |||
| 4341291cc2 | |||
| c589886ba5 | |||
| d0363b118e | |||
| 533322859d | |||
| f8d5b76edb | |||
| 834862f8a4 | |||
| d9f6469b7b | |||
| 852e6ee4c6 | |||
| ecb24c9c26 | |||
| 543231c9f2 | |||
| d96a08d4ad | |||
| f2f930300a | |||
| 81ad3ce0ae | |||
| 38e1eaa8a3 | |||
| 6aa786b886 | |||
| 58750cc441 | |||
| 2188fb067e | |||
| 910255745e | |||
| cf86fa51b0 | |||
| a4e4ce8aea | |||
| 7f43b7fa53 | |||
| 0492f75197 | |||
| 493cf20f95 | |||
| 3553fb65a0 | |||
| 059cc57ba6 | |||
| 4df4ffe06f | |||
| 462347256d | |||
| 133feb67c3 | |||
| be308e9d11 | |||
| 2cf195741c | |||
| f61baada46 | |||
| 6e1e0d1d31 | |||
| 66419e7292 | |||
| 9e998877e9 | |||
| 5ebd4039e6 | |||
| 3628b9d1e2 | |||
| 8d2bd874d7 | |||
| 7e27c6cf7d | |||
| b8ff4ede49 | |||
| 081d6c1245 | |||
| eb8004c355 | |||
| b3bf9f8f54 | |||
| 503c86bb8a | |||
| 757f4caee8 | |||
| 95fabd1f3a | |||
| 0b42e08636 | |||
| d5e4f99e72 | |||
| 781d4dd857 | |||
| f35fe850c0 | |||
| 7102b9eb74 | |||
| 9a55c9fd9f | |||
| 041b4ec05c | |||
| 2438066d52 | |||
| 3c5b35b3b9 | |||
| e98827ec6f | |||
| 864941d4d5 | |||
| 05863a3178 | |||
| 075d161bb1 | |||
| f8b5c911ed | |||
| 6f40503328 | |||
| 3440c3348c | |||
| 853be26612 | |||
| 89b1c32b24 | |||
| 44d2cc15f7 | |||
| 76d88f2291 | |||
| 72340cfbd9 | |||
| 81805715bd | |||
| ebcbb02d16 | |||
| 8c29b60129 | |||
| 93cf67c65d | |||
| 211cc0f5f2 | |||
| 8fb70c5e4a | |||
| c58c91fc4f | |||
| 78e2345c19 | |||
| 3bc565b986 | |||
| b7187352e1 | |||
| 1520ca6220 | |||
| f7a63d5c97 | |||
| 3958450c05 | |||
| 5121c73ed1 | |||
| 44eaac8471 | |||
| 77501c3600 | |||
| 8205607716 | |||
| c7132b7f56 | |||
| 1043e08dfa | |||
| e5f4f9b69e | |||
| 1e3c10d803 | |||
| 6fa4477673 | |||
| 6d1e704e34 | |||
| ffc33a447f | |||
| 39ec8e242e | |||
| 0d4e39ad1c | |||
| 6112eb6a49 | |||
| 986fab8738 | |||
| b5ffd419d8 | |||
| c6e23212bb | |||
| e75c11956c | |||
| 1ec8528502 | |||
| 892e7129c5 | |||
| bdea2ee5c8 | |||
| a5186c73c2 | |||
| 4580e6ff93 | |||
| 60d456a993 | |||
| bcf6838e86 | |||
| 41a66d9706 | |||
| 958ad83a4b | |||
| ba61470bae | |||
| 0e895f60a0 | |||
| 8210a1b2f2 | |||
| 2feefeec47 | |||
| 55cb8920d5 | |||
| 46acbf10ba | |||
| f28e3bd89e | |||
| 2c0408b95a | |||
| 057da86d87 | |||
| 8de6cb3504 | |||
| 2bb745cdd7 | |||
| 35090ba4d5 | |||
| 132a1bebbb | |||
| c8e13300e1 | |||
| a23d02923c | |||
| 194262c0ef | |||
| e2fbcb4d30 | |||
| 0c1bc0f803 | |||
| 83361e7905 | |||
| 19abd9ffaf | |||
| cbd925c56f | |||
| 63bbb47378 | |||
| 0fbd0eac80 | |||
| e5231fa3c7 | |||
| ad1961417d | |||
| 4154e539ea | |||
| e0a38adaf6 | |||
| c3e711da58 | |||
| 700e288718 | |||
| e72a64785b | |||
| 5ace2b70fc | |||
| 62c8be85d6 | |||
| 3f6b5ae6a5 | |||
| dc365b65a0 | |||
| 9c88ec2128 | |||
| 3dd9832f61 | |||
| 4d1f5c899f | |||
| 1f796d432d | |||
| 35006e318f | |||
| 7d0c66e08a | |||
| 9dc4148743 | |||
| f324d8e04f | |||
| f16ee96d7e | |||
| 0d849b38c2 | |||
| 8ed020610f | |||
| d8766b2051 | |||
| 9db70bab63 | |||
| 8a7548a9d4 | |||
| 0cb057dadd | |||
| 0f5db0d1bf | |||
| 94e67c9c58 | |||
| 274321524c | |||
| 40f7007263 | |||
| 66597ec5f2 | |||
| 75e72385cc | |||
| eb58460175 | |||
| 0852521a7e | |||
| 56ed6bb97f | |||
| 73098220bf | |||
| ca7756fa77 | |||
| 8b15db6dcb | |||
| 0b60aa81eb | |||
| da70cb92a8 | |||
| 746a027e98 | |||
| 80c11b6c12 | |||
| b76226a06d | |||
| 8945e98d8b | |||
| 97c249d5b9 | |||
| 6e134a23f9 | |||
| a4803543a1 | |||
| 2cc88b933f | |||
| ce1221c867 | |||
| 005a684600 | |||
| 3af17c3019 | |||
| f26a0aa71d | |||
| 2b4619842d | |||
| cf529e0af7 | |||
| 4da02d023b | |||
| fe0677651e | |||
| 811ea0cb85 | |||
| a5f84535f3 | |||
| 84bf460f99 | |||
| 538fbed302 | |||
| feafb44bae | |||
| a44fc1103d | |||
| 48a872e285 | |||
| c44a60f3f5 | |||
| f373df9682 | |||
| 9e01d5b8d1 | |||
| 7fbda230f5 | |||
| 56141557dc | |||
| fe2b269b6e | |||
| eb6fe69af2 | |||
| 6489c5d394 | |||
| 854a8a8356 | |||
| d34475d6a1 | |||
| b72f4b43a4 | |||
| 34e797d6f5 | |||
| bb20fc3c98 | |||
| 3e66e42ae5 | |||
| 0665d78550 | |||
| 55d670f22f | |||
| 32ae0d2c79 | |||
| 924eeb3587 | |||
| bc9126d774 | |||
| 4df50e7f85 | |||
| 4e26728cbf | |||
| 7135666060 | |||
| 018ccb9a11 | |||
| ee1d3a9057 | |||
| b762a0a85b | |||
| 9771be29b2 | |||
| 010a6b83ef | |||
| 87224b830b | |||
| 9e77650e8c | |||
| e0712f444d | |||
| 913b72fdaf | |||
| 39102608aa | |||
| 23945a0130 | |||
| bdfb9ee815 | |||
| 1a75a94253 | |||
| a9d527d517 | |||
| 94666f7754 | |||
| 41acbaa746 | |||
| d5042a73bd | |||
| f6756596b3 | |||
| a24f78f5a4 | |||
| 64d4b2c0b0 | |||
| cc4a307415 | |||
| a07a5336f6 | |||
| 9789ca1a4d | |||
| 4ec2d685e7 | |||
| dc62953040 | |||
| 0c42a74a8a | |||
| cf3e82d10a | |||
| 0e815c2fbc | |||
| 27a5536749 | |||
| db338b36b8 | |||
| 72b906255f | |||
| 2211678d91 | |||
| 80162c126b | |||
| 1db94a0b30 | |||
| b6349e9428 | |||
| bedd648d47 | |||
| 58b3cce320 | |||
| 81102a5963 | |||
| 42fb30852b | |||
| e51361cb94 | |||
| 80a9ed9d6a | |||
| e2129001eb | |||
| 805cc064af | |||
| 3c2279db39 | |||
| 412876ca33 | |||
| 461a346bf4 | |||
| ae4bcd405c | |||
| 0536dc1112 | |||
| 1c3507ede1 | |||
| 3f47037cc7 | |||
| d87321ef3c | |||
| 01ac815346 | |||
| 2daa1d53d9 | |||
| cc2eed84a5 | |||
| c644e4bef2 | |||
| a7d83df3eb | |||
| e1886b5343 | |||
| 873e376098 | |||
| 3c378a4a7a | |||
| ca7266fb76 | |||
| 4a0c23258e | |||
| ccc79ec139 | |||
| 5c4a0a862a | |||
| 2a779df4fd | |||
| f5bf41cf11 | |||
| f74c7407db | |||
| 433c04af26 | |||
| 2e51a339a9 | |||
| 0411872dda | |||
| d8f1aef7be | |||
| 14d3e12103 | |||
| fd13eb314b | |||
| daba563a1c | |||
| b79b0b1a92 | |||
| 01d4323b50 | |||
| dc85eda188 | |||
| ccf3e7bfa2 | |||
| d22acae9a3 | |||
| 7ecfba811e | |||
| 1e7330e499 | |||
| 68475e5fcf | |||
| 8cc008c501 | |||
| faaaf2ee3c | |||
| 3d42d0515d | |||
| 3d70e8966f | |||
| 6837cbfe06 | |||
| a5c20175a1 | |||
| 43d1439420 | |||
| d8cf26bd50 | |||
| c321c8c23f | |||
| 6227fce091 | |||
| aba8e6ccfa | |||
| f528a86219 | |||
| 5b15c7c4e9 | |||
| afd438d363 | |||
| 80972aa33d | |||
| 520995a7e8 | |||
| 5c3c6bff2f | |||
| fb937635f5 | |||
| 3ad1c95e86 | |||
| 2c4c78fd65 | |||
| 0afef077ed | |||
| 80f598f439 | |||
| bacd15714a | |||
| 5cb7a31c09 | |||
| 99aace4fc9 | |||
| c7f8f61d09 | |||
| 159cd2c682 | |||
| 513f41fddf | |||
| 543aed2a32 | |||
| 20ea96328c | |||
| 007edb1773 | |||
| f78bcc5229 | |||
| 43ec064fb9 | |||
| ffac4c8417 | |||
| 4adcd4a6c8 | |||
| e936854493 | |||
| 850a813171 | |||
| 5bbcd3b981 | |||
| a337c32840 | |||
| d39e4e4b1f | |||
| b51d60ef5e | |||
| 58037da061 | |||
| 1eb266588e | |||
| deb560067e | |||
| 4e351c59e3 | |||
| eb597289cb | |||
| 6fd42d9fe4 | |||
| efff9c01a6 | |||
| a83d5baa90 | |||
| 50d13255e4 | |||
| 5195fa93fa | |||
| e5e63d6ac3 | |||
| 7a65a414c3 | |||
| 4a192ffdf9 | |||
| 944d21cabb | |||
| d267212289 | |||
| 58d8e5586a | |||
| ce803daf4a | |||
| 9bf34e2fda | |||
| 52218d2ddb | |||
| 6bd1a7eac0 | |||
| 6177d7b096 | |||
| 3161bf4608 | |||
| 2349dafb98 | |||
| 1cd58d7828 | |||
| 8aa06d621a | |||
| ecf13a8cb9 | |||
| b8ff6a647e | |||
| 49110c671d | |||
| febaf6849f | |||
| 4893174652 | |||
| 5820c72092 | |||
| 4f23f38583 | |||
| bbfa91141a | |||
| f439c6382f | |||
| 21321a9d96 | |||
| f03b50fd13 | |||
| 15a870f9d9 | |||
| 9472ee4a01 | |||
| 1f1f824da7 | |||
| f02105c346 | |||
| c596441f58 | |||
| 455e29cbea | |||
| 511d223468 | |||
| 5324c1b588 | |||
| 554ea49294 | |||
| d7f77a6282 | |||
| 048aea1151 | |||
| f1ad626b46 | |||
| a78d8d1da4 | |||
| fbebc6fa57 | |||
| c18fc901c4 | |||
| 245f14c8c1 | |||
| d732eef776 | |||
| 56b184fd0c | |||
| 9e5eaad79b | |||
| 738eaadcbf | |||
| 1cf3d2452e | |||
| 670dce6f4a | |||
| 07cfdcf205 | |||
| 15b7450713 | |||
| 272e0d3d46 | |||
| 8d8a2a66e5 | |||
| befc2e9e6f | |||
| 059ab1f0f4 | |||
| f2fe1738cd | |||
| 0d4847596e | |||
| 8e86ef1634 | |||
| a32a05962e | |||
| a7e81fda9b | |||
| 9e4f9d4bdc | |||
| 0677a096a9 | |||
| 381835e2a9 | |||
| af070fa3de | |||
| f9cd43b684 | |||
| 7095787f1f | |||
| 4ca336aed0 | |||
| 8a18806a57 | |||
| e5e124b9aa | |||
| 090ac7a8a0 | |||
| 97af471aa6 | |||
| 0176fc712b | |||
| c426c896d0 | |||
| 0a99470369 | |||
| cd280d8702 | |||
| b1bade37d8 | |||
| 005c186e1b | |||
| 6f8ecb28c5 | |||
| 4c28eb2a78 | |||
| 042332bcec | |||
| 071cd0f849 | |||
| d7bb834bcb | |||
| 9b437fc1fe | |||
| 6f6a67807b | |||
| e5785d4690 | |||
| a4ce54d615 | |||
| ddbcec5c96 | |||
| 7cbb4c958b | |||
| 6c5adbb9af | |||
| 4db6e1a10a | |||
| ea517c80a4 | |||
| 9f6ed6c08e | |||
| b1ddb483a4 | |||
| 17352ef4fd | |||
| 71766039ff | |||
| c607c9be58 | |||
| df604e50fb | |||
| 876662ff89 | |||
| 058706014b | |||
| 99ec09f13a | |||
| 4a011d87e0 | |||
| c6edc2cd8f | |||
| cc196bfdf0 | |||
| 895d5006bb | |||
| 62dcda6a56 | |||
| cbf072bc55 | |||
| 1b4d2a32d2 | |||
| b71c90bbe1 | |||
| 80344aafd3 | |||
| b5ca400500 | |||
| 2e89439120 | |||
| 555410386c | |||
| 08b132ddb9 | |||
| 1e86cc643d | |||
| f53503438c | |||
| 0d43d070cc | |||
| 0791fdca2a | |||
| 6e8678d5e3 | |||
| 10d9f75366 | |||
| 77993e607e | |||
| 74d8671e54 | |||
| 4c41eac29c | |||
| 3c97c1b251 | |||
| 84c671a81a | |||
| f336bdebcc | |||
| e5b8377202 | |||
| 4f42f4ab0c | |||
| 11b385820d | |||
| f1a9eaee54 | |||
| ffee8d5f39 | |||
| eeb839242b | |||
| f7760976a5 | |||
| ca2d1d873d | |||
| 951761ee2c | |||
| 231fd001d9 | |||
| e00afa8128 | |||
| e9297f39ea | |||
| 314dc5a398 | |||
| e07cb020cc | |||
| 9f79b9e0da | |||
| e87e40b3b4 | |||
| f0f95478ec | |||
| bd66b57ad3 | |||
| a1d3ff9766 | |||
| 7f61a947c2 | |||
| 4d0bef1e90 | |||
| 960823fb40 | |||
| c04e8e7a9d | |||
| fb6debd986 | |||
| d8185a25aa | |||
| 53337a0a28 | |||
| ae51556d04 | |||
| b3de9195a7 | |||
| 055ecf6ea7 | |||
| c603680e02 | |||
| 814aadd7e3 | |||
| dce63d1529 | |||
| 8ff05d8e38 | |||
| dfa84b82a8 | |||
| 6ac8618381 | |||
| 8c527c3616 | |||
| 54a074647e | |||
| c5ebf5d328 | |||
| 43c6ef560e | |||
| 3076abc744 | |||
| 07636c8a8d | |||
| 35d55ee513 | |||
| a442e843be | |||
| c0cf90cf8b | |||
| 3b1cc3b197 | |||
| a0c1f30ae7 | |||
| 8822a86709 | |||
| 98f397de0e | |||
| fd4d23f8f7 | |||
| 4820f0a42b | |||
| 807a618cf7 | |||
| a93e500b44 | |||
| 92d3114584 | |||
| 5062d6fbd9 | |||
| 7d14ebaf09 | |||
| cd794a6985 | |||
| 84b421d6ef | |||
| 8316da5bbe | |||
| fa287aeef7 | |||
| caaf4cac55 | |||
| 010276ceab | |||
| f219817eb3 | |||
| d487348d21 | |||
| eb0b29708f | |||
| 877842a720 | |||
| 1fc25e8c3f | |||
| 61ecafd426 | |||
| 79bd3441eb | |||
| 5f5da4b2cb | |||
| dede19d8c0 | |||
| fada95f58e | |||
| 014b8c5982 | |||
| 46d79c5bc2 | |||
| 40ba3b44a1 | |||
| beadf95975 | |||
| 2887f8916b | |||
| 0c9a8932f7 | |||
| ac72431195 | |||
| 2a7877beff | |||
| 7a56459103 | |||
| 5292fa74d1 | |||
| f2184e34dd | |||
| 1d4867830b | |||
| 36a4903843 | |||
| c83a3e67c9 | |||
| 05014c49c8 | |||
| aa69107165 | |||
| d373b0eca3 | |||
| 6aa40b2747 | |||
| 34c3bfe408 | |||
| 6ac56e722d | |||
| 61dc7f0a70 | |||
| 9f000957dd | |||
| b2141313e2 | |||
| aa9bd1fa3c | |||
| 5a2dc03a1c | |||
| 508fafbe62 | |||
| e29548178b | |||
| ab2f36f202 | |||
| b8c9fcfd70 | |||
| 58ce544e83 | |||
| e98ce36301 | |||
| 6401b4ae92 | |||
| 25b49e1a2e | |||
| c7def35b54 | |||
| ddba1c63c5 | |||
| c512516e14 | |||
| 2c43b1e12b | |||
| b68d97c6bf | |||
| f1757e4343 | |||
| e2d5641d99 | |||
| 523fe1e309 | |||
| c985b5e4d0 | |||
| 786f1a8fc7 | |||
| 18cb2e2662 | |||
| 743c706b0a | |||
| 4ed0e5f35a | |||
| fd6b37d3da | |||
| 56e24de0d4 | |||
| 2780043a7d | |||
| 54c9e48bb7 | |||
| ed5795eead | |||
| 3d225163f8 | |||
| 0569cec3ea | |||
| a2f8ac535e | |||
| 29355d75b0 | |||
| d9e89deef6 | |||
| 6b051eac47 | |||
| da997de918 | |||
| d97094fb8d | |||
| b91fc5409e | |||
| 3c970646d1 | |||
| a92668ae78 | |||
| 88cd8feb05 | |||
| 91c16f826a | |||
| d4d60ff315 | |||
| e8033f96de | |||
| 5fba542a29 | |||
| 44de3ffa05 | |||
| 2efa6df028 | |||
| 9e530c86ae | |||
| 95857733a1 | |||
| 664f910083 | |||
| 735e4b0848 | |||
| e8d76a6f58 | |||
| 0a6926be54 | |||
| 830a971bde | |||
| 4779d14d7d | |||
| 8929a27a24 | |||
| eea624c171 | |||
| cdaf4a9674 | |||
| 6fe92d5ed6 | |||
| 8649a68766 | |||
| af005b6e5e | |||
| b19d2ae78f | |||
| 5634f9bdcd | |||
| c703fa15c0 | |||
| d9c106cfde | |||
| 203f78fdae | |||
| c5af62b023 | |||
| dcd70daf48 | |||
| 8263919b0e | |||
| 97488e603f | |||
| 41c23adb0e | |||
| a85183d42c | |||
| 45b67b9604 | |||
| c376efdd28 | |||
| 4c5f510207 | |||
| 06032aa661 | |||
| e8fb2aefb3 | |||
| 3cb6c5e161 | |||
| 7e0c53dfe9 | |||
| c2ca7e43b6 | |||
| ae47d5d349 | |||
| 35d3fce7a0 | |||
| 4177d8bd3b | |||
| ad5349a488 | |||
| 6b57a8c1fc | |||
| 92a4034c5e | |||
| 3e4002df0d | |||
| 1b9ec7f4fc | |||
| 4a7c4a9e9d | |||
| 0d3fb0658a | |||
| 73cf2ba95d | |||
| 5a481e6a01 | |||
| d8e12839af | |||
| 3bf05dabea | |||
| d4e86a17d1 | |||
| 6555e7ebb0 | |||
| ae9d8eb734 | |||
| e49d594db4 | |||
| 66bb0898db | |||
| b323312312 | |||
| 58e52bad4f | |||
| 57b2a60172 | |||
| 212bbbbdf0 | |||
| a0e62b5588 | |||
| e9831dd772 | |||
| da95afba8a | |||
| 0bd875eb9e | |||
| af63a42773 | |||
| ad9a374229 | |||
| 1b86e4d414 | |||
| 86b0921ac4 | |||
| dbe98dcbd2 | |||
| 4a72b60707 | |||
| 7a4696fc17 | |||
| e3de6ea458 | |||
| 1db4739ed8 | |||
| 25375a6b48 | |||
| ca87df7d44 | |||
| d052dc0b9d | |||
| 3f542e9cf5 | |||
| 04493de767 | |||
| 4fdab46617 | |||
| 1a23b880d5 | |||
| b3c376afbe | |||
| adcf5754ae | |||
| 0863672e27 | |||
| 0f503ced25 | |||
| 987a166bf4 | |||
| 57b6f0eeaf | |||
| f71a28f9b9 | |||
| 45c7ea9194 | |||
| c9f4312588 | |||
| 8b657eee41 | |||
| b9c9de7f97 | |||
| e559f05326 | |||
| 824124fedc | |||
| be9d407fa0 | |||
| c494e54320 | |||
| b52b6f3fc5 | |||
| 82c9733101 | |||
| a45ff6cdaa | |||
| 960d45c853 | |||
| 246b767b64 | |||
| 0edf056e95 | |||
| 88819af2d8 | |||
| b048c981ad | |||
| e1dafe76dd | |||
| 1a2e63ecff | |||
| a60b32cb80 | |||
| 6b58220009 | |||
| a841057679 | |||
| ee6abea956 | |||
| 8b0acd75e0 | |||
| cea7d44717 | |||
| 0da87e75fe | |||
| 566961c7a5 | |||
| ac65258d30 | |||
| 35261e6dba | |||
| 2818f23ba5 | |||
| 88f071ea22 | |||
| bd4bc0e7f1 | |||
| 890c1d53ff | |||
| 026260e7a1 | |||
| 99fe93b7f1 | |||
| b764c53020 | |||
| 11bd7128d2 | |||
| 7cda32664b | |||
| 4c73a0ae56 | |||
| 97fc845a6a | |||
| 7d9ac0163b | |||
| d903e064e0 | |||
| e1928288fe | |||
| 6ab6412dd3 | |||
| 30b7a69d3d | |||
| ccd0a446d8 | |||
| 0418e53b3c | |||
| bad48ab59a | |||
| bbad76bb71 | |||
| 6c1bd98c14 | |||
| b95e4acaeb | |||
| c238701d09 | |||
| 60d2ac3c7a | |||
| 967ef4d56b | |||
| ad57bdda6c | |||
| a0b69d1d3d | |||
| 5df94d7e33 | |||
| 1cbe9fb7a3 | |||
| 395f736753 | |||
| 065516c5f3 | |||
| 8660abaea2 | |||
| 366f0705a0 | |||
| ccea87ca68 | |||
| 5d54883e2f | |||
| 662f65c3c8 | |||
| 259f028490 | |||
| 5db550a298 | |||
| e3c77d2906 | |||
| ba00e79253 | |||
| c1791f920e | |||
| df3803c7b7 | |||
| 384b4cbafa | |||
| 40501a7a73 | |||
| ab89b4cd4a | |||
| 48e0d55c8e | |||
| 1eba27a50a | |||
| 41206fa0e2 | |||
| 21cf1d64e5 | |||
| ae91b6f673 | |||
| f4515b5cfa | |||
| 6c57cde7f9 | |||
| 5014c1827b | |||
| f531e6aff2 | |||
| c5c110137b | |||
| 5957d9ead0 | |||
| 5675df2a44 | |||
| 00bc9142c4 | |||
| 5653ab39fc | |||
| 473dd7c940 | |||
| ee824d52ba | |||
| 7c5fba9890 | |||
| f214cb03b2 | |||
| 416c51799b | |||
| cf6f665f03 | |||
| 20da0e4dd3 | |||
| fa8c417526 | |||
| 2c65aec6c8 | |||
| 96d8e6d823 | |||
| 62b8084300 | |||
| 907aff5de4 | |||
| bc09129ec5 | |||
| cd94f638e2 | |||
| b1fe419870 | |||
| 98b724391f | |||
| 620f6ec616 | |||
| 0c6a3882a2 | |||
| a08880ae15 | |||
| f48826dfe9 | |||
| 9c3551478e | |||
| cc94e1da26 | |||
| 2b7ea5813c | |||
| 185192be67 | |||
| ae4caa96a0 | |||
| af13ae82c1 | |||
| 13503c063b | |||
| 337da59368 |
83
.clang-tidy
Normal file
83
.clang-tidy
Normal file
@ -0,0 +1,83 @@
|
||||
WarningsAsErrors: '*'
|
||||
Checks: '
|
||||
*,
|
||||
-abseil-*,
|
||||
-altera-*,
|
||||
-android-*,
|
||||
-boost-*,
|
||||
-bugprone-assignment-in-if-condition,
|
||||
-bugprone-branch-clone,
|
||||
-bugprone-easily-swappable-parameters,
|
||||
-bugprone-implicit-widening-of-multiplication-result,
|
||||
-bugprone-macro-parentheses,
|
||||
-bugprone-narrowing-conversions,
|
||||
-bugprone-unhandled-self-assignment,
|
||||
-cert-dcl50-cpp,
|
||||
-cert-env33-c,
|
||||
-cert-err33-c,
|
||||
-cert-err58-cpp,
|
||||
-clang-analyzer-security.insecureAPI.strcpy,
|
||||
-clang-analyzer-unix.BlockInCriticalSection,
|
||||
-concurrency-mt-unsafe,
|
||||
-cppcoreguidelines-avoid-c-arrays,
|
||||
-cppcoreguidelines-avoid-do-while,
|
||||
-cppcoreguidelines-avoid-magic-numbers,
|
||||
-cppcoreguidelines-avoid-non-const-global-variables,
|
||||
-cppcoreguidelines-init-variables,
|
||||
-cppcoreguidelines-macro-usage,
|
||||
-cppcoreguidelines-no-malloc,
|
||||
-cppcoreguidelines-owning-memory,
|
||||
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
|
||||
-cppcoreguidelines-pro-bounds-constant-array-index,
|
||||
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
|
||||
-cppcoreguidelines-pro-type-member-init,
|
||||
-cppcoreguidelines-pro-type-reinterpret-cast,
|
||||
-cppcoreguidelines-pro-type-union-access,
|
||||
-cppcoreguidelines-pro-type-vararg,
|
||||
-cppcoreguidelines-special-member-functions,
|
||||
-fuchsia-*,
|
||||
-google-build-using-namespace,
|
||||
-google-readability-function-size,
|
||||
-google-readability-todo,
|
||||
-google-runtime-int,
|
||||
-google-runtime-references,
|
||||
-hicpp-*,
|
||||
-llvm-*,
|
||||
-llvmlibc-*,
|
||||
-misc-const-correctness,
|
||||
-misc-include-cleaner,
|
||||
-misc-no-recursion,
|
||||
-misc-non-private-member-variables-in-classes,
|
||||
-misc-redundant-expression,
|
||||
-misc-unused-parameters,
|
||||
-misc-use-anonymous-namespace,
|
||||
-misc-use-internal-linkage,
|
||||
-modernize-avoid-c-arrays,
|
||||
-modernize-loop-convert,
|
||||
-modernize-use-nodiscard,
|
||||
-modernize-raw-string-literal,
|
||||
-modernize-return-braced-init-list,
|
||||
-modernize-use-default-member-init,
|
||||
-modernize-use-trailing-return-type,
|
||||
-modernize-use-using,
|
||||
-performance-avoid-endl,
|
||||
-performance-no-int-to-ptr,
|
||||
-readability-avoid-nested-conditional-operator,
|
||||
-readability-braces-around-statements,
|
||||
-readability-else-after-return,
|
||||
-readability-function-cognitive-complexity,
|
||||
-readability-function-size,
|
||||
-readability-identifier-length,
|
||||
-readability-inconsistent-declaration-parameter-name,
|
||||
-readability-isolate-declaration,
|
||||
-readability-magic-numbers,
|
||||
-readability-math-missing-parentheses,
|
||||
-readability-named-parameter,
|
||||
-readability-redundant-access-specifiers,
|
||||
-readability-redundant-declaration,
|
||||
-readability-simplify-boolean-expr,
|
||||
-readability-suspicious-call-argument'
|
||||
CheckOptions:
|
||||
cppcoreguidelines-narrowing-conversions.WarnOnEquivalentBitWidth: 'false'
|
||||
readability-implicit-bool-conversion.AllowIntegerConditions: 'true'
|
||||
readability-implicit-bool-conversion.AllowPointerConditions: 'true'
|
||||
32
.gitattributes
vendored
Normal file
32
.gitattributes
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
* text eol=lf
|
||||
|
||||
*.png binary
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
27
.github/ISSUE_TEMPLATE.md
vendored
27
.github/ISSUE_TEMPLATE.md
vendored
@ -1,27 +0,0 @@
|
||||
#### Additional Information
|
||||
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
|
||||
|
||||
- Version of s3fs being used (s3fs --version)
|
||||
- _example: 1.0_
|
||||
|
||||
- Version of fuse being used (pkg-config --modversion fuse)
|
||||
- _example: 2.9.4_
|
||||
|
||||
- System information (uname -a)
|
||||
- _command result: uname -a_
|
||||
|
||||
- Distro (cat /etc/issue)
|
||||
- _command result: result_
|
||||
|
||||
- s3fs command line used (if applicable)
|
||||
```
|
||||
```
|
||||
- /etc/fstab entry (if applicable):
|
||||
```
|
||||
```
|
||||
- s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
|
||||
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
|
||||
```
|
||||
```
|
||||
#### Details about issue
|
||||
|
||||
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: 'feature request'
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
### Feature request
|
||||
<!-- -------------------------------------------------------------
|
||||
Please let us know your ideas, such as features you want to improve,
|
||||
features to add, etc.
|
||||
And list any related Issue or Pull Request numbers.
|
||||
-------------------------------------------------------------- -->
|
||||
|
||||
|
||||
49
.github/ISSUE_TEMPLATE/support_request.md
vendored
Normal file
49
.github/ISSUE_TEMPLATE/support_request.md
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
name: Support request (Including bug reports)
|
||||
about: Request support for usage, bugs, etc.
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
<!-- --------------------------------------------------------------------------
|
||||
The following information is very important in order to help us to help you.
|
||||
Omission of the following details may delay your support request or receive no
|
||||
attention at all.
|
||||
--------------------------------------------------------------------------- -->
|
||||
|
||||
### Additional Information
|
||||
|
||||
#### Version of s3fs being used (`s3fs --version`)
|
||||
<!-- example: V1.91 (commit:b19262a) -->
|
||||
|
||||
#### Version of fuse being used (`pkg-config --modversion fuse`, `rpm -qi fuse` or `dpkg -s fuse`)
|
||||
<!-- example: 2.9.2 -->
|
||||
|
||||
#### Provider (`AWS`, `OVH`, `Hetzner`, `iDrive E2`, ...)
|
||||
<!-- example: AWS -->
|
||||
|
||||
#### Kernel information (`uname -r`)
|
||||
<!-- example: 5.10.96-90.460.amzn2.x86_64 -->
|
||||
|
||||
#### GNU/Linux Distribution, if applicable (`cat /etc/os-release`)
|
||||
<!-- command result -->
|
||||
|
||||
#### How to run s3fs, if applicable
|
||||
<!-- Describe the s3fs "command line" or "/etc/fstab" entry used. -->
|
||||
[] command line
|
||||
[] /etc/fstab
|
||||
|
||||
<!-- Executed command line or /etc/fastab entry -->
|
||||
```
|
||||
```
|
||||
|
||||
#### s3fs syslog messages (`grep s3fs /var/log/syslog`, `journalctl | grep s3fs`, or `s3fs outputs`)
|
||||
<!-- if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages. -->
|
||||
```
|
||||
```
|
||||
|
||||
### Details about issue
|
||||
<!-- Please describe the content of the issue in detail. -->
|
||||
|
||||
|
||||
14
.github/PULL_REQUEST_TEMPLATE.md
vendored
14
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -1,5 +1,11 @@
|
||||
#### Relevant Issue (if applicable)
|
||||
_If there are Issues related to this PullRequest, please list it._
|
||||
<!-- --------------------------------------------------------------------------
|
||||
Please describe the purpose of the pull request(such as resolving the issue)
|
||||
and what the fix/update is.
|
||||
--------------------------------------------------------------------------- -->
|
||||
|
||||
### Relevant Issue (if applicable)
|
||||
<!-- If there are Issues related to this PullRequest, please list it. -->
|
||||
|
||||
### Details
|
||||
<!-- Please describe the details of PullRequest. -->
|
||||
|
||||
#### Details
|
||||
_Please describe the details of PullRequest._
|
||||
|
||||
316
.github/workflows/ci.yml
vendored
Normal file
316
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,316 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
name: s3fs-fuse CI
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
#
|
||||
# CRON event is fired on every sunday (UTC).
|
||||
#
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
|
||||
#
|
||||
# Jobs
|
||||
#
|
||||
jobs:
|
||||
Linux:
|
||||
runs-on: ubuntu-latest
|
||||
#
|
||||
# build matrix for containers
|
||||
#
|
||||
strategy:
|
||||
#
|
||||
# do not stop jobs automatically if any of the jobs fail
|
||||
#
|
||||
fail-fast: false
|
||||
|
||||
#
|
||||
# matrix for containers
|
||||
#
|
||||
matrix:
|
||||
container:
|
||||
- ubuntu:25.10
|
||||
- ubuntu:24.04
|
||||
- ubuntu:22.04
|
||||
- debian:trixie
|
||||
- debian:bookworm
|
||||
- debian:bullseye
|
||||
- rockylinux/rockylinux:10
|
||||
- rockylinux/rockylinux:9
|
||||
- rockylinux/rockylinux:8
|
||||
- fedora:43
|
||||
- fedora:42
|
||||
- opensuse/leap:15
|
||||
- opensuse/leap:16.0
|
||||
- alpine:3.22
|
||||
|
||||
container:
|
||||
image: ${{ matrix.container }}
|
||||
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
|
||||
|
||||
env:
|
||||
# [NOTE]
|
||||
# Installation special environment variables for debian and ubuntu.
|
||||
#
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
steps:
|
||||
# [NOTE]
|
||||
# On openSUSE, tar and gzip must be installed before action/checkout.
|
||||
#
|
||||
- name: Install openSUSE packages before checkout
|
||||
if: matrix.container == 'opensuse/leap:15' || matrix.container == 'opensuse/leap:16.0'
|
||||
run: zypper install -y tar gzip
|
||||
|
||||
- name: Install Alpine packages before checkout
|
||||
if: matrix.container == 'alpine:3.22'
|
||||
run: apk add --no-progress --no-cache bash
|
||||
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# [NOTE]
|
||||
# Matters that depend on OS:VERSION are determined and executed in the following script.
|
||||
# Please note that the option to configure (CONFIGURE_OPTIONS) is set in the environment variable.
|
||||
#
|
||||
- name: Install packages
|
||||
run: |
|
||||
.github/workflows/linux-ci-helper.sh ${{ matrix.container }}
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
|
||||
make --jobs=$(nproc)
|
||||
|
||||
- name: Test suite
|
||||
run: |
|
||||
make check -C src
|
||||
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
|
||||
|
||||
# [NOTE]
|
||||
# Using macos-fuse-t
|
||||
# This product(package) is a workaround for osxfuse which required an OS reboot(macos 11 and later).
|
||||
# see. https://github.com/macos-fuse-t/fuse-t
|
||||
# About osxfuse
|
||||
# This job doesn't work with GitHub Actions using macOS 11+ because "load_osxfuse" returns
|
||||
# "exit code = 1".(requires OS reboot)
|
||||
#
|
||||
macos-14:
|
||||
runs-on: macos-14
|
||||
|
||||
# [NOTE]
|
||||
# In macos-14 (and maybe later), the location of the CA certificate is different and you need to specify it.
|
||||
# We give the CA path as an environment variable.
|
||||
#
|
||||
env:
|
||||
CURL_CA_BUNDLE: "/opt/homebrew/etc/ca-certificates/cert.pem"
|
||||
|
||||
steps:
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Brew tap
|
||||
run: |
|
||||
TAPS="$(brew --repository)/Library/Taps";
|
||||
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi;
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew tap macos-fuse-t/homebrew-cask
|
||||
|
||||
- name: Install fuse-t
|
||||
run: |
|
||||
if [ ! -d /usr/local/include ]; then sudo mkdir -p /usr/local/include; echo "Created /usr/local/include directory"; fi
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install fuse-t
|
||||
|
||||
- name: Install brew other packages
|
||||
run: |
|
||||
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed shellcheck jq';
|
||||
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do
|
||||
if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi
|
||||
done
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
|
||||
make --jobs=$(sysctl -n hw.ncpu)
|
||||
|
||||
- name: Cppcheck
|
||||
run: make cppcheck
|
||||
|
||||
- name: Shellcheck
|
||||
run: make shellcheck
|
||||
|
||||
- name: Test suite
|
||||
run: |
|
||||
make check -C src
|
||||
make ALL_TESTS=1 check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
|
||||
|
||||
MemoryTest:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
#
|
||||
# build matrix for containers
|
||||
#
|
||||
strategy:
|
||||
#
|
||||
# do not stop jobs automatically if any of the jobs fail
|
||||
#
|
||||
fail-fast: false
|
||||
|
||||
#
|
||||
# matrix for type of checking
|
||||
#
|
||||
# [NOTE]
|
||||
# Currently following test is not supported:
|
||||
# - sanitize_memory : Future support planned
|
||||
#
|
||||
matrix:
|
||||
checktype:
|
||||
- glibc_debug
|
||||
- sanitize_address
|
||||
- sanitize_others
|
||||
- sanitize_thread
|
||||
- thread_safety
|
||||
- valgrind
|
||||
|
||||
container:
|
||||
image: fedora:43
|
||||
|
||||
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
|
||||
|
||||
steps:
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install packages
|
||||
run: |
|
||||
.github/workflows/linux-ci-helper.sh fedora:43
|
||||
|
||||
- name: Install clang
|
||||
run: |
|
||||
dnf install -y \
|
||||
clang \
|
||||
libcxx \
|
||||
libcxx-devel
|
||||
|
||||
- name: Install Valgrind
|
||||
if: matrix.checktype == 'valgrind'
|
||||
run: dnf install -y valgrind
|
||||
|
||||
#
|
||||
# Set CXX/CXXFLAGS and Variables for test
|
||||
#
|
||||
- name: Set variables
|
||||
run: |
|
||||
COMMON_CXXFLAGS='-g -Wno-cpp'
|
||||
{
|
||||
if [ "${{ matrix.checktype }}" = "glibc_debug" ]; then
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -D_GLIBCXX_DEBUG"
|
||||
elif [ "${{ matrix.checktype }}" = "sanitize_address" ]; then
|
||||
echo 'CXX=clang++'
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=address -fsanitize-address-use-after-scope"
|
||||
echo 'ASAN_OPTIONS=detect_leaks=1,detect_stack_use_after_return=1'
|
||||
elif [ "${{ matrix.checktype }}" = "sanitize_memory" ]; then
|
||||
echo 'CXX=clang++'
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=memory"
|
||||
elif [ "${{ matrix.checktype }}" = "sanitize_thread" ]; then
|
||||
echo 'CXX=clang++'
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O0 -fsanitize=thread"
|
||||
echo 'TSAN_OPTIONS=halt_on_error=1'
|
||||
# [NOTE]
|
||||
# Set this to avoid following error when running configure.
|
||||
# "FATAL: ThreadSanitizer: unexpected memory mapping"
|
||||
sysctl vm.mmap_rnd_bits=28
|
||||
elif [ "${{ matrix.checktype }}" = "sanitize_others" ]; then
|
||||
echo 'CXX=clang++'
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow"
|
||||
elif [ "${{ matrix.checktype }}" = "thread_safety" ]; then
|
||||
echo 'CXX=clang++'
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1 -Wthread-safety -Wthread-safety-beta -stdlib=libc++ -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -Werror"
|
||||
echo 'LDFLAGS=-DCLANG_DEFAULT_LINKER=lld'
|
||||
elif [ "${{ matrix.checktype }}" = "valgrind" ]; then
|
||||
echo "CXXFLAGS=${COMMON_CXXFLAGS} -O1"
|
||||
echo 'VALGRIND="--leak-check=full --error-exitcode=1"'
|
||||
echo 'RETRIES=100'
|
||||
echo 'S3_URL=http://127.0.0.1:8081'
|
||||
fi
|
||||
} >> "$GITHUB_ENV"
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
/bin/sh -c "CXX=${CXX} CXXFLAGS=\"${CXXFLAGS}\" LDFLAGS=\"${LDFLAGS}\" ./configure --prefix=/usr --with-openssl"
|
||||
make --jobs=$(nproc)
|
||||
|
||||
- name: Test suite
|
||||
run: |
|
||||
/bin/sh -c "ALL_TESTS=1 ASAN_OPTIONS=${ASAN_OPTIONS} TSAN_OPTIONS=${TSAN_OPTIONS} VALGRIND=${VALGRIND} RETRIES=${RETRIES} make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)"
|
||||
|
||||
static-checks:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: fedora:42
|
||||
|
||||
steps:
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install packages
|
||||
run: |
|
||||
.github/workflows/linux-ci-helper.sh fedora:42
|
||||
|
||||
- name: Install extra packages
|
||||
run: |
|
||||
dnf install -y \
|
||||
clang-tools-extra \
|
||||
cppcheck \
|
||||
python3 \
|
||||
ShellCheck
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
|
||||
make --jobs=$(nproc)
|
||||
|
||||
- name: clang-tidy
|
||||
run: |
|
||||
make clang-tidy
|
||||
|
||||
- name: Cppcheck
|
||||
run: |
|
||||
make cppcheck
|
||||
|
||||
- name: Shellcheck
|
||||
run: |
|
||||
make shellcheck
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
417
.github/workflows/linux-ci-helper.sh
vendored
Executable file
417
.github/workflows/linux-ci-helper.sh
vendored
Executable file
@ -0,0 +1,417 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
# [NOTE]
|
||||
# Since bash is not present in some Runner containers, this script
|
||||
# runs in sh.
|
||||
# pipefail etc. are not native variables of sh. It exists in bash's
|
||||
# sh compatibility mode, but doesn't work in sh compatibility mode
|
||||
# of ash such as alpine.
|
||||
# However, it's not fatal that pipefail doesn't work for this script.
|
||||
#
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
#set -o pipefail
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Common variables
|
||||
#-----------------------------------------------------------
|
||||
PRGNAME=$(basename "$0")
|
||||
|
||||
echo "${PRGNAME} [INFO] Start Linux helper for installing packages."
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Parameter check
|
||||
#-----------------------------------------------------------
|
||||
#
|
||||
# Usage: ${PRGNAME} "OS:VERSION"
|
||||
#
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "${PRGNAME} [ERROR] No container name options specified."
|
||||
fi
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Container OS variables
|
||||
#-----------------------------------------------------------
|
||||
CONTAINER_FULLNAME=$1
|
||||
# shellcheck disable=SC2034
|
||||
CONTAINER_OSNAME=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f1)
|
||||
# shellcheck disable=SC2034
|
||||
CONTAINER_OSVERSION=$(echo "${CONTAINER_FULLNAME}" | cut -d: -f2)
|
||||
|
||||
CURL_DIRECT_VERSION="v8.11.0"
|
||||
CURL_DIRECT_URL="https://github.com/moparisthebest/static-curl/releases/download/${CURL_DIRECT_VERSION}/curl-$(uname -m | sed -e s/x86_64/amd64/)"
|
||||
CURL_HASH_X86_64="d18aa1f4e03b50b649491ca2c401cd8c5e89e72be91ff758952ad2ab5a83135d"
|
||||
CURL_HASH_AARCH64="1b050abd1669f9a2ac29b34eb022cdeafb271dce5a4fb57d8ef8fadff6d7be1f"
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Parameters for configure(set environments)
|
||||
#-----------------------------------------------------------
|
||||
CXX="g++"
|
||||
CXXFLAGS="-O"
|
||||
LDFLAGS=""
|
||||
CONFIGURE_OPTIONS="--prefix=/usr --with-openssl"
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# OS dependent variables
|
||||
#-----------------------------------------------------------
|
||||
#
|
||||
# Default values
|
||||
#
|
||||
PACKAGE_ENABLE_REPO_OPTIONS=""
|
||||
PACKAGE_INSTALL_ADDITIONAL_OPTIONS=""
|
||||
CURL_DIRECT_INSTALL=0
|
||||
|
||||
if [ "${CONTAINER_FULLNAME}" = "ubuntu:25.10" ] ||
|
||||
[ "${CONTAINER_FULLNAME}" = "ubuntu:24.04" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
autoconf
|
||||
autotools-dev
|
||||
build-essential
|
||||
curl
|
||||
fuse
|
||||
g++
|
||||
git
|
||||
jq
|
||||
libcurl4-openssl-dev
|
||||
libfuse-dev
|
||||
libssl-dev
|
||||
libtool
|
||||
libxml2-dev
|
||||
locales-all
|
||||
mailcap
|
||||
openjdk-21-jre-headless
|
||||
pkg-config
|
||||
)
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:22.04" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
autoconf
|
||||
autotools-dev
|
||||
build-essential
|
||||
curl
|
||||
fuse
|
||||
g++
|
||||
git
|
||||
jq
|
||||
libcurl4-openssl-dev
|
||||
libfuse-dev
|
||||
libssl-dev
|
||||
libtool
|
||||
libxml2-dev
|
||||
locales-all
|
||||
mime-support
|
||||
openjdk-21-jre-headless
|
||||
pkg-config
|
||||
)
|
||||
|
||||
CURL_DIRECT_INSTALL=1
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "debian:trixie" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
autoconf
|
||||
autotools-dev
|
||||
build-essential
|
||||
curl
|
||||
fuse
|
||||
g++
|
||||
git
|
||||
jq
|
||||
libcurl4-openssl-dev
|
||||
libfuse-dev
|
||||
libssl-dev
|
||||
libtool
|
||||
libxml2-dev
|
||||
locales-all
|
||||
mailcap
|
||||
openjdk-21-jre-headless
|
||||
pkg-config
|
||||
procps
|
||||
)
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "debian:bookworm" ] ||
|
||||
[ "${CONTAINER_FULLNAME}" = "debian:bullseye" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
autoconf
|
||||
autotools-dev
|
||||
build-essential
|
||||
curl
|
||||
fuse
|
||||
g++
|
||||
git
|
||||
jq
|
||||
libcurl4-openssl-dev
|
||||
libfuse-dev
|
||||
libssl-dev
|
||||
libtool
|
||||
libxml2-dev
|
||||
locales-all
|
||||
mime-support
|
||||
openjdk-17-jre-headless
|
||||
pkg-config
|
||||
procps
|
||||
)
|
||||
|
||||
CURL_DIRECT_INSTALL=1
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:10" ] ||
|
||||
[ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:9" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
PACKAGE_ENABLE_REPO_OPTIONS="--enablerepo=crb"
|
||||
|
||||
# [NOTE]
|
||||
# Rocky Linux 9/10 (or CentOS Stream 9/10) images may have curl installation issues that
|
||||
# conflict with the curl-minimal package.
|
||||
#
|
||||
PACKAGE_INSTALL_ADDITIONAL_OPTIONS="--allowerasing"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
automake
|
||||
curl
|
||||
curl-devel
|
||||
diffutils
|
||||
fuse
|
||||
fuse-devel
|
||||
gcc
|
||||
gcc-c++
|
||||
git
|
||||
glibc-langpack-en
|
||||
java-21-openjdk-headless
|
||||
jq
|
||||
libstdc++-devel
|
||||
libxml2-devel
|
||||
mailcap
|
||||
make
|
||||
openssl
|
||||
openssl-devel
|
||||
perl-Test-Harness
|
||||
procps
|
||||
xz
|
||||
)
|
||||
|
||||
CURL_DIRECT_INSTALL=1
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "rockylinux/rockylinux:8" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
automake
|
||||
curl
|
||||
curl-devel
|
||||
diffutils
|
||||
fuse
|
||||
fuse-devel
|
||||
gcc
|
||||
gcc-c++
|
||||
git
|
||||
glibc-langpack-en
|
||||
java-21-openjdk-headless
|
||||
jq
|
||||
libstdc++-devel
|
||||
libxml2-devel
|
||||
mailcap
|
||||
make
|
||||
openssl
|
||||
openssl-devel
|
||||
perl-Test-Harness
|
||||
)
|
||||
|
||||
CURL_DIRECT_INSTALL=1
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "fedora:43" ] ||
|
||||
[ "${CONTAINER_FULLNAME}" = "fedora:42" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
automake
|
||||
curl
|
||||
curl-devel
|
||||
diffutils
|
||||
fuse
|
||||
fuse-devel
|
||||
gawk
|
||||
gcc
|
||||
gcc-c++
|
||||
git
|
||||
glibc-langpack-en
|
||||
java-latest-openjdk-headless
|
||||
jq
|
||||
libstdc++-devel
|
||||
libxml2-devel
|
||||
mailcap
|
||||
make
|
||||
openssl
|
||||
openssl-devel
|
||||
perl-Test-Harness
|
||||
procps
|
||||
)
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ] ||
|
||||
[ "${CONTAINER_FULLNAME}" = "opensuse/leap:16.0" ]; then
|
||||
PACKAGE_MANAGER_BIN="zypper"
|
||||
PACKAGE_UPDATE_OPTIONS="refresh"
|
||||
PACKAGE_INSTALL_OPTIONS="install -y"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
automake
|
||||
curl
|
||||
curl-devel
|
||||
diffutils
|
||||
fuse
|
||||
fuse-devel
|
||||
gcc-c++
|
||||
java-21-openjdk-headless
|
||||
jq
|
||||
libxml2-devel
|
||||
make
|
||||
openssl
|
||||
openssl-devel
|
||||
procps
|
||||
python3
|
||||
)
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "alpine:3.22" ]; then
|
||||
PACKAGE_MANAGER_BIN="apk"
|
||||
PACKAGE_UPDATE_OPTIONS="update --no-progress"
|
||||
PACKAGE_INSTALL_OPTIONS="add --no-progress --no-cache"
|
||||
|
||||
INSTALL_PACKAGES=(
|
||||
attr
|
||||
autoconf
|
||||
automake
|
||||
coreutils
|
||||
curl
|
||||
curl-dev
|
||||
fuse-dev
|
||||
g++
|
||||
git
|
||||
jq
|
||||
libtool
|
||||
libxml2-dev
|
||||
mailcap
|
||||
make
|
||||
openjdk21
|
||||
openssl
|
||||
perl-test-harness-utils
|
||||
procps
|
||||
sed
|
||||
)
|
||||
|
||||
else
|
||||
echo "No container configured for: ${CONTAINER_FULLNAME}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Install
|
||||
#-----------------------------------------------------------
|
||||
#
|
||||
# Update packages (ex. apt-get update -y -qq)
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Updates."
|
||||
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_UPDATE_OPTIONS}"
|
||||
|
||||
#
|
||||
# Install packages
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Install packages."
|
||||
/bin/sh -c "${PACKAGE_MANAGER_BIN} ${PACKAGE_ENABLE_REPO_OPTIONS} ${PACKAGE_INSTALL_OPTIONS} ${PACKAGE_INSTALL_ADDITIONAL_OPTIONS} ${INSTALL_PACKAGES[*]}"
|
||||
|
||||
# Check Java version
|
||||
java -version
|
||||
|
||||
# Install newer curl for older distributions
|
||||
if [ "${CURL_DIRECT_INSTALL}" -eq 1 ]; then
|
||||
echo "${PRGNAME} [INFO] Install newer curl package."
|
||||
|
||||
curl --fail --location --silent --output "/tmp/curl" "${CURL_DIRECT_URL}"
|
||||
case "$(uname -m)" in
|
||||
x86_64) curl_hash="$CURL_HASH_X86_64" ;;
|
||||
aarch64) curl_hash="$CURL_HASH_AARCH64" ;;
|
||||
*) exit 1 ;;
|
||||
esac
|
||||
echo "$curl_hash" "/tmp/curl" | sha256sum --check
|
||||
mv "/tmp/curl" "/usr/local/bin/curl"
|
||||
chmod +x "/usr/local/bin/curl"
|
||||
|
||||
# Rocky Linux 8 and 9 have a different certificate path
|
||||
if [ ! -f /etc/ssl/certs/ca-certificates.crt ]; then
|
||||
ln -s /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check curl version
|
||||
curl --version
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Set environment for configure
|
||||
#-----------------------------------------------------------
|
||||
echo "${PRGNAME} [INFO] Set environment for configure options"
|
||||
|
||||
cat << EOF >> "${GITHUB_ENV}"
|
||||
CXX=${CXX}
|
||||
CXXFLAGS=${CXXFLAGS}
|
||||
LDFLAGS=${LDFLAGS}
|
||||
CONFIGURE_OPTIONS=${CONFIGURE_OPTIONS}
|
||||
EOF
|
||||
|
||||
echo "${PRGNAME} [INFO] Finish Linux helper for installing packages."
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
141
.gitignore
vendored
141
.gitignore
vendored
@ -1,31 +1,112 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Compiled Object files
|
||||
#
|
||||
*.slo
|
||||
*.lo
|
||||
*.o
|
||||
/Makefile
|
||||
/Makefile.in
|
||||
/aclocal.m4
|
||||
/autom4te.cache/
|
||||
/config.guess
|
||||
/config.log
|
||||
/config.status
|
||||
/config.sub
|
||||
/stamp-h1
|
||||
/config.h
|
||||
/config.h.in
|
||||
/config.h.in~
|
||||
/configure
|
||||
/depcomp
|
||||
/test-driver
|
||||
/compile
|
||||
/doc/Makefile
|
||||
/doc/Makefile.in
|
||||
/install-sh
|
||||
/missing
|
||||
/src/.deps/
|
||||
/src/Makefile
|
||||
/src/Makefile.in
|
||||
/src/s3fs
|
||||
/src/test_*
|
||||
/test/.deps/
|
||||
/test/Makefile
|
||||
/test/Makefile.in
|
||||
/test/*.log
|
||||
/default_commit_hash
|
||||
*.Po
|
||||
*.Plo
|
||||
|
||||
#
|
||||
# autotools/automake
|
||||
#
|
||||
aclocal.m4
|
||||
autom4te.cache
|
||||
autoscan.log
|
||||
config.guess
|
||||
config.h
|
||||
config.h.in
|
||||
config.h.in~
|
||||
config.log
|
||||
config.status
|
||||
config.sub
|
||||
configure
|
||||
configure.scan
|
||||
configure.ac~
|
||||
depcomp
|
||||
install-sh
|
||||
libtool
|
||||
ltmain.sh
|
||||
m4
|
||||
m4/*
|
||||
missing
|
||||
stamp-h1
|
||||
Makefile
|
||||
Makefile.in
|
||||
test-driver
|
||||
compile
|
||||
missing
|
||||
|
||||
#
|
||||
# man page
|
||||
#
|
||||
doc/man/s3fs.1
|
||||
|
||||
#
|
||||
# object directories
|
||||
#
|
||||
.deps
|
||||
.libs
|
||||
*/.deps
|
||||
*/.deps/*
|
||||
*/.libs
|
||||
*/.libs/*
|
||||
|
||||
#
|
||||
# each directories
|
||||
#
|
||||
*.log
|
||||
*.trs
|
||||
default_commit_hash
|
||||
src/s3fs
|
||||
src/test_curl_util
|
||||
src/test_page_list
|
||||
src/test_string_util
|
||||
|
||||
test/chaos-http-proxy-*
|
||||
test/junk_data
|
||||
test/pjdfstest
|
||||
test/pjd-pjdfstest-*
|
||||
test/s3proxy-*
|
||||
test/write_multiblock
|
||||
test/mknod_test
|
||||
test/truncate_read_file
|
||||
test/cr_filename
|
||||
|
||||
#
|
||||
# Windows ports
|
||||
#
|
||||
*.dll
|
||||
*.exe
|
||||
fuse.pc
|
||||
WinFsp/
|
||||
bin/
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
17
.travis.yml
17
.travis.yml
@ -1,17 +0,0 @@
|
||||
language: cpp
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- modprobe fuse
|
||||
- make check -C test
|
||||
- cat test/test-suite.log
|
||||
4
AUTHORS
4
AUTHORS
@ -17,3 +17,7 @@ Bugfixes, performance and other improvements.
|
||||
5. Takeshi Nakatani <ggtakec@gmail.com>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
6. Andrew Gaul <gaul@gaul.org>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
116
COMPILATION.md
Normal file
116
COMPILATION.md
Normal file
@ -0,0 +1,116 @@
|
||||
# Compilation from source code
|
||||
|
||||
These are generic instructions should work on almost any GNU/Linux, macOS, BSD, or similar.
|
||||
|
||||
If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes).
|
||||
|
||||
Keep in mind using the pre-built packages when available.
|
||||
|
||||
## Compilation on Linux
|
||||
|
||||
### Ensure your system satisfies build and runtime dependencies for:
|
||||
|
||||
* fuse >= 2.8.4
|
||||
* automake
|
||||
* gcc-c++
|
||||
* make
|
||||
* libcurl
|
||||
* libxml2
|
||||
* openssl/gnutls/nss
|
||||
* Please prepare the library according to the OS on which you will compile.
|
||||
* It is necessary to match the library used by libcurl.
|
||||
* Install the OpenSSL, GnuTLS or NSS devel package.
|
||||
* mime.types (the package providing depends on the OS)
|
||||
* s3fs tries to detect `/etc/mime.types` as default regardless of the OS
|
||||
* Else s3fs tries to detect `/etc/apache2/mime.types` if OS is macOS
|
||||
* s3fs exits with an error if these files are not exist
|
||||
* Alternatively, you can set mime.types file path with `mime` option without detecting these default files
|
||||
* pkg-config (or your OS equivalent)
|
||||
|
||||
* NOTE
|
||||
If you have any trouble about details on required packages, see `INSTALL_PACKAGES` in [linux-ci-helper.sh](https://github.com/s3fs-fuse/s3fs-fuse/blob/master/.github/workflows/linux-ci-helper.sh).
|
||||
|
||||
### Then compile from master via the following commands:
|
||||
1. Clone the source code:
|
||||
```sh
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
```
|
||||
2. Configuration:
|
||||
```sh
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
```
|
||||
Depending on the TLS library (OpenSSL/GnuTLS/NSS), add `--with-openssl`, `--with-gnutls` or `--with-nss` when executing `configure`. (If omitted, it is equivalent to `--with-openssl`.)
|
||||
3. Building:
|
||||
```sh
|
||||
make
|
||||
```
|
||||
4. Installing:
|
||||
```sh
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### NOTE - The required libraries/components required to run s3fs are:
|
||||
|
||||
* fuse >= 2.8.4
|
||||
* libcurl
|
||||
* libxml2
|
||||
* openssl/gnutls/nss
|
||||
* mime.types (the package providing depends on the OS)
|
||||
|
||||
|
||||
## Compilation on Windows (using MSYS2)
|
||||
|
||||
On Windows, use [MSYS2](https://www.msys2.org/) to compile for itself.
|
||||
|
||||
1. Install [WinFsp](https://github.com/billziss-gh/winfsp) to your machine. Note it should be installed with developer mode to include header files.
|
||||
2. Install dependencies onto MSYS2:
|
||||
|
||||
```sh
|
||||
pacman -S git autoconf automake gcc make pkg-config openssl-devel libcurl-devel libxml2-devel libzstd-devel
|
||||
```
|
||||
|
||||
3. Clone this repository, then change directory into the cloned one.
|
||||
4. Copy WinFsp files to the directory:
|
||||
|
||||
```sh
|
||||
cp -r "/c/Program Files (x86)/WinFsp" "./WinFsp"
|
||||
```
|
||||
|
||||
5. Write `fuse.pc` to resolve the package correctly:
|
||||
|
||||
```sh
|
||||
cat > ./fuse.pc << 'EOS'
|
||||
arch=x64
|
||||
prefix=${pcfiledir}/WinFsp
|
||||
incdir=${prefix}/inc/fuse
|
||||
implib=${prefix}/bin/winfsp-${arch}.dll
|
||||
|
||||
Name: fuse
|
||||
Description: WinFsp FUSE compatible API
|
||||
Version: 2.8.4
|
||||
URL: http://www.secfs.net/winfsp/
|
||||
Libs: "${implib}"
|
||||
Cflags: -I"${incdir}"
|
||||
EOS
|
||||
```
|
||||
|
||||
6. Compile using the command line:
|
||||
|
||||
```sh
|
||||
./autogen.sh
|
||||
PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$(pwd)" ./configure
|
||||
make CXXFLAGS="-I/usr/include"
|
||||
```
|
||||
|
||||
7. Copy binary files to distribute at one place:
|
||||
|
||||
```sh
|
||||
mkdir ./bin
|
||||
cp ./src/s3fs.exe ./bin/
|
||||
cp ./WinFsp/bin/winfsp-x64.dll ./bin/
|
||||
cp /usr/bin/msys-*.dll ./bin/
|
||||
```
|
||||
|
||||
8. Distribute these files.
|
||||
375
ChangeLog
375
ChangeLog
@ -1,6 +1,302 @@
|
||||
ChangeLog for S3FS
|
||||
ChangeLog for S3FS
|
||||
------------------
|
||||
|
||||
Version 1.95 -- 25 Oct, 2024 (major changes only)
|
||||
#2424 - Add ipresolve option to select IPv4- or IPv6-only
|
||||
#2443 - Retry request on HTTP 429 error
|
||||
#2448 - Changed s3fs logo
|
||||
#2455 - Fix deadlock in FdManager::ChangeEntityToTempPath
|
||||
#2487 - #2492 - #2493 - Enable static lock checking and fix locking errors
|
||||
#2506 - #2517 - Fix Windows compilation
|
||||
#2515 - Fix FreeBSD support
|
||||
#2532 - Fix use-after-free in FdManager::ChangeEntityToTempPath
|
||||
|
||||
Version 1.94 -- 23 Feb, 2024 (major changes only)
|
||||
#2409 - Fixed a bug that mounting with ksmid specified to fail
|
||||
#2404 - Fixed ordering problem between fdatasync and flush
|
||||
#2399 - Fixed ListBucket/IAM edge cases
|
||||
#2376 - Corrected list_bucket to search in stat cache during creating new file
|
||||
#2369 - Make dir size 4096 not 0
|
||||
#2351 - Added option free_space_ratio to control cache size
|
||||
#2325 - Fixed a bug upload boundary calculation in StreamUpload
|
||||
#2298 - Abort MPU when MPU fails to avoid litter
|
||||
#2261 - Use explicit ownership for memory
|
||||
#2179 - Require C++11
|
||||
|
||||
Version 1.93 -- 19 Jul, 2023 (major changes only)
|
||||
#2212 - Allow listing implicit directories
|
||||
#2194 - #2209 - #2211 - #2214 - #2215 - Fix thread safety issues
|
||||
#2191 - #2201 - Add support for FUSE-T on macOS
|
||||
|
||||
Version 1.92 -- 21 May, 2023 (major changes only)
|
||||
#1802 - #2104 - New option: streamupload
|
||||
#1922 - Enable noobj_cache by default
|
||||
#1927 - #2101 - New option: credlib and credlib_ops
|
||||
#1957 - Fixed a bug that regular files could not be created by mknod
|
||||
#1964 - Added stat information to the mount point
|
||||
#1970 - #1986 - Enable notsup_compat_dir by default
|
||||
#2000 - #2001 - Set mtime/ctime/atime of all objects as nanosecond
|
||||
#2065 - Compatible with OpenSSL 3.0
|
||||
#2075 - Added proxy and proxy_cred_file option
|
||||
#2135 - Changed to rename cache files when renaming large files
|
||||
#2148 - New option: bucket_size
|
||||
|
||||
Version 1.91 -- 07 Mar, 2022 (major changes only)
|
||||
#1753 - Fix RowFlush can not upload last part smaller than 5MB using NoCacheMultipartPost
|
||||
#1760 - Fix IAM role retrieval from IMDSv2
|
||||
#1801 - Add option to allow unsigned payloads
|
||||
#1809 - Fix mixupload return EntityTooSmall while a copypart is less than 5MB after split
|
||||
#1855 - Allow compilation on Windows via MSYS2
|
||||
#1868 - Handle utimensat UTIME_NOW and UTIME_OMIT special values
|
||||
#1871 - #1880 - Preserve sub-second precision in more situations
|
||||
#1879 - Always flush open files with O_CREAT flag
|
||||
#1887 - Fixed not to call Flush even if the file size is increased
|
||||
#1888 - Include climits to support musl libc
|
||||
|
||||
Version 1.90 -- 07 Aug, 2021 (major changes only)
|
||||
#1599 - Don't ignore nomultipart when storage is low
|
||||
#1600 - #1602 - #1604 - #1617 - #1619 - #1620 - #1623 - #1624 - Fix POSIX compatibility issues found by pjdfstest
|
||||
#1630 - Fail CheckBucket when S3 returns PermanentRedirect
|
||||
#1640 - #1655 - Do not create zero-byte object when creating file
|
||||
#1648 - Allow arbitrary size AWS secret keys
|
||||
#1668 - #1678 - Fix race conditions
|
||||
#1696 - Set explicit Content-Length: 0 when initiating MPU
|
||||
#1681 - Set CURLOPT_UNRESTRICTED_AUTH when authenticating
|
||||
#1723 - Add jitter to avoid thundering herd
|
||||
#1728 - Loosen CheckBucket to check only the bucket
|
||||
#1729 - Add support for AWS-style environment variables
|
||||
|
||||
Version 1.89 -- 22 Feb, 2021 (major changes only)
|
||||
#1520 - #1525 - #1534 - #1549 - Propagate S3 errors to errno more accurately
|
||||
#1546 - #1559 - Allow writing > 5 GB single-part objects supported by some non-AWS S3
|
||||
#1553 - #1555 - Allow configuration of multipart copy size and limit to 5 GB
|
||||
#1562 - Allow configuration of multipart upload threshold and reduce default to 25 MB
|
||||
#1565 - Set default stat timeout to 900 seconds correctly
|
||||
#1579 - #1582 - Fix data corruption while updating metadata with use_cache
|
||||
|
||||
Version 1.88 -- 4 Jan, 2021 (major changes only)
|
||||
#1349 - Fixed a bug about move file over limit of ensure space
|
||||
#1363 - #1366 - #1439 - Fix multiple race conditions
|
||||
#1365 - Dynamically determine whether lseek extended options are supported
|
||||
#1374 - Add support for deep archive storage class
|
||||
#1385 - Plug FdEntity leaks
|
||||
#1388 - Fix use_session_token option parsing
|
||||
#1392 - Allow 32-bit platforms to upload single-part objects > 2 GB
|
||||
#1404 - Fix dead lock in disk insufficient and optimize code
|
||||
#1408 - Ensure environment variable is set when using ECS
|
||||
#1413 - not call put headers if not exist pending meta
|
||||
#1425 - Do not send SSE headers during bucket creation
|
||||
#1432 - Add sigv4 only option
|
||||
#1437 - Add atime and correct atime/mtime/ctime operations
|
||||
#1447 - Fixed a bug that symlink could not be read after restarting s3fs
|
||||
#1448 - #1467 - Periodically flush written data to reduce temporary local storage
|
||||
#1449 - Added logfile option for non-syslog logging
|
||||
#1462 - Add AWS IMDSv2 support
|
||||
#1502 - #1503 - #1505 - Fix multiple issues when retrying requests
|
||||
|
||||
Version 1.87 -- 10 Aug, 2020 (major changes only)
|
||||
#1244 - use correct content-type when complete multipart upload
|
||||
#1265 - Fixed a bug of stats cache compression
|
||||
#1271 - Fixed the truncation bug of stat file for cache file
|
||||
#1274 - Improved strictness of cache file stats(file)
|
||||
#1277 - Fixed insufficient upload size for mix multipart upload
|
||||
#1282 - Warn about missing MIME types instead of exiting
|
||||
#1285 - Not abort process by exception threw from s3fs_strtoofft
|
||||
#1286 - Support Google Cloud Storage headers
|
||||
#1295 - Added a parameter to output body to curldbg option
|
||||
#1302 - Fix renames of open files with nocopyapi option
|
||||
#1303 - Relink cache stats file atomically via rename
|
||||
#1305 - Ignore case when comparing ETags
|
||||
#1306 - Retry with exponential backoff during 500 error
|
||||
#1312 - Fixed a bug about serializing from cache file
|
||||
#1313 - Fixed about ParallelMixMultipartUpload
|
||||
#1316 - Add support for glacier storage class
|
||||
#1319 - Fixed upload error about mixuploading sparse file and truncating file
|
||||
#1334 - Added SIGUSR1 option for cache file integrity test
|
||||
#1341 - Change default stat_cache_expire
|
||||
|
||||
Version 1.86 -- 04 Feb, 2020 (major changes only)
|
||||
#965 - enable various optimizations when using modern curl
|
||||
#1002 - allow SSE-C keys to have NUL bytes
|
||||
#1008 - add session token support
|
||||
#1039 - allow large files on 32-bit systems like Raspberry Pi
|
||||
#1049 - fix data corruption when external modification changes a cached object
|
||||
#1063 - fix data corruption when opening a second fd to an unflushed file
|
||||
#1066 - fix clock skew errors when writing large files
|
||||
#1081 - allow concurrent metadata queries during data operations
|
||||
#1098 - use server-side copy for partially modified files
|
||||
#1107 - #1108 - fix multiple concurrency issues
|
||||
#1199 - add requester_pays support
|
||||
#1209 - add symlink cache
|
||||
#1224 - add intelligent_ia storage tier
|
||||
|
||||
Version 1.85 -- 11 Mar, 2019
|
||||
#804 - add Backblaze B2
|
||||
#812 - Fix typo s/mutliple/multiple/
|
||||
#819 - #691: Made instructions for creating password file more obvious.
|
||||
#820 - Enable big writes if capable
|
||||
#826 - For RPM distributions fuse-libs is enough
|
||||
#831 - Add support for storage class ONEZONE_IA.
|
||||
#832 - Simplify hex conversion
|
||||
#833 - New installation instructions for Fedora >= 27 and CentOS7
|
||||
#834 - Improve template for issues
|
||||
#835 - Make the compilation instructions generic
|
||||
#840 - Replace all mentions to MacOS X to macOS
|
||||
#849 - Correct typo
|
||||
#851 - Correctly compare list_object_max_keys
|
||||
#852 - Allow credentials from ${HOME}/.aws/credentials
|
||||
#853 - Replace ~ with ${HOME} in examples
|
||||
#855 - Include StackOverflow in FAQs
|
||||
#856 - Add icon for s3fs
|
||||
#859 - Upload S3 parts without batching
|
||||
#861 - Add 'profile' option to command line help.
|
||||
#865 - fix multihead warning check
|
||||
#866 - Multi-arch support for ppc64le
|
||||
#870 - Correct typos in command-line parsing
|
||||
#874 - Address cppcheck 1.86 errors
|
||||
#877 - Check arguments and environment before .aws/creds
|
||||
#882 - [curl] Assume long encryption keys are base64 encoded
|
||||
#885 - Update s3fs_util.cpp for correspondence of Nextcloud contype
|
||||
#888 - Add Server Fault to FAQs
|
||||
#892 - Repair xattr tests
|
||||
#893 - Store and retrieve file change time
|
||||
#894 - Default uid/gid/mode when object lacks permissions
|
||||
#895 - Emit more friendly error for buckets with dots
|
||||
#898 - Flush file before renaming
|
||||
#899 - Tighten up HTTP response code check
|
||||
#900 - Plug memory leak
|
||||
#901 - Plug memory leaks
|
||||
#902 - Avoid pass-by-value when not necessary
|
||||
#903 - Prefer find(char) over find(const char *)
|
||||
#904 - Remove unnecessary calls to std::string::c_str
|
||||
#905 - Fix comparison in s3fs_strtoofft
|
||||
#906 - Prefer HTTPS links where possible
|
||||
#908 - Added an error message when HTTP 301 status
|
||||
#909 - Ignore after period character of floating point in x-amz-meta-mtime
|
||||
#910 - Added a missing extension to .gitignore, and formatted dot files
|
||||
#911 - Added detail error message when HTTP 301/307 status
|
||||
#912 - Automatic region change made possible other than us-east-1(default)
|
||||
#913 - Prefer abort over assert(false)
|
||||
#914 - Issue readdir HEAD requests without batching
|
||||
#917 - Reference better-known AWS CLI for compatibility
|
||||
#918 - Load tail range during overwrite
|
||||
#919 - Add test for mv non-empty directory
|
||||
#920 - Remove unnecessary string copies
|
||||
#921 - Remove redundant string initializations
|
||||
#923 - Reverted automatic region change and changed messages
|
||||
#924 - Prefer empty over size checks
|
||||
#925 - Remove redundant null checks before delete
|
||||
#926 - Accept paths with : in them
|
||||
#930 - Correct enable_content_md5 docs
|
||||
#931 - Correct sigv2 typo
|
||||
#932 - Prefer AutoLock for synchronization
|
||||
#933 - Remove mirror path when deleting cache
|
||||
#934 - Checked and corrected all typo
|
||||
#937 - Disable malloc_trim
|
||||
#938 - Remove unneeded void parameter
|
||||
#939 - Prefer specific [io]stringstream where possible
|
||||
#940 - Copy parts in parallel
|
||||
#942 - Ensure s3fs compiles with C++03
|
||||
#943 - Return not supported when hard linking
|
||||
#944 - Repair utility mode
|
||||
#946 - Simplify async request completion code
|
||||
#948 - Add logging for too many parts
|
||||
#949 - Implement exponential backoff for 503
|
||||
#950 - Added S3FS_MALLOC_TRIM build switch
|
||||
#951 - Added a non-interactive option to utility mode
|
||||
#952 - Automatically abort failed multipart requests
|
||||
#953 - Update s3ql link
|
||||
#954 - Clear containers instead of individual erases
|
||||
#955 - Address miscellaneous clang-tidy warnings
|
||||
#957 - Upgrade to S3Proxy 1.6.1
|
||||
#958 - Document lack of inotify support
|
||||
#959 - Fixed code for latest cppcheck error on OSX
|
||||
#960 - Wtf8
|
||||
#961 - Work around cppcheck warnings
|
||||
#965 - Improvement of curl session pool for multipart
|
||||
#967 - Increase FdEntity reference count when returning
|
||||
#969 - Fix lazy typo
|
||||
#970 - Remove from file from stat cache during rename
|
||||
#972 - Add instructions for Amazon Linux
|
||||
#974 - Changed the description order of man page options
|
||||
#975 - Fixed ref-count when error occurred.
|
||||
#977 - Make macOS instructions consistent with others
|
||||
|
||||
Version 1.84 -- Jul 8, 2018
|
||||
#704 - Update README.md with details about .passwd-s3fs
|
||||
#710 - add disk space reservation
|
||||
#712 - Added Cygwin build options
|
||||
#714 - reduce lock contention on file open
|
||||
#724 - don't fail multirequest on single thread error
|
||||
#726 - add an instance_name option for logging
|
||||
#727 - Fixed Travis CI error about cppcheck - #713
|
||||
#729 - FreeBSD build fixes
|
||||
#733 - More useful error message for dupe entries in passwd file
|
||||
#739 - cleanup curl handle state on retries
|
||||
#745 - don't fail mkdir when directory exists
|
||||
#753 - fix xpath selector in bucket listing
|
||||
#754 - Validate the URL format for http/https
|
||||
#755 - Added reset curl handle when returning to handle pool
|
||||
#756 - Optimize defaults
|
||||
#761 - Simplify installation for Ubuntu 16.04
|
||||
#762 - Upgrade to S3Proxy 1.6.0
|
||||
#763 - cleanup curl handles before curl share
|
||||
#764 - Remove false multihead warnings
|
||||
#765 - Add Debian installation instructions
|
||||
#766 - Remove s3fs-python
|
||||
#768 - Fixed memory leak
|
||||
#769 - Revert "enable FUSE read_sync by default"
|
||||
#774 - Option for IAM authentication endpoint
|
||||
#780 - gnutls_auth: initialize libgcrypt
|
||||
#781 - Fixed an error by cppcheck on OSX
|
||||
#786 - Log messages for 5xx and 4xx HTTP response code
|
||||
#789 - Instructions for SUSE and openSUSE prebuilt packages
|
||||
#793 - Added list_object_max_keys option based on #783 PR
|
||||
|
||||
Version 1.83 -- Dec 17, 2017
|
||||
#606 - Add Homebrew instructions
|
||||
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
|
||||
#609 - Group permission checks sometimes fail with large number of groups
|
||||
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
|
||||
#621 - Upgrade to S3Proxy 1.5.3
|
||||
#627 - Update README.md
|
||||
#630 - Added travis test on osx for #601
|
||||
#631 - Merged macosx branch into master branch #601
|
||||
#636 - Fix intermittent upload failures on macOS
|
||||
#637 - Add blurb about non-Amazon S3 implementations
|
||||
#638 - Minor fixes to README
|
||||
#639 - Update Homebrew instructions
|
||||
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
|
||||
#644 - Fixed with unnecessary equal in POST uploads url argument - #643
|
||||
#645 - Configure S3Proxy for SSL
|
||||
#646 - Simplify S3Proxy PID handling
|
||||
#652 - Fix s3fs_init message
|
||||
#659 - Do not fail updating directory when removing old-style object(ref #658)
|
||||
#660 - Refixed s3fs_init message(ref #652)
|
||||
#663 - Lock FdEntity when mutating orgmeta
|
||||
#664 - auth headers insertion refactoring
|
||||
#668 - Changed .travis.yml for fixing not found gpg2 on osx
|
||||
#669 - add IBM IAM authentication support
|
||||
#670 - Fixed a bug in S3fsCurl::LocateBundle
|
||||
#671 - Add support for ECS metadata endpoint
|
||||
#675 - Reduce use of preprocessor
|
||||
#676 - Move str definition from header to implementation
|
||||
#677 - Add s3proxy to .gitignore
|
||||
#679 - README.md Addition
|
||||
#681 - Changed functions about reading passwd file
|
||||
#684 - Correct signedness warning
|
||||
#686 - remove use of jsoncpp
|
||||
#688 - Improved use of temporary files - #678
|
||||
#690 - Added option ecs description to man page
|
||||
#692 - Updated template md files for issue and pr
|
||||
#695 - fix condition for parallel download
|
||||
#697 - Fixing race condition in FdEntity::GetStats
|
||||
#699 - Fix dbglevel usage
|
||||
|
||||
Version 1.82 -- May 13, 2017
|
||||
#597 - Not fallback to HTTP - #596
|
||||
#598 - Updated ChangeLog and configure.ac for release 1.82
|
||||
|
||||
Version 1.81 -- May 13, 2017
|
||||
#426 - Updated to correct ChangeLog
|
||||
#431 - fix typo s/controll/control/
|
||||
@ -37,7 +333,7 @@ Version 1.81 -- May 13, 2017
|
||||
#540 - Address cppcheck 1.77 warnings
|
||||
#545 - Changed base cached time of stat_cache_expire option - #523
|
||||
#546 - Fixed double initialization of SSL library at foreground
|
||||
#550 - Add umount instruction for unplivileged user
|
||||
#550 - Add umount instruction for unprivileged user
|
||||
#551 - Updated stat_cache_expire option description - #545
|
||||
#552 - switch S3fsMultiCurl to use foreground threads
|
||||
#553 - add TLS cipher suites customization
|
||||
@ -60,6 +356,7 @@ Version 1.81 -- May 13, 2017
|
||||
#590 - Updated man page for default_acl option - #567
|
||||
#593 - Backward compatible for changing default transport to HTTPS
|
||||
#594 - Check bucket at public bucket and add nocopyapi option automatically
|
||||
#595 - Updated ChangeLog and configure.ac for release 1.81
|
||||
|
||||
Version 1.80 -- May 29, 2016
|
||||
#213 - Parse ETag from copy multipart correctly
|
||||
@ -89,7 +386,7 @@ Version 1.80 -- May 29, 2016
|
||||
#250 - s3fs can print version with short commit hash - #228
|
||||
#251 - Skip xattr tests if utilities are missing
|
||||
#252 - This fixes an issue with caching when the creation of a subdirectory …
|
||||
#253 - Added chacking cache dir perms at starting.
|
||||
#253 - Added checking cache dir perms at starting.
|
||||
#256 - Add no atomic rename to limitations
|
||||
#257 - Update README.md: Bugfix password file permissions errors
|
||||
#258 - Update README.md to better explain mount upon boot
|
||||
@ -117,7 +414,7 @@ Version 1.80 -- May 29, 2016
|
||||
#306 - Fix read concurrency to work in parallel count
|
||||
#307 - Fix pthread portability problem
|
||||
#308 - Changed ensure free disk space as additional change for #306
|
||||
#309 - Check pthread prtability in configure as additional change for #307
|
||||
#309 - Check pthread portability in configure as additional change for #307
|
||||
#310 - Update integration-test-main.sh as additional change for #300
|
||||
#311 - Change error log to debug log in s3fs_read()
|
||||
#313 - fix gitignore
|
||||
@ -129,14 +426,14 @@ Version 1.80 -- May 29, 2016
|
||||
#330 - Pass by const reference where possible
|
||||
#331 - Address various clang warnings
|
||||
#334 - Bucket host should include port and not path
|
||||
#336 - update REAME.md for fstab
|
||||
#336 - update README.md for fstab
|
||||
#338 - Fixed a bug about IAMCRED type could not be retried.
|
||||
#339 - Updated README.md for fstab example.
|
||||
#341 - Fix the memory leak issue in fdcache.
|
||||
#346 - Fix empty directory check against AWS S3
|
||||
#348 - Integration test summary, continue on error
|
||||
#350 - Changed cache out logic for stat - #340
|
||||
#351 - Check cache dirctory path and attributes - #347
|
||||
#351 - Check cache directory path and attributes - #347
|
||||
#352 - Remove stat file cache dir if specified del_cache - #337
|
||||
#354 - Supported regex type for additional header format - #343
|
||||
#355 - Fixed codes about clock_gettime for osx
|
||||
@ -213,7 +510,7 @@ issue #184 - Add usage information for multipart_size
|
||||
issue #185 - Correct obvious typos in usage and README
|
||||
issue #190 - Add a no_check_certificate option.
|
||||
issue #194 - Tilda in a file-name breaks things (EPERM)
|
||||
issue #198 - Disasble integration tests for Travis
|
||||
issue #198 - Disable integration tests for Travis
|
||||
issue #199 - Supported extended attributes(retry)
|
||||
issue #200 - fixed fallback to sigv2 for bucket create and GCS
|
||||
issue #202 - Specialize {set,get}xattr for OS X
|
||||
@ -248,99 +545,99 @@ issue #3 - Fixed local timezone was incorrectly being applied to IAM and Last-Mo
|
||||
issue #4 - Fix compilation error on MacOSX with missing const
|
||||
|
||||
Version 1.74 -- Nov 24, 2013
|
||||
This version is initial version on Github, same as on GoogleCodes(s3fs).
|
||||
This version is initial version on GitHub, same as on GoogleCodes(s3fs).
|
||||
https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
|
||||
see more detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
|
||||
see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
|
||||
|
||||
Version 1.73 -- Aug 23, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
|
||||
|
||||
Version 1.72 -- Aug 10, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
|
||||
|
||||
Version 1.71 -- Jun 15, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
|
||||
|
||||
Version 1.70 -- Jun 01, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
|
||||
|
||||
Version 1.69 -- May 15, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
|
||||
|
||||
Version 1.68 -- Apr 30, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
|
||||
|
||||
Version 1.67 -- Apr 13, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
|
||||
|
||||
Version 1.66 -- Apr 06, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
|
||||
|
||||
Version 1.65 -- Mar 30, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
|
||||
|
||||
Version 1.64 -- Mar 23, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
|
||||
|
||||
Version 1.63 -- Feb 24, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
|
||||
|
||||
Version 1.62 -- Jan 27, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
|
||||
|
||||
Version 1.61 -- Aug 30, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
|
||||
|
||||
Version 1.60 -- Aug 29, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
|
||||
|
||||
Version 1.59 -- Jul 28, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
|
||||
|
||||
Version 1.58 -- Jul 19, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
|
||||
|
||||
Version 1.57 -- Jul 07, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
|
||||
|
||||
Version 1.56 -- Jul 07, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
|
||||
|
||||
Version 1.55 -- Jul 02, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
|
||||
|
||||
Version 1.54 -- Jun 25, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
|
||||
|
||||
Version 1.53 -- Jun 22, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
|
||||
|
||||
Version 1.40 -- Feb 11, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
|
||||
|
||||
Version 1.33 -- Dec 30, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
|
||||
|
||||
Version 1.25 -- Dec 16, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
|
||||
|
||||
Version 1.19 -- Dec 2, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
|
||||
|
||||
Version 1.16 -- Nov 22, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
|
||||
|
||||
Version 1.10 -- Nov 6, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
|
||||
|
||||
Version 1.02 -- Oct 29, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
|
||||
|
||||
Version 1.01 -- Oct 28, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
|
||||
|
||||
Version 1.0 -- Oct 24, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
|
||||
|
||||
------
|
||||
Version 1.1 -- Mon Oct 18 2010
|
||||
|
||||
2
INSTALL
2
INSTALL
@ -124,7 +124,7 @@ architecture at a time in the source code directory. After you have
|
||||
installed the package for one architecture, use `make distclean' before
|
||||
reconfiguring for another architecture.
|
||||
|
||||
On MacOS X 10.5 and later systems, you can create libraries and
|
||||
On macOS 10.5 and later systems, you can create libraries and
|
||||
executables that work on multiple system types--known as "fat" or
|
||||
"universal" binaries--by specifying multiple `-arch' options to the
|
||||
compiler but only a single `-arch' option to the preprocessor. Like
|
||||
|
||||
62
Makefile.am
62
Makefile.am
@ -17,6 +17,7 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
|
||||
SUBDIRS=src test doc
|
||||
|
||||
EXTRA_DIST=doc default_commit_hash
|
||||
@ -28,14 +29,65 @@ dist-hook:
|
||||
release : dist ../utils/release.sh
|
||||
../utils/release.sh $(DIST_ARCHIVES)
|
||||
|
||||
.PHONY: cppcheck shellcheck
|
||||
|
||||
clang-tidy:
|
||||
make -C src/ clang-tidy
|
||||
make -C test/ clang-tidy
|
||||
|
||||
cppcheck:
|
||||
cppcheck --quiet --error-exitcode=1 \
|
||||
--inline-suppr \
|
||||
--std=c++03 \
|
||||
--std=@CPP_VERSION@ \
|
||||
--xml \
|
||||
-D HAVE_ATTR_XATTR_H \
|
||||
-D HAVE_SYS_EXTATTR_H \
|
||||
-D HAVE_MALLOC_TRIM \
|
||||
-U CURLE_PEER_FAILED_VERIFICATION \
|
||||
-U P_tmpdir \
|
||||
--enable=all \
|
||||
-U ENOATTR \
|
||||
--enable=warning,style,information,missingInclude \
|
||||
--suppress=missingIncludeSystem \
|
||||
--suppress=unusedFunction \
|
||||
--suppress=variableScope \
|
||||
--suppress=unmatchedSuppression \
|
||||
--suppress=useStlAlgorithm \
|
||||
--suppress=checkLevelNormal \
|
||||
--suppress=normalCheckLevelMaxBranches \
|
||||
--addon=test/map-subscript-read.py \
|
||||
src/ test/
|
||||
|
||||
#
|
||||
# ShellCheck
|
||||
#
|
||||
SHELLCHECK_CMD = shellcheck
|
||||
SHELLCHECK_SH_OPT = --shell=sh
|
||||
SHELLCHECK_BASH_OPT = --shell=bash
|
||||
|
||||
# [NOTE]
|
||||
# To control error warnings as a whole, specify the "SC<number>" with the following variables.
|
||||
#
|
||||
SHELLCHECK_COMMON_IGN = --exclude=SC1091
|
||||
SHELLCHECK_CUSTOM_IGN = --exclude=SC1091
|
||||
|
||||
shellcheck:
|
||||
@if type shellcheck > /dev/null 2>&1; then \
|
||||
echo "* ShellCheck version"; \
|
||||
$(SHELLCHECK_CMD) --version; \
|
||||
echo ""; \
|
||||
echo "* Check all sh files with ShellCheck"; \
|
||||
LC_ALL=C.UTF-8 $(SHELLCHECK_CMD) $(SHELLCHECK_SH_OPT) $(SHELLCHECK_COMMON_IGN) $$(grep '#![[:space:]]*/bin/sh' $$(find . -type f -name \*.sh) | sed -e 's|^\(.*\):#\!.*$$|\1|g') || exit 1; \
|
||||
echo "-> No error was detected."; \
|
||||
echo ""; \
|
||||
echo "* Check all bash files with ShellCheck"; \
|
||||
LC_ALL=C.UTF-8 $(SHELLCHECK_CMD) $(SHELLCHECK_BASH_OPT) $(SHELLCHECK_COMMON_IGN) $$(grep '#![[:space:]]*/bin/bash' $$(find . -type f -name \*.sh) | sed -e 's|^\(.*\):#\!.*$$|\1|g') || exit 1; \
|
||||
echo "-> No error was detected."; \
|
||||
else \
|
||||
echo "* ShellCheck is not installed, so skip this."; \
|
||||
fi
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
191
README.md
191
README.md
@ -1,15 +1,19 @@
|
||||
s3fs
|
||||
====
|
||||
# s3fs
|
||||
|
||||
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
|
||||
[](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
|
||||
s3fs allows Linux, macOS, and FreeBSD to mount an S3 bucket via [FUSE(Filesystem in Userspace)](https://github.com/libfuse/libfuse).
|
||||
s3fs makes you operate files and directories in S3 bucket like a local file system.
|
||||
s3fs preserves the native object format for files, allowing use of other tools like [AWS CLI](https://github.com/aws/aws-cli).
|
||||
|
||||
Features
|
||||
--------
|
||||
[](https://github.com/s3fs-fuse/s3fs-fuse/actions/workflows/ci.yml)
|
||||
[](https://twitter.com/s3fsfuse)
|
||||
|
||||

|
||||
|
||||
## Features
|
||||
|
||||
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
|
||||
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
|
||||
* compatible with Amazon S3, and other [S3-based object stores](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3)
|
||||
* allows random writes and appends
|
||||
* large files via multi-part upload
|
||||
* renames via server-side copy
|
||||
* optional server-side encryption
|
||||
@ -19,109 +23,162 @@ Features
|
||||
* user-specified regions, including Amazon GovCloud
|
||||
* authenticate via v2 or v4 signatures
|
||||
|
||||
Installation
|
||||
------------
|
||||
## Installation
|
||||
|
||||
Ensure you have all the dependencies:
|
||||
Many systems provide pre-built packages:
|
||||
|
||||
On Ubuntu 14.04:
|
||||
* Amazon Linux via EPEL:
|
||||
|
||||
```
|
||||
sudo amazon-linux-extras install epel
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* Arch Linux:
|
||||
|
||||
```
|
||||
sudo pacman -S s3fs-fuse
|
||||
```
|
||||
|
||||
* Debian 9 and Ubuntu 16.04 or newer:
|
||||
|
||||
```
|
||||
sudo apt install s3fs
|
||||
```
|
||||
|
||||
* Fedora 27 or newer:
|
||||
|
||||
```
|
||||
sudo dnf install s3fs-fuse
|
||||
```
|
||||
|
||||
* Gentoo:
|
||||
|
||||
```
|
||||
sudo emerge net-fs/s3fs
|
||||
```
|
||||
|
||||
* RHEL and CentOS 7 or newer via EPEL:
|
||||
|
||||
```
|
||||
sudo yum install epel-release
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* SUSE 12 and openSUSE 42.1 or newer:
|
||||
|
||||
```
|
||||
sudo zypper install s3fs
|
||||
```
|
||||
|
||||
* macOS 10.12 and newer via [Homebrew](https://brew.sh/):
|
||||
|
||||
```
|
||||
brew install --cask macfuse
|
||||
brew install gromgit/fuse/s3fs-mac
|
||||
```
|
||||
|
||||
* FreeBSD:
|
||||
|
||||
```
|
||||
pkg install fusefs-s3fs
|
||||
```
|
||||
|
||||
* Windows:
|
||||
|
||||
Windows has its own install, seening in [this link](COMPILATION.md)
|
||||
|
||||
Otherwise consult the [compilation instructions](COMPILATION.md).
|
||||
|
||||
## Examples
|
||||
|
||||
s3fs supports the standard
|
||||
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
|
||||
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
|
||||
Finally s3fs recognizes the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN`
|
||||
environment variables.
|
||||
|
||||
The default location for the s3fs password file can be created:
|
||||
|
||||
* using a `.passwd-s3fs` file in the users home directory (i.e. `${HOME}/.passwd-s3fs`)
|
||||
* using the system-wide `/etc/passwd-s3fs` file
|
||||
|
||||
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
|
||||
owner-only permissions:
|
||||
|
||||
```
|
||||
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
|
||||
```
|
||||
|
||||
On CentOS 7:
|
||||
|
||||
```
|
||||
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
|
||||
```
|
||||
|
||||
Compile from master via the following commands:
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Enter your S3 identity and credential in a file `/path/to/passwd`:
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
|
||||
```
|
||||
|
||||
Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:
|
||||
|
||||
```
|
||||
chmod 600 /path/to/passwd
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
|
||||
chmod 600 ${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
If you encounter any errors, enable debug output:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
|
||||
```
|
||||
|
||||
You can also mount on boot by entering the following line to `/etc/fstab`:
|
||||
|
||||
```
|
||||
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
|
||||
|
||||
or
|
||||
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
|
||||
```
|
||||
|
||||
If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
|
||||
```
|
||||
|
||||
or(fstab)
|
||||
|
||||
```
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
|
||||
```
|
||||
|
||||
Note: You may also want to create the global credential file first
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
|
||||
chmod 600 /etc/passwd-s3fs
|
||||
```
|
||||
|
||||
Note2: You may also need to make sure `netfs` service is start on boot
|
||||
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
## Limitations
|
||||
|
||||
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
|
||||
|
||||
* random writes or appends to files require rewriting the entire file
|
||||
* random writes or appends to files require rewriting the entire object, optimized with multi-part upload copy
|
||||
* metadata operations such as listing directories have poor performance due to network latency
|
||||
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
|
||||
* non-AWS providers may have [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) so reads can temporarily yield stale data (AWS offers read-after-write consistency [since Dec 2020](https://aws.amazon.com/about-aws/whats-new/2020/12/amazon-s3-now-delivers-strong-read-after-write-consistency-automatically-for-all-applications/))
|
||||
* no atomic renames of files or directories
|
||||
* no coordination between multiple clients mounting the same bucket
|
||||
* no hard links
|
||||
* inotify detects only local modifications, not external ones by other clients or tools
|
||||
|
||||
References
|
||||
----------
|
||||
## References
|
||||
|
||||
* [CSI for S3](https://github.com/ctrox/csi-s3) - Kubernetes CSI driver
|
||||
* [docker-s3fs-client](https://github.com/efrecon/docker-s3fs-client) - Docker image containing s3fs
|
||||
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
|
||||
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
|
||||
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
|
||||
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
|
||||
|
||||
Frequently Asked Questions
|
||||
--------------------------
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
## Frequently Asked Questions
|
||||
|
||||
License
|
||||
-------
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
|
||||
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)
|
||||
|
||||
## License
|
||||
|
||||
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
|
||||
|
||||
|
||||
33
autogen.sh
33
autogen.sh
@ -1,5 +1,5 @@
|
||||
#! /bin/sh
|
||||
|
||||
#!/bin/sh
|
||||
#
|
||||
# This file is part of S3FS.
|
||||
#
|
||||
# Copyright 2009, 2010 Free Software Foundation, Inc.
|
||||
@ -21,21 +21,24 @@
|
||||
|
||||
echo "--- Make commit hash file -------"
|
||||
|
||||
SHORTHASH="unknown"
|
||||
type git > /dev/null 2>&1
|
||||
if [ $? -eq 0 -a -d .git ]; then
|
||||
RESULT=`git rev-parse --short HEAD`
|
||||
if [ $? -eq 0 ]; then
|
||||
SHORTHASH=${RESULT}
|
||||
fi
|
||||
SHORTHASH=""
|
||||
if command -v git > /dev/null 2>&1 && test -d .git; then
|
||||
if SHORTHASH=$(git rev-parse --short HEAD); then
|
||||
echo " -> Git commit hash : ${SHORTHASH}"
|
||||
else
|
||||
echo " -> Not get git commit hash"
|
||||
fi
|
||||
else
|
||||
echo " -> Not found git command or .git directory"
|
||||
fi
|
||||
echo ${SHORTHASH} > default_commit_hash
|
||||
echo "${SHORTHASH}" > default_commit_hash
|
||||
|
||||
echo "--- Finished commit hash file ---"
|
||||
|
||||
echo "--- Start autotools -------------"
|
||||
|
||||
aclocal \
|
||||
autoupdate \
|
||||
&& aclocal \
|
||||
&& autoheader \
|
||||
&& automake --add-missing \
|
||||
&& autoconf
|
||||
@ -44,3 +47,11 @@ echo "--- Finished autotools ----------"
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
155
configure.ac
155
configure.ac
@ -19,11 +19,11 @@
|
||||
######################################################################
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
AC_PREREQ(2.59)
|
||||
AC_INIT(s3fs, 1.81)
|
||||
AC_PREREQ([2.69])
|
||||
AC_INIT([s3fs],[1.95])
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
AC_CANONICAL_TARGET
|
||||
AM_INIT_AUTOMAKE([foreign])
|
||||
|
||||
AC_PROG_CXX
|
||||
@ -32,16 +32,26 @@ AC_PROG_CC
|
||||
AC_CHECK_HEADERS([sys/xattr.h])
|
||||
AC_CHECK_HEADERS([attr/xattr.h])
|
||||
AC_CHECK_HEADERS([sys/extattr.h])
|
||||
AC_CHECK_FUNCS([fallocate])
|
||||
|
||||
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
|
||||
CPP_VERSION=c++14
|
||||
AC_SUBST([CPP_VERSION])
|
||||
|
||||
CXXFLAGS="-Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=3 -std=$CPP_VERSION $CXXFLAGS"
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl For OSX
|
||||
dnl For macOS
|
||||
dnl ----------------------------------------------
|
||||
case "$target" in
|
||||
*-cygwin* )
|
||||
# Do something specific for windows using winfsp
|
||||
CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
|
||||
min_fuse_version=2.8
|
||||
;;
|
||||
*-darwin* )
|
||||
# Do something specific for mac
|
||||
min_fuse_version=2.7.3
|
||||
min_fuse_t_version=1.0.20
|
||||
;;
|
||||
*)
|
||||
# Default Case
|
||||
@ -50,11 +60,24 @@ case "$target" in
|
||||
;;
|
||||
esac
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl Checking the FUSE library
|
||||
dnl ----------------------------------------------
|
||||
dnl Distinguish between Linux (libfuse) and macOS (FUSE-T).
|
||||
dnl
|
||||
found_fuse_t=no
|
||||
PKG_CHECK_MODULES([FUSE_T], [fuse-t >= ${min_fuse_t_version}], [found_fuse_t=yes], [found_fuse_t=no])
|
||||
|
||||
AS_IF([test "$found_fuse_t" = "yes"],
|
||||
[PKG_CHECK_MODULES([fuse_library_checking], [fuse-t >= ${min_fuse_t_version}])],
|
||||
[PKG_CHECK_MODULES([fuse_library_checking], [fuse >= ${min_fuse_version}])])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl Choice SSL library
|
||||
dnl ----------------------------------------------
|
||||
auth_lib=na
|
||||
nettle_lib=no
|
||||
use_openssl_30=no
|
||||
|
||||
dnl
|
||||
dnl nettle library
|
||||
@ -174,15 +197,24 @@ AS_IF(
|
||||
|
||||
dnl
|
||||
dnl For PKG_CONFIG before checking nss/gnutls.
|
||||
dnl this is redundant checking, but we need checking before following.
|
||||
dnl
|
||||
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
|
||||
|
||||
AC_MSG_CHECKING([compile s3fs with])
|
||||
case "${auth_lib}" in
|
||||
openssl)
|
||||
AC_MSG_RESULT(OpenSSL)
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
|
||||
AS_IF([test "$found_fuse_t" = "yes"],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])])
|
||||
|
||||
AC_MSG_CHECKING([openssl 3.0 or later])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <openssl/opensslv.h>
|
||||
#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x30000000L
|
||||
#error "found openssl is 3.0 or later(so compiling is stopped with error)"
|
||||
#endif]], [[]])],
|
||||
[AC_MSG_RESULT(no)],
|
||||
[AC_MSG_RESULT(yes); use_openssl_30=yes])
|
||||
;;
|
||||
gnutls)
|
||||
AC_MSG_RESULT(GnuTLS-gcrypt)
|
||||
@ -191,7 +223,9 @@ gnutls)
|
||||
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
|
||||
AS_IF([test $gnutls_nettle = 0],
|
||||
[
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
|
||||
AS_IF([test "$found_fuse_t" = "yes"],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])])
|
||||
LIBS="-lgnutls -lgcrypt $LIBS"
|
||||
AC_MSG_CHECKING([gnutls is build with])
|
||||
AC_MSG_RESULT(gcrypt)
|
||||
@ -205,7 +239,9 @@ nettle)
|
||||
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
|
||||
AS_IF([test $gnutls_nettle = 1],
|
||||
[
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
|
||||
AS_IF([test "$found_fuse_t" = "yes"],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])])
|
||||
LIBS="-lgnutls -lnettle $LIBS"
|
||||
AC_MSG_CHECKING([gnutls is build with])
|
||||
AC_MSG_RESULT(nettle)
|
||||
@ -214,7 +250,9 @@ nettle)
|
||||
;;
|
||||
nss)
|
||||
AC_MSG_RESULT(NSS)
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
|
||||
AS_IF([test "$found_fuse_t" = "yes"],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse-t >= ${min_fuse_t_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])],
|
||||
[PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])])
|
||||
;;
|
||||
*)
|
||||
AC_MSG_ERROR([unknown ssl library type.])
|
||||
@ -222,6 +260,7 @@ nss)
|
||||
esac
|
||||
|
||||
AM_CONDITIONAL([USE_SSL_OPENSSL], [test "$auth_lib" = openssl])
|
||||
AM_CONDITIONAL([USE_SSL_OPENSSL_30], [test "$use_openssl_30" = yes])
|
||||
AM_CONDITIONAL([USE_SSL_GNUTLS], [test "$auth_lib" = gnutls -o "$auth_lib" = nettle])
|
||||
AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
|
||||
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
|
||||
@ -232,7 +271,7 @@ dnl ----------------------------------------------
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS([malloc_trim])
|
||||
|
||||
dnl clock_gettime function(osx)
|
||||
dnl clock_gettime function(macos)
|
||||
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
|
||||
AC_CHECK_FUNCS([clock_gettime])
|
||||
|
||||
@ -259,24 +298,94 @@ AC_COMPILE_IFELSE(
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl check CURLoption
|
||||
dnl ----------------------------------------------
|
||||
dnl CURLOPT_TCP_KEEPALIVE (is supported by 7.25.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_TCP_KEEPALIVE])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_TCP_KEEPALIVE;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 1, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 0, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_SSL_ENABLE_ALPN (is supported by 7.36.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_SSL_ENABLE_ALPN])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_SSL_ENABLE_ALPN;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 1, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 0, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_KEEP_SENDING_ON_ERROR (is supported by 7.51.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_KEEP_SENDING_ON_ERROR])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_KEEP_SENDING_ON_ERROR;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 1, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 0, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl dl library
|
||||
dnl ----------------------------------------------
|
||||
AC_CHECK_LIB([dl], [dlopen, dlclose, dlerror, dlsym], [], [AC_MSG_ERROR([Could not found dlopen, dlclose, dlerror and dlsym])])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl build date
|
||||
dnl ----------------------------------------------
|
||||
AC_SUBST([MAN_PAGE_DATE], [$(date -r doc/man/s3fs.1.in +"%B %Y")])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl output files
|
||||
dnl ----------------------------------------------
|
||||
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
|
||||
AC_CONFIG_FILES(Makefile
|
||||
src/Makefile
|
||||
test/Makefile
|
||||
doc/Makefile
|
||||
doc/man/s3fs.1)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl short commit hash
|
||||
dnl ----------------------------------------------
|
||||
AC_CHECK_PROG([GITCMD], [git —version], [yes], [no])
|
||||
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
|
||||
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
|
||||
AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])
|
||||
|
||||
AC_MSG_CHECKING([github short commit hash])
|
||||
if test “x${GITCMD}” = “xyes” -a “x${DOTGITDIR}” = “xyes”; then
|
||||
GITCOMMITHASH=`git rev-parse --short HEAD`
|
||||
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
|
||||
TMP_GITCOMMITHASH=`git rev-parse --short HEAD`
|
||||
UNTRACKED_FILES=`git status -s --untracked-files=no`
|
||||
if test -n "${UNTRACKED_FILES}"; then
|
||||
GITCOMMITHASH="(commit:${TMP_GITCOMMITHASH} +untracked files)"
|
||||
else
|
||||
GITCOMMITHASH="(commit:${TMP_GITCOMMITHASH})"
|
||||
fi
|
||||
elif test -f default_commit_hash; then
|
||||
GITCOMMITHASH=`cat default_commit_hash`
|
||||
TMP_GITCOMMITHASH=`cat default_commit_hash`
|
||||
if test -n "${TMP_GITCOMMITHASH}"; then
|
||||
GITCOMMITHASH="(base commit:${TMP_GITCOMMITHASH})"
|
||||
else
|
||||
GITCOMMITHASH=""
|
||||
fi
|
||||
else
|
||||
GITCOMMITHASH="unknown"
|
||||
GITCOMMITHASH=""
|
||||
fi
|
||||
AC_MSG_RESULT([${GITCOMMITHASH}])
|
||||
|
||||
@ -291,3 +400,11 @@ dnl ----------------------------------------------
|
||||
dnl end configuration
|
||||
dnl ----------------------------------------------
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
297
doc/man/s3fs.1
297
doc/man/s3fs.1
@ -1,297 +0,0 @@
|
||||
.TH S3FS "1" "February 2011" "S3FS" "User Commands"
|
||||
.SH NAME
|
||||
S3FS \- FUSE-based file system backed by Amazon S3
|
||||
.SH SYNOPSIS
|
||||
.SS mounting
|
||||
.TP
|
||||
\fBs3fs bucket[:/path] mountpoint \fP [options]
|
||||
.TP
|
||||
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
|
||||
.SS unmounting
|
||||
.TP
|
||||
\fBumount mountpoint
|
||||
For root.
|
||||
.TP
|
||||
\fBfusermount -u mountpoint
|
||||
For unprivileged user.
|
||||
.SS utility mode ( remove interrupted multipart uploading objects )
|
||||
.TP
|
||||
\fBs3fs \-u bucket
|
||||
.SH DESCRIPTION
|
||||
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
|
||||
.SH AUTHENTICATION
|
||||
The s3fs password file has this format (use this format if you have only one set of credentials):
|
||||
.RS 4
|
||||
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
.RE
|
||||
|
||||
If you have more than one set of credentials, this syntax is also recognized:
|
||||
.RS 4
|
||||
\fBbucketName\fP:\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
.RE
|
||||
.PP
|
||||
Password files can be stored in two locations:
|
||||
.RS 4
|
||||
\fB/etc/passwd-s3fs\fP [0640]
|
||||
\fB$HOME/.passwd-s3fs\fP [0600]
|
||||
.RE
|
||||
.SH OPTIONS
|
||||
.SS "general options"
|
||||
.TP
|
||||
\fB\-h\fR \fB\-\-help\fR
|
||||
print help
|
||||
.TP
|
||||
\fB\ \fR \fB\-\-version\fR
|
||||
print version
|
||||
.TP
|
||||
\fB\-f\fR
|
||||
FUSE foreground option - do not run as daemon.
|
||||
.TP
|
||||
\fB\-s\fR
|
||||
FUSE singlethreaded option (disables multi-threaded operation)
|
||||
.SS "mount options"
|
||||
.TP
|
||||
All s3fs options must given in the form where "opt" is:
|
||||
<option_name>=<option_value>
|
||||
.TP
|
||||
\fB\-o\fR bucket
|
||||
if it is not specified bucket name(and path) in command line, must specify this option after \-o option for bucket name.
|
||||
.TP
|
||||
\fB\-o\fR default_acl (default="private")
|
||||
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
|
||||
empty string means do not send header.
|
||||
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
|
||||
.TP
|
||||
\fB\-o\fR retries (default="2")
|
||||
number of times to retry a failed S3 transaction.
|
||||
.TP
|
||||
\fB\-o\fR use_cache (default="" which means disabled)
|
||||
local folder to use for local file cache.
|
||||
.TP
|
||||
\fB\-o\fR check_cache_dir_exist (default is disable)
|
||||
If use_cache is set, check if the cache directory exists.
|
||||
If this option is not specified, it will be created at runtime when the cache directory does not exist.
|
||||
.TP
|
||||
\fB\-o\fR del_cache - delete local file cache
|
||||
delete local file cache when s3fs starts and exits.
|
||||
.TP
|
||||
\fB\-o\fR storage_class (default is standard)
|
||||
store object with specified storage class.
|
||||
this option replaces the old option use_rrs.
|
||||
Possible values: standard, standard_ia, and reduced_redundancy.
|
||||
.TP
|
||||
\fB\-o\fR use_rrs (default is disable)
|
||||
use Amazon's Reduced Redundancy Storage.
|
||||
this option can not be specified with use_sse.
|
||||
(can specify use_rrs=1 for old version)
|
||||
this option has been replaced by new storage_class option.
|
||||
.TP
|
||||
\fB\-o\fR use_sse (default is disable)
|
||||
Specify three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
|
||||
You can specify "use_sse" or "use_sse=1" enables SSE-S3 type (use_sse=1 is old type parameter).
|
||||
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>"(only <custom key file path> specified is old type parameter).
|
||||
You can use "c" for short "custom".
|
||||
The custom key file must be 600 permission. The file can have some lines, each line is one SSE-C key.
|
||||
The first line in file is used as Customer-Provided Encryption Keys for uploading and changing headers etc.
|
||||
If there are some keys after first line, those are used downloading object which are encrypted by not first key.
|
||||
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
|
||||
This option is used to decide the SSE type.
|
||||
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
|
||||
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
|
||||
You can use "k" for short "kmsid".
|
||||
If you want to specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
|
||||
If you specify only "kmsid"("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
|
||||
You must be careful about that you can not use the KMS id which is not same EC2 region.
|
||||
.TP
|
||||
\fB\-o\fR load_sse_c - specify SSE-C keys
|
||||
Specify the custom-provided encryption keys file path for decrypting at downloading.
|
||||
If you use the custom-provided encryption key at uploading, you specify with "use_sse=custom".
|
||||
The file has many lines, one line means one custom key.
|
||||
So that you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
AWSSSECKEYS environment is as same as this file contents.
|
||||
.TP
|
||||
\fB\-o\fR passwd_file (default="")
|
||||
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
|
||||
.TP
|
||||
\fB\-o\fR ahbe_conf (default="" which means disabled)
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file(object) extension.
|
||||
The configuration file format is below:
|
||||
-----------
|
||||
line = [file suffix or regex] HTTP-header [HTTP-values]
|
||||
file suffix = file(object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
|
||||
regex = regular expression to match the file(object) path. this type starts with "reg:" prefix.
|
||||
HTTP-header = additional HTTP header name
|
||||
HTTP-values = additional HTTP header value
|
||||
-----------
|
||||
Sample:
|
||||
-----------
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
|
||||
-----------
|
||||
A sample configuration file is uploaded in "test" directory.
|
||||
If you specify this option for set "Content-Encoding" HTTP header, please take care for RFC 2616.
|
||||
.TP
|
||||
\fB\-o\fR public_bucket (default="" which means disabled)
|
||||
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
|
||||
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
|
||||
.TP
|
||||
\fB\-o\fR connect_timeout (default="300" seconds)
|
||||
time to wait for connection before giving up.
|
||||
.TP
|
||||
\fB\-o\fR readwrite_timeout (default="60" seconds)
|
||||
time to wait between read/write activity before giving up.
|
||||
.TP
|
||||
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
|
||||
maximum number of entries in the stat cache
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_expire (default is no expire)
|
||||
specify expire time(seconds) for entries in the stat cache. This expire time indicates the time since stat cached.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_interval_expire (default is no expire)
|
||||
specify expire time(seconds) for entries in the stat cache. This expire time is based on the time from the last access time of the stat cache.
|
||||
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
|
||||
.TP
|
||||
\fB\-o\fR enable_noobj_cache (default is disable)
|
||||
enable cache entries for the object which does not exist.
|
||||
s3fs always has to check whether file(or sub directory) exists under object(path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
|
||||
It increases ListBucket request and makes performance bad.
|
||||
You can specify this option for performance, s3fs memorizes in stat cache that the object(file or directory) does not exist.
|
||||
.TP
|
||||
\fB\-o\fR no_check_certificate (by default this option is disabled)
|
||||
do not check ssl certificate.
|
||||
server certificate won't be checked against the available certificate authorities.
|
||||
.TP
|
||||
\fB\-o\fR nodnscache - disable dns cache.
|
||||
s3fs always uses the DNS cache; this option disables the DNS cache.
|
||||
.TP
|
||||
\fB\-o\fR nosscache - disable ssl session cache.
|
||||
s3fs always uses the SSL session cache; this option disables the SSL session cache.
|
||||
.TP
|
||||
\fB\-o\fR multireq_max (default="20")
|
||||
maximum number of parallel request for listing objects.
|
||||
.TP
|
||||
\fB\-o\fR parallel_count (default="5")
|
||||
number of parallel request for uploading big objects.
|
||||
s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
|
||||
This option limits parallel request count which s3fs requests at once.
|
||||
It is necessary to set this value depending on a CPU and a network band.
|
||||
.TP
|
||||
\fB\-o\fR multipart_size(default="10"(10MB))
|
||||
number of one part size in multipart uploading request.
|
||||
The default size is 10MB(10485760byte), minimum value is 5MB(5242880byte).
|
||||
Specify number of MB and over 5(MB).
|
||||
.TP
|
||||
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
|
||||
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
|
||||
s3fs makes file for downloading, and uploading and caching files.
|
||||
If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
|
||||
.TP
|
||||
\fB\-o\fR url (default="https://s3.amazonaws.com")
|
||||
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
|
||||
If you start s3fs without specifying the url option, s3fs will check the bucket using https://s3.amazonaws.com.
|
||||
And when bucket check fails, s3fs retries the bucket check using http://s3.amazonaws.com.
|
||||
This is the function left behind for backward compatibility.
|
||||
If you do not use https, please specify the URL with the url option.
|
||||
.TP
|
||||
\fB\-o\fR endpoint (default="us-east-1")
|
||||
sets the endpoint to use.
|
||||
If this option is not specified, s3fs uses "us-east-1" region as the default.
|
||||
If the s3fs could not connect to the region specified by this option, s3fs could not run.
|
||||
But if you do not specify this option, and if you can not connect with the default region, s3fs will retry to automatically connect to the other region.
|
||||
So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
|
||||
.TP
|
||||
\fB\-o\fR sigv2 (default is signature version 4)
|
||||
sets signing AWS requests by using Signature Version 2.
|
||||
.TP
|
||||
\fB\-o\fR mp_umask (default is "0000")
|
||||
sets umask for the mount point directory.
|
||||
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
|
||||
In the opposite case s3fs allows access to all users as the default.
|
||||
But if you set the allow_other with this option, you can control permissions of the mount point by this option like umask.
|
||||
.TP
|
||||
\fB\-o\fR nomultipart - disable multipart uploads
|
||||
.TP
|
||||
\fB\-o\fR enable_content_md5 ( default is disable )
|
||||
verifying uploaded data without multipart by content-md5 header.
|
||||
Enable to send "Content-MD5" header when uploading a object without multipart posting.
|
||||
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
|
||||
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
|
||||
.TP
|
||||
\fB\-o\fR iam_role ( default is no IAM role )
|
||||
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
|
||||
.TP
|
||||
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
|
||||
Enable to handle the extended attribute(xattrs).
|
||||
If you set this option, you can use the extended attribute.
|
||||
For example, encfs and ecryptfs need to support the extended attribute.
|
||||
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
|
||||
.TP
|
||||
\fB\-o\fR noxmlns - disable registering xml name space.
|
||||
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
|
||||
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
|
||||
.TP
|
||||
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
.TP
|
||||
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
|
||||
If this option is specified with nocopyapi, then s3fs ignores it.
|
||||
.TP
|
||||
\fB\-o\fR use_path_request_style (use legacy API calling style)
|
||||
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
|
||||
.TP
|
||||
\fB\-o\fR noua (suppress User-Agent header)
|
||||
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
|
||||
If this option is specified, s3fs suppresses the output of the User-Agent.
|
||||
.TP
|
||||
\fB\-o\fR cipher_suites
|
||||
Customize TLS cipher suite list. Expects a colon separated list of cipher suite names.
|
||||
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
|
||||
https://curl.haxx.se/docs/ssl-ciphers.html
|
||||
.TP
|
||||
\fB\-o\fR complement_stat (complement lack of file/directory mode)
|
||||
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
|
||||
By default, s3fs does not complement stat information for an object, so the object will not be allowed to be listed/modified.
|
||||
.TP
|
||||
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
|
||||
As a default, s3fs supports objects of the directory type as much as possible and recognizes them as directories.
|
||||
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and there is a file object that does not have a directory object but contains that directory path.
|
||||
s3fs needs redundant communication to support all these directory types.
|
||||
The object as the directory created by s3fs is "dir/".
|
||||
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
|
||||
This option is used to give this restriction to s3fs.
|
||||
However, if there is a directory object other than "dir/" in the bucket, specifying this option is not recommended.
|
||||
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
|
||||
Please use this option when the directory in the bucket is only "dir/" object.
|
||||
.TP
|
||||
\fB\-o\fR dbglevel (default="crit")
|
||||
Set the debug message level. set value as crit(critical), err(error), warn(warning), info(information) to debug level. default debug level is critical.
|
||||
If s3fs run with "-d" option, the debug level is set information.
|
||||
When s3fs catch the signal SIGUSR2, the debug level is bumpup.
|
||||
.TP
|
||||
\fB\-o\fR curldbg - put curl debug message
|
||||
Put the debug message from libcurl when this option is specified.
|
||||
.SH FUSE/MOUNT OPTIONS
|
||||
.TP
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
.TP
|
||||
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
|
||||
.SH NOTES
|
||||
.TP
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
.TP
|
||||
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
|
||||
.TP
|
||||
The folder specified by use_cache is just a local cache. It can be deleted at any time. s3fs rebuilds it on demand.
|
||||
.TP
|
||||
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
|
||||
.TP
|
||||
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
|
||||
.SH BUGS
|
||||
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail. Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads. Create and read enough files and you will eventually encounter this failure. This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around. The retries option does not address this issue. Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
|
||||
.SH AUTHOR
|
||||
s3fs has been written by Randy Rizun <rrizun@gmail.com>.
|
||||
554
doc/man/s3fs.1.in
Normal file
554
doc/man/s3fs.1.in
Normal file
@ -0,0 +1,554 @@
|
||||
.TH S3FS "1" "@MAN_PAGE_DATE@" "S3FS" "User Commands"
|
||||
.SH NAME
|
||||
S3FS \- FUSE-based file system backed by Amazon S3
|
||||
.SH SYNOPSIS
|
||||
.SS mounting
|
||||
.TP
|
||||
\fBs3fs bucket[:/path] mountpoint \fP [options]
|
||||
.TP
|
||||
\fBs3fs mountpoint \fP [options (must specify bucket= option)]
|
||||
.SS unmounting
|
||||
.TP
|
||||
\fBumount mountpoint
|
||||
For root.
|
||||
.TP
|
||||
\fBfusermount -u mountpoint
|
||||
For unprivileged user.
|
||||
.SS utility mode (remove interrupted multipart uploading objects)
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-list (-u) bucket
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-abort[=all | =<expire date format>] bucket
|
||||
.SH DESCRIPTION
|
||||
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
|
||||
.SH AUTHENTICATION
|
||||
s3fs supports the standard AWS credentials file (https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
|
||||
Alternatively, s3fs supports a custom passwd file. Only AWS credentials file format can be used when AWS session token is required.
|
||||
The s3fs password file has this format (use this format if you have only one set of credentials):
|
||||
.RS 4
|
||||
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
.RE
|
||||
|
||||
If you have more than one set of credentials, this syntax is also recognized:
|
||||
.RS 4
|
||||
\fBbucketName\fP:\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
.RE
|
||||
.PP
|
||||
Password files can be stored in two locations:
|
||||
.RS 4
|
||||
\fB/etc/passwd-s3fs\fP [0640]
|
||||
\fB$HOME/.passwd-s3fs\fP [0600]
|
||||
.RE
|
||||
.PP
|
||||
s3fs also recognizes the \fBAWS_ACCESS_KEY_ID\fP and \fBAWS_SECRET_ACCESS_KEY\fP environment variables.
|
||||
.SH OPTIONS
|
||||
.SS "general options"
|
||||
.TP
|
||||
\fB\-h\fR \fB\-\-help\fR
|
||||
print help
|
||||
.TP
|
||||
\fB\ \fR \fB\-\-version\fR
|
||||
print version
|
||||
.TP
|
||||
\fB\-f\fR
|
||||
FUSE foreground option - do not run as daemon.
|
||||
.TP
|
||||
\fB\-s\fR
|
||||
FUSE single-threaded option (disables multi-threaded operation)
|
||||
.SS "mount options"
|
||||
.TP
|
||||
All s3fs options must be given in the form where "opt" is:
|
||||
<option_name>=<option_value>
|
||||
.TP
|
||||
\fB\-o\fR bucket
|
||||
if it is not specified bucket name (and path) in command line, must specify this option after \-o option for bucket name.
|
||||
.TP
|
||||
\fB\-o\fR default_acl (default="private")
|
||||
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
|
||||
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for the full list of canned ACLs.
|
||||
.TP
|
||||
\fB\-o\fR retries (default="5")
|
||||
number of times to retry a failed S3 transaction.
|
||||
.TP
|
||||
\fB\-o\fR tmpdir (default="/tmp")
|
||||
local folder for temporary files.
|
||||
.TP
|
||||
\fB\-o\fR use_cache (default="" which means disabled)
|
||||
local folder to use for local file cache.
|
||||
.TP
|
||||
\fB\-o\fR check_cache_dir_exist (default is disable)
|
||||
If use_cache is set, check if the cache directory exists.
|
||||
If this option is not specified, it will be created at runtime when the cache directory does not exist.
|
||||
.TP
|
||||
\fB\-o\fR del_cache - delete local file cache
|
||||
delete local file cache when s3fs starts and exits.
|
||||
.TP
|
||||
\fB\-o\fR storage_class (default="standard")
|
||||
store object with specified storage class.
|
||||
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, glacier, glacier_ir, and deep_archive.
|
||||
.TP
|
||||
\fB\-o\fR use_rrs (default is disable)
|
||||
use Amazon's Reduced Redundancy Storage.
|
||||
this option can not be specified with use_sse.
|
||||
(can specify use_rrs=1 for old version)
|
||||
this option has been replaced by new storage_class option.
|
||||
.TP
|
||||
\fB\-o\fR use_sse (default is disable)
|
||||
Specify three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
|
||||
You can specify "use_sse" or "use_sse=1" enables SSE-S3 type (use_sse=1 is old type parameter).
|
||||
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (only <custom key file path> specified is old type parameter).
|
||||
You can use "c" for short "custom".
|
||||
The custom key file must be 600 permission. The file can have some lines, each line is one SSE-C key.
|
||||
The first line in file is used as Customer-Provided Encryption Keys for uploading and changing headers etc.
|
||||
If there are some keys after first line, those are used downloading object which are encrypted by not first key.
|
||||
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
If you specify "custom" ("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment. (AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
|
||||
This option is used to decide the SSE type.
|
||||
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
|
||||
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
|
||||
You can use "k" for short "kmsid".
|
||||
If you want to specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
|
||||
If you specify only "kmsid" ("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
|
||||
You must be careful about that you can not use the KMS id which is not same EC2 region.
|
||||
Additionally, if you specify SSE-KMS, your endpoints must use Secure Sockets Layer(SSL) or Transport Layer Security(TLS).
|
||||
.TP
|
||||
\fB\-o\fR load_sse_c - specify SSE-C keys
|
||||
Specify the custom-provided encryption keys file path for decrypting at downloading.
|
||||
If you use the custom-provided encryption key at uploading, you specify with "use_sse=custom".
|
||||
The file has many lines, one line means one custom key.
|
||||
So that you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
AWSSSECKEYS environment is as same as this file contents.
|
||||
.TP
|
||||
\fB\-o\fR passwd_file (default="")
|
||||
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
|
||||
.TP
|
||||
\fB\-o\fR ahbe_conf (default="" which means disabled)
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file (object) extension.
|
||||
The configuration file format is below:
|
||||
-----------
|
||||
line = [file suffix or regex] HTTP-header [HTTP-values]
|
||||
file suffix = file (object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
|
||||
regex = regular expression to match the file (object) path. this type starts with "reg:" prefix.
|
||||
HTTP-header = additional HTTP header name
|
||||
HTTP-values = additional HTTP header value
|
||||
-----------
|
||||
Sample:
|
||||
-----------
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
|
||||
-----------
|
||||
A sample configuration file is uploaded in "test" directory.
|
||||
If you specify this option for set "Content-Encoding" HTTP header, please take care for RFC 2616.
|
||||
.TP
|
||||
\fB\-o\fR profile (default="default")
|
||||
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
|
||||
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
|
||||
.TP
|
||||
\fB\-o\fR public_bucket (default="" which means disabled)
|
||||
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
|
||||
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
|
||||
.TP
|
||||
\fB\-o\fR connect_timeout (default="300" seconds)
|
||||
time to wait for connection before giving up.
|
||||
.TP
|
||||
\fB\-o\fR readwrite_timeout (default="120" seconds)
|
||||
time to wait between read/write activity before giving up.
|
||||
.TP
|
||||
\fB\-o\fR list_object_max_keys (default="1000")
|
||||
specify the maximum number of keys returned by S3 list object API. The default is 1000. you can set this value to 1000 or more.
|
||||
.TP
|
||||
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
|
||||
maximum number of entries in the stat cache and symbolic link cache.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_expire (default is 900)
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time indicates the time since cached.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_interval_expire (default is 900)
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
|
||||
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
|
||||
.TP
|
||||
\fB\-o\fR enable_negative_cache (default is enabled negative cache)
|
||||
This option will keep non-existence of objects in a stat cache.
|
||||
When this negative cache is enabled, it will not process extra HeadObject requests to search for non-existent objects, improving performance.
|
||||
This feature is enabled by default, so there is no need to specify it.
|
||||
.TP
|
||||
\fB\-o\fR disable_negative_cache (default is enabled negative cache)
|
||||
By default, s3fs keeps non-existent objects in the stat cache.
|
||||
This option disables this negative caching.
|
||||
This prevents delays in updates due to cache retention.
|
||||
However, it may increase the number of HeadObject requests to check if an object exists, which may decrease performance.
|
||||
.TP
|
||||
\fB\-o\fR no_check_certificate (by default this option is disabled)
|
||||
server certificate won't be checked against the available certificate authorities.
|
||||
.TP
|
||||
\fB\-o\fR ssl_verify_hostname (default="2")
|
||||
When 0, do not verify the SSL certificate against the hostname.
|
||||
.TP
|
||||
\fB\-o\fR ssl_client_cert (default="")
|
||||
Specify an SSL client certificate.
|
||||
Specify this optional parameter in the following format:
|
||||
"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>
|
||||
[:<Password>]]]]"
|
||||
<SSL Cert>: Client certificate.
|
||||
Specify the file path or NickName(for NSS, etc.).
|
||||
<Cert Type>: Type of certificate, default is "PEM"(optional).
|
||||
<Private Key>: Certificate's private key file(optional).
|
||||
<Key Type>: Type of private key, default is "PEM"(optional).
|
||||
<Password>: Passphrase of the private key(optional). It is also possible to omit this value and specify it using the environment variable "S3FS_SSL_PRIVKEY_PASSWORD".
|
||||
.TP
|
||||
\fB\-o\fR nodnscache - disable DNS cache.
|
||||
s3fs is always using DNS cache, this option make DNS cache disable.
|
||||
.TP
|
||||
\fB\-o\fR nosscache - disable SSL session cache.
|
||||
s3fs is always using SSL session cache, this option make SSL session cache disable.
|
||||
.TP
|
||||
\fB\-o\fR multipart_size (default="10")
|
||||
part size, in MB, for each multipart request.
|
||||
The minimum value is 5 MB and the maximum value is 5 GB.
|
||||
.TP
|
||||
\fB\-o\fR multipart_copy_size (default="512")
|
||||
part size, in MB, for each multipart copy request, used for
|
||||
renames and mixupload.
|
||||
The minimum value is 5 MB and the maximum value is 5 GB.
|
||||
Must be at least 512 MB to copy the maximum 5 TB object size
|
||||
but lower values may improve performance.
|
||||
.TP
|
||||
\fB\-o\fR max_dirty_data (default="5120")
|
||||
Flush dirty data to S3 after a certain number of MB written.
|
||||
The minimum value is 50 MB. -1 value means disable.
|
||||
Cannot be used with nomixupload.
|
||||
.TP
|
||||
\fB\-o\fR bucket_size (default=maximum long unsigned integer value)
|
||||
The size of the bucket with which the corresponding
|
||||
elements of the statvfs structure will be filled. The option
|
||||
argument is an integer optionally followed by a
|
||||
multiplicative suffix (GB, GiB, TB, TiB, PB, PiB,
|
||||
EB, EiB) (no spaces in between). If no suffix is supplied,
|
||||
bytes are assumed; eg: 20000000, 30GB, 45TiB. Note that
|
||||
s3fs does not compute the actual volume size (too
|
||||
expensive): by default it will assume the maximum possible
|
||||
size; however, since this may confuse other software which
|
||||
uses s3fs, the advertised bucket size can be set with this
|
||||
option.
|
||||
.TP
|
||||
\fB\-o\fR ensure_diskfree (default 0)
|
||||
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
|
||||
s3fs makes file for downloading, uploading and caching files.
|
||||
If the disk free space is smaller than this value, s3fs do not use disk space as possible in exchange for the performance.
|
||||
.TP
|
||||
\fB\-o\fR free_space_ratio (default="10")
|
||||
sets min free space ratio of the disk. The value of this option can be between 0 and 100. It will control
|
||||
the size of the cache according to this ratio to ensure that the idle ratio of the disk is greater than this value.
|
||||
For example, when the disk space is 50GB, the default value will
|
||||
ensure that the disk will reserve at least 50GB * 10%% = 5GB of remaining space.
|
||||
.TP
|
||||
\fB\-o\fR multipart_threshold (default="25")
|
||||
threshold, in MB, to use multipart upload instead of
|
||||
single-part. Must be at least 5 MB.
|
||||
.TP
|
||||
\fB\-o\fR singlepart_copy_limit (default="512")
|
||||
maximum size, in MB, of a single-part copy before trying
|
||||
multipart copy.
|
||||
.TP
|
||||
\fB\-o\fR host (default="https://s3.amazonaws.com")
|
||||
Set a non-Amazon host, e.g., https://example.com.
|
||||
.TP
|
||||
\fB\-o\fR servicepath (default="/")
|
||||
Set a service path when the non-Amazon host requires a prefix.
|
||||
.TP
|
||||
\fB\-o\fR url (default="https://s3.amazonaws.com")
|
||||
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
|
||||
If you do not use https, please specify the URL with the url option.
|
||||
.TP
|
||||
\fB\-o\fR region (default="us-east-1")
|
||||
sets the region to use on signature version 4.
|
||||
If this option is not specified, s3fs uses "us-east-1" region as the default.
|
||||
If the s3fs could not connect to the region specified by this option, s3fs could not run.
|
||||
But if you do not specify this option, and if you can not connect with the default region, s3fs will retry to automatically connect to the other region.
|
||||
So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
|
||||
You can also specify the legacy -o endpoint which means the same thing.
|
||||
.TP
|
||||
\fB\-o\fR sigv2 (default is signature version 4 falling back to version 2)
|
||||
sets signing AWS requests by using only signature version 2.
|
||||
.TP
|
||||
\fB\-o\fR sigv4 (default is signature version 4 falling back to version 2)
|
||||
sets signing AWS requests by using only signature version 4.
|
||||
.TP
|
||||
\fB\-o\fR mp_umask (default is "0000")
|
||||
sets umask for the mount point directory.
|
||||
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
|
||||
In the opposite case s3fs allows access to all users as the default.
|
||||
But if you set the allow_other with this option, you can control the permissions of the mount point by this option like umask.
|
||||
.TP
|
||||
\fB\-o\fR umask (default is "0000")
|
||||
sets umask for files under the mountpoint. This can allow
|
||||
users other than the mounting user to read and write to files
|
||||
that they did not create.
|
||||
.TP
|
||||
\fB\-o\fR nomultipart - disable multipart uploads
|
||||
.TP
|
||||
\fB\-o\fR streamupload (default is disable)
|
||||
Enable stream upload.
|
||||
If this option is enabled, a sequential upload will be performed in parallel with the write from the part that has been written during a multipart upload.
|
||||
This is expected to give better performance than other upload functions.
|
||||
Note that this option is still experimental and may change in the future.
|
||||
.TP
|
||||
\fB\-o\fR max_thread_count (default is "10")
|
||||
This value is the maximum number of parallel requests to be sent, and the number of parallel processes for head requests, multipart uploads and stream uploads.
|
||||
Worker threads will be started to process requests according to this value.
|
||||
.TP
|
||||
\fB\-o\fR enable_content_md5 (default is disable)
|
||||
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
|
||||
This can add CPU overhead to transfers.
|
||||
.TP
|
||||
\fB\-o\fR enable_unsigned_payload (default is disable)
|
||||
Do not calculate Content-SHA256 for PutObject and UploadPart
|
||||
payloads. This can reduce CPU overhead to transfers.
|
||||
.TP
|
||||
\fB\-o\fR ecs (default is disable)
|
||||
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
|
||||
.TP
|
||||
\fB\-o\fR iam_role (default is no IAM role)
|
||||
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
|
||||
.TP
|
||||
\fB\-o\fR imdsv1only (default is to use IMDSv2 with fallback to v1)
|
||||
AWS instance metadata service, used with IAM role authentication,
|
||||
supports the use of an API token. If you're using an IAM role in an
|
||||
environment that does not support IMDSv2, setting this flag will skip
|
||||
retrieval and usage of the API token when retrieving IAM credentials.
|
||||
.TP
|
||||
\fB\-o\fR ibm_iam_auth (default is not using IBM IAM authentication)
|
||||
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
|
||||
.TP
|
||||
\fB\-o\fR ibm_iam_endpoint (default is https://iam.cloud.ibm.com)
|
||||
Sets the URL to use for IBM IAM authentication.
|
||||
.TP
|
||||
\fB\-o\fR credlib (default=\"\" which means disabled)
|
||||
Specifies the shared library that handles the credentials containing the authentication token.
|
||||
If this option is specified, the specified credential and token processing provided by the shared library ant will be performed instead of the built-in credential processing.
|
||||
This option cannot be specified with passwd_file, profile, use_session_token, ecs, ibm_iam_auth, ibm_iam_endpoint, imdsv1only and iam_role option.
|
||||
.TP
|
||||
\fB\-o\fR credlib_opts (default=\"\" which means disabled)
|
||||
Specifies the options to pass when the shared library specified in credlib is loaded and then initialized.
|
||||
For the string specified in this option, specify the string defined by the shared library.
|
||||
.TP
|
||||
\fB\-o\fR use_xattr (default is not handling the extended attribute)
|
||||
Enable to handle the extended attribute (xattrs).
|
||||
If you set this option, you can use the extended attribute.
|
||||
For example, encfs and ecryptfs need to support the extended attribute.
|
||||
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
|
||||
.TP
|
||||
\fB\-o\fR noxmlns - disable registering xml name space.
|
||||
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
|
||||
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
|
||||
.TP
|
||||
\fB\-o\fR nomixupload - disable copy in multipart uploads.
|
||||
Disable to use PUT (copy api) when multipart uploading large size objects.
|
||||
By default, when doing multipart upload, the range of unchanged data will use PUT (copy api) whenever possible.
|
||||
When nocopyapi or norenameapi is specified, use of PUT (copy api) is invalidated even if this option is not specified.
|
||||
.TP
|
||||
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source" (copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
.TP
|
||||
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command (ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command (ex. mv).
|
||||
If this option is specified with nocopyapi, then s3fs ignores it.
|
||||
.TP
|
||||
\fB\-o\fR use_path_request_style (use legacy API calling style)
|
||||
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
|
||||
.TP
|
||||
\fB\-o\fR listobjectsv2 (use ListObjectsV2)
|
||||
Issue ListObjectsV2 instead of ListObjects, useful on object
|
||||
stores without ListObjects support.
|
||||
.TP
|
||||
\fB\-o\fR noua (suppress User-Agent header)
|
||||
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
|
||||
If this option is specified, s3fs suppresses the output of the User-Agent.
|
||||
.TP
|
||||
\fB\-o\fR cipher_suites
|
||||
Customize the list of TLS cipher suites. Expects a colon separated list of cipher suite names.
|
||||
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
|
||||
https://curl.haxx.se/docs/ssl-ciphers.html
|
||||
.TP
|
||||
\fB\-o\fR instance_name
|
||||
The instance name of the current s3fs mountpoint.
|
||||
This name will be added to logging messages and user agent headers sent by s3fs.
|
||||
.TP
|
||||
\fB\-o\fR complement_stat (complement lack of file/directory mode)
|
||||
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
|
||||
As default, s3fs does not complements stat information for a object, then the object will not be able to be allowed to list/modify.
|
||||
.TP
|
||||
\fB\-o\fR compat_dir (enable support of alternative directory names)
|
||||
.RS
|
||||
s3fs supports two different naming schemas "dir/" and "dir" to map directory names to S3 objects and vice versa by default. As a third variant, directories can be determined indirectly if there is a file object with a path (e.g. "/dir/file") but without the parent directory. This option enables a fourth variant, "dir_$folder$", created by older applications.
|
||||
.TP
|
||||
S3fs uses only the first schema "dir/" to create S3 objects for directories.
|
||||
.TP
|
||||
The support for these different naming schemas causes an increased communication effort.
|
||||
.TP
|
||||
If you do not have access permissions to the bucket and specify a directory path created by a client other than s3fs for the mount point, you cannot start because the mount point directory cannot be found by s3fs. But by specifying this option, you can avoid this error.
|
||||
.RE
|
||||
.TP
|
||||
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
|
||||
S3 requires all object names to be valid UTF-8. But some
|
||||
clients, notably Windows NFS clients, use their own encoding.
|
||||
This option re-encodes invalid UTF-8 object names into valid
|
||||
UTF-8 by mapping offending codes into a 'private' codepage of the
|
||||
Unicode set.
|
||||
Useful on clients not using UTF-8 as their file system encoding.
|
||||
.TP
|
||||
\fB\-o\fR use_session_token - indicate that session token should be provided.
|
||||
If credentials are provided by environment variables this switch
|
||||
forces presence check of AWS_SESSION_TOKEN variable.
|
||||
Otherwise an error is returned.
|
||||
.TP
|
||||
\fB\-o\fR requester_pays (default is disable)
|
||||
This option instructs s3fs to enable requests involving Requester Pays buckets (It includes the 'x-amz-request-payer=requester' entry in the request header).
|
||||
.TP
|
||||
\fB\-o\fR mime (default is "/etc/mime.types")
|
||||
Specify the path of the mime.types file.
|
||||
If this option is not specified, the existence of "/etc/mime.types" is checked, and that file is loaded as mime information.
|
||||
If this file does not exist on macOS, then "/etc/apache2/mime.types" is checked as well.
|
||||
.TP
|
||||
\fB\-o\fR proxy (default="")
|
||||
This option specifies a proxy to S3 server.
|
||||
Specify the proxy with '[<scheme://]hostname(fqdn)[:<port>]' formatted.
|
||||
'<schema>://' can be omitted, and 'http://' is used when omitted.
|
||||
Also, ':<port>' can also be omitted. If omitted, port 443 is used for HTTPS schema, and port 1080 is used otherwise.
|
||||
This option is the same as the curl command's '--proxy(-x)' option and libcurl's 'CURLOPT_PROXY' flag.
|
||||
This option is equivalent to and takes precedence over the environment variables 'http_proxy', 'all_proxy', etc.
|
||||
.TP
|
||||
\fB\-o\fR proxy_cred_file (default="")
|
||||
This option specifies the file that describes the username and passphrase for authentication of the proxy when the HTTP schema proxy is specified by the 'proxy' option.
|
||||
Username and passphrase are valid only for HTTP schema.
|
||||
If the HTTP proxy does not require authentication, this option is not required.
|
||||
Separate the username and passphrase with a ':' character and specify each as a URL-encoded string.
|
||||
.TP
|
||||
\fB\-o\fR ipresolve (default="whatever")
|
||||
Select what type of IP addresses to use when establishing a connection.
|
||||
Default('whatever') can use addresses of all IP versions(IPv4 and IPv6) that your system allows.
|
||||
If you specify 'IPv4', only IPv4 addresses are used.
|
||||
And when 'IPv6' is specified, only IPv6 addresses will be used.
|
||||
.TP
|
||||
\fB\-o\fR logfile - specify the log output file.
|
||||
s3fs outputs the log file to syslog. Alternatively, if s3fs is started with the "-f" option specified, the log will be output to the stdout/stderr.
|
||||
You can use this option to specify the log file that s3fs outputs.
|
||||
If you specify a log file with this option, it will reopen the log file when s3fs receives a SIGHUP signal. You can use the SIGHUP signal for log rotation.
|
||||
.TP
|
||||
\fB\-o\fR dbglevel (default="crit")
|
||||
Set the debug message level. set value as crit (critical), err (error), warn (warning), info (information) to debug level. default debug level is critical.
|
||||
If s3fs run with "-d" option, the debug level is set information.
|
||||
When s3fs catch the signal SIGUSR2, the debug level is bump up.
|
||||
.TP
|
||||
\fB\-o\fR curldbg - put curl debug message
|
||||
Put the debug message from libcurl when this option is specified.
|
||||
Specify "normal" or "body" for the parameter.
|
||||
If the parameter is omitted, it is the same as "normal".
|
||||
If "body" is specified, some API communication body data will be output in addition to the debug message output as "normal".
|
||||
.TP
|
||||
\fB\-o\fR no_time_stamp_msg - no time stamp in debug message
|
||||
The time stamp is output to the debug message by default.
|
||||
If this option is specified, the time stamp will not be output in the debug message.
|
||||
It is the same even if the environment variable "S3FS_MSGTIMESTAMP" is set to "no".
|
||||
.TP
|
||||
\fB\-o\fR set_check_cache_sigusr1 (default is stdout)
|
||||
If the cache is enabled, you can check the integrity of the cache file and the cache file's stats info file.
|
||||
This option is specified and when sending the SIGUSR1 signal to the s3fs process checks the cache status at that time.
|
||||
This option can take a file path as parameter to output the check result to that file.
|
||||
The file path parameter can be omitted. If omitted, the result will be output to stdout or syslog.
|
||||
.TP
|
||||
\fB\-o\fR update_parent_dir_stat (default is disable)
|
||||
The parent directory's mtime and ctime are updated when a file or directory is created or deleted (when the parent directory's inode is updated).
|
||||
By default, parent directory statistics are not updated.
|
||||
.SS "utility mode options"
|
||||
.TP
|
||||
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
|
||||
Lists multipart incomplete objects uploaded to the specified bucket.
|
||||
.TP
|
||||
\fB\-\-incomplete\-mpu\-abort\fR all or date format (default="24H")
|
||||
Delete the multipart incomplete object uploaded to the specified bucket.
|
||||
If "all" is specified for this option, all multipart incomplete objects will be deleted.
|
||||
If you specify no argument as an option, objects older than 24 hours (24H) will be deleted (This is the default value).
|
||||
You can specify an optional date format.
|
||||
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
|
||||
For example, "1Y6M10D12h30m30s".
|
||||
.SH FUSE/MOUNT OPTIONS
|
||||
.TP
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
.TP
|
||||
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
|
||||
.SH SERVER URL/REQUEST STYLE
|
||||
Be careful when specifying the server endpoint(URL).
|
||||
.TP
|
||||
If your bucket name contains dots("."), you should use the path request style(using "use_path_request_style" option).
|
||||
.TP
|
||||
Also, if you are using a server other than Amazon S3, you need to specify the endpoint with the "url" option. At that time, depending on the server you are using, you may have to specify the path request style("use_path_request_style" option).
|
||||
.SH LOCAL STORAGE CONSUMPTION
|
||||
.TP
|
||||
s3fs requires local caching for operation. You can enable a local cache with "\-o use_cache" or s3fs uses temporary files to cache pending requests to s3.
|
||||
.TP
|
||||
Apart from the requirements discussed below, it is recommended to keep enough cache resp. temporary storage to allow one copy each of all files open for reading and writing at any one time.
|
||||
.TP
|
||||
.SS Local cache with \[dq]\-o use_cache\[dq]
|
||||
.TP
|
||||
s3fs automatically maintains a local cache of files. The cache folder is specified by the parameter of "\-o use_cache". It is only a local cache that can be deleted at any time. s3fs rebuilds it if necessary.
|
||||
.TP
|
||||
Whenever s3fs needs to read or write a file on S3, it first creates the file in the cache directory and operates on it.
|
||||
.TP
|
||||
The amount of local cache storage used can be indirectly controlled with "\-o ensure_diskfree".
|
||||
.TP
|
||||
.SS Without local cache
|
||||
.TP
|
||||
Since s3fs always requires some storage space for operation, it creates temporary files to store incoming write requests until the required s3 request size is reached and the segment has been uploaded. After that, this data is truncated in the temporary file to free up storage space.
|
||||
.TP
|
||||
Per file you need at least twice the part size (default 5MB or "-o multipart_size") for writing multipart requests or space for the whole file if single requests are enabled ("\-o nomultipart").
|
||||
.SH PERFORMANCE CONSIDERATIONS
|
||||
.TP
|
||||
This section discusses settings to improve s3fs performance.
|
||||
.TP
|
||||
In most cases, backend performance cannot be controlled and is therefore not part of this discussion.
|
||||
.TP
|
||||
Details of the local storage usage is discussed in "LOCAL STORAGE CONSUMPTION".
|
||||
.TP
|
||||
.SS CPU and Memory Consumption
|
||||
.TP
|
||||
s3fs is a multi-threaded application. Depending on the workload it may use multiple CPUs and a certain amount of memory. You can monitor the CPU and memory consumption with the "top" utility.
|
||||
.TP
|
||||
.SS Performance of S3 requests
|
||||
.TP
|
||||
s3fs provides several options (e.g. "max_thread_count" option) to control behaviour and thus indirectly the performance. The possible combinations of these options in conjunction with the various S3 backends are so varied that there is no individual recommendation other than the default values. Improved individual settings can be found by testing and measuring.
|
||||
.TP
|
||||
The two options "Enable negative cache" ("\-o enable_negative_cache") and "Disable support of alternative directory names" ("\-o notsup_compat_dir") can be used to control shared access to the same bucket by different applications:
|
||||
.TP
|
||||
.IP \[bu]
|
||||
Enable negative cache ("\-o enable_negative_cache")
|
||||
.RS
|
||||
.TP
|
||||
If a bucket is used exclusively by an s3fs instance, you can enable the cache for non-existent files and directories with "\-o enable_negative_cache". This eliminates repeated requests to check the existence of an object, saving time and possibly money.
|
||||
.RE
|
||||
.IP \[bu]
|
||||
Enable support of alternative directory names ("\-o compat_dir")
|
||||
.RS
|
||||
.TP
|
||||
s3fs recognizes "dir/" objects as directories. Clients other than s3fs may use "dir", "dir_$folder$" objects as directories, or directory objects may not exist. In order for s3fs to recognize these as directories, you can specify the "compat_dir" option.
|
||||
.RE
|
||||
.IP \[bu]
|
||||
Completion of file and directory information ("\-o complement_stat")
|
||||
.RS
|
||||
.TP
|
||||
s3fs uses the "x-amz-meta-mode header" to determine if an object is a file or a directory. For this reason, objects that do not have the "x-amz-meta-mode header" may not produce the expected results(The directory cannot be displayed, etc.). By specifying the "complement_stat" option, s3fs can automatically complete this missing attribute information, and you can get the expected results.
|
||||
.RE
|
||||
.SH NOTES
|
||||
.TP
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
.TP
|
||||
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
|
||||
.SH SEE ALSO
|
||||
fuse(8), mount(8), fusermount(1), fstab(5)
|
||||
.SH BUGS
|
||||
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail. Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads. Create and read enough files and you will eventually encounter this failure. This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around. The retries option does not address this issue. Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
|
||||
.SH AUTHOR
|
||||
s3fs has been written by Randy Rizun <rrizun@gmail.com>.
|
||||
@ -21,24 +21,98 @@ bin_PROGRAMS=s3fs
|
||||
|
||||
AM_CPPFLAGS = $(DEPS_CFLAGS)
|
||||
if USE_GNUTLS_NETTLE
|
||||
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
|
||||
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
|
||||
endif
|
||||
if USE_SSL_OPENSSL_30
|
||||
AM_CPPFLAGS += -DUSE_OPENSSL_30
|
||||
endif
|
||||
|
||||
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
|
||||
s3fs_SOURCES = \
|
||||
s3fs.cpp \
|
||||
s3fs_global.cpp \
|
||||
s3fs_help.cpp \
|
||||
s3fs_logger.cpp \
|
||||
s3fs_xml.cpp \
|
||||
metaheader.cpp \
|
||||
mpu_util.cpp \
|
||||
curl.cpp \
|
||||
curl_share.cpp \
|
||||
curl_util.cpp \
|
||||
s3objlist.cpp \
|
||||
cache.cpp \
|
||||
cache_node.cpp \
|
||||
string_util.cpp \
|
||||
s3fs_cred.cpp \
|
||||
s3fs_util.cpp \
|
||||
s3fs_threadreqs.cpp \
|
||||
fdcache.cpp \
|
||||
fdcache_entity.cpp \
|
||||
fdcache_page.cpp \
|
||||
fdcache_stat.cpp \
|
||||
fdcache_auto.cpp \
|
||||
fdcache_fdinfo.cpp \
|
||||
fdcache_pseudofd.cpp \
|
||||
fdcache_untreated.cpp \
|
||||
filetimes.cpp \
|
||||
addhead.cpp \
|
||||
sighandlers.cpp \
|
||||
threadpoolman.cpp \
|
||||
syncfiller.cpp \
|
||||
common_auth.cpp
|
||||
if USE_SSL_OPENSSL
|
||||
s3fs_SOURCES += openssl_auth.cpp
|
||||
s3fs_SOURCES += openssl_auth.cpp
|
||||
endif
|
||||
if USE_SSL_GNUTLS
|
||||
s3fs_SOURCES += gnutls_auth.cpp
|
||||
s3fs_SOURCES += gnutls_auth.cpp
|
||||
endif
|
||||
if USE_SSL_NSS
|
||||
s3fs_SOURCES += nss_auth.cpp
|
||||
s3fs_SOURCES += nss_auth.cpp
|
||||
endif
|
||||
|
||||
s3fs_LDADD = $(DEPS_LIBS)
|
||||
|
||||
noinst_PROGRAMS = test_string_util
|
||||
noinst_PROGRAMS = \
|
||||
test_curl_util \
|
||||
test_page_list \
|
||||
test_string_util
|
||||
|
||||
test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h
|
||||
test_curl_util_SOURCES = common_auth.cpp curl_util.cpp string_util.cpp test_curl_util.cpp s3fs_global.cpp s3fs_logger.cpp
|
||||
if USE_SSL_OPENSSL
|
||||
test_curl_util_SOURCES += openssl_auth.cpp
|
||||
endif
|
||||
if USE_SSL_GNUTLS
|
||||
test_curl_util_SOURCES += gnutls_auth.cpp
|
||||
endif
|
||||
if USE_SSL_NSS
|
||||
test_curl_util_SOURCES += nss_auth.cpp
|
||||
endif
|
||||
|
||||
TESTS = test_string_util
|
||||
test_curl_util_LDADD = $(DEPS_LIBS)
|
||||
|
||||
test_page_list_SOURCES = \
|
||||
fdcache_page.cpp \
|
||||
s3fs_global.cpp \
|
||||
s3fs_logger.cpp \
|
||||
string_util.cpp \
|
||||
test_page_list.cpp
|
||||
|
||||
test_string_util_SOURCES = string_util.cpp test_string_util.cpp s3fs_logger.cpp
|
||||
|
||||
TESTS = \
|
||||
test_curl_util \
|
||||
test_page_list \
|
||||
test_string_util
|
||||
|
||||
clang-tidy:
|
||||
clang-tidy -extra-arg-before=-xc++ -extra-arg=-std=@CPP_VERSION@ \
|
||||
*.h $(s3fs_SOURCES) test_curl_util.cpp test_page_list.cpp test_string_util.cpp \
|
||||
-- $(DEPS_CFLAGS) $(CPPFLAGS)
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noexpandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: noexpandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
361
src/addhead.cpp
361
src/addhead.cpp
@ -18,30 +18,25 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <assert.h>
|
||||
#include <curl/curl.h>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <list>
|
||||
#include <strings.h>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "addhead.h"
|
||||
#include "curl.h"
|
||||
#include "s3fs.h"
|
||||
|
||||
using namespace std;
|
||||
#include "addhead.h"
|
||||
#include "curl_util.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Symbols
|
||||
//-------------------------------------------------------------------
|
||||
#define ADD_HEAD_REGEX "reg:"
|
||||
static constexpr char ADD_HEAD_REGEX[] = "reg:";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
@ -53,227 +48,195 @@ AdditionalHeader AdditionalHeader::singleton;
|
||||
//-------------------------------------------------------------------
|
||||
AdditionalHeader::AdditionalHeader()
|
||||
{
|
||||
if(this == AdditionalHeader::get()){
|
||||
is_enable = false;
|
||||
}else{
|
||||
assert(false);
|
||||
}
|
||||
if(this == AdditionalHeader::get()){
|
||||
is_enable = false;
|
||||
}else{
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
AdditionalHeader::~AdditionalHeader()
|
||||
{
|
||||
if(this == AdditionalHeader::get()){
|
||||
Unload();
|
||||
}else{
|
||||
assert(false);
|
||||
}
|
||||
if(this == AdditionalHeader::get()){
|
||||
Unload();
|
||||
}else{
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Load(const char* file)
|
||||
{
|
||||
if(!file){
|
||||
S3FS_PRN_WARN("file is NULL.");
|
||||
return false;
|
||||
}
|
||||
Unload();
|
||||
if(!file){
|
||||
S3FS_PRN_WARN("file is nullptr.");
|
||||
return false;
|
||||
}
|
||||
Unload();
|
||||
|
||||
ifstream AH(file);
|
||||
if(!AH.good()){
|
||||
S3FS_PRN_WARN("Could not open file(%s).", file);
|
||||
return false;
|
||||
}
|
||||
|
||||
// read file
|
||||
string line;
|
||||
PADDHEAD paddhead;
|
||||
while(getline(AH, line)){
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
if(0 == line.size()){
|
||||
continue;
|
||||
}
|
||||
// load a line
|
||||
stringstream ss(line);
|
||||
string key(""); // suffix(key)
|
||||
string head; // additional HTTP header
|
||||
string value; // header value
|
||||
if(0 == isblank(line[0])){
|
||||
ss >> key;
|
||||
}
|
||||
if(ss){
|
||||
ss >> head;
|
||||
if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
|
||||
value = line.substr(static_cast<int>(ss.tellg()) + 1);
|
||||
}
|
||||
std::ifstream AH(file);
|
||||
if(!AH.good()){
|
||||
S3FS_PRN_WARN("Could not open file(%s).", file);
|
||||
return false;
|
||||
}
|
||||
|
||||
// check it
|
||||
if(0 == head.size()){
|
||||
if(0 == key.size()){
|
||||
continue;
|
||||
}
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
|
||||
Unload();
|
||||
return false;
|
||||
// read file
|
||||
std::string line;
|
||||
while(getline(AH, line)){
|
||||
if(line.empty()){
|
||||
continue;
|
||||
}
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
// load a line
|
||||
std::istringstream ss(line);
|
||||
std::string key; // suffix(key)
|
||||
std::string head; // additional HTTP header
|
||||
std::string value; // header value
|
||||
if(0 == isblank(line[0])){
|
||||
ss >> key;
|
||||
}
|
||||
if(ss){
|
||||
ss >> head;
|
||||
if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
|
||||
value = line.substr(static_cast<int>(ss.tellg()) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// check it
|
||||
if(head.empty()){
|
||||
if(key.empty()){
|
||||
continue;
|
||||
}
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
|
||||
Unload();
|
||||
return false;
|
||||
}
|
||||
|
||||
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
|
||||
continue;
|
||||
}
|
||||
key.erase(0, strlen(ADD_HEAD_REGEX));
|
||||
|
||||
// compile
|
||||
RegexPtr preg(new regex_t, regfree);
|
||||
int result;
|
||||
if(0 != (result = regcomp(preg.get(), key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
char errbuf[256];
|
||||
regerror(result, preg.get(), errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
continue;
|
||||
}
|
||||
|
||||
addheadlist.emplace_back(std::move(preg), key, head, value);
|
||||
}else{
|
||||
// not regex, directly comparing
|
||||
addheadlist.emplace_back(RegexPtr(nullptr, regfree), key, head, value);
|
||||
}
|
||||
|
||||
// set flag
|
||||
is_enable = true;
|
||||
}
|
||||
|
||||
paddhead = new ADDHEAD;
|
||||
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
|
||||
continue;
|
||||
}
|
||||
key = key.substr(strlen(ADD_HEAD_REGEX));
|
||||
|
||||
// compile
|
||||
regex_t* preg = new regex_t;
|
||||
int result;
|
||||
char errbuf[256];
|
||||
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
regerror(result, preg, errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
delete preg;
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
|
||||
// set
|
||||
paddhead->pregex = preg;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
|
||||
}else{
|
||||
// not regex, directly comparing
|
||||
paddhead->pregex = NULL;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
}
|
||||
|
||||
// add list
|
||||
addheadlist.push_back(paddhead);
|
||||
|
||||
// set flag
|
||||
if(!is_enable){
|
||||
is_enable = true;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void AdditionalHeader::Unload(void)
|
||||
void AdditionalHeader::Unload()
|
||||
{
|
||||
is_enable = false;
|
||||
is_enable = false;
|
||||
|
||||
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
|
||||
PADDHEAD paddhead = *iter;
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
regfree(paddhead->pregex);
|
||||
delete paddhead->pregex;
|
||||
}
|
||||
delete paddhead;
|
||||
}
|
||||
}
|
||||
addheadlist.clear();
|
||||
}
|
||||
|
||||
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
{
|
||||
if(!is_enable){
|
||||
return true;
|
||||
}
|
||||
if(!path){
|
||||
S3FS_PRN_WARN("path is NULL.");
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t pathlength = strlen(path);
|
||||
|
||||
// loop
|
||||
//
|
||||
// [NOTE]
|
||||
// Because to allow duplicate key, and then scanning the entire table.
|
||||
//
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
const PADDHEAD paddhead = *iter;
|
||||
if(!paddhead){
|
||||
continue;
|
||||
if(!is_enable){
|
||||
return true;
|
||||
}
|
||||
if(!path){
|
||||
S3FS_PRN_WARN("path is nullptr.");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(paddhead->pregex){
|
||||
// regex
|
||||
regmatch_t match; // not use
|
||||
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}else{
|
||||
// directly comparing
|
||||
if(paddhead->basestring.length() < pathlength){
|
||||
if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
size_t pathlength = strlen(path);
|
||||
|
||||
// loop
|
||||
//
|
||||
// [NOTE]
|
||||
// Because to allow duplicate key, and then scanning the entire table.
|
||||
//
|
||||
for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter){
|
||||
const add_header *paddhead = &*iter;
|
||||
|
||||
if(paddhead->pregex){
|
||||
// regex
|
||||
regmatch_t match; // not use
|
||||
if(0 == regexec(paddhead->pregex.get(), path, 1, &match, 0)){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}else{
|
||||
// directly comparing
|
||||
if(paddhead->basestring.length() < pathlength){
|
||||
if(paddhead->basestring.empty() || paddhead->basestring == &path[pathlength - paddhead->basestring.length()]){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
|
||||
{
|
||||
headers_t meta;
|
||||
headers_t meta;
|
||||
|
||||
if(!AddHeader(meta, path)){
|
||||
if(!AddHeader(meta, path)){
|
||||
return list;
|
||||
}
|
||||
for(auto iter = meta.cbegin(); iter != meta.cend(); ++iter){
|
||||
// Adding header
|
||||
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
|
||||
}
|
||||
meta.clear();
|
||||
return list;
|
||||
}
|
||||
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
||||
// Adding header
|
||||
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
|
||||
}
|
||||
meta.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return list;
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Dump(void) const
|
||||
bool AdditionalHeader::Dump() const
|
||||
{
|
||||
if(!IS_S3FS_LOG_DBG()){
|
||||
return true;
|
||||
}
|
||||
|
||||
stringstream ssdbg;
|
||||
int cnt = 1;
|
||||
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
|
||||
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
|
||||
const PADDHEAD paddhead = *iter;
|
||||
|
||||
ssdbg << " [" << cnt << "] = {" << endl;
|
||||
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
ssdbg << " type\t\t--->\tregex" << endl;
|
||||
}else{
|
||||
ssdbg << " type\t\t--->\tsuffix matching" << endl;
|
||||
}
|
||||
ssdbg << " base string\t--->\t" << paddhead->basestring << endl;
|
||||
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl;
|
||||
if(!S3fsLog::IsS3fsLogDbg()){
|
||||
return true;
|
||||
}
|
||||
|
||||
std::ostringstream ssdbg;
|
||||
int cnt = 1;
|
||||
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;
|
||||
|
||||
for(auto iter = addheadlist.cbegin(); iter != addheadlist.cend(); ++iter, ++cnt){
|
||||
const add_header *paddhead = &*iter;
|
||||
|
||||
ssdbg << " [" << cnt << "] = {" << std::endl;
|
||||
|
||||
if(paddhead->pregex){
|
||||
ssdbg << " type\t\t--->\tregex" << std::endl;
|
||||
}else{
|
||||
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
|
||||
}
|
||||
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
|
||||
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
|
||||
ssdbg << " }" << std::endl;
|
||||
}
|
||||
ssdbg << " }" << endl;
|
||||
}
|
||||
|
||||
|
||||
ssdbg << "}" << endl;
|
||||
ssdbg << "}" << std::endl;
|
||||
|
||||
// print all
|
||||
S3FS_PRN_DBG("%s", ssdbg.str().c_str());
|
||||
// print all
|
||||
S3FS_PRN_DBG("%s", ssdbg.str().c_str());
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -281,6 +244,6 @@ bool AdditionalHeader::Dump(void) const
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -21,41 +21,69 @@
|
||||
#ifndef S3FS_ADDHEAD_H_
|
||||
#define S3FS_ADDHEAD_H_
|
||||
|
||||
#include <memory>
|
||||
#include <regex.h>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "metaheader.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// class AdditionalHeader
|
||||
// Structure / Typedef
|
||||
//----------------------------------------------
|
||||
typedef struct add_header{
|
||||
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
|
||||
std::string basestring;
|
||||
std::string headkey;
|
||||
std::string headvalue;
|
||||
}ADDHEAD, *PADDHEAD;
|
||||
typedef std::unique_ptr<regex_t, decltype(®free)> RegexPtr;
|
||||
|
||||
typedef std::vector<PADDHEAD> addheadlist_t;
|
||||
struct add_header{
|
||||
add_header(RegexPtr pregex, std::string basestring, std::string headkey, std::string headvalue)
|
||||
: pregex(std::move(pregex))
|
||||
, basestring(std::move(basestring))
|
||||
, headkey(std::move(headkey))
|
||||
, headvalue(std::move(headvalue))
|
||||
{}
|
||||
|
||||
add_header(const add_header&) = delete;
|
||||
add_header(add_header&& val) = default;
|
||||
add_header& operator=(const add_header&) = delete;
|
||||
add_header& operator=(add_header&&) = delete;
|
||||
|
||||
RegexPtr pregex; // not nullptr means using regex, nullptr means comparing suffix directly.
|
||||
std::string basestring;
|
||||
std::string headkey;
|
||||
std::string headvalue;
|
||||
};
|
||||
|
||||
typedef std::vector<add_header> addheadlist_t;
|
||||
|
||||
//----------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
//----------------------------------------------
|
||||
class AdditionalHeader
|
||||
{
|
||||
private:
|
||||
static AdditionalHeader singleton;
|
||||
bool is_enable;
|
||||
addheadlist_t addheadlist;
|
||||
private:
|
||||
static AdditionalHeader singleton;
|
||||
bool is_enable;
|
||||
addheadlist_t addheadlist;
|
||||
|
||||
protected:
|
||||
AdditionalHeader();
|
||||
~AdditionalHeader();
|
||||
protected:
|
||||
AdditionalHeader();
|
||||
~AdditionalHeader();
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static AdditionalHeader* get(void) { return &singleton; }
|
||||
public:
|
||||
AdditionalHeader(const AdditionalHeader&) = delete;
|
||||
AdditionalHeader(AdditionalHeader&&) = delete;
|
||||
AdditionalHeader& operator=(const AdditionalHeader&) = delete;
|
||||
AdditionalHeader& operator=(AdditionalHeader&&) = delete;
|
||||
|
||||
bool Load(const char* file);
|
||||
void Unload(void);
|
||||
// Reference singleton
|
||||
static AdditionalHeader* get() { return &singleton; }
|
||||
|
||||
bool AddHeader(headers_t& meta, const char* path) const;
|
||||
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
|
||||
bool Dump(void) const;
|
||||
bool Load(const char* file);
|
||||
void Unload();
|
||||
|
||||
bool AddHeader(headers_t& meta, const char* path) const;
|
||||
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
|
||||
bool Dump() const;
|
||||
};
|
||||
|
||||
#endif // S3FS_ADDHEAD_H_
|
||||
@ -65,6 +93,6 @@ class AdditionalHeader
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
943
src/cache.cpp
943
src/cache.cpp
File diff suppressed because it is too large
Load Diff
189
src/cache.h
189
src/cache.h
@ -21,116 +21,113 @@
|
||||
#ifndef S3FS_CACHE_H_
|
||||
#define S3FS_CACHE_H_
|
||||
|
||||
#include <cstring>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <sys/stat.h>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
#include "s3objlist.h"
|
||||
#include "cache_node.h"
|
||||
|
||||
//
|
||||
// Struct
|
||||
//
|
||||
struct stat_cache_entry {
|
||||
struct stat stbuf;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date;
|
||||
headers_t meta;
|
||||
bool isforce;
|
||||
bool noobjcache; // Flag: cache is no object for no listing.
|
||||
unsigned long notruncate; // 0<: not remove automatically at checking truncate
|
||||
|
||||
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) {
|
||||
memset(&stbuf, 0, sizeof(struct stat));
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
meta.clear();
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
|
||||
//
|
||||
// Class
|
||||
//-------------------------------------------------------------------
|
||||
// Class StatCache
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE] About Symbolic link cache
|
||||
// The Stats cache class now also has a symbolic link cache.
|
||||
// It is possible to take out the Symbolic link cache in another class,
|
||||
// but the cache out etc. should be synchronized with the Stats cache
|
||||
// and implemented in this class.
|
||||
// Symbolic link cache size and timeout use the same settings as Stats
|
||||
// cache. This simplifies user configuration, and from a user perspective,
|
||||
// the symbolic link cache appears to be included in the Stats cache.
|
||||
//
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static std::mutex stat_cache_lock;
|
||||
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
std::shared_ptr<DirStatCache> pMountPointDir GUARDED_BY(stat_cache_lock); // Top directory = Mount point
|
||||
unsigned long CacheSize;
|
||||
|
||||
void Clear(void);
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData(void) {
|
||||
return &singleton;
|
||||
}
|
||||
bool AddStatHasLock(const std::string& key, const struct stat* pstbuf, const headers_t* pmeta, objtype_t type, bool notruncate) REQUIRES(StatCache::stat_cache_lock);
|
||||
bool TruncateCacheHasLock(bool check_only_oversize_case = true) REQUIRES(StatCache::stat_cache_lock);
|
||||
bool DelStatHasLock(const std::string& key) REQUIRES(StatCache::stat_cache_lock);
|
||||
bool RawGetChildStats(const std::string& dir, s3obj_list_t* plist, s3obj_type_map_t* pobjmap);
|
||||
|
||||
// Attribute
|
||||
unsigned long GetCacheSize(void) const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
time_t GetExpireTime(void) const;
|
||||
time_t SetExpireTime(time_t expire, bool is_interval = false);
|
||||
time_t UnsetExpireTime(void);
|
||||
bool SetCacheNoObject(bool flag);
|
||||
bool EnableCacheNoObject(void) {
|
||||
return SetCacheNoObject(true);
|
||||
}
|
||||
bool DisableCacheNoObject(void) {
|
||||
return SetCacheNoObject(false);
|
||||
}
|
||||
bool GetCacheNoObject(void) const {
|
||||
return IsCacheNoObject;
|
||||
}
|
||||
public:
|
||||
StatCache(const StatCache&) = delete;
|
||||
StatCache(StatCache&&) = delete;
|
||||
StatCache& operator=(const StatCache&) = delete;
|
||||
StatCache& operator=(StatCache&&) = delete;
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(std::string& key, struct stat* pst, bool overcheck = true) {
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(std::string& key, headers_t* meta, bool overcheck = true) {
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, const char* etag, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData()
|
||||
{
|
||||
return &singleton;
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(std::string& key);
|
||||
// Attribute
|
||||
unsigned long GetCacheSize() const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
// Get stat cache
|
||||
bool GetStat(const std::string& key, struct stat* pstbuf, headers_t* pmeta, objtype_t* ptype, const char* petag = nullptr);
|
||||
bool GetStat(const std::string& key, struct stat* pstbuf, headers_t* pmeta)
|
||||
{
|
||||
return GetStat(key, pstbuf, pmeta, nullptr, nullptr);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pstbuf, const char* petag)
|
||||
{
|
||||
return GetStat(key, pstbuf, nullptr, nullptr, petag);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pstbuf)
|
||||
{
|
||||
return GetStat(key, pstbuf, nullptr, nullptr, nullptr);
|
||||
}
|
||||
bool GetStat(const std::string& key, headers_t* pmeta)
|
||||
{
|
||||
return GetStat(key, nullptr, pmeta, nullptr, nullptr);
|
||||
}
|
||||
bool HasStat(const std::string& key, const char* petag = nullptr)
|
||||
{
|
||||
return GetStat(key, nullptr, nullptr, nullptr, petag);
|
||||
}
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
|
||||
// Add stat cache
|
||||
bool AddStat(const std::string& key, const struct stat& stbuf, const headers_t& meta, objtype_t type, bool notruncate = false);
|
||||
bool AddStat(const std::string& key, const struct stat& stbuf, objtype_t type, bool notruncate = false);
|
||||
bool AddNegativeStat(const std::string& key);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key);
|
||||
bool DelStat(std::string& key) {
|
||||
return DelStat(key.c_str());
|
||||
}
|
||||
// Update meta stats
|
||||
bool UpdateStat(const std::string& key, const struct stat& stbuf, const headers_t& meta);
|
||||
|
||||
// Change no truncate flag
|
||||
void ClearNoTruncateFlag(const std::string& key);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const std::string& key);
|
||||
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const struct stat& stbuf, const headers_t& meta, const std::string& value);
|
||||
|
||||
// Get List/Map
|
||||
bool GetChildStatList(const std::string& dir, s3obj_list_t& list);
|
||||
bool GetChildStatMap(const std::string& dir, s3obj_type_map_t& objmap);
|
||||
|
||||
// For debugging
|
||||
void Dump(bool detail);
|
||||
};
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst, bool forcedir = false);
|
||||
|
||||
#endif // S3FS_CACHE_H_
|
||||
|
||||
/*
|
||||
@ -138,6 +135,6 @@ bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst,
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
1454
src/cache_node.cpp
Normal file
1454
src/cache_node.cpp
Normal file
File diff suppressed because it is too large
Load Diff
360
src/cache_node.h
Normal file
360
src/cache_node.h
Normal file
@ -0,0 +1,360 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CACHE_NODE_H_
|
||||
#define S3FS_CACHE_NODE_H_
|
||||
|
||||
#include <iosfwd>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
#include "s3objlist.h"
|
||||
#include "types.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utilities
|
||||
//-------------------------------------------------------------------
|
||||
#define MAX_STAT_CACHE_COUNTER 6
|
||||
|
||||
constexpr int stat_counter_pos(objtype_t type)
|
||||
{
|
||||
if(IS_FILE_OBJ(type)){
|
||||
return 1;
|
||||
}else if(IS_SYMLINK_OBJ(type)){
|
||||
return 2;
|
||||
}else if(IS_DIR_OBJ(type)){
|
||||
return 3;
|
||||
}else if(IS_NEGATIVE_OBJ(type)){
|
||||
return 4;
|
||||
}else{ // objtype_t::UNKNOWN and other
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Base Class : StatCacheNode
|
||||
//-------------------------------------------------------------------
|
||||
class DirStatCache;
|
||||
|
||||
class StatCacheNode : public std::enable_shared_from_this<StatCacheNode>
|
||||
{
|
||||
// [NOTE]
|
||||
// As an exception, declare friends to call some protected methods from
|
||||
// DirStatCache::RemoveChildHasLock and AddHasLock methods.
|
||||
//
|
||||
friend class DirStatCache;
|
||||
|
||||
protected:
|
||||
// Stat cache counter(see. stat_counter_pos())
|
||||
// <position>
|
||||
// 0 = total node count
|
||||
// 1 = file node count
|
||||
// 2 = symlink node count
|
||||
// 3 = directory node count
|
||||
// 4 = negative cache node count
|
||||
//
|
||||
static std::mutex counter_lock;
|
||||
static unsigned long counter[MAX_STAT_CACHE_COUNTER] GUARDED_BY(counter_lock);
|
||||
static bool EnableExpireTime;
|
||||
static bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
static time_t ExpireTime;
|
||||
static bool UseNegativeCache;
|
||||
static std::mutex cache_lock; // for internal data
|
||||
static unsigned long DisableCheckingExpire GUARDED_BY(cache_lock); // If greater than 0, it disables the expiration check, which allows disabling checks during processing.
|
||||
static struct timespec DisableExpireDate GUARDED_BY(cache_lock); // Data registered after this time will not be truncated(if 0 < DisableCheckingExpire)
|
||||
|
||||
private:
|
||||
objtype_t cache_type GUARDED_BY(StatCacheNode::cache_lock) = objtype_t::UNKNOWN; // object type is set in the constructor(except dir).
|
||||
std::string fullpath GUARDED_BY(StatCacheNode::cache_lock); // full path(This value is set only when the object is created)
|
||||
unsigned long hit_count GUARDED_BY(StatCacheNode::cache_lock) = 0L; // hit count
|
||||
struct timespec cache_date GUARDED_BY(StatCacheNode::cache_lock) = {0, 0}; // registration/renewal time
|
||||
bool notruncate GUARDED_BY(StatCacheNode::cache_lock) = false; // If true, not remove automatically at checking truncate.
|
||||
bool has_stat GUARDED_BY(StatCacheNode::cache_lock) = false; // valid stat information flag (for case only path registration and no stat information)
|
||||
struct stat stbuf GUARDED_BY(StatCacheNode::cache_lock) = {}; // stat data
|
||||
bool has_meta GUARDED_BY(StatCacheNode::cache_lock) = false; // valid meta headers information flag (for case only path registration and no meta headers)
|
||||
headers_t meta GUARDED_BY(StatCacheNode::cache_lock); // meta list
|
||||
bool has_extval GUARDED_BY(StatCacheNode::cache_lock) = false; // valid extra value flag
|
||||
std::string extvalue GUARDED_BY(StatCacheNode::cache_lock); // extra value for key(ex. used for symlink)
|
||||
|
||||
protected:
|
||||
static void IncrementCacheCount(objtype_t type);
|
||||
static void DecrementCacheCount(objtype_t type);
|
||||
static bool SetNegativeCache(bool flag);
|
||||
static bool NeedExpireCheckHasLock(const struct timespec& ts) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Cache Type
|
||||
bool isSameObjectTypeHasLock(objtype_t type) const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool isDirectoryHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool isFileHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool isSymlinkHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool isNegativeHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Clear
|
||||
virtual bool ClearDataHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool ClearHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool RemoveChildHasLock(const std::string& strpath) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool isRemovableHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Add
|
||||
virtual bool AddHasLock(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Update(Set)
|
||||
bool UpdateHasLock(objtype_t type) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool UpdateHasLock(const struct stat* pstat, const headers_t* pmeta, bool clear_meta) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool UpdateHasLock(const struct stat* pstat, bool clear_meta) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool UpdateHasLock(bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool UpdateHasLock(const std::string* pextvalue) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool UpdateHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool SetHasLock(const struct stat& stbuf, const headers_t& meta, bool is_notruncate) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Get
|
||||
objtype_t GetTypeHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
const std::string& GetPathHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool HasStatHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool HasMetaHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
bool GetNoTruncateHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool GetHasLock(headers_t* pmeta, struct stat* pst) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool GetExtraHasLock(std::string& value) REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual s3obj_type_map_t::size_type GetChildMapHasLock(s3obj_type_map_t& childmap) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Find
|
||||
virtual bool CheckETagValueHasLock(const char* petagval) const REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual std::shared_ptr<StatCacheNode> FindHasLock(const std::string& strpath, const char* petagval, bool& needTruncate) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// Cache out
|
||||
bool IsExpireStatCacheTimeHasLock() const REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool IsExpiredHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual bool TruncateCacheHasLock() REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
// For debug
|
||||
void DumpElementHasLock(const std::string& indent, std::ostringstream& oss) const REQUIRES(StatCacheNode::cache_lock);
|
||||
virtual void DumpHasLock(const std::string& indent, bool detail, std::ostringstream& oss) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
public:
|
||||
// Properties
|
||||
static unsigned long GetCacheCount(objtype_t type = objtype_t::UNKNOWN);
|
||||
static time_t GetExpireTime();
|
||||
static time_t SetExpireTime(time_t expire, bool is_interval = false);
|
||||
static time_t UnsetExpireTime();
|
||||
static bool IsEnableExpireTime();
|
||||
static bool EnableNegativeCache() { return SetNegativeCache(true); }
|
||||
static bool DisableNegativeCache() { return SetNegativeCache(false); }
|
||||
static bool IsEnabledNegativeCache() { return UseNegativeCache; }
|
||||
static bool PreventExpireCheck();
|
||||
static bool ResumeExpireCheck();
|
||||
|
||||
// Constructor/Destructor
|
||||
explicit StatCacheNode(const char* path = nullptr, objtype_t type = objtype_t::UNKNOWN);
|
||||
virtual ~StatCacheNode();
|
||||
|
||||
StatCacheNode(const StatCacheNode&) = delete;
|
||||
StatCacheNode(StatCacheNode&&) = delete;
|
||||
StatCacheNode& operator=(const StatCacheNode&) = delete;
|
||||
StatCacheNode& operator=(StatCacheNode&&) = delete;
|
||||
|
||||
// Cache Type
|
||||
bool isSameObjectType(objtype_t type);
|
||||
bool isDirectory();
|
||||
bool isFile();
|
||||
bool isSymlink();
|
||||
bool isNegative();
|
||||
|
||||
// Clear
|
||||
bool Clear();
|
||||
bool ClearData();
|
||||
bool RemoveChild(const std::string& strpath);
|
||||
|
||||
// Add
|
||||
bool Add(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate = false);
|
||||
bool AddExtra(const std::string& value);
|
||||
|
||||
// Update(Set)
|
||||
bool Update(const struct stat& stbuf, const headers_t& meta);
|
||||
bool Update(const struct stat& stbuf, bool clear_meta);
|
||||
bool Update(bool is_notruncate);
|
||||
bool Update(const std::string& extvalue);
|
||||
bool Set(const struct stat& stbuf, const headers_t& meta, bool is_notruncate);
|
||||
|
||||
// Get
|
||||
std::string Get();
|
||||
bool Get(headers_t* pmeta, struct stat* pstbuf);
|
||||
bool Get(headers_t& get_meta, struct stat& st);
|
||||
bool Get(headers_t& get_meta);
|
||||
bool Get(struct stat& st);
|
||||
objtype_t GetType() const;
|
||||
struct timespec GetDate() const;
|
||||
unsigned long GetHitCount() const;
|
||||
unsigned long IncrementHitCount();
|
||||
bool GetExtra(std::string& value);
|
||||
s3obj_type_map_t::size_type GetChildMap(s3obj_type_map_t& childmap);
|
||||
|
||||
// Find
|
||||
std::shared_ptr<StatCacheNode> Find(const std::string& strpath, const char* petagval = nullptr);
|
||||
|
||||
// Cache out
|
||||
bool IsExpired();
|
||||
void ClearNoTruncate();
|
||||
bool TruncateCache();
|
||||
|
||||
// For debug
|
||||
void Dump(bool detail);
|
||||
};
|
||||
|
||||
typedef std::map<std::string, std::shared_ptr<StatCacheNode>> statcache_map_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Derived Class : FileStatCache
|
||||
//-------------------------------------------------------------------
|
||||
class FileStatCache : public StatCacheNode
|
||||
{
|
||||
public:
|
||||
explicit FileStatCache(const char* path = nullptr);
|
||||
~FileStatCache() override;
|
||||
|
||||
FileStatCache(const FileStatCache&) = delete;
|
||||
FileStatCache(FileStatCache&&) = delete;
|
||||
FileStatCache& operator=(const FileStatCache&) = delete;
|
||||
FileStatCache& operator=(FileStatCache&&) = delete;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Derived Class : DirStatCache
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// The fullpath of a DirStatCache always ends with a slash ('/').
|
||||
// The keys of the 'children' map managed by this object are the partial
|
||||
// path names of the child objects(files, directories, etc).
|
||||
// For sub-directory objects, the partial path names do not include a
|
||||
// slash.
|
||||
//
|
||||
class DirStatCache : public StatCacheNode
|
||||
{
|
||||
private:
|
||||
std::mutex dir_cache_lock; // for local variables
|
||||
struct timespec last_check_date GUARDED_BY(dir_cache_lock) = {0, 0};
|
||||
objtype_t dir_cache_type GUARDED_BY(dir_cache_lock) = objtype_t::UNKNOWN; // [NOTE] backup for use in destructors only
|
||||
statcache_map_t children GUARDED_BY(dir_cache_lock);
|
||||
|
||||
protected:
|
||||
bool ClearHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
bool RemoveChildHasLock(const std::string& strpath) override REQUIRES(StatCacheNode::cache_lock);
|
||||
bool isRemovableHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
bool HasExistedChildHasLock() REQUIRES(StatCacheNode::cache_lock, dir_cache_lock);
|
||||
|
||||
bool AddHasLock(const std::string& strpath, const struct stat* pstat, const headers_t* pmeta, objtype_t type, bool is_notruncate) override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
s3obj_type_map_t::size_type GetChildMapHasLock(s3obj_type_map_t& childmap) override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
std::shared_ptr<StatCacheNode> FindHasLock(const std::string& strpath, const char* petagval, bool& needTruncate) override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
bool NeedTruncateProcessing();
|
||||
bool IsExpiredHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
bool TruncateCacheHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
bool GetChildLeafNameHasLock(const std::string& strpath, std::string& strLeafName, bool& hasNestedChildren) REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
void DumpHasLock(const std::string& indent, bool detail, std::ostringstream& oss) override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
public:
|
||||
explicit DirStatCache(const char* path = nullptr, objtype_t type = objtype_t::DIR_NORMAL);
|
||||
~DirStatCache() override;
|
||||
|
||||
DirStatCache(const DirStatCache&) = delete;
|
||||
DirStatCache(DirStatCache&&) = delete;
|
||||
DirStatCache& operator=(const DirStatCache&) = delete;
|
||||
DirStatCache& operator=(DirStatCache&&) = delete;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Derived Class : SymlinkStatCache
|
||||
//-------------------------------------------------------------------
|
||||
class SymlinkStatCache : public StatCacheNode
|
||||
{
|
||||
private:
|
||||
std::string link_path;
|
||||
|
||||
protected:
|
||||
bool ClearHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
public:
|
||||
explicit SymlinkStatCache(const char* path = nullptr);
|
||||
~SymlinkStatCache() override;
|
||||
|
||||
SymlinkStatCache(const SymlinkStatCache&) = delete;
|
||||
SymlinkStatCache(SymlinkStatCache&&) = delete;
|
||||
SymlinkStatCache& operator=(const SymlinkStatCache&) = delete;
|
||||
SymlinkStatCache& operator=(SymlinkStatCache&&) = delete;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Derived Class : NegativeStatCache
|
||||
//-------------------------------------------------------------------
|
||||
class NegativeStatCache : public StatCacheNode
|
||||
{
|
||||
protected:
|
||||
bool CheckETagValueHasLock(const char* petagval) const override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
bool IsExpiredHasLock() override REQUIRES(StatCacheNode::cache_lock);
|
||||
|
||||
public:
|
||||
explicit NegativeStatCache(const char* path = nullptr);
|
||||
~NegativeStatCache() override;
|
||||
|
||||
NegativeStatCache(const NegativeStatCache&) = delete;
|
||||
NegativeStatCache(NegativeStatCache&&) = delete;
|
||||
NegativeStatCache& operator=(const NegativeStatCache&) = delete;
|
||||
NegativeStatCache& operator=(NegativeStatCache&&) = delete;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Class : PreventStatCacheExpire
|
||||
//-------------------------------------------------------------------
|
||||
class PreventStatCacheExpire
|
||||
{
|
||||
public:
|
||||
explicit PreventStatCacheExpire()
|
||||
{
|
||||
StatCacheNode::PreventExpireCheck();
|
||||
}
|
||||
|
||||
~PreventStatCacheExpire()
|
||||
{
|
||||
StatCacheNode::ResumeExpireCheck();
|
||||
}
|
||||
|
||||
PreventStatCacheExpire(const PreventStatCacheExpire&) = delete;
|
||||
PreventStatCacheExpire(PreventStatCacheExpire&&) = delete;
|
||||
PreventStatCacheExpire& operator=(const PreventStatCacheExpire&) = delete;
|
||||
PreventStatCacheExpire& operator=(PreventStatCacheExpire&&) = delete;
|
||||
};
|
||||
|
||||
#endif // S3FS_CACHE_NODE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
194
src/common.h
194
src/common.h
@ -21,147 +21,77 @@
|
||||
#ifndef S3FS_COMMON_H_
|
||||
#define S3FS_COMMON_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <string>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "../config.h"
|
||||
|
||||
//
|
||||
// Extended attribute
|
||||
//
|
||||
#ifdef HAVE_SYS_EXTATTR_H
|
||||
#include <sys/extattr.h>
|
||||
#elif HAVE_ATTR_XATTR_H
|
||||
#include <attr/xattr.h>
|
||||
#elif HAVE_SYS_XATTR_H
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
//
|
||||
// Macro
|
||||
//
|
||||
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
|
||||
|
||||
//
|
||||
// Debug level
|
||||
//
|
||||
enum s3fs_log_level{
|
||||
S3FS_LOG_CRIT = 0, // LOG_CRIT
|
||||
S3FS_LOG_ERR = 1, // LOG_ERR
|
||||
S3FS_LOG_WARN = 3, // LOG_WARNING
|
||||
S3FS_LOG_INFO = 7, // LOG_INFO
|
||||
S3FS_LOG_DBG = 15 // LOG_DEBUG
|
||||
};
|
||||
|
||||
//
|
||||
// Debug macros
|
||||
//
|
||||
#define IS_S3FS_LOG_CRIT() (S3FS_LOG_CRIT == debug_level)
|
||||
#define IS_S3FS_LOG_ERR() (S3FS_LOG_ERR == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_WARN() (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_INFO() (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
|
||||
#define IS_S3FS_LOG_DBG() (S3FS_LOG_DBG == (debug_level & S3FS_LOG_DBG))
|
||||
|
||||
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? LOG_DEBUG : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? LOG_ERR : LOG_CRIT )
|
||||
|
||||
#define S3FS_LOG_LEVEL_STRING(level) \
|
||||
( S3FS_LOG_DBG == (level & S3FS_LOG_DBG) ? "[DBG] " : \
|
||||
S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
|
||||
S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
|
||||
S3FS_LOG_ERR == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )
|
||||
|
||||
#define S3FS_LOG_NEST_MAX 4
|
||||
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
|
||||
|
||||
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
|
||||
if(foreground){ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// small trick for VA_ARGS
|
||||
//
|
||||
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
|
||||
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
|
||||
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")
|
||||
|
||||
//
|
||||
// Typedef
|
||||
//
|
||||
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>{
|
||||
bool operator()(const std::string &strleft, const std::string &strright) const
|
||||
{
|
||||
return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
|
||||
|
||||
//
|
||||
// Header "x-amz-meta-xattr" is for extended attributes.
|
||||
// This header is url encoded string which is json formatted.
|
||||
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
|
||||
//
|
||||
typedef struct xattr_value{
|
||||
unsigned char* pvalue;
|
||||
size_t length;
|
||||
|
||||
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
|
||||
~xattr_value()
|
||||
{
|
||||
if(pvalue){
|
||||
free(pvalue);
|
||||
}
|
||||
}
|
||||
}XATTRVAL, *PXATTRVAL;
|
||||
|
||||
typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: namespace these
|
||||
static constexpr int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
|
||||
static constexpr off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
|
||||
|
||||
extern bool foreground;
|
||||
extern bool nomultipart;
|
||||
extern bool pathrequeststyle;
|
||||
extern bool complement_stat;
|
||||
extern bool noxmlns;
|
||||
extern std::string program_name;
|
||||
extern std::string service_path;
|
||||
extern std::string host;
|
||||
extern std::string bucket;
|
||||
extern std::string s3host;
|
||||
extern std::string mount_prefix;
|
||||
extern std::string endpoint;
|
||||
extern std::string region;
|
||||
extern std::string cipher_suites;
|
||||
extern s3fs_log_level debug_level;
|
||||
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
|
||||
extern std::string instance_name;
|
||||
|
||||
extern std::atomic<long long unsigned> num_requests_head_object;
|
||||
extern std::atomic<long long unsigned> num_requests_put_object;
|
||||
extern std::atomic<long long unsigned> num_requests_get_object;
|
||||
extern std::atomic<long long unsigned> num_requests_delete_object;
|
||||
extern std::atomic<long long unsigned> num_requests_list_bucket;
|
||||
extern std::atomic<long long unsigned> num_requests_mpu_initiate;
|
||||
extern std::atomic<long long unsigned> num_requests_mpu_complete;
|
||||
extern std::atomic<long long unsigned> num_requests_mpu_abort;
|
||||
extern std::atomic<long long unsigned> num_requests_mpu_upload_part;
|
||||
extern std::atomic<long long unsigned> num_requests_mpu_copy_part;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// For weak attribute
|
||||
//-------------------------------------------------------------------
|
||||
#define S3FS_FUNCATTR_WEAK __attribute__ ((weak,unused))
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// For clang -Wthread-safety
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef __clang__
|
||||
#define THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
|
||||
#else
|
||||
#define THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
|
||||
#endif
|
||||
|
||||
#define GUARDED_BY(x) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
|
||||
|
||||
#define PT_GUARDED_BY(x) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
|
||||
|
||||
#define REQUIRES(...) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__))
|
||||
|
||||
#define RETURN_CAPABILITY(...) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(lock_returned(__VA_ARGS__))
|
||||
|
||||
#define ACQUIRED_BEFORE(...) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
|
||||
|
||||
#define ACQUIRED_AFTER(...) \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
|
||||
|
||||
#define NO_THREAD_SAFETY_ANALYSIS \
|
||||
THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
|
||||
|
||||
#endif // S3FS_COMMON_H_
|
||||
|
||||
@ -170,6 +100,6 @@ extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -18,74 +18,37 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
|
||||
#include "s3fs_auth.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function
|
||||
//-------------------------------------------------------------------
|
||||
string s3fs_get_content_md5(int fd)
|
||||
std::string s3fs_get_content_md5(int fd)
|
||||
{
|
||||
unsigned char* md5hex;
|
||||
char* base64;
|
||||
string Signature;
|
||||
|
||||
if(NULL == (md5hex = s3fs_md5hexsum(fd, 0, -1))){
|
||||
return string("");
|
||||
}
|
||||
if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){
|
||||
return string(""); // ENOMEM
|
||||
}
|
||||
free(md5hex);
|
||||
|
||||
Signature = base64;
|
||||
free(base64);
|
||||
|
||||
return Signature;
|
||||
md5_t md5;
|
||||
if(!s3fs_md5_fd(fd, 0, -1, &md5)){
|
||||
// TODO: better return value?
|
||||
return "";
|
||||
}
|
||||
return s3fs_base64(md5.data(), md5.size());
|
||||
}
|
||||
|
||||
string s3fs_md5sum(int fd, off_t start, ssize_t size)
|
||||
std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
size_t digestlen = get_md5_digest_length();
|
||||
unsigned char* md5hex;
|
||||
sha256_t sha256;
|
||||
|
||||
if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){
|
||||
return string("");
|
||||
}
|
||||
if(!s3fs_sha256_fd(fd, start, size, &sha256)){
|
||||
// TODO: better return value?
|
||||
return "";
|
||||
}
|
||||
|
||||
std::string md5 = s3fs_hex(md5hex, digestlen);
|
||||
free(md5hex);
|
||||
std::string sha256hex = s3fs_hex_lower(sha256.data(), sha256.size());
|
||||
|
||||
return md5;
|
||||
}
|
||||
|
||||
string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
size_t digestlen = get_sha256_digest_length();
|
||||
char sha256[2 * digestlen + 1];
|
||||
char hexbuf[3];
|
||||
unsigned char* sha256hex;
|
||||
|
||||
if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
|
||||
return string("");
|
||||
}
|
||||
|
||||
memset(sha256, 0, 2 * digestlen + 1);
|
||||
for(size_t pos = 0; pos < digestlen; pos++){
|
||||
snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
|
||||
strncat(sha256, hexbuf, 2);
|
||||
}
|
||||
free(sha256hex);
|
||||
|
||||
return string(sha256);
|
||||
return sha256hex;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -93,6 +56,6 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
7271
src/curl.cpp
7271
src/curl.cpp
File diff suppressed because it is too large
Load Diff
731
src/curl.h
731
src/curl.h
@ -21,468 +21,347 @@
|
||||
#ifndef S3FS_CURL_H_
|
||||
#define S3FS_CURL_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <curl/curl.h>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "metaheader.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "types.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Symbols
|
||||
// Avoid dependency on libcurl version
|
||||
//----------------------------------------------
|
||||
#define MIN_MULTIPART_SIZE 5242880 // 5MB
|
||||
|
||||
//----------------------------------------------
|
||||
// class BodyData
|
||||
//----------------------------------------------
|
||||
// memory class for curl write memory callback
|
||||
// [NOTE]
|
||||
// The following symbols (enum) depend on the version of libcurl.
|
||||
// CURLOPT_TCP_KEEPALIVE 7.25.0 and later
|
||||
// CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later
|
||||
// CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later
|
||||
//
|
||||
class BodyData
|
||||
{
|
||||
private:
|
||||
char* text;
|
||||
size_t lastpos;
|
||||
size_t bufsize;
|
||||
// s3fs uses these, if you build s3fs with the old libcurl,
|
||||
// substitute the following symbols to avoid errors.
|
||||
// If the version of libcurl linked at runtime is old,
|
||||
// curl_easy_setopt results in an error(CURLE_UNKNOWN_OPTION) and
|
||||
// a message is output.
|
||||
//
|
||||
#if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1)
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
|
||||
#else
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
|
||||
#endif
|
||||
|
||||
private:
|
||||
bool IsSafeSize(size_t addbytes) const {
|
||||
return ((lastpos + addbytes + 1) > bufsize ? false : true);
|
||||
}
|
||||
bool Resize(size_t addbytes);
|
||||
#if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1)
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
|
||||
#else
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
|
||||
#endif
|
||||
|
||||
public:
|
||||
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
|
||||
~BodyData() {
|
||||
Clear();
|
||||
}
|
||||
|
||||
void Clear(void);
|
||||
bool Append(void* ptr, size_t bytes);
|
||||
bool Append(void* ptr, size_t blockSize, size_t numBlocks) {
|
||||
return Append(ptr, (blockSize * numBlocks));
|
||||
}
|
||||
const char* str() const;
|
||||
size_t size() const {
|
||||
return lastpos;
|
||||
}
|
||||
};
|
||||
#if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1)
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
|
||||
#else
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
|
||||
#endif
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility structs & typedefs
|
||||
// Structure / Typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::vector<std::string> etaglist_t;
|
||||
|
||||
// Each part information for Multipart upload
|
||||
struct filepart
|
||||
{
|
||||
bool uploaded; // does finish uploading
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) descriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
ssize_t size; // uploading size
|
||||
etaglist_t* etaglist; // use only parallel upload
|
||||
int etagpos; // use only parallel upload
|
||||
|
||||
filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {}
|
||||
~filepart()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void clear(void)
|
||||
{
|
||||
uploaded = false;
|
||||
etag = "";
|
||||
fd = -1;
|
||||
startpos = 0;
|
||||
size = -1;
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
|
||||
void add_etag_list(etaglist_t* list)
|
||||
{
|
||||
if(list){
|
||||
list->push_back(std::string(""));
|
||||
etaglist = list;
|
||||
etagpos = list->size() - 1;
|
||||
}else{
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// for progress
|
||||
struct case_insensitive_compare_func
|
||||
{
|
||||
bool operator()(const std::string& a, const std::string& b) const {
|
||||
return strcasecmp(a.c_str(), b.c_str()) < 0;
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
typedef std::pair<double, double> progress_t;
|
||||
typedef std::map<CURL*, time_t> curltime_t;
|
||||
typedef std::map<CURL*, progress_t> curlprogress_t;
|
||||
|
||||
class S3fsMultiCurl;
|
||||
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
|
||||
class CurlHandlerPool
|
||||
{
|
||||
public:
|
||||
explicit CurlHandlerPool(int maxHandlers)
|
||||
: mMaxHandlers(maxHandlers)
|
||||
, mHandlers(NULL)
|
||||
, mIndex(-1)
|
||||
{
|
||||
assert(maxHandlers > 0);
|
||||
}
|
||||
|
||||
bool Init();
|
||||
bool Destroy();
|
||||
|
||||
CURL* GetHandler();
|
||||
void ReturnHandler(CURL* h);
|
||||
|
||||
private:
|
||||
int mMaxHandlers;
|
||||
|
||||
pthread_mutex_t mLock;
|
||||
CURL** mHandlers;
|
||||
int mIndex;
|
||||
struct curlprogress {
|
||||
time_t time;
|
||||
double dl_progress;
|
||||
double ul_progress;
|
||||
};
|
||||
typedef std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> CurlUniquePtr;
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
class S3fsCred;
|
||||
class S3fsCurl;
|
||||
|
||||
// Prototype function for lazy setup options for curl handle
|
||||
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
|
||||
|
||||
typedef std::map<std::string, std::string> sseckeymap_t;
|
||||
typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
|
||||
// storage class(rrs)
|
||||
enum storage_class_t {
|
||||
STANDARD,
|
||||
STANDARD_IA,
|
||||
REDUCED_REDUNDANCY
|
||||
};
|
||||
|
||||
// sse type
|
||||
enum sse_type_t {
|
||||
SSE_DISABLE = 0, // not use server side encrypting
|
||||
SSE_S3, // server side encrypting by S3 key
|
||||
SSE_C, // server side encrypting by custom key
|
||||
SSE_KMS // server side encrypting by kms id
|
||||
};
|
||||
|
||||
// share
|
||||
#define SHARE_MUTEX_DNS 0
|
||||
#define SHARE_MUTEX_SSL_SESSION 1
|
||||
#define SHARE_MUTEX_MAX 2
|
||||
typedef std::vector<sseckeymap_t> sseckeylist_t;
|
||||
|
||||
// Class for lapping curl
|
||||
//
|
||||
class S3fsCurl
|
||||
{
|
||||
friend class S3fsMultiCurl;
|
||||
private:
|
||||
enum class REQTYPE : int8_t {
|
||||
UNSET = -1,
|
||||
DELETE,
|
||||
HEAD,
|
||||
PUTHEAD,
|
||||
PUT,
|
||||
GET,
|
||||
CHKBUCKET,
|
||||
LISTBUCKET,
|
||||
PREMULTIPOST,
|
||||
COMPLETEMULTIPOST,
|
||||
UPLOADMULTIPOST,
|
||||
COPYMULTIPOST,
|
||||
MULTILIST,
|
||||
IAMCRED,
|
||||
ABORTMULTIUPLOAD,
|
||||
IAMROLE
|
||||
};
|
||||
|
||||
private:
|
||||
enum REQTYPE {
|
||||
REQTYPE_UNSET = -1,
|
||||
REQTYPE_DELETE = 0,
|
||||
REQTYPE_HEAD,
|
||||
REQTYPE_PUTHEAD,
|
||||
REQTYPE_PUT,
|
||||
REQTYPE_GET,
|
||||
REQTYPE_CHKBUCKET,
|
||||
REQTYPE_LISTBUCKET,
|
||||
REQTYPE_PREMULTIPOST,
|
||||
REQTYPE_COMPLETEMULTIPOST,
|
||||
REQTYPE_UPLOADMULTIPOST,
|
||||
REQTYPE_COPYMULTIPOST,
|
||||
REQTYPE_MULTILIST,
|
||||
REQTYPE_IAMCRED,
|
||||
REQTYPE_ABORTMULTIUPLOAD,
|
||||
REQTYPE_IAMROLE
|
||||
};
|
||||
// Environment name
|
||||
static constexpr char S3FS_SSL_PRIVKEY_PASSWORD[] = "S3FS_SSL_PRIVKEY_PASSWORD";
|
||||
|
||||
// class variables
|
||||
static pthread_mutex_t curl_handles_lock;
|
||||
static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
|
||||
static bool is_initglobal_done;
|
||||
static CurlHandlerPool* sCurlPool;
|
||||
static int sCurlPoolSize;
|
||||
static CURLSH* hCurlShare;
|
||||
static bool is_cert_check;
|
||||
static bool is_dns_cache;
|
||||
static bool is_ssl_session_cache;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static std::string default_acl; // TODO: to enum
|
||||
static storage_class_t storage_class;
|
||||
static sseckeylist_t sseckeys;
|
||||
static std::string ssekmsid;
|
||||
static sse_type_t ssetype;
|
||||
static bool is_content_md5;
|
||||
static bool is_verbose;
|
||||
static std::string AWSAccessKeyId;
|
||||
static std::string AWSSecretAccessKey;
|
||||
static std::string AWSAccessToken;
|
||||
static time_t AWSAccessTokenExpire;
|
||||
static std::string IAM_role;
|
||||
static long ssl_verify_hostname;
|
||||
static curltime_t curl_times;
|
||||
static curlprogress_t curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static int max_parallel_cnt;
|
||||
static off_t multipart_size;
|
||||
static bool is_sigv4;
|
||||
static bool is_ua; // User-Agent
|
||||
// class variables
|
||||
static std::atomic<bool> curl_warnings_once; // emit older curl warnings only once
|
||||
static std::mutex curl_handles_lock;
|
||||
static struct callback_locks_t {
|
||||
std::mutex dns;
|
||||
std::mutex ssl_session;
|
||||
} callback_locks;
|
||||
static bool is_initglobal_done;
|
||||
static bool is_cert_check;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static acl_t default_acl;
|
||||
static std::string storage_class;
|
||||
static sseckeylist_t sseckeys;
|
||||
static std::string ssekmsid;
|
||||
static sse_type_t ssetype;
|
||||
static bool is_content_md5;
|
||||
static bool is_verbose;
|
||||
static bool is_dump_body;
|
||||
static S3fsCred* ps3fscred;
|
||||
static long ssl_verify_hostname;
|
||||
static std::string client_cert;
|
||||
static std::string client_cert_type;
|
||||
static std::string client_priv_key;
|
||||
static std::string client_priv_key_type;
|
||||
static std::string client_key_password;
|
||||
static std::map<const CURL*, curlprogress> curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static std::string userAgent;
|
||||
static int max_multireq;
|
||||
static off_t multipart_size;
|
||||
static off_t multipart_copy_size;
|
||||
static signature_type_t signature_type;
|
||||
static bool is_unsigned_payload;
|
||||
static bool is_ua; // User-Agent
|
||||
static bool listobjectsv2;
|
||||
static bool requester_pays;
|
||||
static std::string proxy_url;
|
||||
static bool proxy_http;
|
||||
static std::string proxy_userpwd; // load from file(<username>:<passphrase>)
|
||||
static long ipresolve_type; // this value is a libcurl symbol.
|
||||
|
||||
// variables
|
||||
CURL* hCurl;
|
||||
REQTYPE type; // type of request
|
||||
std::string path; // target object path
|
||||
std::string base_path; // base path (for multi curl head request)
|
||||
std::string saved_path; // saved path = cache key (for multi curl head request)
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
BodyData* bodydata; // body data by WriteMemoryCallback
|
||||
BodyData* headdata; // header data by WriteMemoryCallback
|
||||
long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
int postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
bool is_use_ahbe; // additional header by extension
|
||||
int retry_count; // retry count for multipart
|
||||
FILE* b_infile; // backup for retrying
|
||||
const unsigned char* b_postdata; // backup for retrying
|
||||
int b_postdata_remaining; // backup for retrying
|
||||
off_t b_partdata_startpos; // backup for retrying
|
||||
ssize_t b_partdata_size; // backup for retrying
|
||||
int b_ssekey_pos; // backup for retrying
|
||||
std::string b_ssevalue; // backup for retrying
|
||||
sse_type_t b_ssetype; // backup for retrying
|
||||
// variables
|
||||
CurlUniquePtr hCurl PT_GUARDED_BY(curl_handles_lock) = {nullptr, curl_easy_cleanup};
|
||||
REQTYPE type; // type of request
|
||||
std::string path; // target object path
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
std::string bodydata; // body data by WriteMemoryCallback
|
||||
std::string headdata; // header data by WriteMemoryCallback
|
||||
long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
off_t postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
bool is_use_ahbe; // additional header by extension
|
||||
int retry_count; // retry count, this is used only sleep time before retrying
|
||||
std::unique_ptr<FILE, decltype(&s3fs_fclose)> b_infile = {nullptr, &s3fs_fclose}; // backup for retrying
|
||||
const unsigned char* b_postdata; // backup for retrying
|
||||
off_t b_postdata_remaining; // backup for retrying
|
||||
off_t b_partdata_startpos; // backup for retrying
|
||||
off_t b_partdata_size; // backup for retrying
|
||||
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
|
||||
std::string query_string; // request query string
|
||||
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
|
||||
CURLcode curlCode; // handle curl return
|
||||
|
||||
public:
|
||||
// constructor/destructor
|
||||
explicit S3fsCurl(bool ahbe = false);
|
||||
~S3fsCurl();
|
||||
public:
|
||||
static constexpr long S3FSCURL_RESPONSECODE_NOTSET = -1;
|
||||
static constexpr long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
|
||||
static constexpr int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
|
||||
|
||||
private:
|
||||
// class methods
|
||||
static bool InitGlobalCurl(void);
|
||||
static bool DestroyGlobalCurl(void);
|
||||
static bool InitShareCurl(void);
|
||||
static bool DestroyShareCurl(void);
|
||||
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr);
|
||||
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr);
|
||||
static bool InitCryptMutex(void);
|
||||
static bool DestroyCryptMutex(void);
|
||||
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
|
||||
public:
|
||||
// constructor/destructor
|
||||
explicit S3fsCurl(bool ahbe = false);
|
||||
~S3fsCurl();
|
||||
S3fsCurl(const S3fsCurl&) = delete;
|
||||
S3fsCurl(S3fsCurl&&) = delete;
|
||||
S3fsCurl& operator=(const S3fsCurl&) = delete;
|
||||
S3fsCurl& operator=(S3fsCurl&&) = delete;
|
||||
|
||||
static bool InitMimeType(const char* MimeFile = NULL);
|
||||
static bool LocateBundle(void);
|
||||
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
|
||||
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
|
||||
static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
private:
|
||||
// class methods
|
||||
static bool InitGlobalCurl();
|
||||
static bool DestroyGlobalCurl();
|
||||
static bool InitCryptMutex();
|
||||
static bool DestroyCryptMutex();
|
||||
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
|
||||
static std::string extractURI(const std::string& url);
|
||||
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
static bool LocateBundle();
|
||||
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
|
||||
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
|
||||
static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
static bool SetIAMRoleFromMetaData(const char* response);
|
||||
static bool LoadEnvSseCKeys(void);
|
||||
static bool LoadEnvSseKmsid(void);
|
||||
static bool PushbackSseKeys(std::string& onekey);
|
||||
static bool AddUserAgent(CURL* hCurl);
|
||||
// lazy functions for set curl options
|
||||
static bool MultipartUploadPartSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartUploadSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
|
||||
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static bool LoadEnvSseCKeys();
|
||||
static bool LoadEnvSseKmsid();
|
||||
static bool PushbackSseKeys(const std::string& onekey);
|
||||
static bool AddUserAgent(const CurlUniquePtr& hCurl);
|
||||
|
||||
// methods
|
||||
bool ResetHandle(void);
|
||||
bool RemakeHandle(void);
|
||||
bool ClearInternalData(void);
|
||||
void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
|
||||
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
|
||||
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
|
||||
bool GetUploadId(std::string& upload_id);
|
||||
int GetIAMCredentials(void);
|
||||
static int CurlDebugFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyInFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyOutFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int RawCurlDebugFunc(const CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
|
||||
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
|
||||
bool UploadMultipartPostComplete();
|
||||
// methods
|
||||
bool ResetHandle() REQUIRES(S3fsCurl::curl_handles_lock);
|
||||
bool RemakeHandle();
|
||||
bool ClearInternalData();
|
||||
bool insertV4Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
|
||||
void insertV2Headers(const std::string& access_key_id, const std::string& secret_access_key, const std::string& access_token);
|
||||
void insertIBMIAMHeaders(const std::string& access_key_id, const std::string& access_token);
|
||||
bool insertAuthHeaders();
|
||||
bool AddSseRequestHead(sse_type_t ssetype, const std::string& ssevalue, bool is_copy);
|
||||
bool PreHeadRequest(const char* tpath, size_t ssekey_pos = -1);
|
||||
bool PreHeadRequest(const std::string& tpath, size_t ssekey_pos = -1) {
|
||||
return PreHeadRequest(tpath.c_str(), ssekey_pos);
|
||||
}
|
||||
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource, const std::string& secret_access_key, const std::string& access_token);
|
||||
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601, const std::string& secret_access_key, const std::string& access_token);
|
||||
int MultipartUploadContentPartSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int MultipartUploadCopyPartSetup(const char* from, const char* to, int part_num, const std::string& upload_id, const headers_t& meta);
|
||||
bool MultipartUploadContentPartComplete();
|
||||
bool MultipartUploadCopyPartComplete();
|
||||
int MapPutErrorResponse(int result) const;
|
||||
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl(const char* MimeFile = NULL);
|
||||
static bool DestroyS3fsCurl(void);
|
||||
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
|
||||
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
|
||||
static bool CheckIAMCredentialUpdate(void);
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl();
|
||||
static bool InitCredentialObject(S3fsCred* pcredobj);
|
||||
static bool InitMimeType(const std::string& strFile);
|
||||
static bool DestroyS3fsCurl();
|
||||
|
||||
// class methods(variables)
|
||||
static std::string LookupMimeType(const std::string& name);
|
||||
static bool SetCheckCertificate(bool isCertCheck);
|
||||
static bool SetDnsCache(bool isCache);
|
||||
static bool SetSslSessionCache(bool isCache);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; }
|
||||
static int SetRetries(int count);
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
|
||||
static std::string SetDefaultAcl(const char* acl);
|
||||
static storage_class_t SetStorageClass(storage_class_t storage_class);
|
||||
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
|
||||
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
|
||||
static sse_type_t SetSseType(sse_type_t type);
|
||||
static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; }
|
||||
static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); }
|
||||
static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); }
|
||||
static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); }
|
||||
static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); }
|
||||
static bool FinalCheckSse(void);
|
||||
static bool SetSseCKeys(const char* filepath);
|
||||
static bool SetSseKmsid(const char* kmsid);
|
||||
static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); }
|
||||
static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); }
|
||||
static bool GetSseKey(std::string& md5, std::string& ssekey);
|
||||
static bool GetSseKeyMd5(int pos, std::string& md5);
|
||||
static int GetSseKeyCount(void);
|
||||
static bool SetContentMd5(bool flag);
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
|
||||
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
|
||||
static bool IsSetAccessKeyId(void){
|
||||
return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
|
||||
static int SetMaxParallelCount(int value);
|
||||
static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
|
||||
static std::string SetIAMRole(const char* role);
|
||||
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
|
||||
static bool SetMultipartSize(off_t size);
|
||||
static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; }
|
||||
static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; }
|
||||
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
|
||||
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
|
||||
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
|
||||
// class methods(variables)
|
||||
static std::string LookupMimeType(const std::string& name);
|
||||
static bool SetCheckCertificate(bool isCertCheck);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout() { return S3fsCurl::readwrite_timeout; }
|
||||
static int SetRetries(int count);
|
||||
static int GetRetries();
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket() { return S3fsCurl::is_public_bucket; }
|
||||
static acl_t SetDefaultAcl(acl_t acl);
|
||||
static acl_t GetDefaultAcl();
|
||||
static std::string SetStorageClass(const std::string& storage_class);
|
||||
static std::string GetStorageClass() { return S3fsCurl::storage_class; }
|
||||
static bool LoadEnvSse() { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
|
||||
static sse_type_t SetSseType(sse_type_t type);
|
||||
static sse_type_t GetSseType() { return S3fsCurl::ssetype; }
|
||||
static bool IsSseDisable() { return (sse_type_t::SSE_DISABLE == S3fsCurl::ssetype); }
|
||||
static bool IsSseS3Type() { return (sse_type_t::SSE_S3 == S3fsCurl::ssetype); }
|
||||
static bool IsSseCType() { return (sse_type_t::SSE_C == S3fsCurl::ssetype); }
|
||||
static bool IsSseKmsType() { return (sse_type_t::SSE_KMS == S3fsCurl::ssetype); }
|
||||
static bool FinalCheckSse();
|
||||
static bool SetSseCKeys(const char* filepath);
|
||||
static bool SetSseKmsid(const char* kmsid);
|
||||
static bool IsSetSseKmsId() { return !S3fsCurl::ssekmsid.empty(); }
|
||||
static const char* GetSseKmsId() { return S3fsCurl::ssekmsid.c_str(); }
|
||||
static bool GetSseKey(std::string& md5, std::string& ssekey);
|
||||
static bool GetSseKeyMd5(size_t pos, std::string& md5);
|
||||
static size_t GetSseKeyCount();
|
||||
static bool SetContentMd5(bool flag);
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose() { return S3fsCurl::is_verbose; }
|
||||
static bool SetDumpBody(bool flag);
|
||||
static bool IsDumpBody() { return S3fsCurl::is_dump_body; }
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname() { return S3fsCurl::ssl_verify_hostname; }
|
||||
static bool SetSSLClientCertOptions(const std::string& values);
|
||||
static void ResetOffset(S3fsCurl* pCurl);
|
||||
static bool SetMultipartSize(off_t size);
|
||||
static off_t GetMultipartSize() { return S3fsCurl::multipart_size; }
|
||||
static bool SetMultipartCopySize(off_t size);
|
||||
static off_t GetMultipartCopySize() { return S3fsCurl::multipart_copy_size; }
|
||||
static signature_type_t SetSignatureType(signature_type_t signature_type) { signature_type_t bresult = S3fsCurl::signature_type; S3fsCurl::signature_type = signature_type; return bresult; }
|
||||
static signature_type_t GetSignatureType() { return S3fsCurl::signature_type; }
|
||||
static bool SetUnsignedPayload(bool issset) { bool bresult = S3fsCurl::is_unsigned_payload; S3fsCurl::is_unsigned_payload = issset; return bresult; }
|
||||
static bool GetUnsignedPayload() { return S3fsCurl::is_unsigned_payload; }
|
||||
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
|
||||
static bool IsUserAgentFlag() { return S3fsCurl::is_ua; }
|
||||
static void InitUserAgent();
|
||||
static bool SetListObjectsV2(bool isset) { bool bresult = S3fsCurl::listobjectsv2; S3fsCurl::listobjectsv2 = isset; return bresult; }
|
||||
static bool IsListObjectsV2() { return S3fsCurl::listobjectsv2; }
|
||||
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
|
||||
static bool IsRequesterPays() { return S3fsCurl::requester_pays; }
|
||||
static bool SetProxy(const char* url);
|
||||
static bool SetProxyUserPwd(const char* userpwd);
|
||||
static bool SetIPResolveType(const char* value);
|
||||
|
||||
// methods
|
||||
bool CreateCurlHandle(bool force = false);
|
||||
bool DestroyCurlHandle(void);
|
||||
// methods
|
||||
bool CreateCurlHandle(bool remake = false);
|
||||
bool DestroyCurlHandle(bool clear_internal_data = true);
|
||||
bool DestroyCurlHandleHasLock(bool clear_internal_data = true) REQUIRES(S3fsCurl::curl_handles_lock);
|
||||
|
||||
bool LoadIAMRoleFromMetaData(void);
|
||||
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
|
||||
bool GetResponseCode(long& responseCode);
|
||||
int RequestPerform(void);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
|
||||
bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) {
|
||||
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
|
||||
}
|
||||
int HeadRequest(const char* tpath, headers_t& meta);
|
||||
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
|
||||
int PutRequest(const char* tpath, headers_t& meta, int fd);
|
||||
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue);
|
||||
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
|
||||
int CheckBucket(void);
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
|
||||
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
|
||||
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
|
||||
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
|
||||
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
|
||||
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
|
||||
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
|
||||
bool GetIAMCredentials(const char* cred_url, const char* iam_v2_token, const char* ibm_secret_access_key, std::string& response);
|
||||
bool GetIAMRoleFromMetaData(const char* cred_url, const char* iam_v2_token, std::string& token);
|
||||
bool GetResponseCode(long& responseCode, bool from_curl_handle = true) const;
|
||||
int RequestPerform(bool dontAddAuthHeaders=false);
|
||||
int DeleteRequest(const char* tpath);
|
||||
int GetIAMv2ApiToken(const char* token_url, int token_ttl, const char* token_ttl_hdr, std::string& response);
|
||||
int HeadRequest(const char* tpath, headers_t& meta);
|
||||
int PutHeadRequest(const char* tpath, const headers_t& meta, bool is_copy);
|
||||
int PutRequest(const char* tpath, headers_t& meta, int fd);
|
||||
int PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
|
||||
int GetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
|
||||
int CheckBucket(const char* check_path, bool compat_dir, bool force_no_sse);
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int PreMultipartUploadRequest(const char* tpath, const headers_t& meta, std::string& upload_id);
|
||||
int MultipartUploadPartSetup(const char* tpath, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
|
||||
int MultipartUploadComplete(const char* tpath, const std::string& upload_id, const etaglist_t& parts);
|
||||
bool MultipartUploadPartComplete();
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
|
||||
int MultipartPutHeadRequest(const std::string& from, const std::string& to, int part_number, const std::string& upload_id, const headers_t& meta);
|
||||
int MultipartUploadPartRequest(const char* tpath, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
|
||||
|
||||
// methods(variables)
|
||||
CURL* GetCurlHandle(void) const { return hCurl; }
|
||||
std::string GetPath(void) const { return path; }
|
||||
std::string GetBasePath(void) const { return base_path; }
|
||||
std::string GetSpacialSavedPath(void) const { return saved_path; }
|
||||
std::string GetUrl(void) const { return url; }
|
||||
headers_t* GetResponseHeaders(void) { return &responseHeaders; }
|
||||
BodyData* GetBodyData(void) const { return bodydata; }
|
||||
BodyData* GetHeadData(void) const { return headdata; }
|
||||
long GetLastResponseCode(void) const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe(void) { return SetUseAhbe(true); }
|
||||
bool DisableUseAhbe(void) { return SetUseAhbe(false); }
|
||||
bool IsUseAhbe(void) const { return is_use_ahbe; }
|
||||
int GetMultipartRetryCount(void) const { return retry_count; }
|
||||
void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
|
||||
bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); }
|
||||
int GetLastPreHeadSeecKeyPos(void) const { return b_ssekey_pos; }
|
||||
// methods(variables)
|
||||
const std::string& GetPath() const { return path; }
|
||||
const std::string& GetUrl() const { return url; }
|
||||
const std::string& GetOp() const { return op; }
|
||||
const headers_t* GetResponseHeaders() const { return &responseHeaders; }
|
||||
const std::string& GetBodyData() const { return bodydata; }
|
||||
const std::string& GetHeadData() const { return headdata; }
|
||||
CURLcode GetCurlCode() const { return curlCode; }
|
||||
long GetLastResponseCode() const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe() { return SetUseAhbe(true); }
|
||||
bool DisableUseAhbe() { return SetUseAhbe(false); }
|
||||
bool IsUseAhbe() const { return is_use_ahbe; }
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsMultiCurl
|
||||
//----------------------------------------------
|
||||
// Class for lapping multi curl
|
||||
//
|
||||
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying

// Wrapper around libcurl's multi interface: collects S3fsCurl requests,
// runs up to max_multireq of them in parallel, and dispatches the
// success/retry callbacks per finished request.
class S3fsMultiCurl
{
    private:
        static int max_multireq;    // upper bound on requests in flight at once

        s3fscurlmap_t cMap_all;     // all of curl requests (not yet sent)
        s3fscurlmap_t cMap_req;     // curl requests are sent (in flight)

        S3fsMultiSuccessCallback SuccessCallback;   // invoked for each request that completed successfully
        S3fsMultiRetryCallback   RetryCallback;     // builds a replacement request for a failed one (may return NULL)

    private:
        // NOTE(review): is_all presumably clears pending requests in addition
        // to sent ones — confirm against the .cpp implementation.
        bool ClearEx(bool is_all);
        int MultiPerform(void);
        int MultiRead(void);

        // entry point handed to the worker thread for a single request
        static void* RequestPerformWrapper(void* arg);

    public:
        S3fsMultiCurl();
        ~S3fsMultiCurl();

        static int SetMaxMultiRequest(int max);
        static int GetMaxMultiRequest(void) { return S3fsMultiCurl::max_multireq; }

        // Both setters return the previously registered callback.
        S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
        S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
        bool Clear(void) { return ClearEx(true); }
        bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
        int Request(void);
};
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility Functions
|
||||
//----------------------------------------------
|
||||
std::string GetContentMD5(int fd);
|
||||
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
|
||||
std::string md5sum(int fd, off_t start, ssize_t size);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
|
||||
#endif // S3FS_CURL_H_
|
||||
|
||||
/*
|
||||
 * Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
234
src/curl_share.cpp
Normal file
234
src/curl_share.cpp
Normal file
@ -0,0 +1,234 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "curl_share.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsCurlShare
|
||||
//-------------------------------------------------------------------
|
||||
// Class-wide configuration flags and per-thread share-handle bookkeeping.
bool S3fsCurlShare::is_dns_cache = true; // default: share DNS cache between easy handles
bool S3fsCurlShare::is_ssl_cache = true; // default: share SSL session cache between easy handles
std::mutex S3fsCurlShare::curl_share_lock;
// One CURLSH handle (and its mutex pair) per thread, keyed by std::thread::id.
std::map<std::thread::id, CurlSharePtr> S3fsCurlShare::ShareHandles;
std::map<std::thread::id, ShareLocksPtr> S3fsCurlShare::ShareLocks;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class methods for S3fsCurlShare
|
||||
//-------------------------------------------------------------------
|
||||
// Enable/disable sharing of the DNS cache; returns the previous setting.
bool S3fsCurlShare::SetDnsCache(bool isCache)
{
    bool previous               = S3fsCurlShare::is_dns_cache;
    S3fsCurlShare::is_dns_cache = isCache;
    return previous;
}
|
||||
|
||||
// Enable/disable sharing of the SSL session cache; returns the previous setting.
bool S3fsCurlShare::SetSslSessionCache(bool isCache)
{
    bool previous               = S3fsCurlShare::is_ssl_cache;
    S3fsCurlShare::is_ssl_cache = isCache;
    return previous;
}
|
||||
|
||||
// libcurl share lock callback: acquires the mutex guarding the shared data
// category (DNS or SSL session) named by nLockData. useptr is the
// curl_share_locks registered via CURLSHOPT_USERDATA.
void S3fsCurlShare::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr)
{
    auto* locks = static_cast<curl_share_locks*>(useptr);

    switch(nLockData){
        case CURL_LOCK_DATA_DNS:
            locks->lock_dns.lock();
            break;
        case CURL_LOCK_DATA_SSL_SESSION:
            locks->lock_session.lock();
            break;
        default:
            // other categories are not shared, nothing to lock
            break;
    }
}
|
||||
|
||||
// libcurl share unlock callback: releases the mutex taken by LockCurlShare
// for the same data category.
void S3fsCurlShare::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr)
{
    auto* locks = static_cast<curl_share_locks*>(useptr);

    switch(nLockData){
        case CURL_LOCK_DATA_DNS:
            locks->lock_dns.unlock();
            break;
        case CURL_LOCK_DATA_SSL_SESSION:
            locks->lock_session.unlock();
            break;
        default:
            // other categories are not shared, nothing to unlock
            break;
    }
}
|
||||
|
||||
// Attach the calling thread's CURLSH share handle to the given curl easy
// handle. Returns true when sharing is disabled (nothing to attach) or the
// handle was attached successfully; false on error.
bool S3fsCurlShare::SetCurlShareHandle(CURL* hCurl)
{
    if(!hCurl){
        S3FS_PRN_ERR("Curl handle is null");
        return false;
    }

    // Look up (or lazily create) the share handle for the current thread.
    S3fsCurlShare share;
    CURLSH*       hCurlShare = share.GetCurlShareHandle();
    if(!hCurlShare){
        // a case of not to use CurlShare
        return true;
    }

    // set share handle to curl handle
    if(CURLE_OK != curl_easy_setopt(hCurl, CURLOPT_SHARE, hCurlShare)){
        S3FS_PRN_ERR("Failed to set Curl share handle to curl handle.");
        return false;
    }
    return true;
}
|
||||
|
||||
bool S3fsCurlShare::DestroyCurlShareHandleForThread()
|
||||
{
|
||||
S3fsCurlShare CurlShareObj;
|
||||
CurlShareObj.DestroyCurlShareHandle();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsCurlShare::InitializeCurlShare(const CurlSharePtr& hShare, const ShareLocksPtr& ShareLock)
|
||||
{
|
||||
CURLSHcode nSHCode;
|
||||
|
||||
// set lock handlers
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_LOCKFUNC, S3fsCurlShare::LockCurlShare))){
|
||||
S3FS_PRN_ERR("curl_share_setopt(LOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_UNLOCKFUNC, S3fsCurlShare::UnlockCurlShare))){
|
||||
S3FS_PRN_ERR("curl_share_setopt(UNLOCKFUNC) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
|
||||
// set user data for lock functions
|
||||
if(CURLSHE_OK != (nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_USERDATA, ShareLock.get()))){
|
||||
S3FS_PRN_ERR("curl_share_setopt(USERDATA) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}
|
||||
|
||||
// set share type
|
||||
if(S3fsCurlShare::is_dns_cache){
|
||||
nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
|
||||
if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
|
||||
S3FS_PRN_ERR("curl_share_setopt(DNS) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
|
||||
S3FS_PRN_WARN("curl_share_setopt(DNS) returns %d(%s), but continue without shared dns data.", nSHCode, curl_share_strerror(nSHCode));
|
||||
}
|
||||
}
|
||||
if(S3fsCurlShare::is_ssl_cache){
|
||||
nSHCode = curl_share_setopt(hShare.get(), CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
|
||||
if(CURLSHE_OK != nSHCode && CURLSHE_BAD_OPTION != nSHCode && CURLSHE_NOT_BUILT_IN != nSHCode){
|
||||
S3FS_PRN_ERR("curl_share_setopt(SSL SESSION) returns %d(%s)", nSHCode, curl_share_strerror(nSHCode));
|
||||
return false;
|
||||
}else if(CURLSHE_BAD_OPTION == nSHCode || CURLSHE_NOT_BUILT_IN == nSHCode){
|
||||
S3FS_PRN_WARN("curl_share_setopt(SSL SESSION) returns %d(%s), but continue without shared ssl session data.", nSHCode, curl_share_strerror(nSHCode));
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods for S3fsCurlShare
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
// set current thread id(std style) to ThreadId
//
// Each S3fsCurlShare instance is bound to the thread that constructs it;
// ThreadId is the key used for the per-thread handle/lock maps.
//
S3fsCurlShare::S3fsCurlShare() : ThreadId(std::this_thread::get_id())
{
}
|
||||
|
||||
void S3fsCurlShare::DestroyCurlShareHandle()
|
||||
{
|
||||
if(!S3fsCurlShare::is_dns_cache && !S3fsCurlShare::is_ssl_cache){
|
||||
// Any curl share handle does not exist
|
||||
return;
|
||||
}
|
||||
|
||||
const std::lock_guard<std::mutex> lock(S3fsCurlShare::curl_share_lock);
|
||||
|
||||
// find existed handle and cleanup it
|
||||
auto handle_iter = S3fsCurlShare::ShareHandles.find(ThreadId);
|
||||
if(handle_iter == S3fsCurlShare::ShareHandles.end()){
|
||||
S3FS_PRN_WARN("Not found curl share handle");
|
||||
}else{
|
||||
S3fsCurlShare::ShareHandles.erase(handle_iter);
|
||||
}
|
||||
|
||||
// find lock and cleanup it
|
||||
auto locks_iter = S3fsCurlShare::ShareLocks.find(ThreadId);
|
||||
if(locks_iter == S3fsCurlShare::ShareLocks.end()){
|
||||
S3FS_PRN_WARN("Not found locks of curl share handle");
|
||||
}else{
|
||||
S3fsCurlShare::ShareLocks.erase(locks_iter);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the CURLSH share handle for the calling thread, creating and
// initializing it on first use. Returns nullptr when sharing is disabled
// or on creation/initialization failure.
CURLSH* S3fsCurlShare::GetCurlShareHandle()
{
    if(!S3fsCurlShare::is_dns_cache && !S3fsCurlShare::is_ssl_cache){
        // Any curl share handle does not exist
        return nullptr;
    }

    const std::lock_guard<std::mutex> lock(S3fsCurlShare::curl_share_lock);

    // Reuse the handle already created for this thread, if any.
    auto handle_iter = S3fsCurlShare::ShareHandles.find(ThreadId);
    if(handle_iter != S3fsCurlShare::ShareHandles.end()){
        return handle_iter->second.get();
    }

    // create new curl share handle and locks
    // (construct the owning pointer directly instead of null-init + reset)
    CurlSharePtr hShare(curl_share_init(), curl_share_cleanup);
    if(!hShare){
        S3FS_PRN_ERR("Failed to create curl share handle");
        return nullptr;
    }
    auto pLocks = std::make_unique<curl_share_locks>();

    // Initialize curl share handle; on failure the unique_ptrs clean up.
    if(!S3fsCurlShare::InitializeCurlShare(hShare, pLocks)){
        S3FS_PRN_ERR("Failed to initialize curl share handle");
        return nullptr;
    }

    // Register both objects in the per-thread maps. Use the iterator
    // returned by emplace rather than re-searching the map afterwards
    // (the previous re-find existed only to satisfy clang-tidy).
    auto emplaced = S3fsCurlShare::ShareHandles.emplace(ThreadId, std::move(hShare));
    S3fsCurlShare::ShareLocks.emplace(ThreadId, std::move(pLocks));
    if(!emplaced.second){
        // cannot happen: the find() above ran under the same lock
        S3FS_PRN_ERR("Failed to insert curl share to map.");
        return nullptr;
    }
    return emplaced.first->second.get();
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
89
src/curl_share.h
Normal file
89
src/curl_share.h
Normal file
@ -0,0 +1,89 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_SHARE_H_
|
||||
#define S3FS_CURL_SHARE_H_
|
||||
|
||||
#include <curl/curl.h>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Structure / Typedefs
|
||||
//----------------------------------------------
|
||||
// Pair of mutexes protecting the two data categories a CURLSH handle is
// configured to share between easy handles (see S3fsCurlShare).
struct curl_share_locks {
    std::mutex lock_dns;        // guards CURL_LOCK_DATA_DNS
    std::mutex lock_session;    // guards CURL_LOCK_DATA_SSL_SESSION
};

// Owning pointer for a CURLSH*; runs curl_share_cleanup on destruction.
typedef std::unique_ptr<CURLSH, decltype(&curl_share_cleanup)> CurlSharePtr;
typedef std::unique_ptr<curl_share_locks> ShareLocksPtr;
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurlShare
|
||||
//----------------------------------------------
|
||||
// Manages libcurl share (CURLSH) handles, one per thread, so that easy
// handles created on the same thread can share DNS and SSL session caches.
// Instances are lightweight and bound to the constructing thread.
class S3fsCurlShare
{
    private:
        static bool is_dns_cache;           // share the DNS cache (default true, see .cpp)
        static bool is_ssl_cache;           // share the SSL session cache (default true, see .cpp)
        static std::mutex curl_share_lock;  // guards the two maps below
        // Per-thread share handle and its mutex pair, keyed by thread id.
        static std::map<std::thread::id, CurlSharePtr> ShareHandles GUARDED_BY(curl_share_lock);
        static std::map<std::thread::id, ShareLocksPtr> ShareLocks GUARDED_BY(curl_share_lock);

        std::thread::id ThreadId;           // thread this instance is bound to

    private:
        // libcurl lock/unlock callbacks; useptr is the curl_share_locks
        // registered via CURLSHOPT_USERDATA.
        static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr) NO_THREAD_SAFETY_ANALYSIS;
        static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr) NO_THREAD_SAFETY_ANALYSIS;
        // install callbacks/userdata and enable the configured share types
        static bool InitializeCurlShare(const CurlSharePtr& hShare, const ShareLocksPtr& ShareLock) REQUIRES(curl_share_lock);

        void DestroyCurlShareHandle();
        CURLSH* GetCurlShareHandle();

    public:
        // Both setters return the previous value of the flag.
        static bool SetDnsCache(bool isCache);
        static bool SetSslSessionCache(bool isCache);
        // Attach the calling thread's share handle to an easy handle.
        static bool SetCurlShareHandle(CURL* hCurl);
        // Release the calling thread's share handle and locks.
        static bool DestroyCurlShareHandleForThread();

        // constructor/destructor
        explicit S3fsCurlShare();
        ~S3fsCurlShare() = default;
        S3fsCurlShare(const S3fsCurlShare&) = delete;
        S3fsCurlShare(S3fsCurlShare&&) = delete;
        S3fsCurlShare& operator=(const S3fsCurlShare&) = delete;
        S3fsCurlShare& operator=(S3fsCurlShare&&) = delete;
};
|
||||
|
||||
#endif // S3FS_CURL_SHARE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
337
src/curl_util.cpp
Normal file
337
src/curl_util.cpp
Normal file
@ -0,0 +1,337 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <curl/curl.h>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "curl_util.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "s3fs_cred.h"
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// curl_slist_sort_insert
|
||||
// This function is like curl_slist_append function, but this adds data by a-sorting.
|
||||
// Because AWS signature needs sorted header.
|
||||
//
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
|
||||
{
|
||||
if(!key){
|
||||
return list;
|
||||
}
|
||||
|
||||
// key & value are trimmed and lower (only key)
|
||||
std::string strkey = trim(key);
|
||||
std::string strval = value ? trim(value) : "";
|
||||
std::string strnew = key + ": "s + strval;
|
||||
char* data;
|
||||
if(nullptr == (data = strdup(strnew.c_str()))){
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist **p = &list;
|
||||
for(;*p; p = &(*p)->next){
|
||||
std::string strcur = (*p)->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strcur.find(':', 0))){
|
||||
strcur.erase(pos);
|
||||
}
|
||||
|
||||
int result = strcasecmp(strkey.c_str(), strcur.c_str());
|
||||
if(0 == result){
|
||||
free((*p)->data);
|
||||
(*p)->data = data;
|
||||
return list;
|
||||
}else if(result < 0){
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct curl_slist* new_item;
|
||||
// Must use malloc since curl_slist_free_all calls free.
|
||||
if(nullptr == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
|
||||
free(data);
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist* before = *p;
|
||||
*p = new_item;
|
||||
new_item->data = data;
|
||||
new_item->next = before;
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key)
|
||||
{
|
||||
if(!key){
|
||||
return list;
|
||||
}
|
||||
|
||||
std::string strkey = trim(key);
|
||||
struct curl_slist **p = &list;
|
||||
while(*p){
|
||||
std::string strcur = (*p)->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strcur.find(':', 0))){
|
||||
strcur.erase(pos);
|
||||
}
|
||||
|
||||
int result = strcasecmp(strkey.c_str(), strcur.c_str());
|
||||
if(0 == result){
|
||||
free((*p)->data);
|
||||
struct curl_slist *tmp = *p;
|
||||
*p = (*p)->next;
|
||||
free(tmp);
|
||||
}else{
|
||||
p = &(*p)->next;
|
||||
}
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list)
|
||||
{
|
||||
std::string sorted_headers;
|
||||
|
||||
if(!list){
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strkey = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strkey.find(':', 0))){
|
||||
if (trim(strkey.substr(pos + 1)).empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strkey.erase(pos);
|
||||
}
|
||||
if(!sorted_headers.empty()){
|
||||
sorted_headers += ";";
|
||||
}
|
||||
sorted_headers += lower(strkey);
|
||||
}
|
||||
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key)
|
||||
{
|
||||
if(!list){
|
||||
return "";
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strkey = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strkey.find(':', 0))){
|
||||
if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){
|
||||
return trim(strkey.substr(pos+1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz)
|
||||
{
|
||||
std::string canonical_headers;
|
||||
|
||||
if(!list){
|
||||
canonical_headers = "\n";
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strhead = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strhead.find(':', 0))){
|
||||
std::string strkey = trim(lower(strhead.substr(0, pos)));
|
||||
std::string strval = trim(strhead.substr(pos + 1));
|
||||
if (strval.empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strhead = strkey;
|
||||
strhead += ":";
|
||||
strhead += strval;
|
||||
}else{
|
||||
strhead = trim(lower(strhead));
|
||||
}
|
||||
if(only_amz && strhead.substr(0, 5) != "x-amz"){
|
||||
continue;
|
||||
}
|
||||
canonical_headers += strhead;
|
||||
canonical_headers += "\n";
|
||||
}
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
// function for using global values
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url)
|
||||
{
|
||||
if(!realpath){
|
||||
return false;
|
||||
}
|
||||
resourcepath = urlEncodePath(service_path + S3fsCred::GetBucket() + realpath);
|
||||
url = s3host + resourcepath;
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string prepare_url(const char* url)
|
||||
{
|
||||
S3FS_PRN_DBG("URL is %s", url);
|
||||
|
||||
std::string uri;
|
||||
std::string hostname;
|
||||
std::string path;
|
||||
std::string url_str = url;
|
||||
std::string token = "/" + S3fsCred::GetBucket();
|
||||
size_t bucket_pos;
|
||||
size_t bucket_length = token.size();
|
||||
size_t uri_length = 0;
|
||||
|
||||
if(!strncasecmp(url_str.c_str(), "https://", 8)){
|
||||
uri_length = 8;
|
||||
} else if(!strncasecmp(url_str.c_str(), "http://", 7)) {
|
||||
uri_length = 7;
|
||||
}
|
||||
uri = url_str.substr(0, uri_length);
|
||||
bucket_pos = url_str.find(token, uri_length);
|
||||
|
||||
if(!pathrequeststyle){
|
||||
hostname = S3fsCred::GetBucket() + "." + url_str.substr(uri_length, bucket_pos - uri_length);
|
||||
path = url_str.substr((bucket_pos + bucket_length));
|
||||
}else{
|
||||
hostname = url_str.substr(uri_length, bucket_pos - uri_length);
|
||||
std::string part = url_str.substr((bucket_pos + bucket_length));
|
||||
if('/' != part[0]){
|
||||
part = "/" + part;
|
||||
}
|
||||
path = "/" + S3fsCred::GetBucket() + part;
|
||||
}
|
||||
|
||||
url_str = uri + hostname + path;
|
||||
|
||||
S3FS_PRN_DBG("URL changed is %s", url_str.c_str());
|
||||
|
||||
return url_str;
|
||||
}
|
||||
|
||||
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5)
|
||||
{
|
||||
if(!pstr || '\0' == pstr[0]){
|
||||
S3FS_PRN_ERR("Parameter is wrong.");
|
||||
return false;
|
||||
}
|
||||
md5_t binary;
|
||||
if(!s3fs_md5(reinterpret_cast<const unsigned char*>(pstr), length, &binary)){
|
||||
return false;
|
||||
}
|
||||
|
||||
md5 = s3fs_base64(binary.data(), binary.size());
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string url_to_host(const std::string &url)
|
||||
{
|
||||
S3FS_PRN_INFO3("url is %s", url.c_str());
|
||||
|
||||
static constexpr char HTTP[] = "http://";
|
||||
static constexpr char HTTPS[] = "https://";
|
||||
std::string hostname;
|
||||
|
||||
if (is_prefix(url.c_str(), HTTP)) {
|
||||
hostname = url.substr(sizeof(HTTP) - 1);
|
||||
} else if (is_prefix(url.c_str(), HTTPS)) {
|
||||
hostname = url.substr(sizeof(HTTPS) - 1);
|
||||
} else {
|
||||
S3FS_PRN_EXIT("url does not begin with http:// or https://");
|
||||
abort();
|
||||
}
|
||||
|
||||
size_t idx;
|
||||
if ((idx = hostname.find('/')) != std::string::npos) {
|
||||
return hostname.substr(0, idx);
|
||||
} else {
|
||||
return hostname;
|
||||
}
|
||||
}
|
||||
|
||||
std::string get_bucket_host()
|
||||
{
|
||||
if(!pathrequeststyle){
|
||||
return S3fsCred::GetBucket() + "." + url_to_host(s3host);
|
||||
}
|
||||
return url_to_host(s3host);
|
||||
}
|
||||
|
||||
// Map a curl debug-callback info type to the short prefix used in log lines
// ("<"/">" for headers, "BODY <"/"BODY >" for payload, "" otherwise).
const char* getCurlDebugHead(curl_infotype type)
{
    switch(type){
        case CURLINFO_DATA_IN:
            return "BODY <";
        case CURLINFO_DATA_OUT:
            return "BODY >";
        case CURLINFO_HEADER_IN:
            return "<";
        case CURLINFO_HEADER_OUT:
            return ">";
        default:
            return "";
    }
}
|
||||
|
||||
//
|
||||
// compare ETag ignoring quotes and case
|
||||
//
|
||||
bool etag_equals(const std::string& s1, const std::string& s2)
|
||||
{
|
||||
return 0 == strcasecmp(peeloff(s1).c_str(), peeloff(s2).c_str());
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
60
src/curl_util.h
Normal file
60
src/curl_util.h
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_UTIL_H_
#define S3FS_CURL_UTIL_H_

#include <cstdint>
#include <curl/curl.h>
#include <string>
#include "metaheader.h"

enum class sse_type_t : uint8_t;

//----------------------------------------------
// Functions
//----------------------------------------------
// Insert "key: value" into list, keeping entries sorted by header name
// (AWS request signing requires sorted headers).
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
// Remove every entry whose header name matches key (case-insensitive).
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key);
// ';'-joined, lower-cased header names; empty-value headers are skipped.
std::string get_sorted_header_keys(const struct curl_slist* list);
// Canonical "name:value\n" block for signing; only_amz keeps x-amz-* headers only.
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
// Trimmed value of the first header matching key (case-insensitive), "" if absent.
std::string get_header_value(const struct curl_slist* list, const std::string &key);
// Build the URL-encoded resource path and full URL for realpath.
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
// Rewrite url into virtual-hosted or path-style request form.
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue);   // implement in s3fs.cpp
int put_headers(const char* path, const headers_t& meta, bool is_copy, bool use_st_size = true);   // implement in s3fs.cpp

// Base64-encoded MD5 digest of the buffer.
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5);
// Strip scheme and path from url, returning only the host.
std::string url_to_host(const std::string &url);
// Host used for requests, with bucket prepended unless path-request style is set.
std::string get_bucket_host();
// Short prefix for curl debug log lines.
const char* getCurlDebugHead(curl_infotype type);

// compare ETag ignoring quotes and case
bool etag_equals(const std::string& s1, const std::string& s2);

#endif // S3FS_CURL_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
3038
src/fdcache.cpp
3038
src/fdcache.cpp
File diff suppressed because it is too large
Load Diff
281
src/fdcache.h
281
src/fdcache.h
@ -17,165 +17,16 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#ifndef FD_CACHE_H_
|
||||
#define FD_CACHE_H_
|
||||
|
||||
#include <sys/statvfs.h>
|
||||
#include "curl.h"
|
||||
#ifndef S3FS_FDCACHE_H_
|
||||
#define S3FS_FDCACHE_H_
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
class CacheFileStat
|
||||
{
|
||||
private:
|
||||
std::string path;
|
||||
int fd;
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
private:
|
||||
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
|
||||
public:
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
static bool CheckCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStatDirectory(void);
|
||||
|
||||
explicit CacheFileStat(const char* tpath = NULL);
|
||||
~CacheFileStat();
|
||||
|
||||
bool Open(void);
|
||||
bool Release(void);
|
||||
bool SetPath(const char* tpath, bool is_open = true);
|
||||
int GetFd(void) const { return fd; }
|
||||
};
|
||||
|
||||
//------------------------------------------------
|
||||
// fdpage & PageList
|
||||
//------------------------------------------------
|
||||
// page block information
|
||||
// page block information
// One contiguous byte range of a cached file and whether it has been loaded.
struct fdpage
{
    off_t offset;   // start offset of this page within the file
    size_t bytes;   // length of this page in bytes
    bool loaded;    // true when this range is present in the local cache

    fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
        : offset(start), bytes(size), loaded(is_loaded) {}

    // first offset after this page
    off_t next(void) const { return (offset + bytes); }
    // last offset inside this page (0 when the page is empty)
    off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
|
||||
typedef std::list<struct fdpage*> fdpage_list_t;
|
||||
|
||||
class FdEntity;
|
||||
|
||||
//
|
||||
// Management of loading area/modifying
|
||||
//
|
||||
// Tracks which byte ranges of a cached file have been loaded/modified,
// as an ordered list of fdpage blocks.
class PageList
{
    friend class FdEntity;  // only one method access directly pages.

    private:
        fdpage_list_t pages;    // ordered page blocks covering the file

    private:
        void Clear(void);
        // merge adjacent pages with the same loaded state
        bool Compress(void);
        bool Parse(off_t new_pos);

    public:
        static void FreeList(fdpage_list_t& list);

        explicit PageList(size_t size = 0, bool is_loaded = false);
        ~PageList();

        bool Init(size_t size, bool is_loaded);
        size_t Size(void) const;
        bool Resize(size_t size, bool is_loaded);

        bool IsPageLoaded(off_t start = 0, size_t size = 0) const;                        // size=0 is checking to end of list
        bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
        bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
        size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const;          // size=0 is checking to end of list
        int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const;  // size=0 is checking to end of list

        // load from / save to the cache-stat file depending on is_output
        bool Serialize(CacheFileStat& file, bool is_output);
        void Dump(void);
};
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdEntity
|
||||
//------------------------------------------------
|
||||
// One open cached object: wraps the local cache (or tmp) file descriptor,
// reference counting, the loaded-page map, and the no-disk-space multipart
// upload state for a single S3 object path.
class FdEntity
{
    private:
        pthread_mutex_t fdent_lock;     // guards this entity's state
        bool            is_lock_init;   // true once fdent_lock was initialized (for safe destroy)
        PageList        pagelist;       // which byte ranges are loaded
        int             refcnt;         // reference count
        std::string     path;           // object path
        std::string     cachepath;      // local cache file path
                                        // (if this is empty, does not load/save pagelist.)
        std::string     mirrorpath;     // mirror file path to local cache file path
        int             fd;             // file descriptor(tmp file or cache file)
        FILE*           pfile;          // file pointer(tmp file or cache file)
        bool            is_modify;      // if file is changed, this flag is true
        headers_t       orgmeta;        // original headers at opening
        size_t          size_orgmeta;   // original file size in original headers

        std::string     upload_id;      // for no cached multipart uploading when no disk space
        etaglist_t      etaglist;       // for no cached multipart uploading when no disk space
        off_t           mp_start;       // start position for no cached multipart(write method only)
        size_t          mp_size;        // size for no cached multipart(write method only)

    private:
        // write `size` bytes of `byte` into fd starting at `start`
        static int FillFile(int fd, unsigned char byte, size_t size, off_t start);

        void Clear(void);
        int OpenMirrorFile(void);
        bool SetAllStatus(bool is_loaded);                          // [NOTE] not locking
        //bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
        bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }

    public:
        explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
        ~FdEntity();

        void Close(void);
        bool IsOpen(void) const { return (-1 != fd); }
        int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
        bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
        // increment the reference count; returns the fd
        int Dup(bool no_fd_lock_wait = false);

        const char* GetPath(void) const { return path.c_str(); }
        void SetPath(const std::string &newpath) { path = newpath; }
        int GetFd(void) const { return fd; }

        bool GetStats(struct stat& st);
        int SetMtime(time_t time);
        bool UpdateMtime(void);
        bool GetSize(size_t& size);
        bool SetMode(mode_t mode);
        bool SetUId(uid_t uid);
        bool SetGId(gid_t gid);
        bool SetContentType(const char* path);

        int Load(off_t start = 0, size_t size = 0);                 // size=0 means loading to end
        // "NoCache*" methods implement multipart upload without a local cache
        // file, for the case where there is no disk space.
        int NoCacheLoadAndPost(off_t start = 0, size_t size = 0);   // size=0 means loading to end
        int NoCachePreMultipartPost(void);
        int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
        int NoCacheCompleteMultipartPost(void);

        int RowFlush(const char* tpath, bool force_sync = false);
        int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }

        ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
        ssize_t Write(const char* bytes, off_t start, size_t size);

        void CleanupCache();
};
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
#include "common.h"
|
||||
#include "fdcache_entity.h"
|
||||
#include "s3fs_util.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdManager
|
||||
@ -183,59 +34,101 @@ typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value
|
||||
class FdManager
|
||||
{
|
||||
private:
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static pthread_mutex_t cache_cleanup_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static size_t free_disk_space; // limit free disk space
|
||||
static FdManager singleton;
|
||||
static std::mutex fd_manager_lock;
|
||||
static std::mutex cache_cleanup_lock;
|
||||
static std::mutex reserved_diskspace_lock;
|
||||
static std::mutex except_entmap_lock;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static off_t free_disk_space GUARDED_BY(reserved_diskspace_lock); // limit free disk space
|
||||
static off_t fake_used_disk_space GUARDED_BY(reserved_diskspace_lock); // difference between fake free disk space and actual at startup(for test/debug)
|
||||
static std::string check_cache_output;
|
||||
static bool checked_lseek;
|
||||
static bool have_lseek_hole;
|
||||
static std::string tmp_dir;
|
||||
|
||||
fdent_map_t fent;
|
||||
fdent_map_t fent GUARDED_BY(fd_manager_lock);
|
||||
fdent_map_t except_fent GUARDED_BY(except_entmap_lock); // A map of delayed deletion fdentity
|
||||
|
||||
private:
|
||||
static fsblkcnt_t GetFreeDiskSpace(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
static off_t GetFreeDiskSpaceHasLock(const char* path) REQUIRES(FdManager::reserved_diskspace_lock);
|
||||
static off_t GetTotalDiskSpace(const char* path);
|
||||
static bool IsDir(const std::string& dir);
|
||||
static int GetVfsStat(const char* path, struct statvfs* vfsbuf);
|
||||
static off_t GetEnsureFreeDiskSpaceHasLock() REQUIRES(FdManager::reserved_diskspace_lock);
|
||||
|
||||
// Returns the number of open pseudo fd.
|
||||
int GetPseudoFdCount(const char* path) REQUIRES(fd_manager_lock);
|
||||
bool UpdateEntityToTempPath() REQUIRES(fd_manager_lock);
|
||||
void CleanupCacheDirInternal(const std::string &path = "") REQUIRES(cache_cleanup_lock);
|
||||
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
|
||||
|
||||
public:
|
||||
FdManager();
|
||||
~FdManager();
|
||||
FdManager();
|
||||
~FdManager();
|
||||
FdManager(const FdManager&) = delete;
|
||||
FdManager(FdManager&&) = delete;
|
||||
FdManager& operator=(const FdManager&) = delete;
|
||||
FdManager& operator=(FdManager&&) = delete;
|
||||
|
||||
// Reference singleton
|
||||
static FdManager* get(void) { return &singleton; }
|
||||
// Reference singleton
|
||||
static FdManager* get() { return &singleton; }
|
||||
|
||||
static bool DeleteCacheDirectory(void);
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
|
||||
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
|
||||
static bool CheckCacheTopDir(void);
|
||||
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist(void);
|
||||
static bool DeleteCacheDirectory();
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir() { return !FdManager::cache_dir.empty(); }
|
||||
static const char* GetCacheDir() { return FdManager::cache_dir.c_str(); }
|
||||
static bool SetCacheCheckOutput(const char* path);
|
||||
static const char* GetCacheCheckOutput() { return FdManager::check_cache_output.c_str(); }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
|
||||
static bool CheckCacheTopDir();
|
||||
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist();
|
||||
static bool HasOpenEntityFd(const char* path);
|
||||
static int GetOpenFdCount(const char* path);
|
||||
static off_t GetEnsureFreeDiskSpace()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(FdManager::reserved_diskspace_lock);
|
||||
return FdManager::GetEnsureFreeDiskSpaceHasLock();
|
||||
}
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool InitFakeUsedDiskSize(off_t fake_freesize);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size, bool withmsg = false);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
static bool HaveLseekHole();
|
||||
static bool SetTmpDir(const char* dir);
|
||||
static bool CheckTmpDirExist();
|
||||
static std::unique_ptr<FILE, decltype(&s3fs_fclose)> MakeTempFile();
|
||||
static off_t GetTotalDiskSpaceByRatio(int ratio);
|
||||
|
||||
static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
|
||||
static size_t SetEnsureFreeDiskSpace(size_t size);
|
||||
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
|
||||
static bool IsSafeDiskSpace(const char* path, size_t size);
|
||||
// Return FdEntity associated with path, returning nullptr on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int& existfd, bool newfd = true) {
|
||||
const std::lock_guard<std::mutex> lock(FdManager::fd_manager_lock);
|
||||
return GetFdEntityHasLock(path, existfd, newfd);
|
||||
}
|
||||
FdEntity* GetFdEntityHasLock(const char* path, int& existfd, bool newfd = true) REQUIRES(FdManager::fd_manager_lock);
|
||||
FdEntity* Open(int& fd, const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify);
|
||||
FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* OpenExistFdEntity(const char* path, int& fd, int flags = O_RDONLY);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent, int fd);
|
||||
bool ChangeEntityToTempPath(std::shared_ptr<FdEntity> ent, const char* path);
|
||||
void CleanupCacheDir();
|
||||
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent);
|
||||
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
|
||||
void CleanupCacheDir();
|
||||
bool CheckAllCache();
|
||||
};
|
||||
|
||||
#endif // FD_CACHE_H_
|
||||
#endif // S3FS_FDCACHE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
126
src/fdcache_auto.cpp
Normal file
126
src/fdcache_auto.cpp
Normal file
@ -0,0 +1,126 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "fdcache_auto.h"
|
||||
#include "fdcache.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// AutoFdEntity methods
|
||||
//------------------------------------------------
|
||||
// Start detached: no FdEntity attached, no pseudo fd held.
AutoFdEntity::AutoFdEntity() : pFdEntity(nullptr), pseudo_fd(-1)
{
}
|
||||
|
||||
// Automatically release the held entity/pseudo fd on scope exit.
AutoFdEntity::~AutoFdEntity()
{
    Close();
}
|
||||
|
||||
bool AutoFdEntity::Close()
|
||||
{
|
||||
if(pFdEntity){
|
||||
if(!FdManager::get()->Close(pFdEntity, pseudo_fd)){
|
||||
S3FS_PRN_ERR("Failed to close fdentity.");
|
||||
return false;
|
||||
}
|
||||
pFdEntity = nullptr;
|
||||
pseudo_fd = -1;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// This method touches the internal fdentity with.
|
||||
// This is used to keep the file open.
|
||||
//
|
||||
int AutoFdEntity::Detach()
|
||||
{
|
||||
if(!pFdEntity){
|
||||
S3FS_PRN_ERR("Does not have a associated FdEntity.");
|
||||
return -1;
|
||||
}
|
||||
int fd = pseudo_fd;
|
||||
pseudo_fd = -1;
|
||||
pFdEntity = nullptr;
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
// Attach to an already-existing entity for path, adopting existfd as our
// pseudo fd. Any currently-held entity is released first. Returns nullptr
// when no matching entity exists.
FdEntity* AutoFdEntity::Attach(const char* path, int existfd)
{
    Close();

    pFdEntity = FdManager::get()->GetFdEntity(path, existfd, false);
    if(nullptr == pFdEntity){
        S3FS_PRN_DBG("Could not find fd entity object(file=%s, pseudo_fd=%d)", path, existfd);
        return nullptr;
    }
    pseudo_fd = existfd;
    return pFdEntity;
}
|
||||
|
||||
// Open (or create) the entity for path through FdManager, taking ownership of
// the resulting pseudo fd. A previously-held entity is released first.
// On failure returns nullptr; if error is non-null it receives the value left
// in pseudo_fd by FdManager::Open — presumably a negative error code, confirm
// against FdManager::Open.
FdEntity* AutoFdEntity::Open(const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, int* error)
{
    Close();

    pFdEntity = FdManager::get()->Open(pseudo_fd, path, pmeta, size, ts_times, flags, force_tmpfile, is_create, ignore_modify);
    if(nullptr == pFdEntity){
        if(error){
            *error = pseudo_fd;
        }
        pseudo_fd = -1;
        return nullptr;
    }
    return pFdEntity;
}
|
||||
|
||||
// [NOTE]
|
||||
// the fd obtained by this method is not a newly created pseudo fd.
|
||||
//
|
||||
// [NOTE]
// the fd obtained by this method is not a newly created pseudo fd.
// The entity is looked up but NOT attached to this object (pFdEntity stays
// null); any currently-held entity is released first.
//
FdEntity* AutoFdEntity::GetExistFdEntity(const char* path, int existfd)
{
    Close();

    return FdManager::get()->GetExistFdEntity(path, existfd);
}
|
||||
|
||||
// Open an already-existing entity for path, storing the new pseudo fd in this
// object. A previously-held entity is released first. Returns nullptr when
// the entity does not exist.
FdEntity* AutoFdEntity::OpenExistFdEntity(const char* path, int flags)
{
    Close();

    pFdEntity = FdManager::get()->OpenExistFdEntity(path, pseudo_fd, flags);
    return pFdEntity;   // nullptr on failure
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
73
src/fdcache_auto.h
Normal file
73
src/fdcache_auto.h
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_AUTO_H_
|
||||
#define S3FS_FDCACHE_AUTO_H_
|
||||
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
#include "filetimes.h"
|
||||
|
||||
class FdEntity;
|
||||
|
||||
//------------------------------------------------
// class AutoFdEntity
//------------------------------------------------
// A class that opens fdentry and closes it automatically.
// This class object is used to prevent inconsistencies in
// the number of references in fdentry.
// The methods are wrappers to the method of the FdManager class.
//
class AutoFdEntity
{
    private:
        FdEntity* pFdEntity;    // entity currently held by this object (nullptr when none)
        int       pseudo_fd;    // pseudo fd associated with pFdEntity (-1 when none)

    public:
        AutoFdEntity();
        ~AutoFdEntity();        // NOTE(review): presumably releases the held entity via Close() — confirm in the .cpp
        // Non-copyable / non-movable: ownership of the pseudo fd must stay unique.
        AutoFdEntity(const AutoFdEntity&) = delete;
        AutoFdEntity(AutoFdEntity&&) = delete;
        AutoFdEntity& operator=(const AutoFdEntity&) = delete;
        AutoFdEntity& operator=(AutoFdEntity&&) = delete;

        bool Close();                                       // release the held entity, if any
        int Detach();                                       // give up ownership and return the pseudo fd (-1 if none held)
        FdEntity* Attach(const char* path, int existfd);    // take ownership of an existing entity
        int GetPseudoFd() const { return pseudo_fd; }

        // Open/create an entity via FdManager; on failure *error receives the error value.
        FdEntity* Open(const char* path, const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags, bool force_tmpfile, bool is_create, bool ignore_modify, int* error = nullptr);
        // Look up an existing entity without taking ownership (see note in the .cpp).
        FdEntity* GetExistFdEntity(const char* path, int existfd = -1);
        // Open a new pseudo fd on an existing entity and take ownership of it.
        FdEntity* OpenExistFdEntity(const char* path, int flags = O_RDONLY);
};
|
||||
|
||||
#endif // S3FS_FDCACHE_AUTO_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
2608
src/fdcache_entity.cpp
Normal file
2608
src/fdcache_entity.cpp
Normal file
File diff suppressed because it is too large
Load Diff
267
src/fdcache_entity.h
Normal file
267
src/fdcache_entity.h
Normal file
@ -0,0 +1,267 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_ENTITY_H_
|
||||
#define S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
#include <cstdint>
|
||||
#include <fcntl.h>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "fdcache_untreated.h"
|
||||
#include "metaheader.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "filetimes.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedef
|
||||
//----------------------------------------------
|
||||
class PseudoFdInfo;
|
||||
typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;
|
||||
|
||||
//------------------------------------------------
// class FdEntity
//------------------------------------------------
// One cached object: wraps the physical file (local cache or temporary
// file) for an S3 object, plus the set of pseudo fds opened on it.
// Shared via std::shared_ptr (enable_shared_from_this); internal state is
// protected by fdent_lock and fdent_data_lock (see GUARDED_BY annotations).
class FdEntity : public std::enable_shared_from_this<FdEntity>
{
    private:
        // [NOTE]
        // Distinguish between meta pending and new file creation pending,
        // because the processing(request) at these updates is different.
        // Therefore, the pending state is expressed by this enum type.
        //
        enum class pending_status_t : uint8_t {
            NO_UPDATE_PENDING = 0,
            UPDATE_META_PENDING,        // pending meta header
            CREATE_FILE_PENDING         // pending file creation and meta header
        };

        static bool mixmultipart;       // whether multipart uploading can use copy api.
        static bool streamupload;       // whether stream uploading.

        mutable std::mutex fdent_lock;
        std::string     path GUARDED_BY(fdent_lock);                // object path
        int             physical_fd GUARDED_BY(fdent_lock);         // physical file(cache or temporary file) descriptor
        UntreatedParts  untreated_list GUARDED_BY(fdent_lock);      // list of untreated parts that have been written and not yet uploaded(for streamupload)
        fdinfo_map_t    pseudo_fd_map GUARDED_BY(fdent_lock);       // pseudo file descriptor information map
        std::unique_ptr<FILE, decltype(&s3fs_fclose)> pfile GUARDED_BY(fdent_lock) = {nullptr, &s3fs_fclose};   // file pointer(tmp file or cache file)
        ino_t           inode GUARDED_BY(fdent_lock);               // inode number for cache file
        headers_t       orgmeta GUARDED_BY(fdent_lock);             // original headers at opening
        off_t           size_orgmeta GUARDED_BY(fdent_lock);        // original file size in original headers

        // fdent_data_lock must always be taken after fdent_lock (ACQUIRED_AFTER).
        mutable std::mutex fdent_data_lock ACQUIRED_AFTER(fdent_lock);  // protects the following members
        PageList        pagelist GUARDED_BY(fdent_data_lock);
        std::string     cachepath GUARDED_BY(fdent_data_lock);      // local cache file path
                                                                    // (if this is empty, does not load/save pagelist.)
        std::string     mirrorpath GUARDED_BY(fdent_data_lock);     // mirror file path to local cache file path
        pending_status_t pending_status GUARDED_BY(fdent_data_lock);// status for new file creation and meta update
        FileTimes       timestamps GUARDED_BY(fdent_data_lock);     // file timestamps(atime/ctime/mtime)
        mutable std::mutex ro_path_lock;                            // for only the ro_path variable
        std::string     ro_path GUARDED_BY(ro_path_lock);           // holds the same value as "path". this is used as a backup(read-only variable) by special functions only.

    private:
        static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
        static ino_t GetInode(int fd);

        void Clear();
        ino_t GetInode() const REQUIRES(FdEntity::fdent_data_lock);
        int OpenMirrorFile() REQUIRES(FdEntity::fdent_data_lock);
        int NoCacheLoadAndPost(PseudoFdInfo* pseudo_obj, off_t start = 0, off_t size = 0) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);  // size=0 means loading to end
        PseudoFdInfo* CheckPseudoFdFlags(int fd, bool writable) REQUIRES(FdEntity::fdent_lock);
        bool IsUploading() REQUIRES(FdEntity::fdent_lock);
        int SetCtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int SetAtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int SetMtimeHasLock(struct timespec time) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int SetFileTimesHasLock(const FileTimes& ts_times) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        bool SetAllStatus(bool is_loaded) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        bool SetAllStatusUnloaded() REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock) { return SetAllStatus(false); }
        int PreMultipartUploadRequest(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock, fdent_data_lock);
        int NoCachePreMultipartUploadRequest(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int NoCacheMultipartUploadRequest(PseudoFdInfo* pseudo_obj, int tgfd, off_t start, off_t size) REQUIRES(FdEntity::fdent_lock);
        int NoCacheMultipartUploadComplete(PseudoFdInfo* pseudo_obj) REQUIRES(FdEntity::fdent_lock);
        // Flush variants: chosen by RowFlushHasLock depending on upload mode.
        int RowFlushHasLock(int fd, const char* tpath, bool force_sync) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int RowFlushNoMultipart(const PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int RowFlushMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int RowFlushMixMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        int RowFlushStreamMultipart(PseudoFdInfo* pseudo_obj, const char* tpath) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        // Write variants: chosen by Write depending on upload mode.
        ssize_t WriteNoMultipart(const PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        ssize_t WriteMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        ssize_t WriteMixMultipart(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);
        ssize_t WriteStreamUpload(PseudoFdInfo* pseudo_obj, const char* bytes, off_t start, size_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);

        int UploadPendingHasLock(int fd) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);

        bool ReserveDiskSpace(off_t size) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);

        bool AddUntreated(off_t start, off_t size) REQUIRES(FdEntity::fdent_lock);

        bool IsDirtyMetadata() const REQUIRES(FdEntity::fdent_data_lock);

        std::shared_ptr<FdEntity> get_shared_ptr() { return shared_from_this(); }

    public:
        static bool GetNoMixMultipart() { return mixmultipart; }
        static bool SetNoMixMultipart();
        static bool GetStreamUpload() { return streamupload; }
        static bool SetStreamUpload(bool isstream);

        explicit FdEntity(const char* tpath = nullptr, const char* cpath = nullptr);
        ~FdEntity();
        FdEntity(const FdEntity&) = delete;
        FdEntity(FdEntity&&) = delete;
        FdEntity& operator=(const FdEntity&) = delete;
        FdEntity& operator=(FdEntity&&) = delete;

        void Close(int fd);
        bool IsOpen() const {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return (-1 != physical_fd);
        }
        bool FindPseudoFd(int fd) const {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return FindPseudoFdWithLock(fd);
        }
        bool FindPseudoFdWithLock(int fd) const REQUIRES(FdEntity::fdent_lock);
        std::string GetROPath() const {
            const std::lock_guard<std::mutex> ro_lock(ro_path_lock);
            return ro_path;
        }
        int Open(const headers_t* pmeta, off_t size, const FileTimes& ts_times, int flags);

        bool LoadAll(int fd, off_t* size = nullptr, bool force_load = false);
        int Dup(int fd) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return DupWithLock(fd);
        }
        int DupWithLock(int fd) REQUIRES(FdEntity::fdent_lock);
        int OpenPseudoFd(int flags = O_RDONLY);
        int GetOpenCount() const {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return GetOpenCountHasLock();
        }
        int GetOpenCountHasLock() const REQUIRES(FdEntity::fdent_lock);
        std::string GetPath() const
        {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return path;
        }
        bool RenamePath(const std::string& newpath, std::string& fentmapkey);
        int GetPhysicalFd() const REQUIRES(FdEntity::fdent_lock) { return physical_fd; }
        bool IsModified() const;
        bool MergeOrgMeta(headers_t& updatemeta);
        bool GetOrgMeta(headers_t& meta) const;

        int UploadPending(int fd) {
            // Lock order: fdent_lock first, then fdent_data_lock (see ACQUIRED_AFTER).
            const std::lock_guard<std::mutex> lock(fdent_lock);
            const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
            return UploadPendingHasLock(fd);
        }
        bool HaveUploadPending(){
            const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
            return (pending_status_t::NO_UPDATE_PENDING != pending_status);
        }

        bool GetStats(struct stat& st) const {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return GetStatsHasLock(st);
        }
        bool GetStatsHasLock(struct stat& st) const REQUIRES(FdEntity::fdent_lock);

        int SetCtime(struct timespec time) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            const std::lock_guard<std::mutex> lock2(fdent_data_lock);
            return SetCtimeHasLock(time);
        }
        int SetAtime(struct timespec time) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            const std::lock_guard<std::mutex> lock2(fdent_data_lock);
            return SetAtimeHasLock(time);
        }
        int SetMtime(struct timespec time) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            const std::lock_guard<std::mutex> lock2(fdent_data_lock);
            return SetMtimeHasLock(time);
        }

        bool GetSize(off_t& size) const;
        bool GetXattr(std::string& xattr) const;
        bool SetXattr(const std::string& xattr);
        bool SetMode(mode_t mode) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return SetModeHasLock(mode);
        }
        bool SetModeHasLock(mode_t mode) REQUIRES(FdEntity::fdent_lock);
        bool SetUId(uid_t uid) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return SetUIdHasLock(uid);
        }
        bool SetUIdHasLock(uid_t uid) REQUIRES(FdEntity::fdent_lock);
        bool SetGId(gid_t gid) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            return SetGIdHasLock(gid);
        }
        bool SetGIdHasLock(gid_t gid) REQUIRES(FdEntity::fdent_lock);
        bool SetContentType(const char* path);
        bool GetStatsFromMeta(struct stat& st) const;

        int Load(off_t start, off_t size, bool is_modified_flag = false) REQUIRES(FdEntity::fdent_lock, FdEntity::fdent_data_lock);  // size=0 means loading to end

        off_t BytesModified();
        int RowFlush(int fd, const char* tpath, bool force_sync = false) {
            const std::lock_guard<std::mutex> lock(fdent_lock);
            const std::lock_guard<std::mutex> lock_data(fdent_data_lock);
            return RowFlushHasLock(fd, tpath, force_sync);
        }
        int Flush(int fd, bool force_sync = false) {
            return RowFlush(fd, nullptr, force_sync);
        }

        ssize_t Read(int fd, char* bytes, off_t start, size_t size, bool force_load = false);
        ssize_t Write(int fd, const char* bytes, off_t start, size_t size);

        bool PunchHole(off_t start = 0, size_t size = 0);

        void MarkDirtyNewFile();
        bool IsDirtyNewFile() const;
        void MarkDirtyMetadata();

        bool GetLastUpdateUntreatedPart(off_t& start, off_t& size) const REQUIRES(FdEntity::fdent_lock);
        bool ReplaceLastUpdateUntreatedPart(off_t front_start, off_t front_size, off_t behind_start, off_t behind_size) REQUIRES(FdEntity::fdent_lock);

        // Intentionally unimplemented -- for lock checking only.
        std::mutex* GetMutex() RETURN_CAPABILITY(fdent_lock);
};
|
||||
|
||||
typedef std::map<std::string, std::shared_ptr<FdEntity>> fdent_map_t; // key=path, value=FdEntity
|
||||
|
||||
#endif // S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
981
src/fdcache_fdinfo.cpp
Normal file
981
src/fdcache_fdinfo.cpp
Normal file
@ -0,0 +1,981 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "fdcache_fdinfo.h"
|
||||
#include "fdcache_pseudofd.h"
|
||||
#include "fdcache_entity.h"
|
||||
#include "curl.h"
|
||||
#include "string_util.h"
|
||||
#include "threadpoolman.h"
|
||||
#include "s3fs_threadreqs.h"
|
||||
|
||||
//------------------------------------------------
// PseudoFdInfo methods
//------------------------------------------------
// Constructor: when a valid physical fd is supplied, allocate a new pseudo
// fd from PseudoFdManager and record the open flags; otherwise the object
// stays inactive (pseudo_fd == -1, flags == 0).
// NOTE: the member-initializer list must follow member declaration order.
PseudoFdInfo::PseudoFdInfo(int fd, int open_flags) : pseudo_fd(-1), physical_fd(fd), flags(0), upload_fd(-1), instruct_count(0), last_result(0), uploaded_sem(0)
{
    if(-1 != physical_fd){
        pseudo_fd = PseudoFdManager::Get();
        flags = open_flags;
    }
}
|
||||
|
||||
// Destructor: release threads, upload state and fds explicitly while the
// member mutex is still alive.
PseudoFdInfo::~PseudoFdInfo()
{
    Clear();    // call before destroying the mutex
}
|
||||
|
||||
bool PseudoFdInfo::Clear()
|
||||
{
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!CancelAllThreads()){
|
||||
return false;
|
||||
}
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!ResetUploadInfo()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
CloseUploadFd();
|
||||
|
||||
if(-1 != pseudo_fd){
|
||||
PseudoFdManager::Release(pseudo_fd);
|
||||
}
|
||||
pseudo_fd = -1;
|
||||
physical_fd = -1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::IsUploadingHasLock() const
|
||||
{
|
||||
return !upload_id.empty();
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::IsUploading() const
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
return IsUploadingHasLock();
|
||||
}
|
||||
|
||||
void PseudoFdInfo::CloseUploadFd()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
|
||||
if(-1 != upload_fd){
|
||||
close(upload_fd);
|
||||
}
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::OpenUploadFd()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
|
||||
if(-1 != upload_fd){
|
||||
// already initialized
|
||||
return true;
|
||||
}
|
||||
if(-1 == physical_fd){
|
||||
S3FS_PRN_ERR("physical_fd is not initialized yet.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// duplicate fd
|
||||
int fd;
|
||||
if(-1 == (fd = dup(physical_fd))){
|
||||
S3FS_PRN_ERR("Could not duplicate physical file descriptor(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
scope_guard guard([&]() { close(fd); });
|
||||
|
||||
if(0 != lseek(fd, 0, SEEK_SET)){
|
||||
S3FS_PRN_ERR("Could not seek physical file descriptor(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
S3FS_PRN_ERR("Invalid file descriptor for uploading(errno=%d)", errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
guard.dismiss();
|
||||
upload_fd = fd;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::Set(int fd, int open_flags)
|
||||
{
|
||||
if(-1 == fd){
|
||||
return false;
|
||||
}
|
||||
Clear();
|
||||
physical_fd = fd;
|
||||
pseudo_fd = PseudoFdManager::Get();
|
||||
flags = open_flags;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::Writable() const
|
||||
{
|
||||
if(-1 == pseudo_fd){
|
||||
return false;
|
||||
}
|
||||
if(0 == (flags & (O_WRONLY | O_RDWR))){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::Readable() const
|
||||
{
|
||||
if(-1 == pseudo_fd){
|
||||
return false;
|
||||
}
|
||||
// O_RDONLY is 0x00, it means any pattern is readable.
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ClearUploadInfo(bool is_cancel_mp)
|
||||
{
|
||||
if(is_cancel_mp){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!CancelAllThreads()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
return ResetUploadInfo();
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ResetUploadInfo()
|
||||
{
|
||||
upload_id.clear();
|
||||
upload_list.clear();
|
||||
instruct_count = 0;
|
||||
last_result = 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::RowInitialUploadInfo(const std::string& id, bool is_cancel_mp)
|
||||
{
|
||||
if(is_cancel_mp){
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!ClearUploadInfo(is_cancel_mp)){
|
||||
return false;
|
||||
}
|
||||
}else{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!ResetUploadInfo()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
upload_id = id;
|
||||
return true;
|
||||
}
|
||||
|
||||
void PseudoFdInfo::IncreaseInstructionCount()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
++instruct_count;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::GetUploadInfo(std::string& id, int& fd) const
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
|
||||
if(!IsUploadingHasLock()){
|
||||
S3FS_PRN_ERR("Multipart Upload has not started yet.");
|
||||
return false;
|
||||
}
|
||||
id = upload_id;
|
||||
fd = upload_fd;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::GetUploadId(std::string& id) const
|
||||
{
|
||||
int fd = -1;
|
||||
return GetUploadInfo(id, fd);
|
||||
}
|
||||
|
||||
// Collect the etag entry of every registered upload part, in list order.
// Fails when uploading has not started, when any part lacks an etag
// pointer, or when the resulting list would be empty.
bool PseudoFdInfo::GetEtaglist(etaglist_t& list) const
{
    const std::lock_guard<std::mutex> guard(upload_list_lock);

    if(!IsUploadingHasLock()){
        S3FS_PRN_ERR("Multipart Upload has not started yet.");
        return false;
    }

    list.clear();
    for(const auto& part : upload_list){
        if(!part.petag){
            S3FS_PRN_ERR("The pointer to the etag string is null(internal error).");
            return false;
        }
        list.push_back(*(part.petag));
    }
    return !list.empty();
}
|
||||
|
||||
// [NOTE]
|
||||
// This method adds a part for a multipart upload.
|
||||
// The added new part must be an area that is exactly continuous with the
|
||||
// immediately preceding part.
|
||||
// An error will occur if it is discontinuous or if it overlaps with an
|
||||
// existing area.
|
||||
//
|
||||
bool PseudoFdInfo::AppendUploadPart(off_t start, off_t size, bool is_copy, etagpair** ppetag)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
|
||||
if(!IsUploadingHasLock()){
|
||||
S3FS_PRN_ERR("Multipart Upload has not started yet.");
|
||||
return false;
|
||||
}
|
||||
|
||||
off_t next_start_pos = 0;
|
||||
if(!upload_list.empty()){
|
||||
next_start_pos = upload_list.back().startpos + upload_list.back().size;
|
||||
}
|
||||
if(start != next_start_pos){
|
||||
S3FS_PRN_ERR("The expected starting position for the next part is %lld, but %lld was specified.", static_cast<long long int>(next_start_pos), static_cast<long long int>(start));
|
||||
return false;
|
||||
}
|
||||
|
||||
// make part number
|
||||
int partnumber = static_cast<int>(upload_list.size()) + 1;
|
||||
|
||||
// add new part
|
||||
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, partnumber)); // [NOTE] Create the etag entity and register it in the list.
|
||||
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
|
||||
|
||||
// set etag pointer
|
||||
if(ppetag){
|
||||
*ppetag = petag_entity;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Utility for sorting upload list
|
||||
//
|
||||
static bool filepart_partnum_compare(const filepart& src1, const filepart& src2)
|
||||
{
|
||||
return src1.get_part_number() < src2.get_part_number();
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
|
||||
//S3FS_PRN_DBG("[start=%lld][size=%lld][part_num=%d][is_copy=%s]", static_cast<long long int>(start), static_cast<long long int>(size), part_num, (is_copy ? "true" : "false"));
|
||||
|
||||
if(!IsUploadingHasLock()){
|
||||
S3FS_PRN_ERR("Multipart Upload has not started yet.");
|
||||
return false;
|
||||
}
|
||||
if(start < 0 || size <= 0 || part_num < 0 || !ppetag){
|
||||
S3FS_PRN_ERR("Parameters are wrong.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// insert new part
|
||||
etagpair* petag_entity = etag_entities.add(etagpair(nullptr, part_num));
|
||||
upload_list.emplace_back(false, physical_fd, start, size, is_copy, petag_entity);
|
||||
|
||||
// sort by part number
|
||||
std::sort(upload_list.begin(), upload_list.end(), filepart_partnum_compare);
|
||||
|
||||
// set etag pointer
|
||||
*ppetag = petag_entity;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy)
|
||||
{
|
||||
//S3FS_PRN_DBG("[path=%s][mplist(%zu)]", SAFESTRPTR(path), mplist.size());
|
||||
|
||||
if(mplist.empty()){
|
||||
// nothing to do
|
||||
return true;
|
||||
}
|
||||
if(!OpenUploadFd()){
|
||||
return false;
|
||||
}
|
||||
|
||||
// Get upload id/fd before loop
|
||||
std::string tmp_upload_id;
|
||||
int tmp_upload_fd = -1;
|
||||
if(!GetUploadInfo(tmp_upload_id, tmp_upload_fd)){
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string strpath = SAFESTRPTR(path);
|
||||
|
||||
for(auto iter = mplist.cbegin(); iter != mplist.cend(); ++iter){
|
||||
// Insert upload part
|
||||
etagpair* petag = nullptr;
|
||||
if(!InsertUploadPart(iter->start, iter->size, iter->part_num, is_copy, &petag)){
|
||||
S3FS_PRN_ERR("Failed to insert Multipart Upload Part to mplist [path=%s][start=%lld][size=%lld][part_num=%d][is_copy=%s]", strpath.c_str(), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
|
||||
return false;
|
||||
}
|
||||
|
||||
// setup instruction and request on another thread
|
||||
int result;
|
||||
if(0 != (result = multipart_upload_part_request(strpath, tmp_upload_fd, iter->start, iter->size, iter->part_num, tmp_upload_id, petag, is_copy, &uploaded_sem, &upload_list_lock, &last_result))){
|
||||
S3FS_PRN_ERR("failed setup instruction for Multipart Upload Part Request by error(%d) [path=%s][start=%lld][size=%lld][part_num=%d][is_copy=%s]", result, strpath.c_str(), static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->part_num, (is_copy ? "true" : "false"));
|
||||
return false;
|
||||
}
|
||||
|
||||
// Count up the number of internally managed threads
|
||||
IncreaseInstructionCount();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result)
|
||||
{
|
||||
S3FS_PRN_DBG("[path=%s][to_upload_list(%zu)][copy_list(%zu)]", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
|
||||
|
||||
result = 0;
|
||||
|
||||
if(!OpenUploadFd()){
|
||||
return false;
|
||||
}
|
||||
if(!ParallelMultipartUpload(path, to_upload_list, false) || !ParallelMultipartUpload(path, copy_list, true)){
|
||||
S3FS_PRN_ERR("Failed setup instruction for uploading(path=%s, to_upload_list=%zu, copy_list=%zu).", SAFESTRPTR(path), to_upload_list.size(), copy_list.size());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Wait for all thread exiting
|
||||
result = WaitAllThreadsExit();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Common method that calls S3fsCurl::PreMultipartUploadRequest via pre_multipart_upload_request
|
||||
//
|
||||
// [NOTE]
|
||||
// If the request is successful, initialize upload_id.
|
||||
//
|
||||
int PseudoFdInfo::PreMultipartUploadRequest(const std::string& strpath, const headers_t& meta)
|
||||
{
|
||||
// get upload_id
|
||||
std::string new_upload_id;
|
||||
int result;
|
||||
if(0 != (result = pre_multipart_upload_request(strpath, meta, new_upload_id))){
|
||||
return result;
|
||||
}
|
||||
|
||||
// reset upload_id
|
||||
if(!RowInitialUploadInfo(new_upload_id, false/* not need to cancel */)){
|
||||
S3FS_PRN_ERR("failed to setup multipart upload(set upload id to object)");
|
||||
return -EIO;
|
||||
}
|
||||
S3FS_PRN_DBG("succeed to setup multipart upload(set upload id to object)");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
// Upload the last updated Untreated area
//
// [Overview]
// Uploads untreated areas with the maximum multipart upload size as the
// boundary.
//
// * The starting position of the untreated area is aligned with the maximum
//   multipart upload size as the boundary.
// * If there is an uploaded area that overlaps with the aligned untreated
//   area, that uploaded area is canceled and absorbed by the untreated area.
// * Upload only when the aligned untreated area exceeds the maximum multipart
//   upload size.
// * When the start position of the untreated area is changed to boundary
//   alignment(to backward), and if that gap area is remained, that area is
//   rest to untreated area.
//
// Returns 0 on success(including all "nothing to upload yet" cases) or a
// negative errno value(-EBADF/-EIO/result of the pre-upload request).
//
ssize_t PseudoFdInfo::UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent)
{
    S3FS_PRN_DBG("[path=%s][pseudo_fd=%d][physical_fd=%d]", SAFESTRPTR(path), pseudo_fd, physical_fd);

    if(!path || -1 == physical_fd || -1 == pseudo_fd || !pfdent){
        S3FS_PRN_ERR("pseudo_fd(%d) to physical_fd(%d) for path(%s) is not opened or not writable, or pfdent is nullptr.", pseudo_fd, physical_fd, path);
        return -EBADF;
    }

    //
    // Get last update untreated area
    //
    off_t last_untreated_start = 0;
    off_t last_untreated_size = 0;
    if(!pfdent->GetLastUpdateUntreatedPart(last_untreated_start, last_untreated_size) || last_untreated_start < 0 || last_untreated_size <= 0){
        S3FS_PRN_WARN("Not found last update untreated area or it is empty, thus return without any error.");
        return 0;
    }

    //
    // Aligns the start position of the last updated raw area with the boundary
    //
    // * Align the last updated raw space with the maximum upload size boundary.
    // * The remaining size of the part before the boundary is will not be uploaded.
    //
    off_t max_mp_size = S3fsCurl::GetMultipartSize();
    // Round the start position up to the next multipart-size boundary.
    off_t aligned_start = ((last_untreated_start / max_mp_size) + (0 < (last_untreated_start % max_mp_size) ? 1 : 0)) * max_mp_size;
    if((last_untreated_start + last_untreated_size) <= aligned_start){
        S3FS_PRN_INFO("After the untreated area(start=%lld, size=%lld) is aligned with the boundary, the aligned start(%lld) exceeds the untreated area, so there is nothing to do.", static_cast<long long int>(last_untreated_start), static_cast<long long int>(last_untreated_size), static_cast<long long int>(aligned_start));
        return 0;
    }

    // Truncate the aligned area down to a whole multiple of the multipart size.
    off_t aligned_size = (((last_untreated_start + last_untreated_size) - aligned_start) / max_mp_size) * max_mp_size;
    if(0 == aligned_size){
        S3FS_PRN_DBG("After the untreated area(start=%lld, size=%lld) is aligned with the boundary(start is %lld), the aligned size is empty, so nothing to do.", static_cast<long long int>(last_untreated_start), static_cast<long long int>(last_untreated_size), static_cast<long long int>(aligned_start));
        return 0;
    }

    off_t front_rem_start = last_untreated_start;                 // start of the remainder untreated area in front of the boundary
    off_t front_rem_size = aligned_start - last_untreated_start;  // size of the remainder untreated area in front of the boundary

    //
    // Get the area for uploading, if last update treated area can be uploaded.
    //
    // [NOTE]
    // * Create the upload area list, if the untreated area aligned with the boundary
    //   exceeds the maximum upload size.
    // * If it overlaps with an area that has already been uploaded(unloaded list),
    //   that area is added to the cancellation list and included in the untreated area.
    //
    mp_part_list_t to_upload_list;
    filepart_list_t cancel_uploaded_list;
    if(!ExtractUploadPartsFromUntreatedArea(aligned_start, aligned_size, to_upload_list, cancel_uploaded_list, S3fsCurl::GetMultipartSize())){
        S3FS_PRN_ERR("Failed to extract upload parts from last untreated area.");
        return -EIO;
    }
    if(to_upload_list.empty()){
        S3FS_PRN_INFO("There is nothing to upload. In most cases, the untreated area does not meet the upload size.");
        return 0;
    }

    //
    // Has multipart uploading already started?
    //
    if(!IsUploading()){
        // Not yet: issue the initial request to obtain an upload id first.
        std::string strpath = SAFESTRPTR(path);
        int result;
        if(0 != (result = PreMultipartUploadRequest(strpath, meta))){
            return result;
        }
    }

    //
    // Output debug level information
    //
    // When canceling(overwriting) a part that has already been uploaded, output it.
    //
    if(S3fsLog::IsS3fsLogDbg()){
        for(auto cancel_iter = cancel_uploaded_list.cbegin(); cancel_iter != cancel_uploaded_list.cend(); ++cancel_iter){
            S3FS_PRN_DBG("Cancel uploaded: start(%lld), size(%lld), part number(%d)", static_cast<long long int>(cancel_iter->startpos), static_cast<long long int>(cancel_iter->size), (cancel_iter->petag ? cancel_iter->petag->part_num : -1));
        }
    }

    //
    // Upload Multipart parts
    //
    if(!ParallelMultipartUpload(path, to_upload_list, false)){
        S3FS_PRN_ERR("Failed to upload multipart parts.");
        return -EIO;
    }

    //
    // Exclude the uploaded Untreated area and update the last Untreated area.
    //
    // The remainder behind the uploaded span (smaller than one part) stays untreated.
    off_t behind_rem_start = aligned_start + aligned_size;
    off_t behind_rem_size = (last_untreated_start + last_untreated_size) - behind_rem_start;

    if(!pfdent->ReplaceLastUpdateUntreatedPart(front_rem_start, front_rem_size, behind_rem_start, behind_rem_size)){
        S3FS_PRN_WARN("The last untreated area could not be detected and the uploaded area could not be excluded from it, but continue because it does not affect the overall processing.");
    }

    return 0;
}
|
||||
|
||||
int PseudoFdInfo::WaitAllThreadsExit()
|
||||
{
|
||||
int result;
|
||||
bool is_loop = true;
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
if(0 == instruct_count){
|
||||
result = last_result;
|
||||
is_loop = false;
|
||||
}
|
||||
}
|
||||
|
||||
while(is_loop){
|
||||
// need to wait the worker exiting
|
||||
uploaded_sem.acquire();
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
if(0 == --instruct_count){
|
||||
// break loop
|
||||
result = last_result;
|
||||
is_loop = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool PseudoFdInfo::CancelAllThreads()
|
||||
{
|
||||
bool need_cancel = false;
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(upload_list_lock);
|
||||
if(0 < instruct_count){
|
||||
S3FS_PRN_INFO("The upload thread is running, so cancel them and wait for the end.");
|
||||
need_cancel = true;
|
||||
last_result = -ECANCELED; // to stop thread running
|
||||
}
|
||||
}
|
||||
if(need_cancel){
|
||||
WaitAllThreadsExit();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
// Extract the list for multipart upload from the Untreated Area
//
// The untreated_start parameter must be set aligning it with the boundaries
// of the maximum multipart upload size. This method expects it to be bounded.
//
// This method creates the upload area aligned from the untreated area by
// maximum size and creates the required list.
// If it overlaps with an area that has already been uploaded, the overlapped
// upload area will be canceled and absorbed by the untreated area.
// If the list creation process is complete and areas smaller than the maximum
// size remain, those area will be reset to untreated_start and untreated_size
// and returned to the caller.
// If the called untreated area is smaller than the maximum size of the
// multipart upload, no list will be created.
//
// [NOTE]
// Maximum multipart upload size must be uploading boundary.
//
// Returns false only on invalid parameters; an empty to_upload_list with a
// true return means the area was too small to upload.
//
bool PseudoFdInfo::ExtractUploadPartsFromUntreatedArea(off_t untreated_start, off_t untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size)
{
    if(untreated_start < 0 || untreated_size <= 0){
        S3FS_PRN_ERR("Parameters are wrong(untreated_start=%lld, untreated_size=%lld).", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size));
        return false;
    }

    // Initialize lists
    to_upload_list.clear();
    cancel_upload_list.clear();

    //
    // Align start position with maximum multipart upload boundaries
    //
    // The start is rounded DOWN to the boundary and the size grows by the
    // amount the start moved backward, so the covered byte range is unchanged.
    //
    off_t aligned_start = (untreated_start / max_mp_size) * max_mp_size;
    off_t aligned_size = untreated_size + (untreated_start - aligned_start);

    //
    // Check aligned untreated size
    //
    if(aligned_size < max_mp_size){
        S3FS_PRN_INFO("untreated area(start=%lld, size=%lld) to aligned boundary(start=%lld, size=%lld) is smaller than max mp size(%lld), so nothing to do.", static_cast<long long int>(untreated_start), static_cast<long long int>(untreated_size), static_cast<long long int>(aligned_start), static_cast<long long int>(aligned_size), static_cast<long long int>(max_mp_size));
        return true; // successful termination
    }

    //
    // Check each unloaded area in list
    //
    // [NOTE]
    // The uploaded area must be to be aligned by boundary.
    // Also, it is assumed that it must not be a copy area.
    // So if the areas overlap, include uploaded area as an untreated area.
    //
    {
        const std::lock_guard<std::mutex> lock(upload_list_lock);

        for(auto cur_iter = upload_list.begin(); cur_iter != upload_list.end(); /* ++cur_iter */){
            // Check overlap
            if((cur_iter->startpos + cur_iter->size - 1) < aligned_start || (aligned_start + aligned_size - 1) < cur_iter->startpos){
                // Areas do not overlap
                ++cur_iter;

            }else{
                // The areas overlap
                //
                // Since the start position of the uploaded area is aligned with the boundary,
                // it is not necessary to check the start position.
                // If the uploaded area exceeds the untreated area, expand the untreated area.
                //
                if((aligned_start + aligned_size - 1) < (cur_iter->startpos + cur_iter->size - 1)){
                    aligned_size += (cur_iter->startpos + cur_iter->size) - (aligned_start + aligned_size);
                }

                //
                // Add this to cancel list
                //
                cancel_upload_list.push_back(*cur_iter); // Copy and Push to cancel list
                cur_iter = upload_list.erase(cur_iter);  // erase returns the next valid iterator
            }
        }
    }

    //
    // Add upload area to the list
    //
    // Slice the aligned area into max_mp_size chunks; part numbers are
    // derived from the absolute file offset (1-based).
    //
    while(max_mp_size <= aligned_size){
        int part_num = static_cast<int>((aligned_start / max_mp_size) + 1);
        to_upload_list.emplace_back(aligned_start, max_mp_size, part_num);

        aligned_start += max_mp_size;
        aligned_size -= max_mp_size;
    }

    return true;
}
|
||||
|
||||
//
// Extract the area lists to be uploaded/downloaded for the entire file.
//
// [Parameters]
// to_upload_list       : A list of areas to upload in multipart upload.
// to_copy_list         : A list of areas for copy upload in multipart upload.
// to_download_list     : A list of areas that must be downloaded before multipart upload.
// cancel_upload_list   : A list of areas that have already been uploaded and will be canceled(overwritten).
// wait_upload_complete : If cancellation areas exist, this flag is set to true when it is necessary to wait until the upload of those cancellation areas is complete.
// file_size            : The size of the upload file.
// use_copy             : Specify true if copy multipart upload is available.
//
// [NOTE]
// The untreated_list in fdentity does not change, but upload_list is changed.
// (If you want to restore it, you can use cancel_upload_list.)
//
// Returns false if the existing upload_list is not aligned with the
// multipart boundary (unrecoverable), true otherwise.
//
bool PseudoFdInfo::ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy)
{
    const std::lock_guard<std::mutex> lock(upload_list_lock);

    // Initialize lists
    to_upload_list.clear();
    to_copy_list.clear();
    to_download_list.clear();
    cancel_upload_list.clear();
    wait_upload_complete = false;

    // Duplicate untreated list
    // (work on a copy so the fdentity's own untreated_list is untouched)
    untreated_list_t dup_untreated_list;
    untreated_list.Duplicate(dup_untreated_list);

    // Initialize the iterator of each list first
    // (both lists are consumed in file-offset order as the scan advances)
    auto dup_untreated_iter = dup_untreated_list.begin();
    auto uploaded_iter = upload_list.begin();

    //
    // Loop to extract areas to upload and download
    //
    // Check at the boundary of the maximum upload size from the beginning of the file
    //
    for(off_t cur_start = 0, cur_size = 0; cur_start < file_size; cur_start += cur_size){
        //
        // Set part size
        // (To avoid confusion, the area to be checked is called the "current area".)
        //
        cur_size = ((cur_start + max_mp_size) <= file_size ? max_mp_size : (file_size - cur_start));

        //
        // Extract the untreated area that overlaps this current area.
        // (The extracted area is deleted from dup_untreated_list.)
        //
        untreated_list_t cur_untreated_list;
        for(cur_untreated_list.clear(); dup_untreated_iter != dup_untreated_list.end(); ){
            if((dup_untreated_iter->start < (cur_start + cur_size)) && (cur_start < (dup_untreated_iter->start + dup_untreated_iter->size))){
                // this untreated area is overlap
                off_t tmp_untreated_start;
                off_t tmp_untreated_size;
                if(dup_untreated_iter->start < cur_start){
                    // [NOTE]
                    // This untreated area overlaps with the current area, but starts
                    // in front of the target area.
                    // This state should not be possible, but if this state is detected,
                    // the part before the target area will be deleted.
                    //
                    tmp_untreated_start = cur_start;
                    tmp_untreated_size = dup_untreated_iter->size - (cur_start - dup_untreated_iter->start);
                }else{
                    tmp_untreated_start = dup_untreated_iter->start;
                    tmp_untreated_size = dup_untreated_iter->size;
                }

                //
                // Check the end of the overlapping untreated area.
                //
                if((tmp_untreated_start + tmp_untreated_size) <= (cur_start + cur_size)){
                    //
                    // All of untreated areas are within the current area
                    //
                    // - Add this untreated area to cur_untreated_list
                    // - Delete this from dup_untreated_list
                    //
                    cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);
                    dup_untreated_iter = dup_untreated_list.erase(dup_untreated_iter);
                }else{
                    //
                    // The untreated area exceeds the end of the current area
                    //

                    // Adjust untreated area
                    tmp_untreated_size = (cur_start + cur_size) - tmp_untreated_start;

                    // Add adjusted untreated area to cur_untreated_list
                    cur_untreated_list.emplace_back(tmp_untreated_start, tmp_untreated_size);

                    // Remove this adjusted untreated area from the area pointed
                    // to by dup_untreated_iter.
                    // (the leftover tail will be picked up by the next current area)
                    dup_untreated_iter->size = (dup_untreated_iter->start + dup_untreated_iter->size) - (cur_start + cur_size);
                    dup_untreated_iter->start = tmp_untreated_start + tmp_untreated_size;
                }

            }else if((cur_start + cur_size - 1) < dup_untreated_iter->start){
                // this untreated area is over the current area, thus break loop.
                break;
            }else{
                ++dup_untreated_iter;
            }
        }

        //
        // Check uploaded area
        //
        // [NOTE]
        // The uploaded area should be aligned with the maximum upload size boundary.
        // It also assumes that each size of uploaded area must be a maximum upload
        // size.
        //
        auto overlap_uploaded_iter = upload_list.end();
        for(; uploaded_iter != upload_list.end(); ++uploaded_iter){
            if((cur_start < (uploaded_iter->startpos + uploaded_iter->size)) && (uploaded_iter->startpos < (cur_start + cur_size))){
                if(overlap_uploaded_iter != upload_list.end()){
                    //
                    // Something wrong in this unloaded area.
                    //
                    // This area is not aligned with the boundary, then this condition
                    // is unrecoverable and return failure.
                    //
                    S3FS_PRN_ERR("The uploaded list may not be the boundary for the maximum multipart upload size. No further processing is possible.");
                    return false;
                }
                // Set this iterator to overlap iter
                overlap_uploaded_iter = uploaded_iter;

            }else if((cur_start + cur_size - 1) < uploaded_iter->startpos){
                break;
            }
        }

        //
        // Create upload/download/cancel/copy list for this current area
        //
        int part_num = static_cast<int>((cur_start / max_mp_size) + 1);
        if(cur_untreated_list.empty()){
            //
            // No untreated area was detected in this current area
            //
            if(overlap_uploaded_iter != upload_list.end()){
                //
                // This current area already uploaded, then nothing to add to lists.
                //
                S3FS_PRN_DBG("Already uploaded: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));

            }else{
                //
                // This current area has not been uploaded
                // (neither an uploaded area nor an untreated area.)
                //
                if(use_copy){
                    //
                    // Copy multipart upload available
                    //
                    S3FS_PRN_DBG("To copy: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                    to_copy_list.emplace_back(cur_start, cur_size, part_num);
                }else{
                    //
                    // This current area needs to be downloaded and uploaded
                    //
                    S3FS_PRN_DBG("To download and upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                    to_download_list.emplace_back(cur_start, cur_size);
                    to_upload_list.emplace_back(cur_start, cur_size, part_num);
                }
            }
        }else{
            //
            // Found untreated area in this current area
            //
            if(overlap_uploaded_iter != upload_list.end()){
                //
                // This current area is also the uploaded area
                //
                // [NOTE]
                // The uploaded area is aligned with boundary, there are all data in
                // this current area locally(which includes all data of untreated area).
                // So this current area only needs to be uploaded again.
                //
                S3FS_PRN_DBG("Cancel upload: start=%lld, size=%lld", static_cast<long long int>(overlap_uploaded_iter->startpos), static_cast<long long int>(overlap_uploaded_iter->size));

                if(!overlap_uploaded_iter->uploaded){
                    S3FS_PRN_DBG("This cancel upload area is still uploading, so you must wait for it to complete before starting any Stream uploads.");
                    wait_upload_complete = true;
                }
                cancel_upload_list.push_back(*overlap_uploaded_iter); // add this uploaded area to cancel_upload_list
                uploaded_iter = upload_list.erase(overlap_uploaded_iter); // remove it from upload_list

                S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(cur_start), static_cast<long long int>(cur_size));
                to_upload_list.emplace_back(cur_start, cur_size, part_num); // add new uploading area to list

            }else{
                //
                // No uploaded area overlap this current area
                // (Areas other than the untreated area must be downloaded.)
                //
                // [NOTE]
                // Need to consider the case where there is a gap between the start
                // of the current area and the untreated area.
                // This gap is the area that should normally be downloaded.
                // But it is the area that can be copied if we can use copy multipart
                // upload. Then If we can use copy multipart upload and the previous
                // area is used copy multipart upload, this gap will be absorbed by
                // the previous area.
                // Unifying the copy multipart upload area can reduce the number of
                // upload requests.
                //
                off_t tmp_cur_start = cur_start;
                off_t tmp_cur_size = cur_size;
                off_t changed_start = cur_start;
                off_t changed_size = cur_size;
                bool first_area = true;
                for(auto tmp_cur_untreated_iter = cur_untreated_list.cbegin(); tmp_cur_untreated_iter != cur_untreated_list.cend(); ++tmp_cur_untreated_iter, first_area = false){
                    if(tmp_cur_start < tmp_cur_untreated_iter->start){
                        //
                        // Detected a gap at the start of area
                        //
                        bool include_prev_copy_part = false;
                        if(first_area && use_copy && !to_copy_list.empty()){
                            //
                            // Make sure that the area of the last item in to_copy_list
                            // is contiguous with this current area.
                            //
                            // [NOTE]
                            // Areas can be unified if the total size of the areas is
                            // within 5GB and the remaining area after unification is
                            // larger than the minimum multipart upload size.
                            //
                            auto copy_riter = to_copy_list.rbegin();

                            if( (copy_riter->start + copy_riter->size) == tmp_cur_start &&
                                (copy_riter->size + (tmp_cur_untreated_iter->start - tmp_cur_start)) <= FIVE_GB &&
                                ((tmp_cur_start + tmp_cur_size) - tmp_cur_untreated_iter->start) >= MIN_MULTIPART_SIZE )
                            {
                                //
                                // Unify to this area to previous copy area.
                                //
                                copy_riter->size += tmp_cur_untreated_iter->start - tmp_cur_start;
                                S3FS_PRN_DBG("Resize to copy: start=%lld, size=%lld", static_cast<long long int>(copy_riter->start), static_cast<long long int>(copy_riter->size));

                                // shrink the area to re-upload by the unified gap
                                changed_size -= (tmp_cur_untreated_iter->start - changed_start);
                                changed_start = tmp_cur_untreated_iter->start;
                                include_prev_copy_part = true;
                            }
                        }
                        if(!include_prev_copy_part){
                            //
                            // If this area is not unified, need to download this area
                            //
                            S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_untreated_iter->start - tmp_cur_start));
                            to_download_list.emplace_back(tmp_cur_start, tmp_cur_untreated_iter->start - tmp_cur_start);
                        }
                    }
                    //
                    // Set next start position
                    //
                    tmp_cur_size = (tmp_cur_start + tmp_cur_size) - (tmp_cur_untreated_iter->start + tmp_cur_untreated_iter->size);
                    tmp_cur_start = tmp_cur_untreated_iter->start + tmp_cur_untreated_iter->size;
                }

                //
                // Add download area to list, if remaining size
                //
                if(0 < tmp_cur_size){
                    S3FS_PRN_DBG("To download: start=%lld, size=%lld", static_cast<long long int>(tmp_cur_start), static_cast<long long int>(tmp_cur_size));
                    to_download_list.emplace_back(tmp_cur_start, tmp_cur_size);
                }

                //
                // Set upload area(whole of area) to list
                //
                S3FS_PRN_DBG("To upload: start=%lld, size=%lld", static_cast<long long int>(changed_start), static_cast<long long int>(changed_size));
                to_upload_list.emplace_back(changed_start, changed_size, part_num);
            }
        }
    }
    return true;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
111
src/fdcache_fdinfo.h
Normal file
111
src/fdcache_fdinfo.h
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_FDINFO_H_
|
||||
#define S3FS_FDCACHE_FDINFO_H_
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "fdcache_entity.h"
|
||||
#include "psemaphore.h"
|
||||
#include "metaheader.h"
|
||||
#include "types.h"
|
||||
|
||||
class UntreatedParts;
|
||||
|
||||
//------------------------------------------------
// Class PseudoFdInfo
//------------------------------------------------
// Per-pseudo-fd state for one open of a cached file: the underlying
// physical fd, the open flags, and the bookkeeping for an in-progress
// multipart upload (upload id, uploaded part list, worker-thread counts).
class PseudoFdInfo
{
    private:
        int pseudo_fd;                         // pseudo fd handed out to the caller
        int physical_fd;                       // fd of the underlying cache file
        int flags; // flags at open
        mutable std::mutex upload_list_lock; // protects upload_id/fd, upload_list, etc.
        std::string upload_id GUARDED_BY(upload_list_lock); // multipart upload id (empty when no upload is in progress)
        int upload_fd GUARDED_BY(upload_list_lock); // duplicated fd for uploading
        filepart_list_t upload_list GUARDED_BY(upload_list_lock); // parts already uploaded (or being uploaded)
        petagpool etag_entities GUARDED_BY(upload_list_lock); // list of etag string and part number entities(to maintain the etag entity even if MPPART_INFO is destroyed)
        int instruct_count GUARDED_BY(upload_list_lock); // number of instructions for processing by threads
        int last_result GUARDED_BY(upload_list_lock); // the result of thread processing
        Semaphore uploaded_sem; // use a semaphore to trigger an upload completion like event flag

    private:
        bool Clear();
        void CloseUploadFd();
        bool OpenUploadFd();
        bool ResetUploadInfo() REQUIRES(upload_list_lock);
        bool RowInitialUploadInfo(const std::string& id, bool is_cancel_mp);
        void IncreaseInstructionCount();
        bool GetUploadInfo(std::string& id, int& fd) const;
        bool ParallelMultipartUpload(const char* path, const mp_part_list_t& mplist, bool is_copy);
        bool InsertUploadPart(off_t start, off_t size, int part_num, bool is_copy, etagpair** ppetag);
        bool CancelAllThreads();
        bool ExtractUploadPartsFromUntreatedArea(off_t untreated_start, off_t untreated_size, mp_part_list_t& to_upload_list, filepart_list_t& cancel_upload_list, off_t max_mp_size);
        bool IsUploadingHasLock() const REQUIRES(upload_list_lock);

    public:
        explicit PseudoFdInfo(int fd = -1, int open_flags = 0);
        ~PseudoFdInfo();
        // non-copyable and non-movable: the object owns fds and a mutex
        PseudoFdInfo(const PseudoFdInfo&) = delete;
        PseudoFdInfo(PseudoFdInfo&&) = delete;
        PseudoFdInfo& operator=(const PseudoFdInfo&) = delete;
        PseudoFdInfo& operator=(PseudoFdInfo&&) = delete;

        // simple accessors
        int GetPhysicalFd() const { return physical_fd; }
        int GetPseudoFd() const { return pseudo_fd; }
        int GetFlags() const { return flags; }
        bool Writable() const;
        bool Readable() const;

        bool Set(int fd, int open_flags);
        bool ClearUploadInfo(bool is_cancel_mp = false);
        bool InitialUploadInfo(const std::string& id){ return RowInitialUploadInfo(id, true); }

        // multipart upload state queries
        bool IsUploading() const;
        bool GetUploadId(std::string& id) const;
        bool GetEtaglist(etaglist_t& list) const;

        bool AppendUploadPart(off_t start, off_t size, bool is_copy = false, etagpair** ppetag = nullptr);

        bool ParallelMultipartUploadAll(const char* path, const mp_part_list_t& to_upload_list, const mp_part_list_t& copy_list, int& result);
        int PreMultipartUploadRequest(const std::string& strpath, const headers_t& meta);

        int WaitAllThreadsExit();
        ssize_t UploadBoundaryLastUntreatedArea(const char* path, headers_t& meta, FdEntity* pfdent) REQUIRES(pfdent->GetMutex());
        bool ExtractUploadPartsFromAllArea(UntreatedParts& untreated_list, mp_part_list_t& to_upload_list, mp_part_list_t& to_copy_list, mp_part_list_t& to_download_list, filepart_list_t& cancel_upload_list, bool& wait_upload_complete, off_t max_mp_size, off_t file_size, bool use_copy);
};

// map of pseudo fd -> its PseudoFdInfo
typedef std::map<int, std::unique_ptr<PseudoFdInfo>> fdinfo_map_t;
|
||||
|
||||
#endif // S3FS_FDCACHE_FDINFO_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1032
src/fdcache_page.cpp
Normal file
1032
src/fdcache_page.cpp
Normal file
File diff suppressed because it is too large
Load Diff
140
src/fdcache_page.h
Normal file
140
src/fdcache_page.h
Normal file
@ -0,0 +1,140 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_PAGE_H_
|
||||
#define S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
#include <cstdint>
|
||||
#include <sys/types.h>
|
||||
#include <vector>
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
// [NOTE]
|
||||
// If the following symbols in lseek whence are undefined, define them.
|
||||
// If it is not supported by lseek, s3fs judges by the processing result of lseek.
|
||||
//
|
||||
#ifndef SEEK_DATA
|
||||
#define SEEK_DATA 3
|
||||
#endif
|
||||
#ifndef SEEK_HOLE
|
||||
#define SEEK_HOLE 4
|
||||
#endif
|
||||
|
||||
//------------------------------------------------
// Structure fdpage
//------------------------------------------------
// page block information
// Describes one contiguous byte range of a cached file and whether that
// range has been loaded and/or modified locally.
struct fdpage
{
    off_t offset;    // start offset of this page in the file
    off_t bytes;     // length of this page in bytes
    bool loaded;     // true if this range's data is present locally
    bool modified;   // true if this range has local changes

    explicit fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) :
        offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}

    // Offset of the first byte just past this page.
    off_t next() const
    {
        return (offset + bytes);
    }
    // Offset of the last byte in this page (0 when the page is empty).
    off_t end() const
    {
        return (0 < bytes ? offset + bytes - 1 : 0);
    }
};
typedef std::vector<struct fdpage> fdpage_list_t;
|
||||
|
||||
//------------------------------------------------
// Class PageList
//------------------------------------------------
class CacheFileStat;
class FdEntity;

// Tracks the loaded/modified status of a cached file as an ordered list
// of fdpage ranges, with (de)serialization to the cache stat file.
// cppcheck-suppress copyCtorAndEqOperator
class PageList
{
    friend class FdEntity;    // only one method access directly pages.

    private:
        fdpage_list_t pages;  // ordered list of page ranges covering the file
        bool is_shrink;       // [NOTE] true if it has been shrunk even once

    public:
        // combined loaded/modified status of a page
        enum class page_status : int8_t {
            NOT_LOAD_MODIFIED = 0,
            LOADED,
            MODIFIED,
            LOAD_MODIFIED
        };

    private:
        static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
        static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
        static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);

        void Clear();
        bool Parse(off_t new_pos);
        bool Serialize(const CacheFileStat& file, ino_t inode) const;

    public:
        static void FreeList(fdpage_list_t& list);

        explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false, bool shrunk = false);
        // non-copyable and non-movable
        PageList(const PageList&) = delete;
        PageList(PageList&&) = delete;
        PageList& operator=(const PageList&) = delete;
        PageList& operator=(PageList&&) = delete;
        ~PageList();

        bool Init(off_t size, bool is_loaded, bool is_modified);
        off_t Size() const;
        bool Resize(off_t size, bool is_loaded, bool is_modified);

        bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
        bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = page_status::LOADED, bool is_compress = true);
        bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
        off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0, off_t limit_size = 0) const; // size=0 is checking to end of list
        size_t GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
        bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
        bool GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start = 0, size_t size = 0);

        off_t BytesModified() const;
        bool IsModified() const;
        bool ClearAllModified();

        bool Compress();
        bool Deserialize(CacheFileStat& file, ino_t inode);
        void Dump() const;
        bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list) const;
};
|
||||
|
||||
#endif // S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
105
src/fdcache_pseudofd.cpp
Normal file
105
src/fdcache_pseudofd.cpp
Normal file
@ -0,0 +1,105 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "fdcache_pseudofd.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
// [NOTE]
|
||||
// The minimum pseudo fd value starts 2.
|
||||
// This is to avoid mistakes for 0(stdout) and 1(stderr), which are usually used.
|
||||
//
|
||||
static constexpr int MIN_PSEUDOFD_NUMBER = 2;
|
||||
|
||||
//------------------------------------------------
// PseudoFdManager class methods
//------------------------------------------------
// Returns the process-wide singleton instance.
// Function-local static: constructed on first use; initialization is
// thread-safe in C++11 and later.
PseudoFdManager& PseudoFdManager::GetManager()
{
    static PseudoFdManager singleton;
    return singleton;
}
|
||||
|
||||
int PseudoFdManager::Get()
|
||||
{
|
||||
return (PseudoFdManager::GetManager()).CreatePseudoFd();
|
||||
}
|
||||
|
||||
bool PseudoFdManager::Release(int fd)
|
||||
{
|
||||
return (PseudoFdManager::GetManager()).ReleasePseudoFd(fd);
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
// PseudoFdManager methods
|
||||
//------------------------------------------------
|
||||
int PseudoFdManager::GetUnusedMinPseudoFd() const
|
||||
{
|
||||
int min_fd = MIN_PSEUDOFD_NUMBER;
|
||||
|
||||
// Look for the first discontinuous value.
|
||||
for(auto iter = pseudofd_list.cbegin(); iter != pseudofd_list.cend(); ++iter){
|
||||
if(min_fd == (*iter)){
|
||||
++min_fd;
|
||||
}else if(min_fd < (*iter)){
|
||||
break;
|
||||
}
|
||||
}
|
||||
return min_fd;
|
||||
}
|
||||
|
||||
int PseudoFdManager::CreatePseudoFd()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(pseudofd_list_lock);
|
||||
|
||||
int new_fd = PseudoFdManager::GetUnusedMinPseudoFd();
|
||||
pseudofd_list.push_back(new_fd);
|
||||
std::sort(pseudofd_list.begin(), pseudofd_list.end());
|
||||
|
||||
return new_fd;
|
||||
}
|
||||
|
||||
bool PseudoFdManager::ReleasePseudoFd(int fd)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(pseudofd_list_lock);
|
||||
|
||||
for(auto iter = pseudofd_list.begin(); iter != pseudofd_list.end(); ++iter){
|
||||
if(fd == (*iter)){
|
||||
pseudofd_list.erase(iter);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
73
src/fdcache_pseudofd.h
Normal file
73
src/fdcache_pseudofd.h
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_PSEUDOFD_H_
|
||||
#define S3FS_FDCACHE_PSEUDOFD_H_
|
||||
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Typdefs
|
||||
//------------------------------------------------
|
||||
// List of pseudo fd in use
|
||||
//
|
||||
typedef std::vector<int> pseudofd_list_t;
|
||||
|
||||
//------------------------------------------------
// Class PseudoFdManager
//------------------------------------------------
// Singleton that hands out process-local "pseudo" file descriptors.
// These are plain integers (starting at MIN_PSEUDOFD_NUMBER) tracked in a
// sorted list; they are not kernel descriptors.
class PseudoFdManager
{
    private:
        pseudofd_list_t pseudofd_list GUARDED_BY(pseudofd_list_lock);   // sorted list of pseudo fds in use
        std::mutex pseudofd_list_lock; // protects pseudofd_list

        // Singleton accessor (function-local static instance).
        static PseudoFdManager& GetManager();

        PseudoFdManager() = default;
        ~PseudoFdManager() = default;

        // Smallest unused value; caller must hold pseudofd_list_lock.
        int GetUnusedMinPseudoFd() const REQUIRES(pseudofd_list_lock);
        int CreatePseudoFd();
        bool ReleasePseudoFd(int fd);

    public:
        PseudoFdManager(const PseudoFdManager&) = delete;
        PseudoFdManager(PseudoFdManager&&) = delete;
        PseudoFdManager& operator=(const PseudoFdManager&) = delete;
        PseudoFdManager& operator=(PseudoFdManager&&) = delete;

        // Allocate a new pseudo fd.
        static int Get();
        // Release a previously allocated pseudo fd; false if unknown.
        static bool Release(int fd);
};
|
||||
|
||||
#endif // S3FS_FDCACHE_PSEUDOFD_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
326
src/fdcache_stat.cpp
Normal file
326
src/fdcache_stat.cpp
Normal file
@ -0,0 +1,326 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <string>
|
||||
#include <sys/file.h>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "fdcache_stat.h"
|
||||
#include "fdcache.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "s3fs_cred.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat class methods
|
||||
//------------------------------------------------
|
||||
std::string CacheFileStat::GetCacheFileStatTopDir()
|
||||
{
|
||||
std::string top_path;
|
||||
if(!FdManager::IsCacheDir() || S3fsCred::GetBucket().empty()){
|
||||
return top_path;
|
||||
}
|
||||
|
||||
// stat top dir( "/<cache_dir>/.<bucket_name>.stat" )
|
||||
top_path += FdManager::GetCacheDir();
|
||||
top_path += "/.";
|
||||
top_path += S3fsCred::GetBucket();
|
||||
top_path += ".stat";
|
||||
return top_path;
|
||||
}
|
||||
|
||||
int CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_ERR("The path to cache top dir is empty.");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if(is_create_dir){
|
||||
int result;
|
||||
if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){
|
||||
S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
if(!path || '\0' == path[0]){
|
||||
sfile_path = top_path;
|
||||
}else{
|
||||
sfile_path = top_path + SAFESTRPTR(path);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool CacheFileStat::CheckCacheFileStatTopDir()
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to check permission.");
|
||||
return true;
|
||||
}
|
||||
|
||||
return check_exist_dir_permission(top_path.c_str());
|
||||
}
|
||||
|
||||
int CacheFileStat::DeleteCacheFileStat(const char* path)
|
||||
{
|
||||
if(!path || '\0' == path[0]){
|
||||
return -EINVAL;
|
||||
}
|
||||
// stat path
|
||||
std::string sfile_path;
|
||||
int result;
|
||||
if(0 != (result = CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false))){
|
||||
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path);
|
||||
return result;
|
||||
}
|
||||
if(0 != unlink(sfile_path.c_str())){
|
||||
result = -errno;
|
||||
if(-ENOENT == result){
|
||||
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, result);
|
||||
}else{
|
||||
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If remove stat file directory, it should do before removing
|
||||
// file cache directory.
|
||||
//
|
||||
bool CacheFileStat::DeleteCacheFileStatDirectory()
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to remove it.");
|
||||
return true;
|
||||
}
|
||||
return delete_files_in_dir(top_path.c_str(), true);
|
||||
}
|
||||
|
||||
// Moves the stat file for oldpath to the location for newpath.
// Returns true when the move succeeded, or when there is no stat file for
// oldpath (nothing to move). Returns false on any filesystem error.
//
// NOTE(review): the move is done with link() + unlink(), which is not
// atomic — a crash between the two calls leaves both files behind.
// Presumably acceptable for cache metadata; confirm if stronger
// guarantees are needed.
bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath)
{
    if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){
        return false;
    }

    // stat path
    std::string old_filestat;
    std::string new_filestat;
    if(0 != CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || 0 != CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
        return false;
    }

    // check new stat path
    struct stat st;
    if(0 == stat(new_filestat.c_str(), &st)){
        // new stat path is existed, then unlink it.
        if(-1 == unlink(new_filestat.c_str())){
            S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno);
            return false;
        }
    }

    // check old stat path
    if(0 != stat(old_filestat.c_str(), &st)){
        // old stat path is not existed, then nothing to do any more.
        return true;
    }

    // link and unlink
    if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){
        S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno);
        return false;
    }
    if(-1 == unlink(old_filestat.c_str())){
        S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno);
        return false;
    }
    return true;
}
|
||||
|
||||
//------------------------------------------------
// CacheFileStat methods
//------------------------------------------------
// Constructor. When tpath is given and non-empty, immediately sets the
// path and opens (and locks) the corresponding stat file via SetPath().
CacheFileStat::CacheFileStat(const char* tpath) : fd(-1)
{
    if(tpath && '\0' != tpath[0]){
        SetPath(tpath, true);
    }
}
|
||||
|
||||
// Destructor: unlocks and closes the stat file if it is still open.
CacheFileStat::~CacheFileStat()
{
    Release();
}
|
||||
|
||||
bool CacheFileStat::SetPath(const char* tpath, bool is_open)
|
||||
{
|
||||
if(!tpath || '\0' == tpath[0]){
|
||||
return false;
|
||||
}
|
||||
if(!Release()){
|
||||
// could not close old stat file.
|
||||
return false;
|
||||
}
|
||||
path = tpath;
|
||||
if(!is_open){
|
||||
return true;
|
||||
}
|
||||
return Open();
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// There is no need to check whether the file is open because using rename().
|
||||
//
|
||||
bool CacheFileStat::OverWriteFile(const std::string& strall) const
|
||||
{
|
||||
// make temporary file path(in same cache directory)
|
||||
std::string sfile_path;
|
||||
if(0 != CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
|
||||
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
|
||||
return false;
|
||||
}
|
||||
std::string strTmpFile = mydirname(sfile_path) + "/.tmpstat.XXXXXX";
|
||||
strTmpFile.push_back('\0'); // terminate with a null character and allocate space for it.
|
||||
|
||||
// open temporary file(mode: 0600)
|
||||
//
|
||||
// [TODO]
|
||||
// Currently, use "&str[pos]" to make it possible to build with C++14.
|
||||
// Once we support C++17 or later, we will use "str.data()".
|
||||
//
|
||||
int tmpfd;
|
||||
if(-1 == (tmpfd = mkstemp(&strTmpFile[0]))){ // NOLINT(readability-container-data-pointer)
|
||||
S3FS_PRN_ERR("failed to create temporary cache stat file path(%s) for %s cache", strTmpFile.c_str(), sfile_path.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// write contents
|
||||
if(0 >= pwrite(tmpfd, strall.c_str(), strall.length(), 0)){
|
||||
S3FS_PRN_ERR("failed to write stats to temporary file(%d)", errno);
|
||||
close(tmpfd);
|
||||
return false;
|
||||
}
|
||||
close(tmpfd);
|
||||
|
||||
// rename
|
||||
if(0 != rename(strTmpFile.c_str(), sfile_path.c_str())){
|
||||
S3FS_PRN_ERR("failed to rename temporary cache stat file path(%s) to %s cache", strTmpFile.c_str(), sfile_path.c_str());
|
||||
unlink(strTmpFile.c_str());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Opens the stat file for this->path and takes an exclusive flock() on it.
// readonly=true opens O_RDONLY; otherwise the file is created if missing
// and opened O_RDWR (mode 0600). On success the locked descriptor is kept
// in this->fd; on any error, fd is left unchanged and false is returned.
bool CacheFileStat::RawOpen(bool readonly)
{
    if(path.empty()){
        return false;
    }
    if(-1 != fd){
        // already opened
        return true;
    }
    // stat path
    std::string sfile_path;
    if(0 != CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
        S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
        return false;
    }
    // open
    int tmpfd;
    if(readonly){
        if(-1 == (tmpfd = open(sfile_path.c_str(), O_RDONLY))){
            S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
            return false;
        }
    }else{
        if(-1 == (tmpfd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
            S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
            return false;
        }
    }
    // Close tmpfd automatically on any of the early returns below.
    scope_guard guard([&]() { close(tmpfd); });

    // lock
    if(-1 == flock(tmpfd, LOCK_EX)){
        S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno);
        return false;
    }
    // seek top
    // (lseek returns the resulting offset; anything other than 0 is a failure here)
    if(0 != lseek(tmpfd, 0, SEEK_SET)){
        S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno);
        flock(tmpfd, LOCK_UN);
        return false;
    }
    S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str());

    // Keep the descriptor (and its lock): cancel the auto-close.
    guard.dismiss();
    fd = tmpfd;
    return true;
}
|
||||
|
||||
// Opens the stat file read/write (creating it if needed) and locks it.
bool CacheFileStat::Open()
{
    return RawOpen(false);
}
|
||||
|
||||
// Opens the stat file read-only (it must already exist) and locks it.
bool CacheFileStat::ReadOnlyOpen()
{
    return RawOpen(true);
}
|
||||
|
||||
bool CacheFileStat::Release()
|
||||
{
|
||||
if(-1 == fd){
|
||||
// already release
|
||||
return true;
|
||||
}
|
||||
// unlock
|
||||
if(-1 == flock(fd, LOCK_UN)){
|
||||
S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_DBG("file unlocked(%s)", path.c_str());
|
||||
|
||||
if(-1 == close(fd)){
|
||||
S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno);
|
||||
return false;
|
||||
}
|
||||
fd = -1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
71
src/fdcache_stat.h
Normal file
71
src/fdcache_stat.h
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_STAT_H_
|
||||
#define S3FS_FDCACHE_STAT_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
//------------------------------------------------
// CacheFileStat
//------------------------------------------------
// Manages the "stat" file that records cache state for one object.
// Stat files live under "/<cache_dir>/.<bucket_name>.stat". An instance
// opens its stat file with an exclusive flock() and keeps the locked
// descriptor until Release() or destruction.
class CacheFileStat
{
    private:
        std::string path;   // object path this stat file belongs to
        int fd;             // locked stat-file descriptor, or -1 when closed

    private:
        // Builds the stat file path for "path"; optionally creates parent dirs.
        static int MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);

        // Opens and flock()s the stat file (read-only or create + read/write).
        bool RawOpen(bool readonly);

    public:
        // "/<cache_dir>/.<bucket_name>.stat", or empty when cache dir/bucket unset.
        static std::string GetCacheFileStatTopDir();
        static int DeleteCacheFileStat(const char* path);
        static bool CheckCacheFileStatTopDir();
        static bool DeleteCacheFileStatDirectory();
        static bool RenameCacheFileStat(const char* oldpath, const char* newpath);

        explicit CacheFileStat(const char* tpath = nullptr);
        ~CacheFileStat();   // calls Release()
        CacheFileStat(const CacheFileStat&) = delete;
        CacheFileStat(CacheFileStat&&) = delete;
        CacheFileStat& operator=(const CacheFileStat&) = delete;
        CacheFileStat& operator=(CacheFileStat&&) = delete;

        bool Open();                    // open read/write, creating the file if needed
        bool ReadOnlyOpen();            // open read-only
        bool Release();                 // unlock and close; true when closed
        bool SetPath(const char* tpath, bool is_open = true);
        int GetFd() const { return fd; }    // -1 when not open
        // Atomically rewrites the stat file contents (temp file + rename).
        bool OverWriteFile(const std::string& strall) const;
};
|
||||
|
||||
#endif // S3FS_FDCACHE_STAT_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
249
src/fdcache_untreated.cpp
Normal file
249
src/fdcache_untreated.cpp
Normal file
@ -0,0 +1,249 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
#include <mutex>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "fdcache_untreated.h"
|
||||
|
||||
//------------------------------------------------
// UntreatedParts methods
//------------------------------------------------
// Returns true when no untreated part is recorded (thread-safe).
bool UntreatedParts::empty() const
{
    const std::lock_guard<std::mutex> lock(untreated_list_lock);
    return untreated_list.empty();
}
|
||||
|
||||
// Records [start, start + size) as an untreated area. The new range is
// merged into an existing part when untreatedpart::stretch() accepts it
// (overlap — and possibly adjacency; see stretch()'s definition);
// otherwise it is inserted so the list stays ordered by start offset.
// Every touched or created part is stamped with a fresh last_tag.
bool UntreatedParts::AddPart(off_t start, off_t size)
{
    if(start < 0 || size <= 0){
        S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
        return false;
    }
    const std::lock_guard<std::mutex> lock(untreated_list_lock);

    // New generation number identifying this update.
    ++last_tag;

    // Check the overlap with the existing part and add the part.
    for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
        if(iter->stretch(start, size, last_tag)){
            // the part was stretched, thus check if it overlaps with next parts
            auto niter = iter;
            for(++niter; niter != untreated_list.cend(); ){
                if(!iter->stretch(niter->start, niter->size, last_tag)){
                    // This next part does not overlap with the current part
                    break;
                }
                // Since the parts overlap and the current part is stretched, delete this next part.
                // NOTE(review): this keeps using "iter" after erase(); that is
                // only safe if untreated_list_t is a node-based container
                // (e.g. std::list) — confirm against its typedef in types.h.
                niter = untreated_list.erase(niter);
            }
            // success to stretch and compress existed parts
            return true;

        }else if((start + size) < iter->start){
            // The part to add should be inserted before the current part.
            untreated_list.insert(iter, untreatedpart(start, size, last_tag));
            // success to stretch and compress existed parts
            return true;
        }
    }
    // There are no overlapping parts in the untreated_list, then add the part at end of list
    untreated_list.emplace_back(start, size, last_tag);
    return true;
}
|
||||
|
||||
// Selects a part to process and returns its range via (start, size).
//   max_size : upper bound on the returned size; larger parts are returned
//              truncated to max_size (from their start)
//   min_size : parts smaller than this are not eligible
//   lastpart : when true, only parts stamped with the latest tag
//              (last_tag) are considered
// Returns false when no eligible part exists. Note that with lastpart,
// the search gives up as soon as the first tagged part is too small.
bool UntreatedParts::RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const
{
    if(max_size <= 0 || min_size < 0 || max_size < min_size){
        S3FS_PRN_ERR("Parameter are wrong(max_size=%lld, min_size=%lld).", static_cast<long long int>(max_size), static_cast<long long int>(min_size));
        return false;
    }
    const std::lock_guard<std::mutex> lock(untreated_list_lock);

    // Check the overlap with the existing part and add the part.
    for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
        if(!lastpart || iter->untreated_tag == last_tag){
            if(min_size <= iter->size){
                if(iter->size <= max_size){
                    // whole part( min <= part size <= max )
                    start = iter->start;
                    size  = iter->size;
                }else{
                    // Partially take out part( max < part size )
                    start = iter->start;
                    size  = max_size;
                }
                return true;
            }else{
                if(lastpart){
                    // the last-updated part is too small: nothing to return
                    return false;
                }
            }
        }
    }
    return false;
}
|
||||
|
||||
// [NOTE]
|
||||
// If size is specified as 0, all areas(parts) after start will be deleted.
|
||||
//
|
||||
bool UntreatedParts::ClearParts(off_t start, off_t size)
|
||||
{
|
||||
if(start < 0 || size < 0){
|
||||
S3FS_PRN_ERR("Parameter are wrong(start=%lld, size=%lld).", static_cast<long long int>(start), static_cast<long long int>(size));
|
||||
return false;
|
||||
}
|
||||
const std::lock_guard<std::mutex> lock(untreated_list_lock);
|
||||
|
||||
if(untreated_list.empty()){
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check the overlap with the existing part.
|
||||
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ){
|
||||
if(0 != size && (start + size) <= iter->start){
|
||||
// clear area is in front of iter area, no more to do.
|
||||
break;
|
||||
}else if(start <= iter->start){
|
||||
if(0 != size && (start + size) <= (iter->start + iter->size)){
|
||||
// clear area overlaps with iter area(on the start side)
|
||||
iter->size = (iter->start + iter->size) - (start + size);
|
||||
iter->start = start + size;
|
||||
if(0 == iter->size){
|
||||
iter = untreated_list.erase(iter);
|
||||
}
|
||||
}else{
|
||||
// clear area overlaps with all of iter area
|
||||
iter = untreated_list.erase(iter);
|
||||
}
|
||||
}else if(start < (iter->start + iter->size)){
|
||||
// clear area overlaps with iter area(on the end side)
|
||||
if(0 == size || (iter->start + iter->size) <= (start + size)){
|
||||
// start to iter->end is clear
|
||||
iter->size = start - iter->start;
|
||||
}else{
|
||||
// parse current part
|
||||
iter->size = start - iter->start;
|
||||
|
||||
// add new part
|
||||
off_t next_start = start + size;
|
||||
off_t next_size = (iter->start + iter->size) - (start + size);
|
||||
long next_tag = iter->untreated_tag;
|
||||
++iter;
|
||||
iter = untreated_list.insert(iter, untreatedpart(next_start, next_size, next_tag));
|
||||
++iter;
|
||||
}
|
||||
}else{
|
||||
// clear area is in behind of iter area
|
||||
++iter;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Update the last updated Untreated part
|
||||
//
|
||||
bool UntreatedParts::GetLastUpdatePart(off_t& start, off_t& size) const
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(untreated_list_lock);
|
||||
|
||||
for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
|
||||
if(iter->untreated_tag == last_tag){
|
||||
start = iter->start;
|
||||
size = iter->size;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
//
|
||||
// Replaces the last updated Untreated part.
|
||||
//
|
||||
// [NOTE]
|
||||
// If size <= 0, delete that part
|
||||
//
|
||||
bool UntreatedParts::ReplaceLastUpdatePart(off_t start, off_t size)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(untreated_list_lock);
|
||||
|
||||
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
|
||||
if(iter->untreated_tag == last_tag){
|
||||
if(0 < size){
|
||||
iter->start = start;
|
||||
iter->size = size;
|
||||
}else{
|
||||
untreated_list.erase(iter);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
//
|
||||
// Remove the last updated Untreated part.
|
||||
//
|
||||
bool UntreatedParts::RemoveLastUpdatePart()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(untreated_list_lock);
|
||||
|
||||
for(auto iter = untreated_list.begin(); iter != untreated_list.end(); ++iter){
|
||||
if(iter->untreated_tag == last_tag){
|
||||
untreated_list.erase(iter);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
//
// Duplicate the internally untreated_list.
//
// Copies the current list into "list" under the lock; always succeeds.
bool UntreatedParts::Duplicate(untreated_list_t& list)
{
    const std::lock_guard<std::mutex> lock(untreated_list_lock);

    list = untreated_list;
    return true;
}
|
||||
|
||||
void UntreatedParts::Dump()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(untreated_list_lock);
|
||||
|
||||
S3FS_PRN_DBG("untreated list = [");
|
||||
for(auto iter = untreated_list.cbegin(); iter != untreated_list.cend(); ++iter){
|
||||
S3FS_PRN_DBG(" {%014lld - %014lld : tag=%ld}", static_cast<long long int>(iter->start), static_cast<long long int>(iter->size), iter->untreated_tag);
|
||||
}
|
||||
S3FS_PRN_DBG("]");
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
77
src/fdcache_untreated.h
Normal file
77
src/fdcache_untreated.h
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_UNTREATED_H_
|
||||
#define S3FS_FDCACHE_UNTREATED_H_
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include "common.h"
|
||||
#include "types.h"
|
||||
|
||||
//------------------------------------------------
// Class UntreatedParts
//------------------------------------------------
// Thread-safe list of byte ranges ("untreated parts") of a file. Each
// part carries the generation counter (last_tag) of the update that last
// touched it, so callers can find or manipulate the most recently
// updated part.
class UntreatedParts
{
    private:
        mutable std::mutex untreated_list_lock; // protects untreated_list

        untreated_list_t untreated_list GUARDED_BY(untreated_list_lock);    // parts ordered by start offset
        long last_tag GUARDED_BY(untreated_list_lock) = 0; // [NOTE] Use this to identify the latest updated part.

    private:
        // Finds a part whose size is >= min_size, returning at most max_size
        // bytes of it; lastpart restricts the search to the latest tag.
        bool RowGetPart(off_t& start, off_t& size, off_t max_size, off_t min_size, bool lastpart) const;

    public:
        UntreatedParts() = default;
        ~UntreatedParts() = default;
        UntreatedParts(const UntreatedParts&) = delete;
        UntreatedParts(UntreatedParts&&) = delete;
        UntreatedParts& operator=(const UntreatedParts&) = delete;
        UntreatedParts& operator=(UntreatedParts&&) = delete;

        bool empty() const;

        // Records [start, start+size), merging with overlapping parts.
        bool AddPart(off_t start, off_t size);
        bool GetLastUpdatedPart(off_t& start, off_t& size, off_t max_size, off_t min_size = MIN_MULTIPART_SIZE) const { return RowGetPart(start, size, max_size, min_size, true); }

        // Removes [start, start+size) from all parts; size=0 clears everything after start.
        bool ClearParts(off_t start, off_t size);
        bool ClearAll() { return ClearParts(0, 0); }

        bool GetLastUpdatePart(off_t& start, off_t& size) const;
        bool ReplaceLastUpdatePart(off_t start, off_t size);
        bool RemoveLastUpdatePart();

        // Copies the internal list into "list".
        bool Duplicate(untreated_list_t& list);

        // Logs all parts at debug level.
        void Dump();
};
|
||||
|
||||
#endif // S3FS_FDCACHE_UNTREATED_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
303
src/filetimes.cpp
Normal file
303
src/filetimes.cpp
Normal file
@ -0,0 +1,303 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include "filetimes.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// result: -1 ts1 < ts2
|
||||
// 0 ts1 == ts2
|
||||
// 1 ts1 > ts2
|
||||
//
|
||||
bool valid_timespec(const struct timespec& ts)
|
||||
{
|
||||
if(0 > ts.tv_sec || UTIME_OMIT == ts.tv_nsec || UTIME_NOW == ts.tv_nsec){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// result: -1 ts1 < ts2
|
||||
// 0 ts1 == ts2
|
||||
// 1 ts1 > ts2
|
||||
//
|
||||
constexpr int compare_timespec(const struct timespec& ts1, const struct timespec& ts2)
|
||||
{
|
||||
if(ts1.tv_sec < ts2.tv_sec){
|
||||
return -1;
|
||||
}else if(ts1.tv_sec > ts2.tv_sec){
|
||||
return 1;
|
||||
}else{
|
||||
if(ts1.tv_nsec < ts2.tv_nsec){
|
||||
return -1;
|
||||
}else if(ts1.tv_nsec > ts2.tv_nsec){
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// result: -1 st < ts
|
||||
// 0 st == ts
|
||||
// 1 st > ts
|
||||
//
|
||||
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts)
|
||||
{
|
||||
struct timespec st_ts;
|
||||
set_stat_to_timespec(st, type, st_ts);
|
||||
|
||||
return compare_timespec(st_ts, ts);
|
||||
}
|
||||
|
||||
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts)
|
||||
{
|
||||
if(stat_time_type::ATIME == type){
|
||||
#ifdef __APPLE__
|
||||
st.st_atime = ts.tv_sec;
|
||||
st.st_atimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_atim.tv_sec = ts.tv_sec;
|
||||
st.st_atim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else if(stat_time_type::MTIME == type){
|
||||
#ifdef __APPLE__
|
||||
st.st_mtime = ts.tv_sec;
|
||||
st.st_mtimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_mtim.tv_sec = ts.tv_sec;
|
||||
st.st_mtim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else if(stat_time_type::CTIME == type){
|
||||
#ifdef __APPLE__
|
||||
st.st_ctime = ts.tv_sec;
|
||||
st.st_ctimespec.tv_nsec = ts.tv_nsec;
|
||||
#else
|
||||
st.st_ctim.tv_sec = ts.tv_sec;
|
||||
st.st_ctim.tv_nsec = ts.tv_nsec;
|
||||
#endif
|
||||
}else{
|
||||
S3FS_PRN_ERR("unknown type(%d), so skip to set value.", static_cast<int>(type));
|
||||
}
|
||||
}
|
||||
|
||||
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts)
|
||||
{
|
||||
if(stat_time_type::ATIME == type){
|
||||
#ifdef __APPLE__
|
||||
ts.tv_sec = st.st_atime;
|
||||
ts.tv_nsec = st.st_atimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_atim;
|
||||
#endif
|
||||
}else if(stat_time_type::MTIME == type){
|
||||
#ifdef __APPLE__
|
||||
ts.tv_sec = st.st_mtime;
|
||||
ts.tv_nsec = st.st_mtimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_mtim;
|
||||
#endif
|
||||
}else if(stat_time_type::CTIME == type){
|
||||
#ifdef __APPLE__
|
||||
ts.tv_sec = st.st_ctime;
|
||||
ts.tv_nsec = st.st_ctimespec.tv_nsec;
|
||||
#else
|
||||
ts = st.st_ctim;
|
||||
#endif
|
||||
}else{
|
||||
S3FS_PRN_ERR("unknown type(%d), so use 0 as timespec.", static_cast<int>(type));
|
||||
ts.tv_sec = 0;
|
||||
ts.tv_nsec = 0;
|
||||
}
|
||||
return &ts;
|
||||
}
|
||||
|
||||
std::string str_stat_time(const struct stat& st, stat_time_type type)
|
||||
{
|
||||
struct timespec ts;
|
||||
return str(*set_stat_to_timespec(st, type, ts));
|
||||
}
|
||||
|
||||
struct timespec* s3fs_realtime(struct timespec& ts)
|
||||
{
|
||||
if(-1 == clock_gettime(static_cast<clockid_t>(CLOCK_REALTIME), &ts)){
|
||||
S3FS_PRN_WARN("failed to clock_gettime by errno(%d)", errno);
|
||||
ts.tv_sec = time(nullptr);
|
||||
ts.tv_nsec = 0;
|
||||
}
|
||||
return &ts;
|
||||
}
|
||||
|
||||
std::string s3fs_str_realtime()
|
||||
{
|
||||
struct timespec ts;
|
||||
return str(*s3fs_realtime(ts));
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// FileTimes Class
|
||||
//-------------------------------------------------------------------
|
||||
void FileTimes::Clear()
|
||||
{
|
||||
ClearCTime();
|
||||
ClearATime();
|
||||
ClearMTime();
|
||||
}
|
||||
|
||||
void FileTimes::Clear(stat_time_type type)
|
||||
{
|
||||
if(stat_time_type::CTIME == type){
|
||||
ft_ctime = {0, UTIME_OMIT};
|
||||
}else if(stat_time_type::ATIME == type){
|
||||
ft_atime = {0, UTIME_OMIT};
|
||||
}else{ // stat_time_type::MTIME
|
||||
ft_mtime = {0, UTIME_OMIT};
|
||||
}
|
||||
}
|
||||
|
||||
const struct timespec& FileTimes::GetTime(stat_time_type type) const
|
||||
{
|
||||
if(stat_time_type::CTIME == type){
|
||||
return ft_ctime;
|
||||
}else if(stat_time_type::ATIME == type){
|
||||
return ft_atime;
|
||||
}else{ // stat_time_type::MTIME
|
||||
return ft_mtime;
|
||||
}
|
||||
}
|
||||
|
||||
void FileTimes::GetTime(stat_time_type type, struct timespec& time) const
|
||||
{
|
||||
if(stat_time_type::CTIME == type){
|
||||
time = ft_ctime;
|
||||
}else if(stat_time_type::ATIME == type){
|
||||
time = ft_atime;
|
||||
}else{ // stat_time_type::MTIME
|
||||
time = ft_mtime;
|
||||
}
|
||||
}
|
||||
|
||||
void FileTimes::ReflectFileTimes(struct stat& st) const
|
||||
{
|
||||
if(!IsOmitCTime()){
|
||||
set_timespec_to_stat(st, stat_time_type::CTIME, ft_ctime);
|
||||
}
|
||||
if(!IsOmitATime()){
|
||||
set_timespec_to_stat(st, stat_time_type::ATIME, ft_atime);
|
||||
}
|
||||
if(!IsOmitMTime()){
|
||||
set_timespec_to_stat(st, stat_time_type::MTIME, ft_mtime);
|
||||
}
|
||||
}
|
||||
|
||||
void FileTimes::SetTime(stat_time_type type, struct timespec time)
|
||||
{
|
||||
if(UTIME_NOW == time.tv_nsec){
|
||||
s3fs_realtime(time);
|
||||
}
|
||||
if(stat_time_type::CTIME == type){
|
||||
ft_ctime = time;
|
||||
}else if(stat_time_type::ATIME == type){
|
||||
ft_atime = time;
|
||||
}else{ // stat_time_type::MTIME
|
||||
ft_mtime = time;
|
||||
}
|
||||
}
|
||||
|
||||
void FileTimes::SetAllNow()
|
||||
{
|
||||
struct timespec time;
|
||||
s3fs_realtime(time);
|
||||
SetAll(time, time, time);
|
||||
}
|
||||
|
||||
void FileTimes::SetAll(const struct stat& stbuf, bool no_omit)
|
||||
{
|
||||
struct timespec ts_ctime;
|
||||
struct timespec ts_atime;
|
||||
struct timespec ts_mtime;
|
||||
set_stat_to_timespec(stbuf, stat_time_type::CTIME, ts_ctime);
|
||||
set_stat_to_timespec(stbuf, stat_time_type::ATIME, ts_atime);
|
||||
set_stat_to_timespec(stbuf, stat_time_type::MTIME, ts_mtime);
|
||||
|
||||
SetAll(ts_ctime, ts_atime, ts_mtime, no_omit);
|
||||
}
|
||||
|
||||
void FileTimes::SetAll(struct timespec ts_ctime, struct timespec ts_atime, struct timespec ts_mtime, bool no_omit)
|
||||
{
|
||||
struct timespec ts_now_time;
|
||||
s3fs_realtime(ts_now_time);
|
||||
|
||||
if(UTIME_NOW == ts_ctime.tv_nsec){
|
||||
SetCTime(ts_now_time);
|
||||
}else if(!no_omit || UTIME_OMIT != ts_ctime.tv_nsec){
|
||||
SetCTime(ts_ctime);
|
||||
}
|
||||
|
||||
if(UTIME_NOW == ts_atime.tv_nsec){
|
||||
SetATime(ts_now_time);
|
||||
}else if(!no_omit || UTIME_OMIT != ts_atime.tv_nsec){
|
||||
SetATime(ts_atime);
|
||||
}
|
||||
|
||||
if(UTIME_NOW == ts_mtime.tv_nsec){
|
||||
SetMTime(ts_now_time);
|
||||
}else if(!no_omit || UTIME_OMIT != ts_mtime.tv_nsec){
|
||||
SetMTime(ts_mtime);
|
||||
}
|
||||
}
|
||||
|
||||
void FileTimes::SetAll(const FileTimes& other, bool no_omit)
|
||||
{
|
||||
if(!no_omit || !other.IsOmitCTime()){
|
||||
SetCTime(other.ctime());
|
||||
}
|
||||
if(!no_omit || !other.IsOmitATime()){
|
||||
SetATime(other.atime());
|
||||
}
|
||||
if(!no_omit || !other.IsOmitMTime()){
|
||||
SetMTime(other.mtime());
|
||||
}
|
||||
}
|
||||
|
||||
bool FileTimes::IsOmit(stat_time_type type) const
|
||||
{
|
||||
if(stat_time_type::CTIME == type){
|
||||
return (UTIME_OMIT == ft_ctime.tv_nsec);
|
||||
}else if(stat_time_type::ATIME == type){
|
||||
return (UTIME_OMIT == ft_atime.tv_nsec);
|
||||
}else{ // stat_time_type::MTIME
|
||||
return (UTIME_OMIT == ft_mtime.tv_nsec);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
120
src/filetimes.h
Normal file
120
src/filetimes.h
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FILETIMES_H_
|
||||
#define S3FS_FILETIMES_H_
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <sys/stat.h>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility for stat time type
|
||||
//-------------------------------------------------------------------
|
||||
enum class stat_time_type : uint8_t {
|
||||
ATIME,
|
||||
MTIME,
|
||||
CTIME
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions for timespecs
|
||||
//-------------------------------------------------------------------
|
||||
bool valid_timespec(const struct timespec& ts);
|
||||
constexpr int compare_timespec(const struct timespec& ts1, const struct timespec& ts2);
|
||||
int compare_timespec(const struct stat& st, stat_time_type type, const struct timespec& ts);
|
||||
void set_timespec_to_stat(struct stat& st, stat_time_type type, const struct timespec& ts);
|
||||
struct timespec* set_stat_to_timespec(const struct stat& st, stat_time_type type, struct timespec& ts);
|
||||
std::string str_stat_time(const struct stat& st, stat_time_type type);
|
||||
struct timespec* s3fs_realtime(struct timespec& ts);
|
||||
std::string s3fs_str_realtime();
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// FileTimes Class
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// In this class, UTIME_OMIT is set when initializing or clearing
|
||||
// internal data.
|
||||
// Also, if UTIME_NOW is specified, the value will be corrected to
|
||||
// the current time and maintained.
|
||||
//
|
||||
class FileTimes
|
||||
{
|
||||
private:
|
||||
struct timespec ft_ctime; // Change time
|
||||
struct timespec ft_atime; // Access time
|
||||
struct timespec ft_mtime; // Modification time
|
||||
|
||||
private:
|
||||
void Clear(stat_time_type type);
|
||||
|
||||
const struct timespec& GetTime(stat_time_type type) const;
|
||||
void GetTime(stat_time_type type, struct timespec& time) const;
|
||||
|
||||
void SetTime(stat_time_type type, struct timespec time);
|
||||
|
||||
bool IsOmit(stat_time_type type) const;
|
||||
|
||||
public:
|
||||
explicit FileTimes() : ft_ctime{0, UTIME_OMIT}, ft_atime{0, UTIME_OMIT}, ft_mtime{0, UTIME_OMIT} {}
|
||||
|
||||
// Clear
|
||||
void Clear();
|
||||
void ClearCTime() { Clear(stat_time_type::CTIME); }
|
||||
void ClearATime() { Clear(stat_time_type::ATIME); }
|
||||
void ClearMTime() { Clear(stat_time_type::MTIME); }
|
||||
|
||||
// Get value
|
||||
const struct timespec& ctime() const { return GetTime(stat_time_type::CTIME); }
|
||||
const struct timespec& atime() const { return GetTime(stat_time_type::ATIME); }
|
||||
const struct timespec& mtime() const { return GetTime(stat_time_type::MTIME); }
|
||||
|
||||
void GetCTime(struct timespec& time) const { GetTime(stat_time_type::CTIME, time); }
|
||||
void GetATime(struct timespec& time) const { GetTime(stat_time_type::ATIME, time); }
|
||||
void GetMTime(struct timespec& time) const { GetTime(stat_time_type::MTIME, time); }
|
||||
|
||||
void ReflectFileTimes(struct stat& st) const;
|
||||
|
||||
// Set value
|
||||
void SetCTime(struct timespec time) { SetTime(stat_time_type::CTIME, time); }
|
||||
void SetATime(struct timespec time) { SetTime(stat_time_type::ATIME, time); }
|
||||
void SetMTime(struct timespec time) { SetTime(stat_time_type::MTIME, time); }
|
||||
|
||||
void SetAllNow();
|
||||
void SetAll(const struct stat& stbuf, bool no_omit = true);
|
||||
void SetAll(struct timespec ts_ctime, struct timespec ts_atime, struct timespec ts_mtime, bool no_omit = true);
|
||||
void SetAll(const FileTimes& other, bool no_omit = true);
|
||||
|
||||
// Check
|
||||
bool IsOmitCTime() const { return IsOmit(stat_time_type::CTIME); }
|
||||
bool IsOmitATime() const { return IsOmit(stat_time_type::ATIME); }
|
||||
bool IsOmitMTime() const { return IsOmit(stat_time_type::MTIME); }
|
||||
};
|
||||
|
||||
#endif // S3FS_FILETIMES_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -18,19 +18,18 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <gcrypt.h>
|
||||
#include <gnutls/gnutls.h>
|
||||
#include <gnutls/crypto.h>
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#include <nettle/md5.h>
|
||||
#include <nettle/sha1.h>
|
||||
#include <nettle/hmac.h>
|
||||
@ -39,415 +38,353 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
{
|
||||
static const char version[] = "GnuTLS(nettle)";
|
||||
static constexpr char version[] = "GnuTLS(nettle)";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
static constexpr char version[] = "GnuTLS(gcrypt)";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
}
|
||||
#ifndef USE_GNUTLS_NETTLE
|
||||
if(nullptr == gcry_check_version(nullptr)){
|
||||
return false;
|
||||
}
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(SHA1_DIGEST_SIZE))){
|
||||
return false;
|
||||
}
|
||||
auto digest = std::make_unique<unsigned char[]>(SHA1_DIGEST_SIZE);
|
||||
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA1_DIGEST_SIZE;
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
|
||||
*digestlen = SHA1_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
|
||||
return false;
|
||||
}
|
||||
auto digest = std::make_unique<unsigned char[]>(SHA256_DIGEST_SIZE);
|
||||
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
|
||||
*digestlen = SHA256_DIGEST_SIZE;
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
|
||||
hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(digest.get()));
|
||||
*digestlen = SHA256_DIGEST_SIZE;
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
|
||||
return false;
|
||||
}
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
|
||||
free(*digest);
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return nullptr;
|
||||
}
|
||||
auto digest = std::make_unique<unsigned char[]>(*digestlen + 1);
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, digest.get())){
|
||||
return nullptr;
|
||||
}
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
|
||||
return false;
|
||||
}
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
|
||||
free(*digest);
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return nullptr;
|
||||
}
|
||||
auto digest = std::make_unique<unsigned char[]>(*digestlen + 1);
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, digest.get())){
|
||||
return nullptr;
|
||||
}
|
||||
return digest;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
#define MD5_DIGEST_LENGTH 16
|
||||
|
||||
size_t get_md5_digest_length(void)
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
struct md5_ctx ctx_md5;
|
||||
md5_init(&ctx_md5);
|
||||
md5_update(&ctx_md5, datalen, data);
|
||||
md5_digest(&ctx_md5, result->size(), result->data());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
struct md5_ctx ctx_md5;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
struct md5_ctx ctx_md5;
|
||||
off_t bytes;
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5_init(&ctx_md5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
md5_update(&ctx_md5, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
md5_digest(&ctx_md5, get_md5_digest_length(), result);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
md5_init(&ctx_md5);
|
||||
|
||||
return result;
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
md5_update(&ctx_md5, bytes, reinterpret_cast<const uint8_t*>(buf.data()));
|
||||
}
|
||||
md5_digest(&ctx_md5, result->size(), result->data());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return false;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
gcry_md_write(ctx_md5, digest->data(), digest->size());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
gcry_md_hd_t ctx_md5;
|
||||
gcry_error_t err;
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return false;
|
||||
}
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_md5);
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf.data(), bytes);
|
||||
}
|
||||
memcpy(result->data(), gcry_md_read(ctx_md5, 0), result->size());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
#define SHA256_DIGEST_LENGTH 32
|
||||
|
||||
size_t get_sha256_digest_length(void)
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
sha256_update(&ctx_sha256, datalen, data);
|
||||
sha256_digest(&ctx_sha256, digest->size(), digest->data());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
struct sha256_ctx ctx_sha256;
|
||||
off_t bytes;
|
||||
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
sha256_update(&ctx_sha256, datalen, data);
|
||||
sha256_digest(&ctx_sha256, *digestlen, *digest);
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
struct sha256_ctx ctx_sha256;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, reinterpret_cast<const uint8_t*>(buf.data()));
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
sha256_digest(&ctx_sha256, result->size(), result->data());
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(digest->data(), gcry_md_read(ctx_sha256, 0), digest->size());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
free(*digest);
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_sha256);
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf.data(), bytes);
|
||||
}
|
||||
memcpy(result->data(), gcry_md_read(ctx_sha256, 0), result->size());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
493
src/metaheader.cpp
Normal file
493
src/metaheader.cpp
Normal file
@ -0,0 +1,493 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <ctime>
|
||||
#include <string>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "filetimes.h"
|
||||
|
||||
static constexpr struct timespec ERROR_TIMESPEC = {-1, 0};
|
||||
static constexpr struct timespec OMIT_TIMESPEC = {0, UTIME_OMIT};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
// Convert a decimal epoch-seconds string, optionally carrying a fractional
// part (e.g. "1712345678.123456789"), into a struct timespec.
static struct timespec cvt_string_to_time(const char *str)
{
    // [NOTE]
    // In rclone, there are cases where ns is set to x-amz-meta-mtime
    // with floating point number. s3fs uses x-amz-meta-mtime by
    // truncating the floating point or less (in seconds or less) to
    // correspond to this.
    //
    // NOTE(review): the digits after '.' are parsed verbatim into tv_nsec
    // (".5" yields 5ns, not 500ms) — confirm this matches what the header
    // producers actually write.
    std::string strmtime;
    long nsec = 0;
    if(str && '\0' != *str){
        strmtime = str;
        std::string::size_type pos = strmtime.find('.', 0);
        if(std::string::npos != pos){
            // Split off the fractional digits and keep only whole seconds.
            nsec = cvt_strtoofft(strmtime.substr(pos + 1).c_str(), /*base=*/ 10);
            strmtime.erase(pos);
        }
    }
    // An empty/NULL input yields {0, 0}.
    struct timespec ts = {static_cast<time_t>(cvt_strtoofft(strmtime.c_str(), /*base=*/ 10)), nsec};
    return ts;
}
|
||||
|
||||
static struct timespec get_time(const headers_t& meta, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.cend() == (iter = meta.find(header))){
|
||||
return ERROR_TIMESPEC;
|
||||
}
|
||||
return cvt_string_to_time((*iter).second.c_str());
|
||||
}
|
||||
|
||||
struct timespec get_mtime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
struct timespec mtime = get_time(meta, "x-amz-meta-mtime");
|
||||
if(0 <= mtime.tv_sec && UTIME_OMIT != mtime.tv_nsec){
|
||||
return mtime;
|
||||
}
|
||||
|
||||
mtime = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
|
||||
if(0 <= mtime.tv_sec && UTIME_OMIT != mtime.tv_nsec){
|
||||
return mtime;
|
||||
}
|
||||
if(overcheck){
|
||||
mtime = {get_lastmodified(meta), 0};
|
||||
return mtime;
|
||||
}
|
||||
return OMIT_TIMESPEC;
|
||||
}
|
||||
|
||||
struct timespec get_ctime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
struct timespec ctime = get_time(meta, "x-amz-meta-ctime");
|
||||
if(0 <= ctime.tv_sec && UTIME_OMIT != ctime.tv_nsec){
|
||||
return ctime;
|
||||
}
|
||||
if(overcheck){
|
||||
ctime = {get_lastmodified(meta), 0};
|
||||
return ctime;
|
||||
}
|
||||
return OMIT_TIMESPEC;
|
||||
}
|
||||
|
||||
struct timespec get_atime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
struct timespec atime = get_time(meta, "x-amz-meta-atime");
|
||||
if(0 <= atime.tv_sec && UTIME_OMIT != atime.tv_nsec){
|
||||
return atime;
|
||||
}
|
||||
if(overcheck){
|
||||
atime = {get_lastmodified(meta), 0};
|
||||
return atime;
|
||||
}
|
||||
return OMIT_TIMESPEC;
|
||||
}
|
||||
|
||||
off_t get_size(const char *s)
|
||||
{
|
||||
return cvt_strtoofft(s, /*base=*/ 10);
|
||||
}
|
||||
|
||||
off_t get_size(const headers_t& meta)
|
||||
{
|
||||
auto iter = meta.find("Content-Length");
|
||||
if(meta.cend() == iter){
|
||||
return 0;
|
||||
}
|
||||
return get_size((*iter).second.c_str());
|
||||
}
|
||||
|
||||
mode_t get_mode(const char *s, int base)
|
||||
{
|
||||
return static_cast<mode_t>(cvt_strtoofft(s, base));
|
||||
}
|
||||
|
||||
// Resolve the st_mode bits for an object from its response headers.
//
// meta     : object headers (s3fs, s3sync and GCS mode headers are probed).
// strpath  : object path; a trailing '/' hints at a directory.
// checkdir : when true, synthesize a file-type bit (S_IFDIR/S_IFREG) if the
//            stored mode carries none; when false, strip dir/reg type bits.
// forcedir : when true (together with checkdir), always mark as directory.
mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir, bool forcedir)
{
    mode_t mode = 0;
    bool isS3sync = false;
    headers_t::const_iterator iter;

    if(meta.cend() != (iter = meta.find("x-amz-meta-mode"))){
        mode = get_mode((*iter).second.c_str());
    }else if(meta.cend() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
        mode = get_mode((*iter).second.c_str());
        isS3sync = true;
    }else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
        mode = get_mode((*iter).second.c_str(), 8);    // GCS stores the mode in octal
    }else{
        // If another tool creates an object without permissions, default to owner
        // read-write and group readable.
        mode = (!strpath.empty() && '/' == *strpath.rbegin()) ? 0750 : 0640;
    }

    // Checking the bitmask, if the last 3 bits are all zero then process as a regular
    // file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO,
    // S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed properly by fuse.
    if(!(mode & S_IFMT)){
        if(!isS3sync){
            if(checkdir){
                if(forcedir){
                    mode |= S_IFDIR;
                }else{
                    if(meta.cend() != (iter = meta.find("Content-Type"))){
                        std::string strConType = (*iter).second;
                        // Leave just the mime type, remove any optional parameters (eg charset)
                        std::string::size_type pos = strConType.find(';');
                        if(std::string::npos != pos){
                            strConType.erase(pos);
                        }
                        if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
                            // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
                            mode |= S_IFDIR;
                        }else if(!strpath.empty() && '/' == *strpath.rbegin()){
                            // Path looks like a directory key; decide by content type.
                            if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
                                mode |= S_IFDIR;
                            }else{
                                if(complement_stat){
                                    // If complement lack stat mode, when the object has '/' character at end of name
                                    // and content type is text/plain and the object's size is 0 or 1, it should be
                                    // directory.
                                    off_t size = get_size(meta);
                                    if(strConType == "text/plain" && (0 == size || 1 == size)){
                                        mode |= S_IFDIR;
                                    }else{
                                        mode |= S_IFREG;
                                    }
                                }else{
                                    mode |= S_IFREG;
                                }
                            }
                        }else{
                            mode |= S_IFREG;
                        }
                    }else{
                        // No Content-Type at all: treat as a regular file.
                        mode |= S_IFREG;
                    }
                }
            }
        }
        // If complement lack stat mode, when it's mode is not set any permission,
        // the object is added minimal mode only for read permission.
        if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
            mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
        }
    }else{
        if(!checkdir){
            // cut dir/reg flag.
            mode &= ~S_IFDIR;
            mode &= ~S_IFREG;
        }
    }
    return mode;
}
|
||||
|
||||
// [NOTE]
|
||||
// Gets a only FMT bit in mode from meta headers.
|
||||
// The processing is almost the same as get_mode().
|
||||
// This function is intended to be used from get_object_attribute().
|
||||
//
|
||||
// Extract only the S_IFMT (file-type) bits implied by the headers, without
// any of the permission-defaulting logic of get_mode(). Used where only the
// object's type (file/dir/symlink/...) matters.
static mode_t convert_meta_to_mode_fmt(const headers_t& meta)
{
    mode_t mode = 0;
    bool isS3sync = false;
    headers_t::const_iterator iter;

    // Probe the mode headers written by s3fs, s3sync and GCS in that order.
    if(meta.cend() != (iter = meta.find("x-amz-meta-mode"))){
        mode = get_mode((*iter).second.c_str());
    }else if(meta.cend() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
        mode = get_mode((*iter).second.c_str());
        isS3sync = true;
    }else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
        mode = get_mode((*iter).second.c_str(), 8);
    }

    // When the stored mode has no type bits, a directory MIME type can still
    // identify the object as a directory (s3sync modes are left untouched).
    if(!(mode & S_IFMT)){
        if(!isS3sync){
            if(meta.cend() != (iter = meta.find("Content-Type"))){
                std::string strConType = (*iter).second;
                // Leave just the mime type, remove any optional parameters (eg charset)
                std::string::size_type pos = strConType.find(';');
                if(std::string::npos != pos){
                    strConType.erase(pos);
                }
                if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
                    // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
                    mode |= S_IFDIR;
                }
            }
        }
    }
    return (mode & S_IFMT);
}
|
||||
|
||||
bool is_reg_fmt(const headers_t& meta)
|
||||
{
|
||||
return S_ISREG(convert_meta_to_mode_fmt(meta));
|
||||
}
|
||||
|
||||
bool is_symlink_fmt(const headers_t& meta)
|
||||
{
|
||||
return S_ISLNK(convert_meta_to_mode_fmt(meta));
|
||||
}
|
||||
|
||||
bool is_dir_fmt(const headers_t& meta)
|
||||
{
|
||||
return S_ISDIR(convert_meta_to_mode_fmt(meta));
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// For directory types, detailed judgment is not possible.
|
||||
// DIR_NORMAL is always returned.
|
||||
//
|
||||
// Objects uploaded using clients other than s3fs have a Content-Type
// of application/unknown and do not have an x-amz-meta-mode header.
|
||||
// In this case, you can specify objtype_t as default_type.
|
||||
//
|
||||
// Classify an object (directory / symlink / file / ...) from its path and
// headers.
//
// strpath      : object key; a trailing '/' or "_$folder$" marker refines
//                the directory sub-type.
// meta         : object headers; the mode header (if any) decides the type.
// default_type : returned for mode-less objects whose Content-Type is
//                "application/unknown" (objects uploaded by other clients).
//
// Returns objtype_t::UNKNOWN when no classification applies.
objtype_t derive_object_type(const std::string& strpath, const headers_t& meta, objtype_t default_type)
{
    mode_t mode = convert_meta_to_mode_fmt(meta);

    if(S_ISDIR(mode)){
        // [FIX] Guard the empty path: dereferencing strpath.rbegin() on an
        // empty string is undefined behavior.
        if(strpath.empty() || '/' != *strpath.rbegin()){
            return objtype_t::DIR_NOT_TERMINATE_SLASH;
        }else if(std::string::npos != strpath.find("_$folder$", 0)){
            return objtype_t::DIR_FOLDER_SUFFIX;
        }else{
            // [NOTE]
            // It returns DIR_NORMAL, although it could be DIR_NOT_EXIST_OBJECT.
            //
            return objtype_t::DIR_NORMAL;
        }
    }else if(S_ISLNK(mode)){
        return objtype_t::SYMLINK;
    }else if(S_ISREG(mode)){
        return objtype_t::FILE;
    }else if(0 == mode){
        // If the x-amz-meta-mode header is not present, mode is 0.
        headers_t::const_iterator iter;
        if(meta.cend() != (iter = meta.find("Content-Type"))){
            std::string strConType = iter->second;
            // Leave just the mime type, remove any optional parameters (eg charset)
            std::string::size_type pos = strConType.find(';');
            if(std::string::npos != pos){
                strConType.erase(pos);
            }
            if(strConType == "application/unknown"){
                return default_type;
            }
        }
    }
    return objtype_t::UNKNOWN;
}
|
||||
|
||||
uid_t get_uid(const char *s)
|
||||
{
|
||||
return static_cast<uid_t>(cvt_strtoofft(s, /*base=*/ 0));
|
||||
}
|
||||
|
||||
uid_t get_uid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.cend() != (iter = meta.find("x-amz-meta-uid"))){
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.cend() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else{
|
||||
return geteuid();
|
||||
}
|
||||
}
|
||||
|
||||
gid_t get_gid(const char *s)
|
||||
{
|
||||
return static_cast<gid_t>(cvt_strtoofft(s, /*base=*/ 0));
|
||||
}
|
||||
|
||||
gid_t get_gid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.cend() != (iter = meta.find("x-amz-meta-gid"))){
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.cend() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.cend() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else{
|
||||
return getegid();
|
||||
}
|
||||
}
|
||||
|
||||
// Number of 512-byte blocks covering "size" bytes (st_blocks convention),
// i.e. the byte count rounded up to whole 512-byte units.
blkcnt_t get_blocks(off_t size)
{
    blkcnt_t blocks = static_cast<blkcnt_t>(size / 512);
    if(0 != (size % 512)){
        ++blocks;
    }
    return blocks;
}
|
||||
|
||||
time_t cvtIAMExpireStringToTime(const char* s)
|
||||
{
|
||||
struct tm tm{};
|
||||
if(!s){
|
||||
return 0L;
|
||||
}
|
||||
s3fs_strptime(s, "%Y-%m-%dT%H:%M:%S", &tm);
|
||||
return timegm(&tm); // GMT
|
||||
}
|
||||
|
||||
time_t get_lastmodified(const char* s)
|
||||
{
|
||||
struct tm tm{};
|
||||
if(!s){
|
||||
return -1;
|
||||
}
|
||||
s3fs_strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm);
|
||||
return timegm(&tm); // GMT
|
||||
}
|
||||
|
||||
time_t get_lastmodified(const headers_t& meta)
|
||||
{
|
||||
auto iter = meta.find("Last-Modified");
|
||||
if(meta.cend() == iter){
|
||||
return -1;
|
||||
}
|
||||
return get_lastmodified((*iter).second.c_str());
|
||||
}
|
||||
|
||||
//
|
||||
// Returns it whether it is an object with need checking in detail.
|
||||
// If this function returns true, the object is possible to be directory
|
||||
// and is needed checking detail(searching sub object).
|
||||
//
|
||||
bool is_need_check_obj_detail(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
|
||||
// directory object is Content-Length as 0.
|
||||
if(0 != get_size(meta)){
|
||||
return false;
|
||||
}
|
||||
// if the object has x-amz-meta information, checking is no more.
|
||||
if(meta.cend() != meta.find("x-amz-meta-mode") ||
|
||||
meta.cend() != meta.find("x-amz-meta-mtime") ||
|
||||
meta.cend() != meta.find("x-amz-meta-ctime") ||
|
||||
meta.cend() != meta.find("x-amz-meta-atime") ||
|
||||
meta.cend() != meta.find("x-amz-meta-uid") ||
|
||||
meta.cend() != meta.find("x-amz-meta-gid") ||
|
||||
meta.cend() != meta.find("x-amz-meta-owner") ||
|
||||
meta.cend() != meta.find("x-amz-meta-group") ||
|
||||
meta.cend() != meta.find("x-amz-meta-permissions") )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// if there is not Content-Type, or Content-Type is "x-directory",
|
||||
// checking is no more.
|
||||
if(meta.cend() == (iter = meta.find("Content-Type"))){
|
||||
return false;
|
||||
}
|
||||
if("application/x-directory" == (*iter).second){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If add_noexist is false and the key does not exist, it will not be added.
|
||||
//
|
||||
// [NOTE]
// If add_noexist is false and the key does not exist, it will not be added.
//
// Returns true when at least one header was copied into base.
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
    bool added = false;
    for(const auto& pair : additional){
        // Skip keys absent from base unless inserting new keys is allowed.
        if(!add_noexist && base.cend() == base.find(pair.first)){
            continue;
        }
        base[pair.first] = pair.second;
        added = true;
    }
    return added;
}
|
||||
|
||||
bool convert_header_to_stat(const std::string& strpath, const headers_t& meta, struct stat& stbuf, bool forcedir)
|
||||
{
|
||||
stbuf = {};
|
||||
|
||||
// set hard link count always 1
|
||||
stbuf.st_nlink = 1; // see fuse FAQ
|
||||
|
||||
// mode
|
||||
stbuf.st_mode = get_mode(meta, strpath, true, forcedir);
|
||||
|
||||
// blocks
|
||||
if(S_ISREG(stbuf.st_mode)){
|
||||
stbuf.st_blocks = get_blocks(stbuf.st_size);
|
||||
}
|
||||
stbuf.st_blksize = 4096;
|
||||
|
||||
// mtime
|
||||
struct timespec mtime = get_mtime(meta);
|
||||
if(mtime.tv_sec < 0){
|
||||
mtime = {0, 0};
|
||||
}
|
||||
set_timespec_to_stat(stbuf, stat_time_type::MTIME, mtime);
|
||||
|
||||
// ctime
|
||||
struct timespec ctime = get_ctime(meta);
|
||||
if(ctime.tv_sec < 0){
|
||||
ctime = {0, 0};
|
||||
}
|
||||
set_timespec_to_stat(stbuf, stat_time_type::CTIME, ctime);
|
||||
|
||||
// atime
|
||||
struct timespec atime = get_atime(meta);
|
||||
if(atime.tv_sec < 0){
|
||||
atime = {0, 0};
|
||||
}
|
||||
set_timespec_to_stat(stbuf, stat_time_type::ATIME, atime);
|
||||
|
||||
// size
|
||||
if(S_ISDIR(stbuf.st_mode)){
|
||||
stbuf.st_size = 4096;
|
||||
}else{
|
||||
stbuf.st_size = get_size(meta);
|
||||
}
|
||||
|
||||
// uid/gid
|
||||
stbuf.st_uid = get_uid(meta);
|
||||
stbuf.st_gid = get_gid(meta);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
70
src/metaheader.h
Normal file
70
src/metaheader.h
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_METAHEADER_H_
|
||||
#define S3FS_METAHEADER_H_
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "types.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// headers_t
|
||||
//-------------------------------------------------------------------
|
||||
typedef std::map<std::string, std::string, case_insensitive_compare_func> headers_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
struct timespec get_mtime(const headers_t& meta, bool overcheck = true);
|
||||
struct timespec get_ctime(const headers_t& meta, bool overcheck = true);
|
||||
struct timespec get_atime(const headers_t& meta, bool overcheck = true);
|
||||
off_t get_size(const char *s);
|
||||
off_t get_size(const headers_t& meta);
|
||||
mode_t get_mode(const char *s, int base = 0);
|
||||
mode_t get_mode(const headers_t& meta, const std::string& strpath, bool checkdir = false, bool forcedir = false);
|
||||
bool is_reg_fmt(const headers_t& meta);
|
||||
bool is_symlink_fmt(const headers_t& meta);
|
||||
bool is_dir_fmt(const headers_t& meta);
|
||||
objtype_t derive_object_type(const std::string& strpath, const headers_t& meta, objtype_t default_type = objtype_t::UNKNOWN);
|
||||
uid_t get_uid(const char *s);
|
||||
uid_t get_uid(const headers_t& meta);
|
||||
gid_t get_gid(const char *s);
|
||||
gid_t get_gid(const headers_t& meta);
|
||||
blkcnt_t get_blocks(off_t size);
|
||||
time_t cvtIAMExpireStringToTime(const char* s);
|
||||
time_t get_lastmodified(const char* s);
|
||||
time_t get_lastmodified(const headers_t& meta);
|
||||
bool is_need_check_obj_detail(const headers_t& meta);
|
||||
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
|
||||
bool convert_header_to_stat(const std::string& strpath, const headers_t& meta, struct stat& stbuf, bool forcedir = false);
|
||||
|
||||
#endif // S3FS_METAHEADER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
156
src/mpu_util.cpp
Normal file
156
src/mpu_util.cpp
Normal file
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "mpu_util.h"
|
||||
#include "curl.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_threadreqs.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
utility_incomp_type utility_mode = utility_incomp_type::NO_UTILITY_MODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
// Print a human-readable listing of incomplete multipart uploads to stdout.
// Used by the "list" utility mode; prints a placeholder line when the list
// is empty.
static void print_incomp_mpu_list(const incomp_mpu_list_t& list)
{
    printf("\n");
    printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
    printf("\n");

    if(!list.empty()){
        printf("---------------------------------------------------------------\n");

        int cnt = 0;
        for(auto iter = list.cbegin(); iter != list.cend(); ++iter, ++cnt){
            printf(" Path : %s\n", (*iter).key.c_str());
            printf(" UploadId : %s\n", (*iter).id.c_str());
            printf(" Date : %s\n", (*iter).date.c_str());
            printf("\n");
        }
        printf("---------------------------------------------------------------\n");

    }else{
        printf("There is no list.\n");
    }
}
|
||||
|
||||
// Abort the incomplete multipart uploads in "list".
//
// abort_time : minimum age in seconds an upload must have before it is
//              aborted; 0 means abort everything regardless of age.
//
// Entries with an unparsable (non-ISO8601) date are skipped with a debug
// message. Returns false if any abort request failed; an empty list is
// trivially a success.
static bool abort_incomp_mpu_list(const incomp_mpu_list_t& list, time_t abort_time)
{
    if(list.empty()){
        return true;
    }
    time_t now_time = time(nullptr);

    // do removing.
    bool result = true;
    for(auto iter = list.cbegin(); iter != list.cend(); ++iter){
        const char* tpath = (*iter).key.c_str();
        std::string upload_id = (*iter).id;

        if(0 != abort_time){ // abort_time is 0, it means all.
            time_t date = 0;
            if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){
                S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath);
                continue;
            }
            // Keep uploads that are still younger than the cutoff.
            if(now_time <= (date + abort_time)){
                continue;
            }
        }

        // Best effort: continue with the remaining entries even on failure.
        if(0 != abort_multipart_upload_request(tpath, upload_id)){
            S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath);
            result = false;
        }else{
            printf("Succeed to remove %s multipart uploading object.\n", tpath);
        }
    }
    return result;
}
|
||||
|
||||
// Entry point for the utility modes (list/abort incomplete multipart
// uploads), selected via the global utility_mode.
//
// abort_time : passed through to abort_incomp_mpu_list() in abort mode.
//
// Fetches the multipart upload list from the bucket, parses the XML
// response, then either prints the list or aborts matching uploads.
// Always tears down the global SSL state before returning.
// Returns EXIT_SUCCESS or EXIT_FAILURE.
int s3fs_utility_processing(time_t abort_time)
{
    if(utility_incomp_type::NO_UTILITY_MODE == utility_mode){
        return EXIT_FAILURE;
    }
    printf("\n*** s3fs run as utility mode.\n\n");

    S3fsCurl s3fscurl;
    std::string body;
    int result = EXIT_SUCCESS;
    if(0 != s3fscurl.MultipartListRequest(body)){
        S3FS_PRN_EXIT("Could not get list multipart upload.\nThere is no incomplete multipart uploaded object in bucket.\n");
        result = EXIT_FAILURE;
    }else{
        // parse result(incomplete multipart upload information)
        S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());

        // unique_ptr with xmlFreeDoc as deleter guarantees the document is
        // released on every exit path.
        std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", nullptr, 0), xmlFreeDoc);
        if(nullptr == doc){
            S3FS_PRN_DBG("xmlReadMemory exited with error.");
            result = EXIT_FAILURE;

        }else{
            // make incomplete uploads list
            incomp_mpu_list_t list;
            if(!get_incomp_mpu_list(doc.get(), list)){
                S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
                result = EXIT_FAILURE;

            }else{
                if(utility_incomp_type::INCOMP_TYPE_LIST == utility_mode){
                    // print list
                    print_incomp_mpu_list(list);
                }else if(utility_incomp_type::INCOMP_TYPE_ABORT == utility_mode){
                    // remove
                    if(!abort_incomp_mpu_list(list, abort_time)){
                        S3FS_PRN_DBG("an error occurred during removal process.");
                        result = EXIT_FAILURE;
                    }
                }
            }
        }
    }

    // ssl
    s3fs_destroy_global_ssl();

    return result;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
66
src/mpu_util.h
Normal file
66
src/mpu_util.h
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MPU_UTIL_H_
|
||||
#define S3FS_MPU_UTIL_H_
|
||||
|
||||
#include <cstdint>
|
||||
#include <ctime>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct incomplete_multipart_upload_info
|
||||
{
|
||||
std::string key;
|
||||
std::string id;
|
||||
std::string date;
|
||||
}INCOMP_MPU_INFO;
|
||||
|
||||
typedef std::vector<INCOMP_MPU_INFO> incomp_mpu_list_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// enum for utility process mode
|
||||
//-------------------------------------------------------------------
|
||||
enum class utility_incomp_type : uint8_t {
|
||||
NO_UTILITY_MODE = 0, // not utility mode
|
||||
INCOMP_TYPE_LIST, // list of incomplete mpu
|
||||
INCOMP_TYPE_ABORT // delete incomplete mpu
|
||||
};
|
||||
|
||||
extern utility_incomp_type utility_mode;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
int s3fs_utility_processing(time_t abort_time);
|
||||
|
||||
#endif // S3FS_MPU_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
330
src/nss_auth.cpp
330
src/nss_auth.cpp
@ -18,15 +18,14 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <nss.h>
|
||||
#include <pk11pub.h>
|
||||
#include <hasht.h>
|
||||
@ -35,253 +34,218 @@
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "NSS";
|
||||
static constexpr char version[] = "NSS";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
NSS_Init(NULL);
|
||||
NSS_NoDB_Init(NULL);
|
||||
return true;
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
|
||||
if(SECSuccess != NSS_NoDB_Init(nullptr)){
|
||||
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
PR_Cleanup();
|
||||
return true;
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
PR_Cleanup();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
SECStatus SecStatus;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, nullptr, 0};
|
||||
|
||||
if(NULL == (Slot = PK11_GetInternalKeySlot())){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
if(nullptr == (Slot = PK11_GetInternalKeySlot())){
|
||||
return nullptr;
|
||||
}
|
||||
if(nullptr == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, nullptr))){
|
||||
PK11_FreeSlot(Slot);
|
||||
return nullptr;
|
||||
}
|
||||
if(nullptr == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != (SecStatus = PK11_DigestBegin(Context)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestOp(Context, data, datalen)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest))) )
|
||||
{
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != PK11_DigestBegin(Context) ||
|
||||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
|
||||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
|
||||
{
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return nullptr;
|
||||
}
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
return false;
|
||||
}
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen))){
|
||||
return false;
|
||||
}
|
||||
memcpy(*digest, tmpdigest, *digestlen);
|
||||
auto digest = std::make_unique<unsigned char[]>(*digestlen);
|
||||
memcpy(digest.get(), tmpdigest, *digestlen);
|
||||
|
||||
return true;
|
||||
return digest;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result)
|
||||
{
|
||||
return MD5_LENGTH;
|
||||
PK11Context* md5ctx;
|
||||
unsigned int md5outlen;
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
PK11_DigestOp(md5ctx, data, datalen);
|
||||
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
PK11Context* md5ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int md5outlen;
|
||||
PK11Context* md5ctx;
|
||||
off_t bytes;
|
||||
unsigned int md5outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<unsigned char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return false;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf.data(), bytes);
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
PK11_DigestFinal(md5ctx, result->data(), &md5outlen, result->size());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
return SHA256_LENGTH;
|
||||
}
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
*digestlen = sha256outlen;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
PK11Context* sha256ctx;
|
||||
unsigned char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int sha256outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, digest->data(), &sha256outlen, digest->size());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
return result;
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
PK11Context* sha256ctx;
|
||||
off_t bytes;
|
||||
unsigned int sha256outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<unsigned char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return false;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf.data(), bytes);
|
||||
}
|
||||
PK11_DigestFinal(sha256ctx, result->data(), &sha256outlen, result->size());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -289,6 +253,6 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -18,57 +18,60 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#ifdef __clang__
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
|
||||
#endif
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/md5.h>
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/crypto.h>
|
||||
#include <openssl/err.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <thread>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
using namespace std;
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "OpenSSL";
|
||||
static constexpr char version[] = "OpenSSL";
|
||||
|
||||
return version;
|
||||
return version;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
ERR_load_crypto_strings();
|
||||
ERR_load_BIO_strings();
|
||||
OpenSSL_add_all_algorithms();
|
||||
return true;
|
||||
ERR_load_crypto_strings();
|
||||
|
||||
// [NOTE]
|
||||
// OpenSSL 3.0 loads error strings automatically so these functions are not needed.
|
||||
//
|
||||
#ifndef USE_OPENSSL_30
|
||||
ERR_load_BIO_strings();
|
||||
#endif
|
||||
|
||||
OpenSSL_add_all_algorithms();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
return true;
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -77,276 +80,298 @@ bool s3fs_destroy_global_ssl(void)
|
||||
// internal use struct for openssl
|
||||
struct CRYPTO_dynlock_value
|
||||
{
|
||||
pthread_mutex_t dyn_mutex;
|
||||
std::mutex dyn_mutex;
|
||||
};
|
||||
|
||||
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
static std::unique_ptr<std::mutex[]> s3fs_crypt_mutex;
|
||||
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused)) NO_THREAD_SAFETY_ANALYSIS;
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
if(mode & CRYPTO_LOCK){
|
||||
pthread_mutex_lock(&s3fs_crypt_mutex[pos]);
|
||||
}else{
|
||||
pthread_mutex_unlock(&s3fs_crypt_mutex[pos]);
|
||||
if(s3fs_crypt_mutex){
|
||||
if(mode & CRYPTO_LOCK){
|
||||
s3fs_crypt_mutex[pos].lock();
|
||||
}else{
|
||||
s3fs_crypt_mutex[pos].unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long s3fs_crypt_get_threadid(void)
|
||||
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
|
||||
static unsigned long s3fs_crypt_get_threadid()
|
||||
{
|
||||
// For FreeBSD etc, some system's pthread_t is structure pointer.
|
||||
// Then we use cast like C style(not C++) instead of ifdef.
|
||||
return (unsigned long)(pthread_self());
|
||||
return static_cast<unsigned long>(std::hash<std::thread::id>()(std::this_thread::get_id()));
|
||||
}
|
||||
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
|
||||
{
|
||||
struct CRYPTO_dynlock_value* dyndata;
|
||||
|
||||
if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
|
||||
S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
|
||||
return NULL;
|
||||
}
|
||||
pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
|
||||
return dyndata;
|
||||
return new CRYPTO_dynlock_value();
|
||||
}
|
||||
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused)) NO_THREAD_SAFETY_ANALYSIS;
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
if(mode & CRYPTO_LOCK){
|
||||
pthread_mutex_lock(&(dyndata->dyn_mutex));
|
||||
}else{
|
||||
pthread_mutex_unlock(&(dyndata->dyn_mutex));
|
||||
if(dyndata){
|
||||
if(mode & CRYPTO_LOCK){
|
||||
dyndata->dyn_mutex.lock();
|
||||
}else{
|
||||
dyndata->dyn_mutex.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
pthread_mutex_destroy(&(dyndata->dyn_mutex));
|
||||
free(dyndata);
|
||||
}
|
||||
delete dyndata;
|
||||
}
|
||||
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not nullptr, destroy it.");
|
||||
|
||||
// cppcheck-suppress unmatchedSuppression
|
||||
// cppcheck-suppress knownConditionTrueFalse
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
|
||||
S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
|
||||
return false;
|
||||
}
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
pthread_mutex_init(&s3fs_crypt_mutex[cnt], NULL);
|
||||
}
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
|
||||
// dynamic lock
|
||||
CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex);
|
||||
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock);
|
||||
CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex);
|
||||
s3fs_crypt_mutex = std::make_unique<std::mutex[]>(CRYPTO_num_locks());
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
|
||||
// dynamic lock
|
||||
CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex);
|
||||
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock);
|
||||
CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex);
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
if(!s3fs_crypt_mutex){
|
||||
if(!s3fs_crypt_mutex){
|
||||
return true;
|
||||
}
|
||||
|
||||
CRYPTO_set_dynlock_destroy_callback(nullptr);
|
||||
CRYPTO_set_dynlock_lock_callback(nullptr);
|
||||
CRYPTO_set_dynlock_create_callback(nullptr);
|
||||
CRYPTO_set_id_callback(nullptr);
|
||||
CRYPTO_set_locking_callback(nullptr);
|
||||
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
s3fs_crypt_mutex.reset();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
CRYPTO_set_dynlock_destroy_callback(NULL);
|
||||
CRYPTO_set_dynlock_lock_callback(NULL);
|
||||
CRYPTO_set_dynlock_create_callback(NULL);
|
||||
CRYPTO_set_id_callback(NULL);
|
||||
CRYPTO_set_locking_callback(NULL);
|
||||
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
free(s3fs_crypt_mutex);
|
||||
s3fs_crypt_mutex = NULL;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
static std::unique_ptr<unsigned char[]> s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
if(NULL == ((*digest) = (unsigned char*)malloc(*digestlen))){
|
||||
return false;
|
||||
}
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(!key || !data || !digestlen){
|
||||
return nullptr;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
auto digest = std::make_unique<unsigned char[]>(*digestlen);
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, static_cast<int>(keylen), data, datalen, digest.get(), digestlen);
|
||||
}
|
||||
MD5_Update(&md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
MD5_Final(result, &md5ctx);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
return digest;
|
||||
}
|
||||
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, false);
|
||||
}
|
||||
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digestlen, true);
|
||||
}
|
||||
|
||||
#ifdef USE_OPENSSL_30
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5 (OpenSSL >= 3.0)
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// OpenSSL 3.0 deprecated the MD5_*** low-level encryption functions,
|
||||
// so we should use the high-level EVP API instead.
|
||||
//
|
||||
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
auto digestlen = static_cast<unsigned int>(digest->size());
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("md5");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
auto md5_digest_len = static_cast<unsigned int>(result->size());
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
// instead of MD5_Init
|
||||
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> mdctx(EVP_MD_CTX_new(), EVP_MD_CTX_free);
|
||||
EVP_DigestInit_ex(mdctx.get(), EVP_md5(), nullptr);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
// instead of MD5_Update
|
||||
EVP_DigestUpdate(mdctx.get(), buf.data(), bytes);
|
||||
}
|
||||
|
||||
// instead of MD5_Final
|
||||
EVP_DigestFinal_ex(mdctx.get(), result->data(), &md5_digest_len);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#else
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5 (OpenSSL < 3.0)
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
// TODO: Does this fail on OpenSSL < 3.0 and we need to use MD5_CTX functions?
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* digest)
|
||||
{
|
||||
unsigned int digestlen = digest->size();
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("md5");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
off_t bytes;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
MD5_Update(&md5ctx, buf.data(), bytes);
|
||||
}
|
||||
|
||||
MD5_Final(result->data(), &md5ctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest)
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, nullptr);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
auto digestlen = static_cast<unsigned int>(digest->size());
|
||||
EVP_DigestFinal_ex(mdctx, digest->data(), &digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result)
|
||||
{
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
off_t bytes;
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, NULL);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
char buf[512];
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
if(-1 == fd){
|
||||
return false;
|
||||
}
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
|
||||
memset(buf, 0, 512);
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
S3FS_PRN_ERR("fstat error(%d)", errno);
|
||||
return false;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, nullptr);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
std::array<char, 512> buf;
|
||||
bytes = std::min(static_cast<off_t>(buf.size()), (size - total));
|
||||
bytes = pread(fd, buf.data(), bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return false;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf.data(), bytes);
|
||||
}
|
||||
EVP_DigestFinal_ex(sha256ctx, result->data(), nullptr);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
}
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
return result;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -354,6 +379,6 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
124
src/psemaphore.h
Normal file
124
src/psemaphore.h
Normal file
@ -0,0 +1,124 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SEMAPHORE_H_
|
||||
#define S3FS_SEMAPHORE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class Semaphore
|
||||
//-------------------------------------------------------------------
|
||||
#if __cplusplus >= 202002L
|
||||
|
||||
#include <semaphore>
|
||||
typedef std::counting_semaphore<INT_MAX> Semaphore;
|
||||
|
||||
#else
|
||||
|
||||
// portability wrapper for sem_t since macOS does not implement it
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <dispatch/dispatch.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
|
||||
~Semaphore()
|
||||
{
|
||||
// macOS cannot destroy a semaphore with posts less than the initializer
|
||||
for(int i = 0; i < value; ++i){
|
||||
release();
|
||||
}
|
||||
dispatch_release(sem);
|
||||
}
|
||||
Semaphore(const Semaphore&) = delete;
|
||||
Semaphore(Semaphore&&) = delete;
|
||||
Semaphore& operator=(const Semaphore&) = delete;
|
||||
Semaphore& operator=(Semaphore&&) = delete;
|
||||
|
||||
void acquire() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
|
||||
bool try_acquire()
|
||||
{
|
||||
if(0 == dispatch_semaphore_wait(sem, DISPATCH_TIME_NOW)){
|
||||
return true;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
void release() { dispatch_semaphore_signal(sem); }
|
||||
|
||||
private:
|
||||
int value;
|
||||
dispatch_semaphore_t sem;
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
#include <cerrno>
|
||||
#include <semaphore.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) { sem_init(&mutex, 0, value); }
|
||||
~Semaphore() { sem_destroy(&mutex); }
|
||||
Semaphore(const Semaphore&) = delete;
|
||||
Semaphore(Semaphore&&) = delete;
|
||||
Semaphore& operator=(const Semaphore&) = delete;
|
||||
Semaphore& operator=(Semaphore&&) = delete;
|
||||
|
||||
void acquire()
|
||||
{
|
||||
int r;
|
||||
do {
|
||||
r = sem_wait(&mutex);
|
||||
} while (r == -1 && errno == EINTR);
|
||||
}
|
||||
|
||||
bool try_acquire()
|
||||
{
|
||||
int result;
|
||||
do{
|
||||
result = sem_trywait(&mutex);
|
||||
}while(result == -1 && errno == EINTR);
|
||||
|
||||
return (0 == result);
|
||||
}
|
||||
|
||||
void release() { sem_post(&mutex); }
|
||||
|
||||
private:
|
||||
sem_t mutex;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif // S3FS_SEMAPHORE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
10088
src/s3fs.cpp
10088
src/s3fs.cpp
File diff suppressed because it is too large
Load Diff
77
src/s3fs.h
77
src/s3fs.h
@ -17,80 +17,29 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#ifndef S3FS_S3_H_
|
||||
#define S3FS_S3_H_
|
||||
|
||||
#ifndef S3FS_S3FS_H_
|
||||
#define S3FS_S3FS_H_
|
||||
|
||||
#define FUSE_USE_VERSION 26
|
||||
#define FIVE_GB 5368709120LL
|
||||
|
||||
#include <fuse.h>
|
||||
|
||||
#define S3FS_FUSE_EXIT() { \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}
|
||||
#define S3FS_FUSE_EXIT() \
|
||||
do{ \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
//
|
||||
// s3fs use many small allocated chunk in heap area for
|
||||
// stats cache and parsing xml, etc. The OS may decide
|
||||
// that giving this little memory back to the kernel
|
||||
// will cause too much overhead and delay the operation.
|
||||
// So s3fs calls malloc_trim function to really get the
|
||||
// memory back. Following macros is prepared for that
|
||||
// your system does not have it.
|
||||
//
|
||||
// Address of gratitude, this workaround quotes a document
|
||||
// of libxml2.
|
||||
// http://xmlsoft.org/xmlmem.html
|
||||
//
|
||||
#ifdef HAVE_MALLOC_TRIM
|
||||
|
||||
#include <malloc.h>
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str)
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) \
|
||||
{ \
|
||||
xmlFreeDoc(doc); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
#define S3FS_XMLFREE(ptr) \
|
||||
{ \
|
||||
xmlFree(ptr); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
|
||||
{ \
|
||||
xmlXPathFreeContext(ctx); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) \
|
||||
{ \
|
||||
xmlXPathFreeObject(obj); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str) \
|
||||
fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
|
||||
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)
|
||||
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
|
||||
#endif // S3FS_S3_H_
|
||||
#endif // S3FS_S3FS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -17,12 +17,18 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_AUTH_H_
|
||||
#define S3FS_AUTH_H_
|
||||
|
||||
#include <array>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <sys/types.h>
|
||||
|
||||
typedef std::array<unsigned char, 16> md5_t;
|
||||
typedef std::array<unsigned char, 32> sha256_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for Authentication
|
||||
//-------------------------------------------------------------------
|
||||
@ -30,24 +36,22 @@
|
||||
// in common_auth.cpp
|
||||
//
|
||||
std::string s3fs_get_content_md5(int fd);
|
||||
std::string s3fs_md5sum(int fd, off_t start, ssize_t size);
|
||||
std::string s3fs_sha256sum(int fd, off_t start, ssize_t size);
|
||||
std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size);
|
||||
|
||||
//
|
||||
// in xxxxxx_auth.cpp
|
||||
//
|
||||
const char* s3fs_crypt_lib_name(void);
|
||||
bool s3fs_init_global_ssl(void);
|
||||
bool s3fs_destroy_global_ssl(void);
|
||||
bool s3fs_init_crypt_mutex(void);
|
||||
bool s3fs_destroy_crypt_mutex(void);
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_md5_digest_length(void);
|
||||
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size);
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_sha256_digest_length(void);
|
||||
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size);
|
||||
const char* s3fs_crypt_lib_name();
|
||||
bool s3fs_init_global_ssl();
|
||||
bool s3fs_destroy_global_ssl();
|
||||
bool s3fs_init_crypt_mutex();
|
||||
bool s3fs_destroy_crypt_mutex();
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
|
||||
std::unique_ptr<unsigned char[]> s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned int* digestlen);
|
||||
bool s3fs_md5(const unsigned char* data, size_t datalen, md5_t* result);
|
||||
bool s3fs_md5_fd(int fd, off_t start, off_t size, md5_t* result);
|
||||
bool s3fs_sha256(const unsigned char* data, size_t datalen, sha256_t* digest);
|
||||
bool s3fs_sha256_fd(int fd, off_t start, off_t size, sha256_t* result);
|
||||
|
||||
#endif // S3FS_AUTH_H_
|
||||
|
||||
@ -56,6 +60,6 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size);
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
1601
src/s3fs_cred.cpp
Normal file
1601
src/s3fs_cred.cpp
Normal file
File diff suppressed because it is too large
Load Diff
201
src/s3fs_cred.h
Normal file
201
src/s3fs_cred.h
Normal file
@ -0,0 +1,201 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CRED_H_
|
||||
#define S3FS_CRED_H_
|
||||
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_extcred.h"
|
||||
#include "types.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
|
||||
//------------------------------------------------
|
||||
// class S3fsCred
|
||||
//------------------------------------------------
|
||||
// This is a class for operating and managing Credentials(accesskey,
|
||||
// secret key, tokens, etc.) used by S3fs.
|
||||
// Operations related to Credentials are aggregated in this class.
|
||||
//
|
||||
// cppcheck-suppress ctuOneDefinitionRuleViolation ; for stub in test_curl_util.cpp
|
||||
class S3fsCred
|
||||
{
|
||||
private:
|
||||
static constexpr char ALLBUCKET_FIELDS_TYPE[] = ""; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static constexpr char KEYVAL_FIELDS_TYPE[] = "\t"; // special key for mapping(This name is absolutely not used as a bucket name)
|
||||
static constexpr char AWS_ACCESSKEYID[] = "AWSAccessKeyId";
|
||||
static constexpr char AWS_SECRETKEY[] = "AWSSecretKey";
|
||||
|
||||
static constexpr int IAM_EXPIRE_MERGING = 20 * 60; // update timing
|
||||
static constexpr char ECS_IAM_ENV_VAR[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
|
||||
static constexpr char IAMCRED_ACCESSKEYID[] = "AccessKeyId";
|
||||
static constexpr char IAMCRED_SECRETACCESSKEY[] = "SecretAccessKey";
|
||||
static constexpr char IAMCRED_ROLEARN[] = "RoleArn";
|
||||
|
||||
static std::string bucket_name;
|
||||
|
||||
mutable std::mutex token_lock;
|
||||
|
||||
std::string passwd_file;
|
||||
std::string aws_profile;
|
||||
|
||||
bool load_iamrole;
|
||||
|
||||
std::string AWSAccessKeyId GUARDED_BY(token_lock);
|
||||
std::string AWSSecretAccessKey GUARDED_BY(token_lock);
|
||||
std::string AWSAccessToken GUARDED_BY(token_lock);
|
||||
time_t AWSAccessTokenExpire GUARDED_BY(token_lock);
|
||||
|
||||
bool is_ecs;
|
||||
bool is_use_session_token;
|
||||
bool is_ibm_iam_auth;
|
||||
|
||||
std::string IAM_cred_url;
|
||||
int IAM_api_version GUARDED_BY(token_lock);
|
||||
std::string IAMv2_api_token GUARDED_BY(token_lock);
|
||||
size_t IAM_field_count;
|
||||
std::string IAM_token_field;
|
||||
std::string IAM_expiry_field;
|
||||
std::string IAM_role GUARDED_BY(token_lock);
|
||||
|
||||
bool set_builtin_cred_opts; // true if options other than "credlib" is set
|
||||
std::string credlib; // credlib(name or path)
|
||||
std::string credlib_opts; // options for credlib
|
||||
|
||||
void* hExtCredLib;
|
||||
fp_VersionS3fsCredential pFuncCredVersion;
|
||||
fp_InitS3fsCredential pFuncCredInit;
|
||||
fp_FreeS3fsCredential pFuncCredFree;
|
||||
fp_UpdateS3fsCredential pFuncCredUpdate;
|
||||
|
||||
public:
|
||||
static constexpr char IAMv2_token_url[] = "http://169.254.169.254/latest/api/token";
|
||||
static constexpr int IAMv2_token_ttl = 21600;
|
||||
static constexpr char IAMv2_token_ttl_hdr[] = "X-aws-ec2-metadata-token-ttl-seconds";
|
||||
static constexpr char IAMv2_token_hdr[] = "X-aws-ec2-metadata-token";
|
||||
|
||||
private:
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
|
||||
bool SetS3fsPasswdFile(const char* file);
|
||||
bool IsSetPasswdFile() const;
|
||||
bool SetAwsProfileName(const char* profile_name);
|
||||
bool SetIAMRoleMetadataType(bool flag);
|
||||
|
||||
bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey) REQUIRES(S3fsCred::token_lock);
|
||||
bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken) REQUIRES(S3fsCred::token_lock);
|
||||
bool IsSetAccessKeys() const REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
bool SetIsECS(bool flag);
|
||||
bool SetIsUseSessionToken(bool flag);
|
||||
|
||||
bool SetIsIBMIAMAuth(bool flag);
|
||||
|
||||
int SetIMDSVersionHasLock(int version) REQUIRES(S3fsCred::token_lock);
|
||||
int SetIMDSVersion(int version)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(token_lock);
|
||||
return SetIMDSVersionHasLock(version);
|
||||
}
|
||||
int GetIMDSVersion() const REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
bool SetIAMv2APITokenHasLock(const std::string& token) REQUIRES(S3fsCred::token_lock);
|
||||
const std::string& GetIAMv2APIToken() const REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
bool SetIAMRole(const char* role) REQUIRES(S3fsCred::token_lock);
|
||||
const std::string& GetIAMRoleHasLock() const REQUIRES(S3fsCred::token_lock);
|
||||
const std::string& GetIAMRole() const
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(token_lock);
|
||||
return GetIAMRoleHasLock();
|
||||
}
|
||||
bool IsSetIAMRole() const REQUIRES(S3fsCred::token_lock);
|
||||
size_t SetIAMFieldCount(size_t field_count);
|
||||
std::string SetIAMCredentialsURL(const char* url);
|
||||
std::string SetIAMTokenField(const char* token_field);
|
||||
std::string SetIAMExpiryField(const char* expiry_field);
|
||||
|
||||
bool IsReadableS3fsPasswdFile() const;
|
||||
bool CheckS3fsPasswdFilePerms();
|
||||
bool ParseS3fsPasswdFile(bucketkvmap_t& resmap);
|
||||
bool ReadS3fsPasswdFile() REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
static int CheckS3fsCredentialAwsFormat(const kvmap_t& kvmap, std::string& access_key_id, std::string& secret_access_key);
|
||||
bool ReadAwsCredentialFile(const std::string &filename) REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
bool InitialS3fsCredentials() REQUIRES(S3fsCred::token_lock);
|
||||
bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval) const;
|
||||
|
||||
bool GetIAMCredentialsURL(std::string& url, bool check_iam_role) REQUIRES(S3fsCred::token_lock);
|
||||
bool LoadIAMCredentials() REQUIRES(S3fsCred::token_lock);
|
||||
bool SetIAMCredentials(const char* response) REQUIRES(S3fsCred::token_lock);
|
||||
bool SetIAMRoleFromMetaData(const char* response);
|
||||
|
||||
bool SetExtCredLib(const char* arg);
|
||||
bool IsSetExtCredLib() const;
|
||||
bool SetExtCredLibOpts(const char* args);
|
||||
bool IsSetExtCredLibOpts() const;
|
||||
|
||||
bool InitExtCredLib();
|
||||
bool LoadExtCredLib();
|
||||
bool UnloadExtCredLib();
|
||||
bool UpdateExtCredentials() REQUIRES(S3fsCred::token_lock);
|
||||
|
||||
static bool CheckForbiddenBucketParams();
|
||||
|
||||
public:
|
||||
static bool SetBucket(const std::string& bucket);
|
||||
static const std::string& GetBucket();
|
||||
|
||||
S3fsCred();
|
||||
~S3fsCred();
|
||||
S3fsCred(const S3fsCred&) = delete;
|
||||
S3fsCred(S3fsCred&&) = delete;
|
||||
S3fsCred& operator=(const S3fsCred&) = delete;
|
||||
S3fsCred& operator=(S3fsCred&&) = delete;
|
||||
|
||||
bool IsIBMIAMAuth() const { return is_ibm_iam_auth; }
|
||||
|
||||
bool LoadIAMRoleFromMetaData();
|
||||
|
||||
bool CheckIAMCredentialUpdate(std::string* access_key_id = nullptr, std::string* secret_access_key = nullptr, std::string* access_token = nullptr);
|
||||
const char* GetCredFuncVersion(bool detail) const;
|
||||
|
||||
int DetectParam(const char* arg);
|
||||
bool CheckAllParams();
|
||||
};
|
||||
|
||||
#endif // S3FS_CRED_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
144
src/s3fs_extcred.h
Normal file
144
src/s3fs_extcred.h
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_EXTCRED_H_
|
||||
#define S3FS_EXTCRED_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Attributes(weak) : use only in s3fs-fuse internally
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// This macro is only used inside s3fs-fuse.
|
||||
// External projects that utilize this header file substitute empty
|
||||
//values as follows:
|
||||
//
|
||||
#ifndef S3FS_FUNCATTR_WEAK
|
||||
#define S3FS_FUNCATTR_WEAK
|
||||
#endif
|
||||
|
||||
extern "C" {
|
||||
//-------------------------------------------------------------------
|
||||
// Prototype for External Credential 4 functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// [Required] VersionS3fsCredential
|
||||
//
|
||||
// Returns the library name and version as a string.
|
||||
//
|
||||
extern const char* VersionS3fsCredential(bool detail) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Optional] InitS3fsCredential
|
||||
//
|
||||
// A function that does the necessary initialization after the library is
|
||||
// loaded. This function is called only once immediately after loading the
|
||||
// library.
|
||||
// If there is a required initialization inside the library, implement it.
|
||||
// Implementation of this function is optional and not required. If not
|
||||
// implemented, it will not be called.
|
||||
//
|
||||
// const char* popts : String passed with the credlib_opts option. If the
|
||||
// credlib_opts option is not specified, nullptr will be
|
||||
// passed.
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
// If this pointer is not nullptr, you can allocate memory
|
||||
// and set an error message to it. The allocated memory
|
||||
// area is freed by the caller.
|
||||
//
|
||||
extern bool InitS3fsCredential(const char* popts, char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Optional] FreeS3fsCredential
|
||||
//
|
||||
// A function that is called only once just before the library is unloaded.
|
||||
// If there is a required discard process in the library, implement it.
|
||||
// Implementation of this feature is optional and not required.
|
||||
// If not implemented, it will not be called.
|
||||
//
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
// If this pointer is not nullptr, you can allocate memory
|
||||
// and set an error message to it. The allocated memory
|
||||
// area is freed by the caller.
|
||||
//
|
||||
extern bool FreeS3fsCredential(char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//
|
||||
// [Required] UpdateS3fsCredential
|
||||
//
|
||||
// A function that updates the token.
|
||||
//
|
||||
// char** ppaccess_key_id : Allocate and set "Access Key ID" string
|
||||
// area to *ppaccess_key_id.
|
||||
// char** ppsecret_access_key : Allocate and set "Access Secret Key ID"
|
||||
// string area to *ppsecret_access_key.
|
||||
// char** ppaccess_token : Allocate and set "Token" string area to
|
||||
// *ppaccess_token.
|
||||
// long long* ptoken_expire : Set token expire time(time_t) value to
|
||||
// *ptoken_expire.
|
||||
// This is essentially a time_t* variable.
|
||||
// To avoid system differences about time_t
|
||||
// size, long long* is used.
|
||||
// When setting the value, cast from time_t
|
||||
// to long long to set the value.
|
||||
// char** pperrstr : pperrstr is used to pass the error message to the
|
||||
// caller when an error occurs.
|
||||
//
|
||||
// For all argument of the character string pointer(char **) set the
|
||||
// allocated string area. The allocated area is freed by the caller.
|
||||
//
|
||||
extern bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr) S3FS_FUNCATTR_WEAK;
|
||||
|
||||
//---------------------------------------------------------
|
||||
// Typedef Prototype function
|
||||
//---------------------------------------------------------
|
||||
//
|
||||
// const char* VersionS3fsCredential()
|
||||
//
|
||||
typedef const char* (*fp_VersionS3fsCredential)(bool detail);
|
||||
|
||||
//
|
||||
// bool InitS3fsCredential(char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_InitS3fsCredential)(const char* popts, char** pperrstr);
|
||||
|
||||
//
|
||||
// bool FreeS3fsCredential(char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_FreeS3fsCredential)(char** pperrstr);
|
||||
|
||||
//
|
||||
// bool UpdateS3fsCredential(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr)
|
||||
//
|
||||
typedef bool (*fp_UpdateS3fsCredential)(char** ppaccess_key_id, char** ppsecret_access_key, char** ppaccess_token, long long* ptoken_expire, char** pperrstr);
|
||||
|
||||
} // extern "C"
|
||||
|
||||
#endif // S3FS_EXTCRED_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
58
src/s3fs_global.cpp
Normal file
58
src/s3fs_global.cpp
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
bool foreground = false;
|
||||
bool nomultipart = false;
|
||||
bool pathrequeststyle = false;
|
||||
bool complement_stat = false;
|
||||
bool noxmlns = false;
|
||||
std::string program_name;
|
||||
std::string service_path = "/";
|
||||
std::string s3host = "https://s3.amazonaws.com";
|
||||
std::string region = "us-east-1";
|
||||
std::string cipher_suites;
|
||||
std::string instance_name;
|
||||
|
||||
std::atomic<long long unsigned> num_requests_head_object;
|
||||
std::atomic<long long unsigned> num_requests_put_object;
|
||||
std::atomic<long long unsigned> num_requests_get_object;
|
||||
std::atomic<long long unsigned> num_requests_delete_object;
|
||||
std::atomic<long long unsigned> num_requests_list_bucket;
|
||||
std::atomic<long long unsigned> num_requests_mpu_initiate;
|
||||
std::atomic<long long unsigned> num_requests_mpu_complete;
|
||||
std::atomic<long long unsigned> num_requests_mpu_abort;
|
||||
std::atomic<long long unsigned> num_requests_mpu_upload_part;
|
||||
std::atomic<long long unsigned> num_requests_mpu_copy_part;
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
675
src/s3fs_help.cpp
Normal file
675
src/s3fs_help.cpp
Normal file
@ -0,0 +1,675 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Contents
|
||||
//-------------------------------------------------------------------
|
||||
static constexpr char help_string[] =
|
||||
"\n"
|
||||
"Mount an Amazon S3 bucket as a file system.\n"
|
||||
"\n"
|
||||
"Usage:\n"
|
||||
" mounting\n"
|
||||
" s3fs bucket[:/path] mountpoint [options]\n"
|
||||
" s3fs mountpoint [options (must specify bucket= option)]\n"
|
||||
"\n"
|
||||
" unmounting\n"
|
||||
" umount mountpoint\n"
|
||||
"\n"
|
||||
" General forms for s3fs and FUSE/mount options:\n"
|
||||
" -o opt[,opt...]\n"
|
||||
" -o opt [-o opt] ...\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs --incomplete-mpu-list (-u) bucket\n"
|
||||
" s3fs --incomplete-mpu-abort[=all | =<date format>] bucket\n"
|
||||
"\n"
|
||||
"s3fs Options:\n"
|
||||
"\n"
|
||||
" Most s3fs options are given in the form where \"opt\" is:\n"
|
||||
"\n"
|
||||
" <option_name>=<option_value>\n"
|
||||
"\n"
|
||||
" bucket\n"
|
||||
" - if it is not specified bucket name (and path) in command line,\n"
|
||||
" must specify this option after -o option for bucket name.\n"
|
||||
"\n"
|
||||
" default_acl (default=\"private\")\n"
|
||||
" - the default canned acl to apply to all written s3 objects,\n"
|
||||
" e.g., private, public-read. see\n"
|
||||
" https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n"
|
||||
" for the full list of canned ACLs\n"
|
||||
"\n"
|
||||
" retries (default=\"5\")\n"
|
||||
" - number of times to retry a failed S3 transaction\n"
|
||||
"\n"
|
||||
" tmpdir (default=\"/tmp\")\n"
|
||||
" - local folder for temporary files.\n"
|
||||
"\n"
|
||||
" use_cache (default=\"\" which means disabled)\n"
|
||||
" - local folder to use for local file cache\n"
|
||||
"\n"
|
||||
" check_cache_dir_exist (default is disable)\n"
|
||||
" - if use_cache is set, check if the cache directory exists.\n"
|
||||
" If this option is not specified, it will be created at runtime\n"
|
||||
" when the cache directory does not exist.\n"
|
||||
"\n"
|
||||
" del_cache (delete local file cache)\n"
|
||||
" - delete local file cache when s3fs starts and exits.\n"
|
||||
"\n"
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, onezone_ia, reduced_redundancy,\n"
|
||||
" intelligent_tiering, glacier, glacier_ir, and deep_archive.\n"
|
||||
"\n"
|
||||
" use_rrs (default is disable)\n"
|
||||
" - use Amazon's Reduced Redundancy Storage.\n"
|
||||
" this option can not be specified with use_sse.\n"
|
||||
" (can specify use_rrs=1 for old version)\n"
|
||||
" this option has been replaced by new storage_class option.\n"
|
||||
"\n"
|
||||
" use_sse (default is disable)\n"
|
||||
" - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n"
|
||||
" SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n"
|
||||
" keys, SSE-C uses customer-provided encryption keys, and\n"
|
||||
" SSE-KMS uses the master key which you manage in AWS KMS.\n"
|
||||
" You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n"
|
||||
" type (use_sse=1 is old type parameter).\n"
|
||||
" Case of setting SSE-C, you can specify \"use_sse=custom\",\n"
|
||||
" \"use_sse=custom:<custom key file path>\" or\n"
|
||||
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
|
||||
" specified is old type parameter). You can use \"c\" for\n"
|
||||
" short \"custom\".\n"
|
||||
" The custom key file must be 600 permission. The file can\n"
|
||||
" have some lines, each line is one SSE-C key. The first line\n"
|
||||
" in file is used as Customer-Provided Encryption Keys for\n"
|
||||
" uploading and changing headers etc. If there are some keys\n"
|
||||
" after first line, those are used downloading object which\n"
|
||||
" are encrypted by not first key. So that, you can keep all\n"
|
||||
" SSE-C keys in file, that is SSE-C key history.\n"
|
||||
" If you specify \"custom\" (\"c\") without file path, you\n"
|
||||
" need to set custom key by load_sse_c option or AWSSSECKEYS\n"
|
||||
" environment. (AWSSSECKEYS environment has some SSE-C keys\n"
|
||||
" with \":\" separator.) This option is used to decide the\n"
|
||||
" SSE type. So that if you do not want to encrypt a object\n"
|
||||
" object at uploading, but you need to decrypt encrypted\n"
|
||||
" object at downloading, you can use load_sse_c option instead\n"
|
||||
" of this option.\n"
|
||||
" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
|
||||
" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
|
||||
" If you san specify SSE-KMS type with your <kms id> in AWS\n"
|
||||
" KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n"
|
||||
" specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n"
|
||||
" environment which value is <kms id>. You must be careful\n"
|
||||
" about that you can not use the KMS id which is not same EC2\n"
|
||||
" region.\n"
|
||||
" Additionally, if you specify SSE-KMS, your endpoints must use\n"
|
||||
" Secure Sockets Layer(SSL) or Transport Layer Security(TLS).\n"
|
||||
"\n"
|
||||
" load_sse_c - specify SSE-C keys\n"
|
||||
" Specify the custom-provided encryption keys file path for decrypting\n"
|
||||
" at downloading.\n"
|
||||
" If you use the custom-provided encryption key at uploading, you\n"
|
||||
" specify with \"use_sse=custom\". The file has many lines, one line\n"
|
||||
" means one custom key. So that you can keep all SSE-C keys in file,\n"
|
||||
" that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"
|
||||
" file contents.\n"
|
||||
"\n"
|
||||
" public_bucket (default=\"\" which means disabled)\n"
|
||||
" - anonymously mount a public bucket when set to 1, ignores the \n"
|
||||
" $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
|
||||
" S3 does not allow copy object api for anonymous users, then\n"
|
||||
" s3fs sets nocopyapi option automatically when public_bucket=1\n"
|
||||
" option is specified.\n"
|
||||
"\n"
|
||||
" passwd_file (default=\"\")\n"
|
||||
" - specify which s3fs password file to use\n"
|
||||
"\n"
|
||||
" ahbe_conf (default=\"\" which means disabled)\n"
|
||||
" - This option specifies the configuration file path which\n"
|
||||
" file is the additional HTTP header by file (object) extension.\n"
|
||||
" The configuration file format is below:\n"
|
||||
" -----------\n"
|
||||
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
|
||||
" file suffix = file (object) suffix, if this field is empty,\n"
|
||||
" it means \"reg:(.*)\".(=all object).\n"
|
||||
" regex = regular expression to match the file (object) path.\n"
|
||||
" this type starts with \"reg:\" prefix.\n"
|
||||
" HTTP-header = additional HTTP header name\n"
|
||||
" HTTP-values = additional HTTP header value\n"
|
||||
" -----------\n"
|
||||
" Sample:\n"
|
||||
" -----------\n"
|
||||
" .gz Content-Encoding gzip\n"
|
||||
" .Z Content-Encoding compress\n"
|
||||
" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
|
||||
" -----------\n"
|
||||
" A sample configuration file is uploaded in \"test\" directory.\n"
|
||||
" If you specify this option for set \"Content-Encoding\" HTTP \n"
|
||||
" header, please take care for RFC 2616.\n"
|
||||
"\n"
|
||||
" profile (default=\"default\")\n"
|
||||
" - Choose a profile from ${HOME}/.aws/credentials to authenticate\n"
|
||||
" against S3. Note that this format matches the AWS CLI format and\n"
|
||||
" differs from the s3fs passwd format.\n"
|
||||
"\n"
|
||||
" connect_timeout (default=\"300\" seconds)\n"
|
||||
" - time to wait for connection before giving up\n"
|
||||
"\n"
|
||||
" readwrite_timeout (default=\"120\" seconds)\n"
|
||||
" - time to wait between read/write activity before giving up\n"
|
||||
"\n"
|
||||
" list_object_max_keys (default=\"1000\")\n"
|
||||
" - specify the maximum number of keys returned by S3 list object\n"
|
||||
" API. The default is 1000. you can set this value to 1000 or more.\n"
|
||||
"\n"
|
||||
" max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
|
||||
" - maximum number of entries in the stat cache, and this maximum is\n"
|
||||
" also treated as the number of symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_expire (default is 900))\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache.\n"
|
||||
" This expire time indicates the time since stat cached.\n"
|
||||
"\n"
|
||||
" stat_cache_interval_expire (default is 900)\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache.\n"
|
||||
" This expire time is based on the time from the last access time\n"
|
||||
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
|
||||
" and is left for compatibility with older versions.\n"
|
||||
"\n"
|
||||
" enable_negative_cache (default is enabled negative cache)\n"
|
||||
" - This option will keep non-existence of objects in a stat cache.\n"
|
||||
" When this negative cache is enabled, it will not process extra\n"
|
||||
" HeadObject requests to search for non-existent objects, improving\n"
|
||||
" performance.\n"
|
||||
" This feature is enabled by default, so there is no need to specify\n"
|
||||
" it.\n"
|
||||
"\n"
|
||||
" disable_negative_cache (default is enabled negative cache)\n"
|
||||
" - By default, s3fs keeps non-existent objects in the stat cache.\n"
|
||||
" This option disables this negative caching.\n"
|
||||
" This prevents delays in updates due to cache retention.\n"
|
||||
" However, it may increase the number of HeadObject requests to check\n"
|
||||
" if an object exists, which may decrease performance.\n"
|
||||
"\n"
|
||||
" no_check_certificate\n"
|
||||
" - server certificate won't be checked against the available \n"
|
||||
" certificate authorities.\n"
|
||||
"\n"
|
||||
" ssl_verify_hostname (default=\"2\")\n"
|
||||
" - When 0, do not verify the SSL certificate against the hostname.\n"
|
||||
"\n"
|
||||
" ssl_client_cert (default=\"\")\n"
|
||||
" - Specify an SSL client certificate.\n"
|
||||
" Specify this optional parameter in the following format:\n"
|
||||
" \"<SSL Cert>[:<Cert Type>[:<Private Key>[:<Key Type>\n"
|
||||
" [:<Password>]]]]\"\n"
|
||||
" <SSL Cert>: Client certificate.\n"
|
||||
" Specify the file path or NickName(for NSS, etc.).\n"
|
||||
" <Cert Type>: Type of certificate, default is \"PEM\"(optional).\n"
|
||||
" <Private Key>: Certificate's private key file(optional).\n"
|
||||
" <Key Type>: Type of private key, default is \"PEM\"(optional).\n"
|
||||
" <Password>: Passphrase of the private key(optional).\n"
|
||||
" It is also possible to omit this value and specify\n"
|
||||
" it using the environment variable\n"
|
||||
" \"S3FS_SSL_PRIVKEY_PASSWORD\".\n"
|
||||
"\n"
|
||||
" nodnscache (disable DNS cache)\n"
|
||||
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
|
||||
"\n"
|
||||
" nosscache (disable SSL session cache)\n"
|
||||
" - s3fs is always using SSL session cache, this option make SSL \n"
|
||||
" session cache disable.\n"
|
||||
"\n"
|
||||
" multipart_size (default=\"10\")\n"
|
||||
" - part size, in MB, for each multipart request.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
"\n"
|
||||
" multipart_copy_size (default=\"512\")\n"
|
||||
" - part size, in MB, for each multipart copy request, used for\n"
|
||||
" renames and mixupload.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
" Must be at least 512 MB to copy the maximum 5 TB object size\n"
|
||||
" but lower values may improve performance.\n"
|
||||
"\n"
|
||||
" max_dirty_data (default=\"5120\")\n"
|
||||
" - flush dirty data to S3 after a certain number of MB written.\n"
|
||||
" The minimum value is 50 MB. -1 value means disable.\n"
|
||||
" Cannot be used with nomixupload.\n"
|
||||
"\n"
|
||||
" bucket_size (default=maximum long unsigned integer value)\n"
|
||||
" - The size of the bucket with which the corresponding\n"
|
||||
" elements of the statvfs structure will be filled. The option\n"
|
||||
" argument is an integer optionally followed by a\n"
|
||||
" multiplicative suffix (GB, GiB, TB, TiB, PB, PiB,\n"
|
||||
" EB, EiB) (no spaces in between). If no suffix is supplied,\n"
|
||||
" bytes are assumed; eg: 20000000, 30GB, 45TiB. Note that\n"
|
||||
" s3fs does not compute the actual volume size (too\n"
|
||||
" expensive): by default it will assume the maximum possible\n"
|
||||
" size; however, since this may confuse other software which\n"
|
||||
" uses s3fs, the advertised bucket size can be set with this\n"
|
||||
" option.\n"
|
||||
"\n"
|
||||
" ensure_diskfree (default 0)\n"
|
||||
" - sets MB to ensure disk free space. This option means the\n"
|
||||
" threshold of free space size on disk which is used for the\n"
|
||||
" cache file by s3fs. s3fs makes file for\n"
|
||||
" downloading, uploading and caching files. If the disk free\n"
|
||||
" space is smaller than this value, s3fs do not use disk space\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" free_space_ratio (default=\"10\")\n"
|
||||
" - sets min free space ratio of the disk.\n"
|
||||
" The value of this option can be between 0 and 100. It will control\n"
|
||||
" the size of the cache according to this ratio to ensure that the\n"
|
||||
" idle ratio of the disk is greater than this value.\n"
|
||||
" For example, when the disk space is 50GB, the default value will\n"
|
||||
" ensure that the disk will reserve at least 50GB * 10%% = 5GB of\n"
|
||||
" remaining space.\n"
|
||||
"\n"
|
||||
" multipart_threshold (default=\"25\")\n"
|
||||
" - threshold, in MB, to use multipart upload instead of\n"
|
||||
" single-part. Must be at least 5 MB.\n"
|
||||
"\n"
|
||||
" singlepart_copy_limit (default=\"512\")\n"
|
||||
" - maximum size, in MB, of a single-part copy before trying \n"
|
||||
" multipart copy.\n"
|
||||
"\n"
|
||||
" host (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - Set a non-Amazon host, e.g., https://example.com.\n"
|
||||
"\n"
|
||||
" servicepath (default=\"/\")\n"
|
||||
" - Set a service path when the non-Amazon host requires a prefix.\n"
|
||||
"\n"
|
||||
" url (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
|
||||
" then you can set \"url=http://s3.amazonaws.com\".\n"
|
||||
" If you do not use https, please specify the URL with the url\n"
|
||||
" option.\n"
|
||||
"\n"
|
||||
" region (default=\"us-east-1\")\n"
|
||||
" - sets the region to use on signature version 4\n"
|
||||
" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
|
||||
" the default. If the s3fs could not connect to the region specified\n"
|
||||
" by this option, s3fs could not run. But if you do not specify this\n"
|
||||
" option, and if you can not connect with the default region, s3fs\n"
|
||||
" will retry to automatically connect to the other region. So s3fs\n"
|
||||
" can know the correct region name, because s3fs can find it in an\n"
|
||||
" error from the S3 server.\n"
|
||||
" You can also specify the legacy -o endpoint which means the same thing.\n"
|
||||
"\n"
|
||||
" sigv2 (default is signature version 4 falling back to version 2)\n"
|
||||
" - sets signing AWS requests by using only signature version 2\n"
|
||||
"\n"
|
||||
" sigv4 (default is signature version 4 falling back to version 2)\n"
|
||||
" - sets signing AWS requests by using only signature version 4\n"
|
||||
"\n"
|
||||
" mp_umask (default is \"0000\")\n"
|
||||
" - sets umask for the mount point directory.\n"
|
||||
" If allow_other option is not set, s3fs allows access to the mount\n"
|
||||
" point only to the owner. In the opposite case s3fs allows access\n"
|
||||
" to all users as the default. But if you set the allow_other with\n"
|
||||
" this option, you can control the permissions of the\n"
|
||||
" mount point by this option like umask.\n"
|
||||
"\n"
|
||||
" umask (default is \"0000\")\n"
|
||||
" - sets umask for files under the mountpoint. This can allow\n"
|
||||
" users other than the mounting user to read and write to files\n"
|
||||
" that they did not create.\n"
|
||||
"\n"
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" streamupload (default is disable)\n"
|
||||
" - Enable stream upload.\n"
|
||||
" If this option is enabled, a sequential upload will be performed\n"
|
||||
" in parallel with the write from the part that has been written\n"
|
||||
" during a multipart upload.\n"
|
||||
" This is expected to give better performance than other upload\n"
|
||||
" functions.\n"
|
||||
" Note that this option is still experimental and may change in the\n"
|
||||
" future.\n"
|
||||
"\n"
|
||||
" max_thread_count (default is \"10\")\n"
|
||||
" - This value is the maximum number of parallel requests to be\n"
|
||||
" sent, and the number of parallel processes for head requests,\n"
|
||||
" multipart uploads and stream uploads.\n"
|
||||
" Worker threads will be started to process requests according to\n"
|
||||
" this value.\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" - Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" enable_unsigned_payload (default is disable)\n"
|
||||
" - Do not calculate Content-SHA256 for PutObject and UploadPart\n"
|
||||
" payloads. This can reduce CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" ecs (default is disable)\n"
|
||||
" - This option instructs s3fs to query the ECS container credential\n"
|
||||
" metadata address instead of the instance metadata address.\n"
|
||||
"\n"
|
||||
" iam_role (default is no IAM role)\n"
|
||||
" - This option requires the IAM role name or \"auto\". If you specify\n"
|
||||
" \"auto\", s3fs will automatically use the IAM role names that are set\n"
|
||||
" to an instance. If you specify this option without any argument, it\n"
|
||||
" is the same as that you have specified the \"auto\".\n"
|
||||
"\n"
|
||||
" imdsv1only (default is to use IMDSv2 with fallback to v1)\n"
|
||||
" - AWS instance metadata service, used with IAM role authentication,\n"
|
||||
" supports the use of an API token. If you're using an IAM role\n"
|
||||
" in an environment that does not support IMDSv2, setting this flag\n"
|
||||
" will skip retrieval and usage of the API token when retrieving\n"
|
||||
" IAM credentials.\n"
|
||||
"\n"
|
||||
" ibm_iam_auth (default is not using IBM IAM authentication)\n"
|
||||
" - This option instructs s3fs to use IBM IAM authentication.\n"
|
||||
" In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
|
||||
" IBM's Service-Instance-ID and APIKey, respectively.\n"
|
||||
"\n"
|
||||
" ibm_iam_endpoint (default is https://iam.cloud.ibm.com)\n"
|
||||
" - sets the URL to use for IBM IAM authentication.\n"
|
||||
"\n"
|
||||
" credlib (default=\"\" which means disabled)\n"
|
||||
" - Specifies the shared library that handles the credentials\n"
|
||||
" containing the authentication token.\n"
|
||||
" If this option is specified, the specified credential and token\n"
|
||||
" processing provided by the shared library ant will be performed\n"
|
||||
" instead of the built-in credential processing.\n"
|
||||
" This option cannot be specified with passwd_file, profile,\n"
|
||||
" use_session_token, ecs, ibm_iam_auth, ibm_iam_endpoint, imdsv1only\n"
|
||||
" and iam_role option.\n"
|
||||
"\n"
|
||||
" credlib_opts (default=\"\" which means disabled)\n"
|
||||
" - Specifies the options to pass when the shared library specified\n"
|
||||
" in credlib is loaded and then initialized.\n"
|
||||
" For the string specified in this option, specify the string defined\n"
|
||||
" by the shared library.\n"
|
||||
"\n"
|
||||
" use_xattr (default is not handling the extended attribute)\n"
|
||||
" Enable to handle the extended attribute (xattrs).\n"
|
||||
" If you set this option, you can use the extended attribute.\n"
|
||||
" For example, encfs and ecryptfs need to support the extended attribute.\n"
|
||||
" Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
|
||||
" copy command with preserve=mode.\n"
|
||||
"\n"
|
||||
" noxmlns (disable registering xml name space)\n"
|
||||
" disable registering xml name space for response of \n"
|
||||
" ListBucketResult and ListVersionsResult etc. Default name \n"
|
||||
" space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n"
|
||||
" This option should not be specified now, because s3fs looks up\n"
|
||||
" xmlns automatically after v1.66.\n"
|
||||
"\n"
|
||||
" nomixupload (disable copy in multipart uploads)\n"
|
||||
" Disable to use PUT (copy api) when multipart uploading large size objects.\n"
|
||||
" By default, when doing multipart upload, the range of unchanged data\n"
|
||||
" will use PUT (copy api) whenever possible.\n"
|
||||
" When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n"
|
||||
" invalidated even if this option is not specified.\n"
|
||||
"\n"
|
||||
" nocopyapi (for other incomplete compatibility object storage)\n"
|
||||
" Enable compatibility with S3-like APIs which do not support\n"
|
||||
" PUT (copy api).\n"
|
||||
" If you set this option, s3fs do not use PUT with \n"
|
||||
" \"x-amz-copy-source\" (copy api). Because traffic is increased\n"
|
||||
" 2-3 times by this option, we do not recommend this.\n"
|
||||
"\n"
|
||||
" norenameapi (for other incomplete compatibility object storage)\n"
|
||||
" Enable compatibility with S3-like APIs which do not support\n"
|
||||
" PUT (copy api).\n"
|
||||
" This option is a subset of nocopyapi option. The nocopyapi\n"
|
||||
" option does not use copy-api for all command (ex. chmod, chown,\n"
|
||||
" touch, mv, etc), but this option does not use copy-api for\n"
|
||||
" only rename command (ex. mv). If this option is specified with\n"
|
||||
" nocopyapi, then s3fs ignores it.\n"
|
||||
"\n"
|
||||
" use_path_request_style (use legacy API calling style)\n"
|
||||
" Enable compatibility with S3-like APIs which do not support\n"
|
||||
" the virtual-host request style, by using the older path request\n"
|
||||
" style.\n"
|
||||
"\n"
|
||||
" listobjectsv2 (use ListObjectsV2)\n"
|
||||
" Issue ListObjectsV2 instead of ListObjects, useful on object\n"
|
||||
" stores without ListObjects support.\n"
|
||||
"\n"
|
||||
" noua (suppress User-Agent header)\n"
|
||||
" Usually s3fs outputs of the User-Agent in \"s3fs/<version> (commit\n"
|
||||
" hash <hash>; <using ssl library name>)\" format.\n"
|
||||
" If this option is specified, s3fs suppresses the output of the\n"
|
||||
" User-Agent.\n"
|
||||
"\n"
|
||||
" cipher_suites\n"
|
||||
" Customize the list of TLS cipher suites.\n"
|
||||
" Expects a colon separated list of cipher suite names.\n"
|
||||
" A list of available cipher suites, depending on your TLS engine,\n"
|
||||
" can be found on the CURL library documentation:\n"
|
||||
" https://curl.haxx.se/docs/ssl-ciphers.html\n"
|
||||
"\n"
|
||||
" instance_name - The instance name of the current s3fs mountpoint.\n"
|
||||
" This name will be added to logging messages and user agent headers sent by s3fs.\n"
|
||||
"\n"
|
||||
" complement_stat (complement lack of file/directory mode)\n"
|
||||
" s3fs complements lack of information about file/directory mode\n"
|
||||
" if a file or a directory object does not have x-amz-meta-mode\n"
|
||||
" header. As default, s3fs does not complements stat information\n"
|
||||
" for a object, then the object will not be able to be allowed to\n"
|
||||
" list/modify.\n"
|
||||
"\n"
|
||||
" compat_dir (enable support of alternative directory names)\n"
|
||||
" s3fs supports two different naming schemas \"dir/\" and\n"
|
||||
" \"dir\" to map directory names to S3 objects and\n"
|
||||
" vice versa by default. As a third variant, directories can be\n"
|
||||
" determined indirectly if there is a file object with a path (e.g.\n"
|
||||
" \"/dir/file\") but without the parent directory.\n"
|
||||
" This option enables a fourth variant, \"dir_$folder$\", created by\n"
|
||||
" older applications.\n"
|
||||
" \n"
|
||||
" S3fs uses only the first schema \"dir/\" to create S3 objects for\n"
|
||||
" directories."
|
||||
" \n"
|
||||
" The support for these different naming schemas causes an increased\n"
|
||||
" communication effort.\n"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid UTF-8. But some\n"
|
||||
" clients, notably Windows NFS clients, use their own encoding.\n"
|
||||
" This option re-encodes invalid UTF-8 object names into valid\n"
|
||||
" UTF-8 by mapping offending codes into a 'private' codepage of the\n"
|
||||
" Unicode set.\n"
|
||||
" Useful on clients not using UTF-8 as their file system encoding.\n"
|
||||
"\n"
|
||||
" use_session_token - indicate that session token should be provided.\n"
|
||||
" If credentials are provided by environment variables this switch\n"
|
||||
" forces presence check of AWSSESSIONTOKEN variable.\n"
|
||||
" Otherwise an error is returned.\n"
|
||||
"\n"
|
||||
" requester_pays (default is disable)\n"
|
||||
" This option instructs s3fs to enable requests involving\n"
|
||||
" Requester Pays buckets.\n"
|
||||
" It includes the 'x-amz-request-payer=requester' entry in the\n"
|
||||
" request header.\n"
|
||||
"\n"
|
||||
" mime (default is \"/etc/mime.types\")\n"
|
||||
" Specify the path of the mime.types file.\n"
|
||||
" If this option is not specified, the existence of \"/etc/mime.types\"\n"
|
||||
" is checked, and that file is loaded as mime information.\n"
|
||||
" If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n"
|
||||
" is checked as well.\n"
|
||||
"\n"
|
||||
" proxy (default=\"\")\n"
|
||||
" This option specifies a proxy to S3 server.\n"
|
||||
" Specify the proxy with '[<scheme://]hostname(fqdn)[:<port>]' formatted.\n"
|
||||
" '<schema>://' can be omitted, and 'http://' is used when omitted.\n"
|
||||
" Also, ':<port>' can also be omitted. If omitted, port 443 is used for\n"
|
||||
" HTTPS schema, and port 1080 is used otherwise.\n"
|
||||
" This option is the same as the curl command's '--proxy(-x)' option and\n"
|
||||
" libcurl's 'CURLOPT_PROXY' flag.\n"
|
||||
" This option is equivalent to and takes precedence over the environment\n"
|
||||
" variables 'http_proxy', 'all_proxy', etc.\n"
|
||||
"\n"
|
||||
" proxy_cred_file (default=\"\")\n"
|
||||
" This option specifies the file that describes the username and\n"
|
||||
" passphrase for authentication of the proxy when the HTTP schema\n"
|
||||
" proxy is specified by the 'proxy' option.\n"
|
||||
" Username and passphrase are valid only for HTTP schema. If the HTTP\n"
|
||||
" proxy does not require authentication, this option is not required.\n"
|
||||
" Separate the username and passphrase with a ':' character and\n"
|
||||
" specify each as a URL-encoded string.\n"
|
||||
"\n"
|
||||
" ipresolve (default=\"whatever\")\n"
|
||||
" Select what type of IP addresses to use when establishing a\n"
|
||||
" connection.\n"
|
||||
" Default('whatever') can use addresses of all IP versions(IPv4 and\n"
|
||||
" IPv6) that your system allows. If you specify 'IPv4', only IPv4\n"
|
||||
" addresses are used. And when 'IPv6'is specified, only IPv6 addresses\n"
|
||||
" will be used.\n"
|
||||
"\n"
|
||||
" logfile - specify the log output file.\n"
|
||||
" s3fs outputs the log file to syslog. Alternatively, if s3fs is\n"
|
||||
" started with the \"-f\" option specified, the log will be output\n"
|
||||
" to the stdout/stderr.\n"
|
||||
" You can use this option to specify the log file that s3fs outputs.\n"
|
||||
" If you specify a log file with this option, it will reopen the log\n"
|
||||
" file when s3fs receives a SIGHUP signal. You can use the SIGHUP\n"
|
||||
" signal for log rotation.\n"
|
||||
"\n"
|
||||
" dbglevel (default=\"crit\")\n"
|
||||
" Set the debug message level. set value as crit (critical), err\n"
|
||||
" (error), warn (warning), info (information) to debug level.\n"
|
||||
" default debug level is critical. If s3fs run with \"-d\" option,\n"
|
||||
" the debug level is set information. When s3fs catch the signal\n"
|
||||
" SIGUSR2, the debug level is bump up.\n"
|
||||
"\n"
|
||||
" curldbg - put curl debug message\n"
|
||||
" Put the debug message from libcurl when this option is specified.\n"
|
||||
" Specify \"normal\" or \"body\" for the parameter.\n"
|
||||
" If the parameter is omitted, it is the same as \"normal\".\n"
|
||||
" If \"body\" is specified, some API communication body data will be\n"
|
||||
" output in addition to the debug message output as \"normal\".\n"
|
||||
"\n"
|
||||
" no_time_stamp_msg - no time stamp in debug message\n"
|
||||
" The time stamp is output to the debug message by default.\n"
|
||||
" If this option is specified, the time stamp will not be output\n"
|
||||
" in the debug message.\n"
|
||||
" It is the same even if the environment variable \"S3FS_MSGTIMESTAMP\"\n"
|
||||
" is set to \"no\".\n"
|
||||
"\n"
|
||||
" set_check_cache_sigusr1 (default is stdout)\n"
|
||||
" If the cache is enabled, you can check the integrity of the\n"
|
||||
" cache file and the cache file's stats info file.\n"
|
||||
" This option is specified and when sending the SIGUSR1 signal\n"
|
||||
" to the s3fs process checks the cache status at that time.\n"
|
||||
" This option can take a file path as parameter to output the\n"
|
||||
" check result to that file. The file path parameter can be omitted.\n"
|
||||
" If omitted, the result will be output to stdout or syslog.\n"
|
||||
"\n"
|
||||
" update_parent_dir_stat (default is disable)\n"
|
||||
" The parent directory's mtime and ctime are updated when a file or\n"
|
||||
" directory is created or deleted (when the parent directory's inode is\n"
|
||||
" updated).\n"
|
||||
" By default, parent directory statistics are not updated.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
" supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n"
|
||||
" noatime, sync async, dirsync). Filesystems are mounted with\n"
|
||||
" '-onodev,nosuid' by default, which can only be overridden by a\n"
|
||||
" privileged user.\n"
|
||||
" \n"
|
||||
" There are many FUSE specific mount options that can be specified.\n"
|
||||
" e.g. allow_other See the FUSE's README for the full set.\n"
|
||||
"\n"
|
||||
"Utility mode Options:\n"
|
||||
"\n"
|
||||
" -u, --incomplete-mpu-list\n"
|
||||
" Lists multipart incomplete objects uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" --incomplete-mpu-abort (=all or =<date format>)\n"
|
||||
" Delete the multipart incomplete object uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" If \"all\" is specified for this option, all multipart incomplete\n"
|
||||
" objects will be deleted. If you specify no argument as an option,\n"
|
||||
" objects older than 24 hours (24H) will be deleted (This is the\n"
|
||||
" default value). You can specify an optional date format. It can\n"
|
||||
" be specified as year, month, day, hour, minute, second, and it is\n"
|
||||
" expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n"
|
||||
" For example, \"1Y6M10D12h30m30s\".\n"
|
||||
"\n"
|
||||
"Miscellaneous Options:\n"
|
||||
"\n"
|
||||
" -h, --help Output this help.\n"
|
||||
" --version Output version info.\n"
|
||||
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
|
||||
" twice turns on FUSE debug messages to STDOUT.\n"
|
||||
" -f FUSE foreground option - do not run as daemon.\n"
|
||||
" -s FUSE single-threaded option\n"
|
||||
" disable multi-threaded operation\n"
|
||||
"\n"
|
||||
"\n"
|
||||
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
|
||||
;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
// Print the one-line usage banner ("Usage: <prog> BUCKET:[PATH] MOUNTPOINT ...")
// to stdout. program_name is a module-level global set during startup.
void show_usage()
{
    printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", program_name.c_str());
}
|
||||
|
||||
void show_help()
|
||||
{
|
||||
show_usage();
|
||||
printf(help_string);
|
||||
}
|
||||
|
||||
void show_version()
|
||||
{
|
||||
printf(
|
||||
"Amazon Simple Storage Service File System V%s%s with %s\n"
|
||||
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
|
||||
"License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
|
||||
"This is free software: you are free to change and redistribute it.\n"
|
||||
"There is NO WARRANTY, to the extent permitted by law.\n",
|
||||
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
}
|
||||
|
||||
const char* short_version()
|
||||
{
|
||||
static constexpr char short_ver[] = "s3fs version " VERSION "" COMMIT_HASH_VAL;
|
||||
return short_ver;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
41
src/s3fs_help.h
Normal file
41
src/s3fs_help.h
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_HELP_H_
#define S3FS_S3FS_HELP_H_

//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
// Print the one-line usage banner to stdout.
void show_usage();
// Print the usage banner followed by the full option help text.
void show_help();
// Print version, commit hash, crypto backend and license notice.
void show_version();
// Return a static short version string ("s3fs version ...").
const char* short_version();

#endif // S3FS_S3FS_HELP_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
307
src/s3fs_logger.cpp
Normal file
307
src/s3fs_logger.cpp
Normal file
@ -0,0 +1,307 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iomanip>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <strings.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
// S3fsLog class : variables
//-------------------------------------------------------------------
// Out-of-class definitions for constexpr static members (required before
// C++17 inline variables when the members are odr-used).
constexpr char S3fsLog::LOGFILEENV[];
constexpr const char* S3fsLog::nest_spaces[];
constexpr char S3fsLog::MSGTIMESTAMP[];
// Singleton instance pointer; set by the S3fsLog constructor.
S3fsLog* S3fsLog::pSingleton = nullptr;
// Current log level bitmask; CRIT until raised explicitly.
S3fsLog::Level S3fsLog::debug_level = S3fsLog::Level::CRIT;
// Open log file stream; nullptr means stdout/stderr/syslog output.
FILE* S3fsLog::logfp = nullptr;
// Path of the currently opened log file (empty when none).
std::string S3fsLog::logfile;
// Whether log lines are prefixed with a UTC timestamp.
bool S3fsLog::time_stamp = true;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : class methods
|
||||
//-------------------------------------------------------------------
|
||||
// True when every bit of 'level' is enabled in the current debug_level mask.
bool S3fsLog::IsS3fsLogLevel(S3fsLog::Level level)
{
    int requested = static_cast<int>(level);
    int enabled   = static_cast<int>(S3fsLog::debug_level);
    return (enabled & requested) == requested;
}
|
||||
|
||||
std::string S3fsLog::GetCurrentTime()
|
||||
{
|
||||
std::ostringstream current_time;
|
||||
if(time_stamp){
|
||||
struct timeval now;
|
||||
struct timespec tsnow;
|
||||
struct tm res;
|
||||
char tmp[32];
|
||||
if(-1 == clock_gettime(S3FS_CLOCK_MONOTONIC, &tsnow)){
|
||||
now.tv_sec = tsnow.tv_sec;
|
||||
now.tv_usec = (tsnow.tv_nsec / 1000);
|
||||
}else{
|
||||
gettimeofday(&now, nullptr);
|
||||
}
|
||||
strftime(tmp, sizeof(tmp), "%Y-%m-%dT%H:%M:%S", gmtime_r(&now.tv_sec, &res));
|
||||
current_time << tmp << "." << std::setfill('0') << std::setw(3) << (now.tv_usec / 1000) << "Z ";
|
||||
}
|
||||
return current_time.str();
|
||||
}
|
||||
|
||||
bool S3fsLog::SetLogfile(const char* pfile)
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return false;
|
||||
}
|
||||
return S3fsLog::pSingleton->LowSetLogfile(pfile);
|
||||
}
|
||||
|
||||
bool S3fsLog::ReopenLogfile()
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
|
||||
return false;
|
||||
}
|
||||
if(!S3fsLog::logfp){
|
||||
S3FS_PRN_INFO("Currently the log file is output to stdout/stderr.");
|
||||
return true;
|
||||
}
|
||||
if(!S3fsLog::logfile.empty()){
|
||||
S3FS_PRN_ERR("There is a problem with the path to the log file being empty.");
|
||||
return false;
|
||||
}
|
||||
std::string tmp = S3fsLog::logfile;
|
||||
return S3fsLog::pSingleton->LowSetLogfile(tmp.c_str());
|
||||
}
|
||||
|
||||
// Static entry point: set the debug level via the singleton. Returns the
// previous level, or the current one when the singleton is missing.
S3fsLog::Level S3fsLog::SetLogLevel(S3fsLog::Level level)
{
    S3fsLog* pLog = S3fsLog::pSingleton;
    if(nullptr == pLog){
        S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
        return S3fsLog::debug_level;    // error: report the unchanged level
    }
    return pLog->LowSetLogLevel(level);
}
|
||||
|
||||
// Static entry point: raise the debug level one step via the singleton.
// Returns the previous level, or the current one when the singleton is missing.
S3fsLog::Level S3fsLog::BumpupLogLevel()
{
    S3fsLog* pLog = S3fsLog::pSingleton;
    if(nullptr == pLog){
        S3FS_PRN_CRIT("S3fsLog::pSingleton is nullptr.");
        return S3fsLog::debug_level;    // error: report the unchanged level
    }
    return pLog->LowBumpupLogLevel();
}
|
||||
|
||||
// Enable or disable the timestamp prefix on log lines.
// Returns the previous setting so callers can restore it.
bool S3fsLog::SetTimeStamp(bool value)
{
    bool previous = S3fsLog::time_stamp;
    S3fsLog::time_stamp = value;
    return previous;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : methods
|
||||
//-------------------------------------------------------------------
|
||||
// Install this object as the process-wide logger singleton, open syslog and
// load settings from the environment (S3FS_LOGFILE / S3FS_MSGTIMESTAMP).
// Constructing a second instance leaves the first in place and only logs
// an error.
S3fsLog::S3fsLog()
{
    if(!S3fsLog::pSingleton){
        // Set the singleton BEFORE LowLoadEnv(): SetLogfile() checks it.
        S3fsLog::pSingleton = this;

        // init syslog(default CRIT)
        openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER);
        LowLoadEnv();
    }else{
        S3FS_PRN_ERR("Already set singleton object for S3fsLog.");
    }
}
|
||||
|
||||
// Tear down the singleton: close the log file, reset all static state and
// close syslog. Destroying a non-singleton instance only logs an error.
S3fsLog::~S3fsLog()
{
    if(S3fsLog::pSingleton == this){
        // Detach logfp first so concurrent logging falls back to stdout.
        FILE* oldfp = S3fsLog::logfp;
        S3fsLog::logfp = nullptr;
        if(oldfp && 0 != fclose(oldfp)){
            // [FIX] The ternary was inverted (empty() ? c_str() : "null"):
            // it printed "" when no path existed and "null" when one did.
            S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (!S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
        }
        S3fsLog::logfile.clear();
        S3fsLog::pSingleton = nullptr;
        S3fsLog::debug_level = Level::CRIT;

        closelog();
    }else{
        S3FS_PRN_ERR("This object is not singleton S3fsLog object.");
    }
}
|
||||
|
||||
bool S3fsLog::LowLoadEnv()
|
||||
{
|
||||
if(S3fsLog::pSingleton != this){
|
||||
S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
|
||||
return false;
|
||||
}
|
||||
char* pEnvVal;
|
||||
if(nullptr != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
|
||||
if(!SetLogfile(pEnvVal)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(nullptr != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
|
||||
if(0 == strcasecmp(pEnvVal, "true") || 0 == strcasecmp(pEnvVal, "yes") || 0 == strcasecmp(pEnvVal, "1")){
|
||||
S3fsLog::time_stamp = true;
|
||||
}else if(0 == strcasecmp(pEnvVal, "false") || 0 == strcasecmp(pEnvVal, "no") || 0 == strcasecmp(pEnvVal, "0")){
|
||||
S3fsLog::time_stamp = false;
|
||||
}else{
|
||||
S3FS_PRN_WARN("Unknown %s environment value(%s) is specified, skip to set time stamp mode.", S3fsLog::MSGTIMESTAMP, pEnvVal);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool S3fsLog::LowSetLogfile(const char* pfile)
|
||||
{
|
||||
if(S3fsLog::pSingleton != this){
|
||||
S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!pfile){
|
||||
// close log file if it is opened
|
||||
if(S3fsLog::logfp && 0 != fclose(S3fsLog::logfp)){
|
||||
S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
|
||||
return false;
|
||||
}
|
||||
S3fsLog::logfp = nullptr;
|
||||
S3fsLog::logfile.clear();
|
||||
}else{
|
||||
// open new log file
|
||||
//
|
||||
// [NOTE]
|
||||
// It will reopen even if it is the same file.
|
||||
//
|
||||
FILE* newfp;
|
||||
if(nullptr == (newfp = fopen(pfile, "a+"))){
|
||||
S3FS_PRN_ERR("Could not open log file(%s).", pfile);
|
||||
return false;
|
||||
}
|
||||
|
||||
// switch new log file and close old log file if it is opened
|
||||
FILE* oldfp = S3fsLog::logfp;
|
||||
if(oldfp && 0 != fclose(oldfp)){
|
||||
S3FS_PRN_ERR("Could not close old log file(%s).", (!S3fsLog::logfile.empty() ? S3fsLog::logfile.c_str() : "null"));
|
||||
fclose(newfp);
|
||||
return false;
|
||||
}
|
||||
S3fsLog::logfp = newfp;
|
||||
S3fsLog::logfile = pfile;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Apply a new debug level, update the syslog priority mask and announce the
// change. No-op when the level is unchanged. Returns the previous level.
S3fsLog::Level S3fsLog::LowSetLogLevel(Level level)
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
        return S3fsLog::debug_level;    // error: report the unchanged level
    }
    if(S3fsLog::debug_level == level){
        return level;
    }
    Level previous = S3fsLog::debug_level;
    S3fsLog::debug_level = level;
    setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
    // Level strings carry a trailing space, hence "%sto".
    S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(previous), GetLevelString(S3fsLog::debug_level));
    return previous;
}
|
||||
|
||||
// Cycle the debug level one step: CRIT -> ERR -> WARN -> INFO -> DBG -> CRIT,
// update the syslog mask and announce the change. Returns the previous level.
S3fsLog::Level S3fsLog::LowBumpupLogLevel() const
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
        return S3fsLog::debug_level;    // error: report the unchanged level
    }
    Level previous = S3fsLog::debug_level;
    switch(previous){
        case Level::CRIT: S3fsLog::debug_level = Level::ERR;  break;
        case Level::ERR:  S3fsLog::debug_level = Level::WARN; break;
        case Level::WARN: S3fsLog::debug_level = Level::INFO; break;
        case Level::INFO: S3fsLog::debug_level = Level::DBG;  break;
        default:          S3fsLog::debug_level = Level::CRIT; break;    // DBG wraps around
    }
    setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
    // Level strings carry a trailing space, hence "%sto".
    S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(previous), GetLevelString(S3fsLog::debug_level));
    return previous;
}
|
||||
|
||||
// Core printf-style log sink: format the message once to measure it, then
// emit it to the log file/stdout (when foreground or a log file is set) or
// to syslog. Messages below the current debug level are dropped.
void s3fs_low_logprn(S3fsLog::Level level, const char* file, const char *func, int line, const char *fmt, ...)
{
    if(S3fsLog::IsS3fsLogLevel(level)){
        va_list va;
        va_start(va, fmt);
        int rawlen = vsnprintf(nullptr, 0, fmt, va);
        va_end(va);
        // [FIX] vsnprintf() returns a negative value on an output error; the
        // original added 1 and assigned to size_t, so such an error produced
        // a zero-length or absurdly large allocation. Drop the message instead.
        if(rawlen < 0){
            return;
        }
        size_t len = static_cast<size_t>(rawlen) + 1;

        auto message = std::make_unique<char[]>(len);
        va_start(va, fmt);
        vsnprintf(message.get(), len, fmt, va);
        va_end(va);

        if(foreground || S3fsLog::IsSetLogFile()){
            S3fsLog::SeekEnd();
            fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), file, func, line, message.get());
            S3fsLog::Flush();
        }else{
            // TODO: why does this differ from s3fs_low_logprn2?
            syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): %s", instance_name.c_str(), file, func, line, message.get());
        }
    }
}
|
||||
|
||||
// Variant of s3fs_low_logprn with a nesting indent: the file/stdout path
// prefixes the message with an indent string; the syslog path omits
// file/func/line entirely. Messages below the current debug level are dropped.
void s3fs_low_logprn2(S3fsLog::Level level, int nest, const char* file, const char *func, int line, const char *fmt, ...)
{
    if(S3fsLog::IsS3fsLogLevel(level)){
        va_list va;
        va_start(va, fmt);
        int rawlen = vsnprintf(nullptr, 0, fmt, va);
        va_end(va);
        // [FIX] Same guard as s3fs_low_logprn: a negative vsnprintf() return
        // assigned to size_t produced a bogus allocation size.
        if(rawlen < 0){
            return;
        }
        size_t len = static_cast<size_t>(rawlen) + 1;

        auto message = std::make_unique<char[]>(len);
        va_start(va, fmt);
        vsnprintf(message.get(), len, fmt, va);
        va_end(va);

        if(foreground || S3fsLog::IsSetLogFile()){
            S3fsLog::SeekEnd();
            fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): %s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), file, func, line, message.get());
            S3fsLog::Flush();
        }else{
            syslog(S3fsLog::GetSyslogLevel(level), "%s%s%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), message.get());
        }
    }
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
273
src/s3fs_logger.h
Normal file
273
src/s3fs_logger.h
Normal file
@ -0,0 +1,273 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_LOGGER_H_
|
||||
#define S3FS_LOGGER_H_
|
||||
|
||||
#include <cstdarg>
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include <syslog.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#ifdef CLOCK_MONOTONIC_COARSE
|
||||
#define S3FS_CLOCK_MONOTONIC CLOCK_MONOTONIC_COARSE
|
||||
#else
|
||||
// case of OSX
|
||||
#define S3FS_CLOCK_MONOTONIC CLOCK_MONOTONIC
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
// S3fsLog class
//-------------------------------------------------------------------
// Process-wide logging facility. All state is static; one S3fsLog instance
// is constructed at startup to initialize syslog and read environment
// settings, and the log macros call the static methods directly.
class S3fsLog
{
    public:
        // Log levels form a bitmask hierarchy: each level's bits include
        // every more-severe level, so masking with DBG classifies any value.
        enum class Level : uint8_t {
            CRIT = 0,          // LEVEL_CRIT
            ERR = 1,           // LEVEL_ERR
            WARN = 3,          // LEVEL_WARNING
            INFO = 7,          // LEVEL_INFO
            DBG = 15           // LEVEL_DEBUG
        };

    protected:
        static constexpr int NEST_MAX = 4;
        // Indent strings for nested (sub-operation) log lines.
        static constexpr const char* nest_spaces[NEST_MAX] = {"", " ", " ", " "};
        // Environment variable names read by LowLoadEnv().
        static constexpr char LOGFILEENV[] = "S3FS_LOGFILE";
        static constexpr char MSGTIMESTAMP[] = "S3FS_MSGTIMESTAMP";

        static S3fsLog* pSingleton;      // the one live instance (or nullptr)
        static Level debug_level;        // current level bitmask
        static FILE* logfp;              // open log file, or nullptr
        static std::string logfile;      // path of logfp (empty when none)
        static bool time_stamp;          // prefix lines with a timestamp

    protected:
        bool LowLoadEnv();
        bool LowSetLogfile(const char* pfile);
        Level LowSetLogLevel(Level level);
        Level LowBumpupLogLevel() const;

    public:
        static bool IsS3fsLogLevel(Level level);
        static bool IsS3fsLogCrit() { return IsS3fsLogLevel(Level::CRIT); }
        static bool IsS3fsLogErr() { return IsS3fsLogLevel(Level::ERR); }
        static bool IsS3fsLogWarn() { return IsS3fsLogLevel(Level::WARN); }
        static bool IsS3fsLogInfo() { return IsS3fsLogLevel(Level::INFO); }
        static bool IsS3fsLogDbg() { return IsS3fsLogLevel(Level::DBG); }

        // Map a Level to the corresponding syslog(3) priority.
        static constexpr int GetSyslogLevel(Level level)
        {
            int masked = static_cast<int>(level) & static_cast<int>(Level::DBG);
            return ( static_cast<int>(Level::DBG) == masked ? LOG_DEBUG :
                     static_cast<int>(Level::INFO) == masked ? LOG_INFO :
                     static_cast<int>(Level::WARN) == masked ? LOG_WARNING :
                     static_cast<int>(Level::ERR) == masked ? LOG_ERR : LOG_CRIT );
        }

        static std::string GetCurrentTime();

        // Map a Level to its "[XXX] " display prefix (note trailing space).
        static constexpr const char* GetLevelString(Level level)
        {
            int masked = static_cast<int>(level) & static_cast<int>(Level::DBG);
            return ( static_cast<int>(Level::DBG) == masked ? "[DBG] " :
                     static_cast<int>(Level::INFO) == masked ? "[INF] " :
                     static_cast<int>(Level::WARN) == masked ? "[WAN] " :
                     static_cast<int>(Level::ERR) == masked ? "[ERR] " : "[CRT] " );
        }

        // Return the indent string for a nesting depth, clamped into range.
        static constexpr const char* GetS3fsLogNest(int nest)
        {
            // [FIX] The original clamped only the upper bound; a negative
            // nest value indexed before the array (out-of-bounds read).
            return nest_spaces[nest < 0 ? 0 : (nest < NEST_MAX ? nest : NEST_MAX - 1)];
        }

        static bool IsSetLogFile()
        {
            return (nullptr != logfp);
        }

        // Normal output stream: the log file when set, otherwise stdout.
        static FILE* GetOutputLogFile()
        {
            return (logfp ? logfp : stdout);
        }

        // Error output stream: the log file when set, otherwise stderr.
        static FILE* GetErrorLogFile()
        {
            return (logfp ? logfp : stderr);
        }

        // Seek to EOF before writing: the file may have been rotated/appended
        // to by another writer.
        static void SeekEnd()
        {
            if(logfp){
                fseek(logfp, 0, SEEK_END);
            }
        }

        static void Flush()
        {
            if(logfp){
                fflush(logfp);
            }
        }

        static bool SetLogfile(const char* pfile);
        static bool ReopenLogfile();
        static Level SetLogLevel(Level level);
        static Level BumpupLogLevel();
        static bool SetTimeStamp(bool value);

        explicit S3fsLog();
        ~S3fsLog();
        S3fsLog(const S3fsLog&) = delete;
        S3fsLog(S3fsLog&&) = delete;
        S3fsLog& operator=(const S3fsLog&) = delete;
        S3fsLog& operator=(S3fsLog&&) = delete;
};
|
||||
|
||||
//-------------------------------------------------------------------
// Debug macros
//-------------------------------------------------------------------
// Low-level printf-style log entry points (defined in s3fs_logger.cpp);
// the format attributes let the compiler check fmt/argument agreement.
void s3fs_low_logprn(S3fsLog::Level level, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 5, 6)));
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
        do{ \
            s3fs_low_logprn(level, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__); \
        }while(0)

void s3fs_low_logprn2(S3fsLog::Level level, int nest, const char* file, const char *func, int line, const char *fmt, ...) __attribute__ ((format (printf, 6, 7)));
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
        do{ \
            s3fs_low_logprn2(level, nest, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__); \
        }while(0)

// Output for libcurl debug callbacks: log file/stdout when foreground or a
// log file is set, otherwise syslog at CRIT priority.
#define S3FS_LOW_CURLDBG(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetOutputLogFile(), "%s[CURL DBG] " fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// Fatal-path message: always written to the error stream, and additionally
// to syslog when running in the background.
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetErrorLogFile(), "s3fs: " fmt "%s\n", __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                fprintf(S3fsLog::GetErrorLogFile(), "s3fs: " fmt "%s\n", __VA_ARGS__); \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// Special macro for init message
#define S3FS_PRN_INIT_INFO(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::Level::INFO), S3fsLog::GetS3fsLogNest(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s%s" fmt "%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(0), __VA_ARGS__, ""); \
            } \
        }while(0)

// Startup banner message (no file/line information).
#define S3FS_PRN_LAUNCH_INFO(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetOutputLogFile(), "%s%s" fmt "%s\n", S3fsLog::GetCurrentTime().c_str(), S3fsLog::GetLevelString(S3fsLog::Level::INFO), __VA_ARGS__, ""); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__, ""); \
            } \
        }while(0)

// Special macro for checking cache files
#define S3FS_LOW_CACHE(fp, fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(fp, fmt "%s\n", __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::Level::INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// [NOTE]
// small trick for VA_ARGS
// (the appended "" argument lets the *_EXIT/CURL/CACHE macros be used with
// zero variadic arguments)
//
#define S3FS_PRN_EXIT(fmt, ...) S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::CRIT, fmt, ##__VA_ARGS__)
#define S3FS_PRN_ERR(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::ERR, fmt, ##__VA_ARGS__)
#define S3FS_PRN_WARN(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::WARN, fmt, ##__VA_ARGS__)
#define S3FS_PRN_DBG(fmt, ...) S3FS_LOW_LOGPRN(S3fsLog::Level::DBG, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 0, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 1, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 2, fmt, ##__VA_ARGS__)
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3fsLog::Level::INFO, 3, fmt, ##__VA_ARGS__)
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")

// Macros to print log with fuse context
#define PRINT_FUSE_CTX(level, indent, fmt, ...) do { \
        if(S3fsLog::IsS3fsLogLevel(level)){ \
            struct fuse_context *ctx = fuse_get_context(); \
            if(ctx == NULL){ \
                S3FS_LOW_LOGPRN2(level, indent, fmt, ##__VA_ARGS__); \
            }else{ \
                S3FS_LOW_LOGPRN2(level, indent, fmt"[pid=%u,uid=%u,gid=%u]",\
                    ##__VA_ARGS__, \
                    (unsigned int)(ctx->pid), \
                    (unsigned int)(ctx->uid), \
                    (unsigned int)(ctx->gid)); \
            } \
        } \
    } while (0)

#define FUSE_CTX_INFO(fmt, ...) do { \
        PRINT_FUSE_CTX(S3fsLog::Level::INFO, 0, fmt, ##__VA_ARGS__); \
    } while (0)

#define FUSE_CTX_INFO1(fmt, ...) do { \
        PRINT_FUSE_CTX(S3fsLog::Level::INFO, 1, fmt, ##__VA_ARGS__); \
    } while (0)

#define FUSE_CTX_DBG(fmt, ...) do { \
        PRINT_FUSE_CTX(S3fsLog::Level::DBG, 0, fmt, ##__VA_ARGS__); \
    } while (0)
|
||||
|
||||
#endif // S3FS_LOGGER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1517
src/s3fs_threadreqs.cpp
Normal file
1517
src/s3fs_threadreqs.cpp
Normal file
File diff suppressed because it is too large
Load Diff
267
src/s3fs_threadreqs.h
Normal file
267
src/s3fs_threadreqs.h
Normal file
@ -0,0 +1,267 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_THREADREQS_H_
|
||||
#define S3FS_THREADREQS_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
#include "curl.h"
|
||||
#include "s3objlist.h"
|
||||
#include "syncfiller.h"
|
||||
#include "psemaphore.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structures for MultiThread Request
|
||||
//-------------------------------------------------------------------
|
||||
typedef std::map<std::string, int> retrycnt_t;
|
||||
|
||||
//
|
||||
// Head Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct head_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
headers_t* pmeta = nullptr;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Multi Head Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct multi_head_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
SyncFiller* psyncfiller = nullptr;
|
||||
std::mutex* pthparam_lock = nullptr;
|
||||
int* pretrycount = nullptr;
|
||||
s3obj_list_t* pnotfound_list = nullptr;
|
||||
bool use_wtf8 = false;
|
||||
objtype_t objtype = objtype_t::UNKNOWN;
|
||||
int* presult = nullptr;
|
||||
};
|
||||
|
||||
//
|
||||
// Delete Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct delete_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Put Head Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct put_head_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
headers_t meta;
|
||||
bool isCopy = false;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Put Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct put_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
headers_t meta;
|
||||
int fd = -1;
|
||||
bool ahbe = false;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// List Bucket Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct list_bucket_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
std::string query;
|
||||
std::string* presponseBody = nullptr;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Check Service Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct check_service_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
bool forceNoSSE = false;
|
||||
bool support_compat_dir = false;
|
||||
long* presponseCode = nullptr;
|
||||
std::string* presponseBody = nullptr;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Pre Multipart Upload Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct pre_multipart_upload_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
headers_t meta;
|
||||
std::string upload_id;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Multipart Upload Part Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct multipart_upload_part_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
std::string upload_id;
|
||||
int upload_fd = -1;
|
||||
off_t start = 0;
|
||||
off_t size = 0;
|
||||
bool is_copy = false;
|
||||
int part_num = -1;
|
||||
std::mutex* pthparam_lock = nullptr;
|
||||
etagpair* petag = nullptr;
|
||||
int* presult = nullptr;
|
||||
};
|
||||
|
||||
//
|
||||
// Complete Multipart Upload Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct complete_multipart_upload_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
std::string upload_id;
|
||||
etaglist_t etaglist;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Abort Multipart Upload Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct abort_multipart_upload_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
std::string upload_id;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//
|
||||
// Multipart Put Head Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct multipart_put_head_req_thparam
|
||||
{
|
||||
std::string from;
|
||||
std::string to;
|
||||
std::string upload_id;
|
||||
int part_number = 0;
|
||||
headers_t meta;
|
||||
std::mutex* pthparam_lock = nullptr;
|
||||
filepart* ppartdata = nullptr;
|
||||
int* pretrycount = nullptr;
|
||||
int* presult = nullptr;
|
||||
};
|
||||
|
||||
//
|
||||
// Parallel Get Object Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct parallel_get_object_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
int fd = -1;
|
||||
off_t start = 0;
|
||||
off_t size = 0;
|
||||
sse_type_t ssetype = sse_type_t::SSE_DISABLE;
|
||||
std::string ssevalue;
|
||||
std::mutex* pthparam_lock = nullptr;
|
||||
int* pretrycount = nullptr;
|
||||
int* presult = nullptr;
|
||||
};
|
||||
|
||||
//
|
||||
// Get Object Request parameter structure for Thread Pool.
|
||||
//
|
||||
struct get_object_req_thparam
|
||||
{
|
||||
std::string path;
|
||||
int fd = -1;
|
||||
off_t start = 0;
|
||||
off_t size = 0;
|
||||
int result = 0;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Thread Worker functions for MultiThread Request
|
||||
//-------------------------------------------------------------------
|
||||
void* head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* multi_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* delete_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* put_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* put_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* list_bucket_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* check_service_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* pre_multipart_upload_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* multipart_upload_part_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* complete_multipart_upload_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* abort_multipart_upload_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* multipart_put_head_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* parallel_get_object_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
void* get_object_req_threadworker(S3fsCurl& s3fscurl, void* arg);
|
||||
|
||||
//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
// Synchronous entry points that dispatch the requests above onto the
// thread pool. All return 0 on success, non-zero on failure.
int head_request(const std::string& strpath, headers_t& header);
int multi_head_request(const std::string& strpath, SyncFiller& syncfiller, std::mutex& thparam_lock, int& retrycount, s3obj_list_t& notfound_list, bool use_wtf8, objtype_t objtype, int& result, Semaphore& sem);
int delete_request(const std::string& strpath);
int put_head_request(const std::string& strpath, const headers_t& meta, bool is_copy);
int put_request(const std::string& strpath, const headers_t& meta, int fd, bool ahbe);
int list_bucket_request(const std::string& strpath, const std::string& query, std::string& responseBody);
int check_service_request(const std::string& strpath, bool forceNoSSE, bool support_compat_dir, long& responseCode, std::string& responseBody);

// Multipart upload lifecycle: initiate -> upload parts -> complete/abort.
int pre_multipart_upload_request(const std::string& path, const headers_t& meta, std::string& upload_id);
int multipart_upload_part_request(const std::string& path, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy, Semaphore* psem, std::mutex* pthparam_lock, int* req_result);
// Blocking variant of multipart_upload_part_request (waits for completion).
int await_multipart_upload_part_request(const std::string& path, int upload_fd, off_t start, off_t size, int part_num, const std::string& upload_id, etagpair* petag, bool is_copy);
int multipart_upload_request(const std::string& path, const headers_t& meta, int upload_fd);
// Mixed upload: some parts copied from the existing object, some from upload_fd.
int mix_multipart_upload_request(const std::string& path, headers_t& meta, int upload_fd, const fdpage_list_t& mixuppages);
int complete_multipart_upload_request(const std::string& path, const std::string& upload_id, const etaglist_t& parts);
int abort_multipart_upload_request(const std::string& path, const std::string& upload_id);
int multipart_put_head_request(const std::string& strfrom, const std::string& strto, off_t size, const headers_t& meta);

// Object downloads (parallel splits into ranged parts; plain is one request).
int parallel_get_object_request(const std::string& path, int fd, off_t start, off_t size);
int get_object_request(const std::string& path, int fd, off_t start, off_t size);
|
||||
|
||||
//-------------------------------------------------------------------
// Direct Call Utility Functions
//-------------------------------------------------------------------
// IAM credential helpers issued directly (not through the thread pool).
// Fetch an IMDSv2 session token from strurl with the given TTL header.
int get_iamv2api_token_request(const std::string& strurl, int tokenttl, const std::string& strttlhdr, std::string& token);
// Fetch the IAM role name attached to this instance.
int get_iamrole_request(const std::string& strurl, const std::string& striamtoken, std::string& token);
// Fetch temporary credentials for the role.
int get_iamcred_request(const std::string& strurl, const std::string& striamtoken, const std::string& stribmsecret, std::string& cred);
|
||||
|
||||
#endif // S3FS_THREADREQS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1481
src/s3fs_util.cpp
1481
src/s3fs_util.cpp
File diff suppressed because it is too large
Load Diff
158
src/s3fs_util.h
158
src/s3fs_util.h
@ -17,125 +17,83 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_UTIL_H_
|
||||
#define S3FS_S3FS_UTIL_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Typedef
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Struct
|
||||
//
|
||||
#include <cstdint>
#include <functional>
#include <string>
#include <sys/stat.h>

// NOTE(review): this span is diff residue mixing two revisions of
// s3fs_util.h; the include lines above belong to the newer revision and
// the struct/typedefs below to the older one. Kept together, reordered
// so the includes come first.

// One entry produced by a bucket listing.
struct s3obj_entry{
    std::string normalname; // normalized name: if empty, object is normalized name.
    std::string orgname;    // original name: if empty, object is original name.
    std::string etag;       // ETag from the listing (may be empty)
    bool is_dir;            // true when the entry is a directory

    s3obj_entry() : is_dir(false) {}
};

// Listing containers keyed by / holding object names.
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
typedef std::list<std::string> s3obj_list_t;
|
||||
|
||||
//
// Class
//
// Accumulates object names returned by bucket listings and answers
// name / ETag / directory-type queries about them.
//
class S3ObjList
{
    private:
        s3obj_t objects;    // map: object name -> entry

    private:
        // Insert with an explicit normalized form for the name.
        bool insert_normalized(const char* name, const char* normalized, bool is_dir);
        // Lookup helper; returns nullptr when name is absent.
        const s3obj_entry* GetS3Obj(const char* name) const;

        s3obj_t::const_iterator begin(void) const {
            return objects.begin();
        }
        s3obj_t::const_iterator end(void) const {
            return objects.end();
        }

    public:
        S3ObjList() {}
        ~S3ObjList() {}

        bool IsEmpty(void) const {
            return objects.empty();
        }
        // Add one object; etag may be NULL, is_dir marks directory entries.
        bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
        std::string GetOrgName(const char* name) const;
        std::string GetNormalizedName(const char* name) const;
        std::string GetETag(const char* name) const;
        bool IsDir(const char* name) const;
        // Copy stored names into list (optionally only normalized / with trailing slash cut).
        bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
        bool GetLastName(std::string& lastname) const;

        // Expand a flat name list so intermediate directories are represented.
        static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
};
|
||||
|
||||
// Doubly-linked list node describing one pending rename (move) operation.
typedef struct mvnode {
    char *old_path;         // source path
    char *new_path;         // destination path
    bool is_dir;            // entry is a directory
    bool is_normdir;        // "normalized" directory entry — TODO(review): confirm exact meaning with create_mvnode callers
    struct mvnode *prev;    // previous node in list
    struct mvnode *next;    // next node in list
} MVNODE;
|
||||
|
||||
// RAII-style scoped lock over a pthread mutex.
// With no_wait=true the constructor presumably attempts the lock without
// blocking (confirm in the implementation); callers should check
// isLockAcquired() before relying on the lock being held.
class AutoLock
{
    private:
        pthread_mutex_t* auto_mutex;    // mutex held for this scope (not owned)
        bool is_lock_acquired;          // whether the lock was actually taken

    public:
        explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
        bool isLockAcquired() const;
        ~AutoLock();
};
|
||||
// Fallback clock ids for platforms whose <time.h> lacks some of them:
// degrade COARSE -> MONOTONIC -> REALTIME -> 0.
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 0
#endif
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC CLOCK_REALTIME
#endif
#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif
|
||||
|
||||
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
// Resolve path to a normalized absolute form.
std::string get_realpath(const char *path);

// Allocate one rename-list node.
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
// Create a node and append it to the list headed by *head / tailed by *tail.
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
// Free every node reachable from head.
void free_mvnodes(MVNODE *head);
|
||||
|
||||
// Cache values obtained via sysconf() for later use.
void init_sysconf_vars();
std::string get_username(uid_t uid);
int is_uid_include_group(uid_t uid, gid_t gid);

// dirname/basename helpers (char*, by-value and const-ref string overloads).
std::string mydirname(const char* path);
std::string mydirname(std::string path);
std::string mydirname(const std::string& path);
std::string mybasename(const char* path);
std::string mybasename(std::string path);
std::string mybasename(const std::string& path);

// Local directory helpers.
int mkdirp(const std::string& path, mode_t mode);
std::string get_exist_directory_path(const std::string& path);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);

// Attribute extraction from raw header strings / response header maps.
time_t get_mtime(const char *s);
time_t get_mtime(headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(headers_t& meta);
mode_t get_mode(const char *s);
mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(headers_t& meta);
gid_t get_gid(const char *s);
gid_t get_gid(headers_t& meta);
blkcnt_t get_blocks(off_t size);
time_t cvtIAMExpireStringToTime(const char* s);
time_t get_lastmodified(const char* s);
time_t get_lastmodified(headers_t& meta);
bool is_need_check_obj_detail(headers_t& meta);
bool compare_sysname(const char* target);

// CLI help / version / startup banner output.
void show_usage(void);
void show_help(void);
void show_version(void);
void print_launch_message(int argc, char** argv);
|
||||
|
||||
//-------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------
// (The old "Utility for nanosecond time(timespec)" heading no longer
// matched this section; only the fclose wrapper remains here.)
// Wrap fclose since it is illegal to take the address of a stdlib function
int s3fs_fclose(FILE* fp);
|
||||
|
||||
// Generic RAII scope guard: runs the stored undo action when the guard
// is destroyed, unless dismiss() was called first.
// Non-copyable and non-movable by design.
class scope_guard
{
    private:
        std::function<void()> func;     // pending undo action; empty once dismissed

    public:
        template<class Callable>

        explicit scope_guard(Callable&& undo_func)
            : func(std::forward<Callable>(undo_func))
        {}

        scope_guard(const scope_guard&) = delete;
        scope_guard(scope_guard&& other) = delete;
        scope_guard& operator=(const scope_guard&) = delete;
        scope_guard& operator=(scope_guard&&) = delete;

        ~scope_guard()
        {
            if(nullptr != func){
                func();
            }
        }

        // Cancel the pending undo action (typically after success).
        void dismiss()
        {
            func = nullptr;
        }
};
|
||||
|
||||
#endif // S3FS_S3FS_UTIL_H_
|
||||
|
||||
@ -144,6 +102,6 @@ void show_version(void);
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
512
src/s3fs_xml.cpp
Normal file
512
src/s3fs_xml.cpp
Normal file
@ -0,0 +1,512 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <libxml/xpathInternals.h>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "s3objlist.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
// Outcome of get_object_name(): a usable name was extracted (SUCCESS),
// extraction failed outright (FAILURE), or the entry refers to the
// listed directory itself / an entry that should be skipped
// (FILE_OR_SUBDIR_IN_DIR).
enum class get_object_name_result : std::uint8_t {
    SUCCESS,
    FAILURE,
    FILE_OR_SUBDIR_IN_DIR
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE]
|
||||
// mutex for static variables in GetXmlNsUrl
|
||||
//
|
||||
static std::mutex xml_parser_mutex;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
// Return (in nsurl) the XML namespace URL of doc's root element.
// The value is cached in function-local statics for 60 seconds and the
// cache is guarded by xml_parser_mutex, so concurrent callers may get a
// namespace derived from a different recent document.
// Returns true only when a non-empty namespace is available.
static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
{
    bool result = false;

    if(!doc){
        return result;
    }

    std::string tmpNs;
    {
        static time_t tmLast = 0;  // cache for 60 sec.
        static std::string strNs;

        const std::lock_guard<std::mutex> lock(xml_parser_mutex);

        if((tmLast + 60) < time(nullptr)){
            // refresh
            tmLast = time(nullptr);
            strNs  = "";
            xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
            if(pRootNode){
                // xmlGetNsList returns a malloc'd array that must be xmlFree'd.
                std::unique_ptr<xmlNsPtr, decltype(xmlFree)> nslist(xmlGetNsList(doc, pRootNode), xmlFree);
                if(nslist){
                    if(*nslist && (*nslist)[0].href){
                        int len = xmlStrlen((*nslist)[0].href);
                        if(0 < len){
                            strNs = std::string(reinterpret_cast<const char*>((*nslist)[0].href), len);
                        }
                    }
                }
            }
        }
        tmpNs = strNs;  // copy out under the lock
    }
    if(!tmpNs.empty()){
        nsurl  = tmpNs;
        result = true;
    }
    return result;
}
|
||||
|
||||
// Evaluate the XPath "/ListBucketResult/<exp>" (namespace-qualified as
// "s3:" when the document declares one and noxmlns is not set) and
// return the text of the first match, or a null unique_ptr on any
// failure / empty result.
static unique_ptr_xmlChar get_base_exp(xmlDocPtr doc, const char* exp)
{
    std::string xmlnsurl;
    std::string exp_string;

    if(!doc){
        return {nullptr, xmlFree};
    }
    unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);

    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        // register the document's namespace under the "s3" prefix
        xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
        exp_string = "/s3:ListBucketResult/s3:";
    } else {
        exp_string = "/ListBucketResult/";
    }

    exp_string += exp;

    unique_ptr_xmlXPathObject marker_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_string.c_str()), ctx.get()), xmlXPathFreeObject);
    if(nullptr == marker_xp){
        return {nullptr, xmlFree};
    }
    if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
        S3FS_PRN_INFO("marker_xp->nodesetval is empty.");
        return {nullptr, xmlFree};
    }
    xmlNodeSetPtr nodes = marker_xp->nodesetval;

    // caller owns the returned string (freed via xmlFree by the deleter)
    unique_ptr_xmlChar result(xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
    return result;
}
|
||||
|
||||
// Extract the <Prefix> element text from a ListBucketResult document.
static unique_ptr_xmlChar get_prefix(xmlDocPtr doc)
{
    return get_base_exp(doc, "Prefix");
}
|
||||
|
||||
// Extract <NextContinuationToken> (ListObjectsV2 pagination cursor).
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc)
{
    return get_base_exp(doc, "NextContinuationToken");
}
|
||||
|
||||
// Extract <NextMarker> (ListObjects v1 pagination cursor).
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc)
{
    return get_base_exp(doc, "NextMarker");
}
|
||||
|
||||
// Extract the object name (relative to path) from a listing node.
// Returns SUCCESS with the name, FAILURE when the node has no usable
// name, or FILE_OR_SUBDIR_IN_DIR when the entry is the listed directory
// itself / otherwise not a direct child to report.
static std::pair<get_object_name_result, std::string> get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
{
    // Get full path
    unique_ptr_xmlChar fullpath(xmlNodeListGetString(doc, node, 1), xmlFree);
    if(!fullpath){
        S3FS_PRN_ERR("could not get object full path name..");
        return {get_object_name_result::FAILURE, ""};
    }
    // basepath(path) is as same as fullpath.
    if(0 == strcmp(reinterpret_cast<char*>(fullpath.get()), path)){
        return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
    }

    // Make dir path and filename
    std::string strdirpath = mydirname(reinterpret_cast<const char*>(fullpath.get()));
    std::string strmybpath = mybasename(reinterpret_cast<const char*>(fullpath.get()));
    const char* dirpath = strdirpath.c_str();
    const char* mybname = strmybpath.c_str();
    // strip the leading '/' from path so it compares against listing keys
    const char* basepath= (path && '/' == path[0]) ? &path[1] : path;

    if('\0' == mybname[0]){
        return {get_object_name_result::FAILURE, ""};
    }

    // check subdir & file in subdir
    if(0 < strlen(dirpath)){
        // case of "/"
        if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){
            return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
        }
        // case of "."
        if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){
            return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
        }
        // case of ".."
        if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){
            return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
        }
        // case of "name"
        if(0 == strcmp(dirpath, ".")){
            // OK: top-level name with no directory component
            return {get_object_name_result::SUCCESS, mybname};
        }else{
            if(basepath && 0 == strcmp(dirpath, basepath)){
                // OK: direct child of the listed directory
                return {get_object_name_result::SUCCESS, mybname};
            }else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){
                // basepath ends with '/' and dirpath lies under it:
                // rebuild the name with its sub-directory part prepended.
                std::string withdirname;
                if(strlen(dirpath) > strlen(basepath)){
                    withdirname = &dirpath[strlen(basepath)];
                }
                // cppcheck-suppress unmatchedSuppression
                // cppcheck-suppress knownConditionTrueFalse
                if(!withdirname.empty() && '/' != *withdirname.rbegin()){
                    withdirname += "/";
                }
                withdirname += mybname;
                return {get_object_name_result::SUCCESS, withdirname};
            }
        }
    }
    // case of something wrong
    return {get_object_name_result::FILE_OR_SUBDIR_IN_DIR, ""};
}
|
||||
|
||||
// Evaluate the XPath exp_key relative to ctx's current node and return
// the first match's text, or a null unique_ptr (with an error log) when
// the key is missing or empty.
static unique_ptr_xmlChar get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
{
    if(!doc || !ctx || !exp_key){
        return {nullptr, xmlFree};
    }

    xmlNodeSetPtr exp_nodes;

    // search exp_key tag
    unique_ptr_xmlXPathObject exp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(exp_key), ctx), xmlXPathFreeObject);
    if(nullptr == exp){
        S3FS_PRN_ERR("Could not find key(%s).", exp_key);
        return {nullptr, xmlFree};
    }
    if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
        S3FS_PRN_ERR("Key(%s) node is empty.", exp_key);
        return {nullptr, xmlFree};
    }
    // get exp_key value & set in struct
    exp_nodes = exp->nodesetval;
    unique_ptr_xmlChar exp_value(xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
    if(nullptr == exp_value){
        S3FS_PRN_ERR("Key(%s) value is empty.", exp_key);
        return {nullptr, xmlFree};
    }

    return exp_value;
}
|
||||
|
||||
// Parse a ListMultipartUploads response document and fill list with one
// INCOMP_MPU_INFO (key, upload id, initiated date) per <Upload> element.
// Entries missing any of the three child tags are skipped. Returns true
// when the document could be processed (even with zero uploads), false
// only on evaluation failure.
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
{
    if(!doc){
        return false;
    }

    unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);

    std::string xmlnsurl;
    std::string ex_upload = "//";
    std::string ex_key;
    std::string ex_id;
    std::string ex_date;

    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        // namespace-qualify every tag we search for
        xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
        ex_upload += "s3:";
        ex_key    += "s3:";
        ex_id     += "s3:";
        ex_date   += "s3:";
    }
    ex_upload += "Upload";
    ex_key    += "Key";
    ex_id     += "UploadId";
    ex_date   += "Initiated";

    // get "Upload" Tags
    unique_ptr_xmlXPathObject upload_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_upload.c_str()), ctx.get()), xmlXPathFreeObject);
    if(nullptr == upload_xp){
        S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
        return false;
    }
    if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
        S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
        return true;    // no incomplete uploads is not an error
    }

    // Make list
    int cnt;
    xmlNodeSetPtr upload_nodes;
    list.clear();
    for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){
        ctx->node = upload_nodes->nodeTab[cnt];     // evaluate child tags relative to this <Upload>

        INCOMP_MPU_INFO part;

        // search "Key" tag
        unique_ptr_xmlChar ex_value(get_exp_value_xml(doc, ctx.get(), ex_key.c_str()));
        if(nullptr == ex_value){
            continue;
        }
        // ensure the key starts with '/'
        if('/' != *(reinterpret_cast<char*>(ex_value.get()))){
            part.key = "/";
        }else{
            part.key = "";
        }
        part.key += reinterpret_cast<char*>(ex_value.get());

        // search "UploadId" tag
        if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_id.c_str()))){
            continue;
        }
        part.id = reinterpret_cast<char*>(ex_value.get());

        // search "Initiated" tag
        if(nullptr == (ex_value = get_exp_value_xml(doc, ctx.get(), ex_date.c_str()))){
            continue;
        }
        part.date = reinterpret_cast<char*>(ex_value.get());

        list.push_back(part);
    }

    return true;
}
|
||||
|
||||
// Return true when the listing response's <IsTruncated> element is
// present and equals "true" (case-insensitive); false otherwise.
bool is_truncated(xmlDocPtr doc)
{
    unique_ptr_xmlChar truncated(get_base_exp(doc, "IsTruncated"));
    return truncated && (0 == strcasecmp(reinterpret_cast<const char*>(truncated.get()), "true"));
}
|
||||
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix)
|
||||
{
|
||||
xmlNodeSetPtr content_nodes;
|
||||
|
||||
unique_ptr_xmlXPathObject contents_xp(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_contents), ctx), xmlXPathFreeObject);
|
||||
if(nullptr == contents_xp){
|
||||
S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
|
||||
return -1;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
|
||||
S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
|
||||
return 0;
|
||||
}
|
||||
content_nodes = contents_xp->nodesetval;
|
||||
|
||||
bool is_dir;
|
||||
std::string stretag;
|
||||
int i;
|
||||
for(i = 0; i < content_nodes->nodeNr; i++){
|
||||
ctx->node = content_nodes->nodeTab[i];
|
||||
|
||||
// object name
|
||||
unique_ptr_xmlXPathObject key(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_key), ctx), xmlXPathFreeObject);
|
||||
if(nullptr == key){
|
||||
S3FS_PRN_WARN("key is null. but continue.");
|
||||
continue;
|
||||
}
|
||||
if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
|
||||
S3FS_PRN_WARN("node is empty. but continue.");
|
||||
continue;
|
||||
}
|
||||
xmlNodeSetPtr key_nodes = key->nodesetval;
|
||||
auto result = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);
|
||||
|
||||
switch(result.first){
|
||||
case get_object_name_result::FAILURE:
|
||||
S3FS_PRN_WARN("name is something wrong. but continue.");
|
||||
break;
|
||||
case get_object_name_result::SUCCESS: {
|
||||
is_dir = isCPrefix ? true : false;
|
||||
stretag = "";
|
||||
|
||||
if(!isCPrefix && ex_etag){
|
||||
// Get ETag
|
||||
unique_ptr_xmlXPathObject ETag(xmlXPathEvalExpression(reinterpret_cast<const xmlChar*>(ex_etag), ctx), xmlXPathFreeObject);
|
||||
if(nullptr != ETag){
|
||||
if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
|
||||
S3FS_PRN_INFO("ETag->nodesetval is empty.");
|
||||
}else{
|
||||
xmlNodeSetPtr etag_nodes = ETag->nodesetval;
|
||||
unique_ptr_xmlChar petag(xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1), xmlFree);
|
||||
if(petag){
|
||||
stretag = reinterpret_cast<const char*>(petag.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// The XML data passed to this function is CR code(\r) encoded.
|
||||
// The function below decodes that encoded CR code.
|
||||
//
|
||||
std::string decname = get_decoded_cr_code(result.second.c_str());
|
||||
|
||||
if(prefix){
|
||||
head.AddCommonPrefix(decname);
|
||||
}
|
||||
if(!head.insert(decname.c_str(), (!stretag.empty() ? stretag.c_str() : nullptr), is_dir)){
|
||||
S3FS_PRN_ERR("insert_object returns with error.");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case get_object_name_result::FILE_OR_SUBDIR_IN_DIR:
|
||||
S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Parse a full ListBucketResult document and append both regular
// objects (<Contents>) and directory-like common prefixes
// (<CommonPrefixes>) under path into head.
// Returns 0 on success, -1 on any extraction error.
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
{
    std::string xmlnsurl;
    std::string ex_contents = "//";
    std::string ex_key;
    std::string ex_cprefix = "//";
    std::string ex_prefix;
    std::string ex_etag;

    if(!doc){
        return -1;
    }

    // If there is not <Prefix>, use path instead of it.
    auto pprefix = get_prefix(doc);
    std::string prefix = (pprefix ? reinterpret_cast<char*>(pprefix.get()) : path ? path : "");

    unique_ptr_xmlXPathContext ctx(xmlXPathNewContext(doc), xmlXPathFreeContext);

    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        // namespace-qualify every tag we search for
        xmlXPathRegisterNs(ctx.get(), reinterpret_cast<const xmlChar*>("s3"), reinterpret_cast<const xmlChar*>(xmlnsurl.c_str()));
        ex_contents+= "s3:";
        ex_key     += "s3:";
        ex_cprefix += "s3:";
        ex_prefix  += "s3:";
        ex_etag    += "s3:";
    }
    ex_contents+= "Contents";
    ex_key     += "Key";
    ex_cprefix += "CommonPrefixes";
    ex_prefix  += "Prefix";
    ex_etag    += "ETag";

    // first pass: files under <Contents>; second pass: directories under <CommonPrefixes>
    if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head, /*prefix=*/ false) ||
       -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx.get(), ex_cprefix.c_str(), ex_prefix.c_str(), nullptr, 1, head, /*prefix=*/ true) )
    {
        S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
        return -1;
    }

    return 0;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions
|
||||
//-------------------------------------------------------------------
|
||||
// Parse the XML in data (len bytes) and set value to the text of the
// first direct child of the root element whose tag name equals key.
// Only the root's immediate children are searched (no recursion).
// Returns true when the key was found, false otherwise or on parse error.
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value)
{
    bool result = false;

    if(!data || !key || 0 == len){
        return false;
    }
    value.clear();

    // [NOTE]
    // If data is not nullptr and len is 0, this function will output the message
    // ":1: parser error : Document is empty" to stderr.
    // Make sure len is not 0 beforehand.
    //
    s3fsXmlBufferParserError parserError;
    parserError.SetXmlParseError();     // capture libxml error text into a buffer

    std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> doc(xmlReadMemory(data, static_cast<int>(len), "", nullptr, 0), xmlFreeDoc);
    if(nullptr == doc){
        if(parserError.IsXmlParseError()){
            S3FS_PRN_ERR("xmlReadMemory returns with error: %s", parserError.GetXmlParseError().c_str());
        }else{
            S3FS_PRN_ERR("xmlReadMemory returns with error.");
        }
        return false;
    }

    if(nullptr == doc->children){
        return false;
    }
    // walk the root element's direct children looking for <key>text</key>
    for(xmlNodePtr cur_node = doc->children->children; nullptr != cur_node; cur_node = cur_node->next){
        // For DEBUG
        // std::string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
        // printf("cur_node_name: %s\n", cur_node_name.c_str());

        if(XML_ELEMENT_NODE == cur_node->type){
            std::string elementName = reinterpret_cast<const char*>(cur_node->name);
            // For DEBUG
            // printf("elementName: %s\n", elementName.c_str());

            if(cur_node->children){
                if(XML_TEXT_NODE == cur_node->children->type){
                    if(elementName == key) {
                        value = reinterpret_cast<const char *>(cur_node->children->content);
                        result = true;
                        break;  // first match wins
                    }
                }
            }
        }
    }

    return result;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
99
src/s3fs_xml.h
Normal file
99
src/s3fs_xml.h
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_XML_H_
|
||||
#define S3FS_S3FS_XML_H_
|
||||
|
||||
#include <libxml/xpath.h>
|
||||
#include <libxml/parser.h> // [NOTE] include this header in some environments
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <array>
|
||||
#include <cstring>
|
||||
|
||||
#include "mpu_util.h"
|
||||
|
||||
class S3ObjList;

// Owning smart-pointer aliases for libxml2 objects, each paired with
// the matching libxml2 deleter so results free themselves on scope exit.
typedef std::unique_ptr<xmlChar, decltype(xmlFree)> unique_ptr_xmlChar;
typedef std::unique_ptr<xmlXPathObject, decltype(&xmlXPathFreeObject)> unique_ptr_xmlXPathObject;
typedef std::unique_ptr<xmlXPathContext, decltype(&xmlXPathFreeContext)> unique_ptr_xmlXPathContext;
typedef std::unique_ptr<xmlDoc, decltype(&xmlFreeDoc)> unique_ptr_xmlDoc;
|
||||
|
||||
//-------------------------------------------------------------------
// Utility Class
//-------------------------------------------------------------------
// Captures libxml2 generic error output into a fixed-size buffer so it
// can be attached to s3fs log messages instead of going to stderr.
// SetXmlParseError() installs the handler globally via
// xmlSetGenericErrorFunc, so concurrent parses share the process-wide
// handler — NOTE(review): confirm callers serialize parsing if needed.
class s3fsXmlBufferParserError
{
    private:
        static constexpr int ERROR_BUFFER_SIZE = 1024;
        std::array<char, ERROR_BUFFER_SIZE> error_buffer{};     // accumulated error text (NUL-terminated)

        // vararg callback given to libxml; ctx is the error_buffer data pointer.
        static void ParserErrorHandler(void* ctx, const char *msg, ...)
        {
            auto* errbuf = static_cast<char*>(ctx);
            if(errbuf){
                va_list args;
                va_start(args, msg);
                // append after any text already captured
                vsnprintf(errbuf + strlen(errbuf), ERROR_BUFFER_SIZE - strlen(errbuf) - 1, msg, args);
                va_end(args);
            }
        }

    public:
        // Clear the buffer and install this object's buffer as libxml's
        // generic error sink.
        void SetXmlParseError()
        {
            error_buffer.fill(0);
            xmlSetGenericErrorFunc(error_buffer.data(), s3fsXmlBufferParserError::ParserErrorHandler);
        }

        // Captured error text, or "" when no error was recorded.
        std::string GetXmlParseError() const
        {
            return strlen(error_buffer.data()) ? error_buffer.data() : "";
        }

        // True when any error text has been captured.
        bool IsXmlParseError() const
        {
            return (0 < strlen(error_buffer.data()));
        }
};
|
||||
|
||||
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
// True when the listing response's <IsTruncated> equals "true".
bool is_truncated(xmlDocPtr doc);
// Append objects matched by the given XPath expressions into head.
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head, bool prefix);
// Append all <Contents> and <CommonPrefixes> entries from doc into head.
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
// Pagination cursors from listing responses (null when absent).
unique_ptr_xmlChar get_next_continuation_token(xmlDocPtr doc);
unique_ptr_xmlChar get_next_marker(xmlDocPtr doc);
// Fill list from a ListMultipartUploads response.
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);

// Find the first root-level child element named key and return its text.
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
#endif // S3FS_S3FS_XML_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
305
src/s3objlist.cpp
Normal file
305
src/s3objlist.cpp
Normal file
@ -0,0 +1,305 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
|
||||
#include "s3objlist.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3ObjList
|
||||
//-------------------------------------------------------------------
|
||||
// New class S3ObjList is base on old s3_object struct.
|
||||
// This class is for S3 compatible clients.
|
||||
//
|
||||
// If name is terminated by "/", it is forced dir type.
|
||||
// If name is terminated by "_$folder$", it is forced dir type.
|
||||
// If is_dir is true, the name ends with "/", or the name ends with "_$folder$",
|
||||
// it will be determined to be a directory.
|
||||
// If it is determined to be a directory, one of the directory types in objtype_t
|
||||
// will be set to type. If it is not a directory(file, symbolic link), type will
|
||||
// be set to objtype_t::UNKNOWN.
|
||||
//
|
||||
bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
|
||||
{
|
||||
if(!name || '\0' == name[0]){
|
||||
return false;
|
||||
}
|
||||
|
||||
s3obj_t::iterator iter;
|
||||
std::string newname;
|
||||
std::string orgname = name;
|
||||
objtype_t type = objtype_t::UNKNOWN;
|
||||
|
||||
// Normalization
|
||||
std::string::size_type pos = orgname.find("_$folder$");
|
||||
if(std::string::npos != pos){
|
||||
newname = orgname.substr(0, pos);
|
||||
type = objtype_t::DIR_FOLDER_SUFFIX;
|
||||
}else{
|
||||
newname = orgname;
|
||||
}
|
||||
if('/' == *newname.rbegin()){
|
||||
if(!IS_DIR_OBJ(type)){
|
||||
type = objtype_t::DIR_NORMAL;
|
||||
}
|
||||
}else{
|
||||
if(is_dir || IS_DIR_OBJ(type)){
|
||||
newname += "/";
|
||||
if(!IS_DIR_OBJ(type)){
|
||||
type = objtype_t::DIR_NOT_TERMINATE_SLASH;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check derived name object.
|
||||
if(is_dir || IS_DIR_OBJ(type)){
|
||||
std::string chkname = newname.substr(0, newname.length() - 1);
|
||||
if(objects.cend() != (iter = objects.find(chkname))){
|
||||
// found "dir" object --> remove it.
|
||||
objects.erase(iter);
|
||||
}
|
||||
}else{
|
||||
std::string chkname = newname + "/";
|
||||
if(objects.cend() != (iter = objects.find(chkname))){
|
||||
// found "dir/" object --> not add new object.
|
||||
// and add normalization
|
||||
return insert_normalized(orgname.c_str(), chkname.c_str(), type);
|
||||
}
|
||||
}
|
||||
|
||||
// Add object
|
||||
if(objects.cend() != (iter = objects.find(newname))){
|
||||
// Found same object --> update information.
|
||||
iter->second.normalname.clear();
|
||||
iter->second.orgname = orgname;
|
||||
iter->second.type = type;
|
||||
if(etag){
|
||||
iter->second.etag = etag; // over write
|
||||
}
|
||||
}else{
|
||||
// add new object
|
||||
s3obj_entry newobject;
|
||||
newobject.orgname = orgname;
|
||||
newobject.type = type;
|
||||
if(etag){
|
||||
newobject.etag = etag;
|
||||
}
|
||||
objects[newname] = newobject;
|
||||
}
|
||||
|
||||
// add normalization
|
||||
return insert_normalized(orgname.c_str(), newname.c_str(), type);
|
||||
}
|
||||
|
||||
bool S3ObjList::insert_normalized(const char* name, const char* normalized, objtype_t type)
|
||||
{
|
||||
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
|
||||
return false;
|
||||
}
|
||||
if(0 == strcmp(name, normalized)){
|
||||
return true;
|
||||
}
|
||||
|
||||
s3obj_t::iterator iter;
|
||||
if(objects.cend() != (iter = objects.find(name))){
|
||||
// found name --> over write
|
||||
iter->second.orgname.clear();
|
||||
iter->second.etag.clear();
|
||||
iter->second.normalname = normalized;
|
||||
iter->second.type = type;
|
||||
}else{
|
||||
// not found --> add new object
|
||||
s3obj_entry newobject;
|
||||
newobject.normalname = normalized;
|
||||
newobject.type = type;
|
||||
objects[name] = newobject;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Looks up the entry stored under the exact key; nullptr when the name is
// null/empty or not present.
const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
{
    if(!name || '\0' == name[0]){
        return nullptr;
    }
    auto found = objects.find(name);
    return (objects.cend() == found) ? nullptr : &(found->second);
}
|
||||
|
||||
std::string S3ObjList::GetOrgName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return "";
|
||||
}
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
return ps3obj->orgname;
|
||||
}
|
||||
|
||||
std::string S3ObjList::GetNormalizedName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return "";
|
||||
}
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
if(ps3obj->normalname.empty()){
|
||||
return name;
|
||||
}
|
||||
return ps3obj->normalname;
|
||||
}
|
||||
|
||||
std::string S3ObjList::GetETag(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return "";
|
||||
}
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return "";
|
||||
}
|
||||
return ps3obj->etag;
|
||||
}
|
||||
|
||||
bool S3ObjList::IsDir(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(nullptr == (ps3obj = GetS3Obj(name))){
|
||||
return false;
|
||||
}
|
||||
return IS_DIR_OBJ(ps3obj->type);
|
||||
}
|
||||
|
||||
bool S3ObjList::GetLastName(std::string& lastname) const
|
||||
{
|
||||
bool result = false;
|
||||
lastname = "";
|
||||
for(auto iter = objects.cbegin(); iter != objects.cend(); ++iter){
|
||||
if(!iter->second.orgname.empty()){
|
||||
if(lastname.compare(iter->second.orgname) < 0){
|
||||
lastname = iter->second.orgname;
|
||||
result = true;
|
||||
}
|
||||
}else{
|
||||
if(lastname.compare(iter->second.normalname) < 0){
|
||||
lastname = iter->second.normalname;
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Collects the stored keys into a list and/or a key->type map.
// OnlyNormalized skips alias records (entries whose normalname is set);
// CutSlash strips a trailing "/" except for the bare "/" key itself.
// Returns false only when neither output is supplied.
bool S3ObjList::RawGetNames(s3obj_list_t* plist, s3obj_type_map_t* pobjmap, bool OnlyNormalized, bool CutSlash) const
{
    if(!plist && !pobjmap){
        return false;
    }
    for(const auto& pair : objects){
        if(OnlyNormalized && !pair.second.normalname.empty()){
            continue;   // alias record; the normalized entry is listed separately
        }
        std::string key = pair.first;
        if(CutSlash && 1 < key.length() && '/' == *key.rbegin()){
            // A lone "/" key is deliberately left untouched.
            key.erase(key.length() - 1);
        }
        if(plist){
            plist->push_back(key);
        }
        if(pobjmap){
            (*pobjmap)[key] = pair.second.type;
        }
    }
    return true;
}
|
||||
|
||||
// Convenience wrapper around RawGetNames that fills only the name list.
bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const
{
    return RawGetNames(&list, nullptr, OnlyNormalized, CutSlash);
}
|
||||
|
||||
// Convenience wrapper around RawGetNames that fills only the name->type map.
bool S3ObjList::GetNameMap(s3obj_type_map_t& objmap, bool OnlyNormalized, bool CutSlash) const
{
    return RawGetNames(nullptr, &objmap, OnlyNormalized, CutSlash);
}
|
||||
|
||||
typedef std::map<std::string, bool> s3obj_h_t;
|
||||
|
||||
// Appends to `list` every ancestor directory implied by the listed names
// but not itself present in the list. haveSlash controls whether the
// appended directory names carry a trailing "/". Always returns true.
bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
{
    s3obj_h_t h_map;

    // Phase 1: register every listed name (trailing slash removed) as
    // "present", and every ancestor path as a candidate (false) unless it
    // was already registered.
    for(const auto& rawname : list){
        std::string key = rawname;
        if(1 < key.length() && '/' == *key.rbegin()){
            key.erase(key.length() - 1);
        }
        h_map[key] = true;

        for(std::string::size_type pos = key.find_last_of('/'); std::string::npos != pos; pos = key.find_last_of('/')){
            key.erase(pos);
            if(key.empty() || "/" == key){
                break;
            }
            if(h_map.cend() == h_map.find(key)){
                // ancestor directory not listed (yet)
                h_map[key] = false;
            }
        }
    }

    // Phase 2: append the missing ancestor directories to the list.
    for(const auto& pair : h_map){
        if(!pair.second){
            std::string dirname = pair.first;
            if(haveSlash){
                dirname += "/";
            }
            list.push_back(dirname);
        }
    }
    return true;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
86
src/s3objlist.h
Normal file
86
src/s3objlist.h
Normal file
@ -0,0 +1,86 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3OBJLIST_H_
|
||||
#define S3FS_S3OBJLIST_H_
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "types.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
// One record in an S3 object listing. A record is either a "normalized"
// entry (normalname empty) or an alias entry pointing at its normalized
// spelling (normalname set, orgname/etag cleared by insert_normalized).
struct s3obj_entry{
    std::string normalname; // normalized name: if empty, object is normalized name.
    std::string orgname;    // original name: if empty, object is original name.
    std::string etag;
    objtype_t type = objtype_t::UNKNOWN; // only set for directories, UNKNOWN for non-directories.
};
|
||||
|
||||
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
|
||||
typedef std::vector<std::string> s3obj_list_t;
|
||||
typedef std::map<std::string, objtype_t> s3obj_type_map_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3ObjList
|
||||
//-------------------------------------------------------------------
|
||||
class S3ObjList
{
    private:
        s3obj_t objects;                            // key: (possibly normalized) object name
        std::vector<std::string> common_prefixes;   // CommonPrefixes from list results

        // Records an original->normalized alias (see s3objlist.cpp).
        bool insert_normalized(const char* name, const char* normalized, objtype_t type);
        // Exact-key lookup; nullptr for invalid/unknown names.
        const s3obj_entry* GetS3Obj(const char* name) const;
        // Shared worker behind GetNameList/GetNameMap.
        bool RawGetNames(s3obj_list_t* plist, s3obj_type_map_t* pobjmap, bool OnlyNormalized, bool CutSlash) const;

        s3obj_t::const_iterator cbegin() const { return objects.cbegin(); }
        s3obj_t::const_iterator cend() const { return objects.cend(); }

    public:
        bool IsEmpty() const { return objects.empty(); }
        // Adds a name (directory detection via "/", "_$folder$" or is_dir).
        bool insert(const char* name, const char* etag = nullptr, bool is_dir = false);
        std::string GetOrgName(const char* name) const;
        std::string GetNormalizedName(const char* name) const;
        std::string GetETag(const char* name) const;
        const std::vector<std::string>& GetCommonPrefixes() const { return common_prefixes; }
        void AddCommonPrefix(std::string prefix) { common_prefixes.push_back(std::move(prefix)); }
        bool IsDir(const char* name) const;
        bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
        bool GetNameMap(s3obj_type_map_t& objmap, bool OnlyNormalized = true, bool CutSlash = true) const;
        bool GetLastName(std::string& lastname) const;

        // Appends missing ancestor directories implied by the listed names.
        static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
};
|
||||
|
||||
#endif // S3FS_S3OBJLIST_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
250
src/sighandlers.cpp
Normal file
250
src/sighandlers.cpp
Normal file
@ -0,0 +1,250 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <csignal>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
|
||||
#include "psemaphore.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "sighandlers.h"
|
||||
#include "fdcache.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsSignals
|
||||
//-------------------------------------------------------------------
|
||||
std::unique_ptr<S3fsSignals> S3fsSignals::pSingleton;
|
||||
bool S3fsSignals::enableUsr1 = false;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class methods
|
||||
//-------------------------------------------------------------------
|
||||
bool S3fsSignals::Initialize()
|
||||
{
|
||||
if(!S3fsSignals::pSingleton){
|
||||
S3fsSignals::pSingleton = std::make_unique<S3fsSignals>();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Tears down the singleton; the destructor stops the SIGUSR1 worker thread.
bool S3fsSignals::Destroy()
{
    S3fsSignals::pSingleton = nullptr;
    return true;
}
|
||||
|
||||
// Signal handler for SIGUSR1: wakes the cache-check worker thread by
// releasing its semaphore. The heavy work happens in CheckCacheWorker,
// not in signal context.
// NOTE(review): the S3FS_PRN_ERR logging macro is invoked from signal
// context here — its async-signal safety should be confirmed.
void S3fsSignals::HandlerUSR1(int sig)
{
    if(SIGUSR1 != sig){
        S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
        return;
    }

    S3fsSignals* pSigobj = S3fsSignals::get();
    if(!pSigobj){
        S3FS_PRN_ERR("S3fsSignals object is not initialized.");
        return;
    }

    if(!pSigobj->WakeupUsr1Thread()){
        S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
        return;
    }
}
|
||||
|
||||
// Enables SIGUSR1-driven cache checking, writing results to `path`
// (nullptr means stdout). Fails when lseek lacks SEEK_DATA/SEEK_HOLE
// support or the output file cannot be set. This only raises the enable
// flag; the handler and worker thread are created later, in the
// S3fsSignals constructor.
bool S3fsSignals::SetUsr1Handler(const char* path)
{
    if(!FdManager::HaveLseekHole()){
        S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function.");
        return false;
    }

    // set output file
    if(!FdManager::SetCacheCheckOutput(path)){
        S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
        return false;
    }

    S3fsSignals::enableUsr1 = true;

    return true;
}
|
||||
|
||||
// Worker-thread body for SIGUSR1 cache checking. Blocks on the semaphore
// until HandlerUSR1 releases it, then runs a full cache check. Exits when
// enableUsr1 is cleared (by DestroyUsr1Handler, which also releases the
// semaphore once to unblock the wait).
void S3fsSignals::CheckCacheWorker(Semaphore* pSem)
{
    if(!pSem){
        return;
    }
    if(!S3fsSignals::enableUsr1){
        return;
    }

    // wait and loop
    while(S3fsSignals::enableUsr1){
        // wait
        pSem->acquire();

        // cppcheck-suppress unmatchedSuppression
        // cppcheck-suppress knownConditionTrueFalse
        if(!S3fsSignals::enableUsr1){
            break; // asap
        }

        // check all cache
        if(!FdManager::get()->CheckAllCache()){
            S3FS_PRN_ERR("Processing failed due to some problem.");
        }

        // do not allow request queuing
        // (drain any signals that arrived while the check was running)
        while(pSem->try_acquire());
    }
}
|
||||
|
||||
void S3fsSignals::HandlerUSR2(int sig)
|
||||
{
|
||||
if(SIGUSR2 == sig){
|
||||
S3fsLog::BumpupLogLevel();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
|
||||
}
|
||||
}
|
||||
|
||||
// Installs HandlerUSR2 for SIGUSR2 (SA_RESTART so interrupted syscalls
// resume). Returns false when sigaction fails.
bool S3fsSignals::InitUsr2Handler()
{
    struct sigaction sa{};
    sa.sa_handler = S3fsSignals::HandlerUSR2;
    sa.sa_flags   = SA_RESTART;
    return (0 == sigaction(SIGUSR2, &sa, nullptr));
}
|
||||
|
||||
void S3fsSignals::HandlerHUP(int sig)
|
||||
{
|
||||
if(SIGHUP == sig){
|
||||
S3fsLog::ReopenLogfile();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGHUP received signal(%d)", sig);
|
||||
}
|
||||
}
|
||||
|
||||
// Installs HandlerHUP for SIGHUP (SA_RESTART so interrupted syscalls
// resume). Returns false when sigaction fails.
bool S3fsSignals::InitHupHandler()
{
    struct sigaction sa{};
    sa.sa_handler = S3fsSignals::HandlerHUP;
    sa.sa_flags   = SA_RESTART;
    return (0 == sigaction(SIGHUP, &sa, nullptr));
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods
|
||||
//-------------------------------------------------------------------
|
||||
// Installs all signal handlers. SIGUSR1 (cache check) is only wired up
// when SetUsr1Handler() enabled it beforehand. Handler-setup failures are
// logged but never fatal.
S3fsSignals::S3fsSignals()
{
    if(S3fsSignals::enableUsr1){
        if(!InitUsr1Handler()){
            S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
        }
    }
    if(!S3fsSignals::InitUsr2Handler()){
        S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
    }
    if(!S3fsSignals::InitHupHandler()){
        S3FS_PRN_ERR("failed to initialize SIGHUP handler for reopen log file, but continue...");
    }
}
|
||||
|
||||
// Stops the SIGUSR1 worker thread (if it was started); failures are
// logged but never fatal.
S3fsSignals::~S3fsSignals()
{
    if(S3fsSignals::enableUsr1){
        if(!DestroyUsr1Handler()){
            S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
        }
    }
}
|
||||
|
||||
// Starts the SIGUSR1 worker thread and installs the signal handler.
// Order matters: the semaphore and thread are created first so the
// handler never fires before the worker exists; if sigaction fails the
// worker is torn down again via DestroyUsr1Handler.
bool S3fsSignals::InitUsr1Handler()
{
    if(pThreadUsr1 || pSemUsr1){
        S3FS_PRN_ERR("Already run thread for SIGUSR1");
        return false;
    }

    // create thread
    // (the raw semaphore pointer is handed to the thread before ownership
    // moves into the member, so both refer to the same object)
    auto pSemUsr1_tmp = std::make_unique<Semaphore>(0);
    pThreadUsr1 = std::make_unique<std::thread>(S3fsSignals::CheckCacheWorker, pSemUsr1_tmp.get());
    pSemUsr1 = std::move(pSemUsr1_tmp);

    // set handler
    struct sigaction sa{};
    sa.sa_handler = S3fsSignals::HandlerUSR1;
    sa.sa_flags = SA_RESTART;
    if(0 != sigaction(SIGUSR1, &sa, nullptr)){
        S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
        DestroyUsr1Handler();
        return false;
    }

    return true;
}
|
||||
|
||||
// Stops the SIGUSR1 worker: clears the enable flag, releases the
// semaphore once so the worker observes the flag and exits, then joins
// the thread and frees both resources. Returns false if the worker was
// never started.
bool S3fsSignals::DestroyUsr1Handler()
{
    if(!pThreadUsr1 || !pSemUsr1){
        return false;
    }
    // for thread exit
    S3fsSignals::enableUsr1 = false;

    // wakeup thread
    pSemUsr1->release();

    // wait for thread exiting
    pThreadUsr1->join();
    pSemUsr1.reset();
    pThreadUsr1.reset();

    return true;
}
|
||||
|
||||
bool S3fsSignals::WakeupUsr1Thread()
|
||||
{
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
|
||||
return false;
|
||||
}
|
||||
pSemUsr1->release();
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
80
src/sighandlers.h
Normal file
80
src/sighandlers.h
Normal file
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SIGHANDLERS_H_
|
||||
#define S3FS_SIGHANDLERS_H_
|
||||
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
|
||||
#include "psemaphore.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsSignals
|
||||
//----------------------------------------------
|
||||
// Singleton owning the process signal handlers (SIGUSR1 cache check with
// a dedicated worker thread, SIGUSR2 log-level bump, SIGHUP log reopen).
class S3fsSignals
{
    private:
        static std::unique_ptr<S3fsSignals> pSingleton;
        static bool enableUsr1;     // set by SetUsr1Handler before construction

        std::unique_ptr<std::thread> pThreadUsr1;   // cache-check worker
        std::unique_ptr<Semaphore>   pSemUsr1;      // wakes the worker

    protected:
        static S3fsSignals* get() { return pSingleton.get(); }

        static void HandlerUSR1(int sig);
        static void CheckCacheWorker(Semaphore* pSem);

        static void HandlerUSR2(int sig);
        static bool InitUsr2Handler();

        static void HandlerHUP(int sig);
        static bool InitHupHandler();

        bool InitUsr1Handler();
        bool DestroyUsr1Handler();
        bool WakeupUsr1Thread();

    public:
        S3fsSignals();
        ~S3fsSignals();
        S3fsSignals(const S3fsSignals&) = delete;
        S3fsSignals(S3fsSignals&&) = delete;
        S3fsSignals& operator=(const S3fsSignals&) = delete;
        S3fsSignals& operator=(S3fsSignals&&) = delete;

        static bool Initialize();
        static bool Destroy();

        // Must be called before Initialize() to enable SIGUSR1 handling.
        static bool SetUsr1Handler(const char* path);
};
|
||||
|
||||
#endif // S3FS_SIGHANDLERS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -17,354 +17,650 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <utility>
|
||||
#include <fcntl.h>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_logger.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
static const char hexAlphabet[] = "0123456789ABCDEF";
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16)
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
std::string str(const struct timespec& value)
|
||||
{
|
||||
if(!str || '\0' == *str){
|
||||
return 0;
|
||||
}
|
||||
off_t result;
|
||||
bool chk_space;
|
||||
bool chk_base16_prefix;
|
||||
for(result = 0, chk_space = false, chk_base16_prefix = false; '\0' != *str; str++){
|
||||
// check head space
|
||||
if(!chk_space && isspace(*str)){
|
||||
continue;
|
||||
}else if(!chk_space){
|
||||
chk_space = true;
|
||||
std::ostringstream s;
|
||||
|
||||
if(UTIME_OMIT == value.tv_nsec){
|
||||
s << "UTIME_OMIT";
|
||||
}else if(UTIME_NOW == value.tv_nsec){
|
||||
s << "UTIME_NOW";
|
||||
}else{
|
||||
s << value.tv_sec;
|
||||
if(value.tv_nsec != 0){
|
||||
s << "." << std::setfill('0') << std::setw(9) << value.tv_nsec;
|
||||
}
|
||||
}
|
||||
// check prefix for base 16
|
||||
if(!chk_base16_prefix){
|
||||
chk_base16_prefix = true;
|
||||
if('0' == *str && ('x' == str[1] || 'X' == str[1])){
|
||||
is_base_16 = true;
|
||||
str++;
|
||||
continue;
|
||||
}
|
||||
return s.str();
|
||||
}
|
||||
|
||||
// This source code is from https://gist.github.com/jeremyfromearth/5694aa3a66714254752179ecf3c95582 .
|
||||
const char* s3fs_strptime(const char* s, const char* f, struct tm* tm)
|
||||
{
|
||||
std::istringstream input(s);
|
||||
// TODO: call to setlocale required?
|
||||
input.imbue(std::locale(setlocale(LC_ALL, nullptr)));
|
||||
input >> std::get_time(tm, f);
|
||||
if (input.fail()) {
|
||||
return nullptr;
|
||||
}
|
||||
// check like isalnum and set data
|
||||
result *= (is_base_16 ? 16 : 10);
|
||||
if('0' <= *str || '9' < *str){
|
||||
result += static_cast<off_t>(*str - '0');
|
||||
}else if(is_base_16){
|
||||
if('A' <= *str && *str <= 'F'){
|
||||
result += static_cast<off_t>(*str - 'A' + 0x0a);
|
||||
}else if('a' <= *str && *str <= 'f'){
|
||||
result += static_cast<off_t>(*str - 'a' + 0x0a);
|
||||
}else{
|
||||
return s + input.tellg();
|
||||
}
|
||||
|
||||
// Strictly parses `str` as an off_t in the given base via strtoll.
// Returns false (leaving *value untouched) for null arguments, empty
// input, trailing garbage, or out-of-range values; true on success.
bool s3fs_strtoofft(off_t* value, const char* str, int base)
{
    if(nullptr == value || nullptr == str){
        return false;
    }
    errno = 0;
    char* endptr = nullptr;
    const long long parsed = strtoll(str, &endptr, base);

    // Empty input or trailing garbage both leave endptr short of the end.
    if(endptr == str || '\0' != *endptr){
        return false;
    }
    // strtoll saturates to LLONG_MIN/LLONG_MAX and sets ERANGE on overflow.
    if((LLONG_MIN == parsed || LLONG_MAX == parsed) && ERANGE == errno){
        return false;
    }

    *value = parsed;
    return true;
}
|
||||
|
||||
off_t cvt_strtoofft(const char* str, int base)
|
||||
{
|
||||
off_t result = 0;
|
||||
if(!s3fs_strtoofft(&result, str, base)){
|
||||
S3FS_PRN_WARN("something error is occurred in convert std::string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Returns a copy of the string with every character lower-cased.
std::string lower(std::string s)
{
    for(auto& ch : s){
        ch = static_cast<char>(::tolower(ch));
    }
    return s;
}
|
||||
|
||||
// Returns a copy of the string with every character upper-cased.
std::string upper(std::string s)
{
    for(auto& ch : s){
        ch = static_cast<char>(::toupper(ch));
    }
    return s;
}
|
||||
|
||||
// Strips every leading character contained in `t` (defaults to SPACES in
// the declaration). A string made up entirely of such characters becomes "".
std::string trim_left(std::string d, const char *t /* = SPACES */)
{
    const std::string::size_type first = d.find_first_not_of(t);
    d.erase(0, first);  // first == npos clears the whole string
    return d;
}
|
||||
|
||||
std::string trim_right(std::string d, const char *t /* = SPACES */)
|
||||
{
|
||||
std::string::size_type i(d.find_last_not_of(t));
|
||||
if(i == std::string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return 0;
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
string lower(string s)
|
||||
std::string trim(std::string s, const char *t /* = SPACES */)
|
||||
{
|
||||
// change each character of the string to lower case
|
||||
for(unsigned int i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
return trim_left(trim_right(std::move(s), t), t);
|
||||
}
|
||||
|
||||
string trim_left(const string &s, const string &t /* = SPACES */)
|
||||
std::string peeloff(std::string s)
|
||||
{
|
||||
string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
}
|
||||
|
||||
string trim_right(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
string::size_type i(d.find_last_not_of(t));
|
||||
if(i == string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
string trim(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return trim_left(trim_right(d, t), t);
|
||||
}
|
||||
|
||||
/**
|
||||
* urlEncode a fuse path,
|
||||
* taking into special consideration "/",
|
||||
* otherwise regular urlEncode.
|
||||
*/
|
||||
string urlEncode(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (unsigned i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9')) {
|
||||
result += c;
|
||||
} else {
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
if(s.size() < 2 || *s.cbegin() != '"' || *s.rbegin() != '"'){
|
||||
return s;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
s.erase(s.size() - 1);
|
||||
s.erase(0, 1);
|
||||
return s;
|
||||
}
|
||||
|
||||
/**
|
||||
* urlEncode a fuse path,
|
||||
* taking into special consideration "/",
|
||||
* otherwise regular urlEncode.
|
||||
*/
|
||||
string urlEncode2(const string &s)
|
||||
//
|
||||
// Three url encode functions
|
||||
//
|
||||
// urlEncodeGeneral: A general URL encoding function.
|
||||
// urlEncodePath : A function that URL encodes by excluding the path
|
||||
// separator('/').
|
||||
// urlEncodeQuery : A function that does URL encoding by excluding
|
||||
// some characters('=', '&' and '%').
|
||||
// This function can be used when the target string
|
||||
// contains already URL encoded strings. It also
|
||||
// excludes the character () used in query strings.
|
||||
// Therefore, it is a function to use as URL encoding
|
||||
// for use in query strings.
|
||||
//
|
||||
static constexpr char encode_general_except_chars[] = ".-_~"; // For general URL encode
|
||||
static constexpr char encode_path_except_chars[] = ".-_~/"; // For fuse(included path) URL encode
|
||||
static constexpr char encode_query_except_chars[] = ".-_~=&%"; // For query params(and encoded string)
|
||||
|
||||
static std::string rawUrlEncode(const std::string &s, const char* except_chars)
|
||||
{
|
||||
string result;
|
||||
for (unsigned i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
|| c == '%'
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9')) {
|
||||
result += c;
|
||||
} else {
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(c) % 16];
|
||||
std::string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
unsigned char c = s[i];
|
||||
if((except_chars && nullptr != strchr(except_chars, c)) ||
|
||||
(c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') )
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += s3fs_hex_upper(&c, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
string urlDecode(const string& s)
|
||||
std::string urlEncodeGeneral(const std::string &s)
|
||||
{
|
||||
string result;
|
||||
for(unsigned i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
char ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += ch;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
return rawUrlEncode(s, encode_general_except_chars);
|
||||
}
|
||||
|
||||
bool takeout_str_dquart(string& str)
|
||||
std::string urlEncodePath(const std::string &s)
|
||||
{
|
||||
size_t pos;
|
||||
return rawUrlEncode(s, encode_path_except_chars);
|
||||
}
|
||||
|
||||
// '"' for start
|
||||
if(string::npos != (pos = str.find_first_of("\""))){
|
||||
str = str.substr(pos + 1);
|
||||
std::string urlEncodeQuery(const std::string &s)
|
||||
{
|
||||
return rawUrlEncode(s, encode_query_except_chars);
|
||||
}
|
||||
|
||||
// '"' for end
|
||||
if(string::npos == (pos = str.find_last_of("\""))){
|
||||
return false;
|
||||
std::string urlDecode(const std::string& s)
|
||||
{
|
||||
std::string result;
|
||||
for(size_t i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
int ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += static_cast<char>(ch);
|
||||
}
|
||||
}
|
||||
str = str.substr(0, pos);
|
||||
if(string::npos != str.find_first_of("\"")){
|
||||
return false;
|
||||
return result;
|
||||
}
|
||||
|
||||
//
// Strip one pair of surrounding double quotes from str, in place.
// A string without any quote is left untouched and reported as success.
// Returns false when an opening quote has no closing quote, or when
// additional quotes remain between the outer pair.
//
bool takeout_str_dquart(std::string& str)
{
    size_t pos;

    // opening '"'
    if(std::string::npos != (pos = str.find_first_of('\"'))){
        str.erase(0, pos + 1);

        // closing '"'
        if(std::string::npos == (pos = str.find_last_of('\"'))){
            return false;
        }
        str.erase(pos);

        // any quote left in between is malformed input
        if(std::string::npos != str.find_first_of('\"')){
            return false;
        }
    }
    return true;
}
|
||||
|
||||
//
|
||||
// ex. target="http://......?keyword=value&..."
|
||||
//
|
||||
bool get_keyword_value(string& target, const char* keyword, string& value)
|
||||
//
// Extract the value of "<keyword>=<value>" from a query-style string,
// e.g. target="http://......?keyword=value&...".
// The value runs up to the next '&' separator, or to the end of the string.
// Returns false when keyword is null, not found, or not followed by '='.
//
bool get_keyword_value(const std::string& target, const char* keyword, std::string& value)
{
    if(!keyword){
        return false;
    }
    size_t spos;
    size_t epos;
    if(std::string::npos == (spos = target.find(keyword))){
        return false;
    }
    spos += strlen(keyword);
    // Bounds check before indexing: the keyword may sit at the very end of
    // the string, in which case the old code read one past the end.
    if(target.length() <= spos || '=' != target[spos]){
        return false;
    }
    spos++;
    if(std::string::npos == (epos = target.find('&', spos))){
        value = target.substr(spos);
    }else{
        value = target.substr(spos, (epos - spos));
    }
    return true;
}
|
||||
|
||||
/**
|
||||
* Returns the current date
|
||||
* in a format suitable for a HTTP request header.
|
||||
*/
|
||||
/**
 * Returns the current UTC date/time formatted for an HTTP request
 * header, e.g. "Mon, 02 Jan 2006 15:04:05 GMT".
 */
std::string get_date_rfc850()
{
    char buf[100];
    time_t t = time(NULL);
    // gmtime() returns a pointer to shared static storage and is not
    // thread safe; use the reentrant gmtime_r() instead (the rest of
    // this file already does the same).
    struct tm res;
    strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
    return buf;
}
|
||||
|
||||
void get_date_sigv3(string& date, string& date8601)
|
||||
{
|
||||
time_t tm = time(NULL);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
}
|
||||
|
||||
string get_date_string(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime(&tm));
|
||||
return buf;
|
||||
}
|
||||
|
||||
string get_date_iso8601(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime(&tm));
|
||||
return buf;
|
||||
}
|
||||
|
||||
// Convert a byte buffer to its lowercase hexadecimal representation
// (two characters per input byte).
std::string s3fs_hex(const unsigned char* input, size_t length)
{
    std::string encoded;
    for(size_t pos = 0; pos < length; ++pos){
        char digits[3];
        snprintf(digits, sizeof(digits), "%02x", input[pos]);
        encoded += digits;
    }
    return encoded;
}
|
||||
|
||||
// Base64-encode a byte buffer (with '=' padding).
// Returns a malloc()ed, NUL-terminated string that the CALLER must free(),
// or NULL when input is null/empty or allocation fails.
char* s3fs_base64(const unsigned char* input, size_t length)
{
    static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";  // index 64 ('=') is the padding character
    char* result;

    if(!input || 0 >= length){
        return NULL;
    }
    // 4 output characters per started group of 3 input bytes, plus NUL.
    if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
        return NULL; // ENOMEM
    }

    unsigned char parts[4];
    size_t rpos;
    size_t wpos;
    for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
        // Split up to 3 input bytes into four 6-bit indexes; the value
        // 0x40 (= 64) selects the '=' padding character for bytes that
        // are missing at the end of the input.
        parts[0] = (input[rpos] & 0xfc) >> 2;
        parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
        parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
        parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;

        result[wpos++] = base[parts[0]];
        result[wpos++] = base[parts[1]];
        result[wpos++] = base[parts[2]];
        result[wpos++] = base[parts[3]];
    }
    result[wpos] = '\0';

    return result;
}
|
||||
|
||||
// Map one base64 character to its 6-bit value.
// '=' (padding) maps to 64; any other non-alphabet character maps to
// UCHAR_MAX so callers can detect invalid input.
inline unsigned char char_decode64(const char ch)
{
    if('A' <= ch && ch <= 'Z'){
        return static_cast<unsigned char>(ch - 'A');
    }
    if('a' <= ch && ch <= 'z'){
        return static_cast<unsigned char>(ch - 'a' + 26);
    }
    if('0' <= ch && ch <= '9'){
        return static_cast<unsigned char>(ch - '0' + 52);
    }
    if('+' == ch){
        return 62;
    }
    if('/' == ch){
        return 63;
    }
    if('=' == ch){
        return 64;
    }
    return UCHAR_MAX; // not a base64 character
}
|
||||
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
{
|
||||
unsigned char* result;
|
||||
if(!input || 0 == strlen(input) || !plength){
|
||||
return NULL;
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
|
||||
return NULL; // ENOMEM
|
||||
}
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t input_len = strlen(input);
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
if(!keyword){
|
||||
return false;
|
||||
}
|
||||
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
size_t spos;
|
||||
size_t epos;
|
||||
if(std::string::npos == (spos = target.find(keyword))){
|
||||
return false;
|
||||
}
|
||||
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
*plength = wpos;
|
||||
return result;
|
||||
spos += strlen(keyword);
|
||||
if('=' != target[spos]){
|
||||
return false;
|
||||
}
|
||||
spos++;
|
||||
if(std::string::npos == (epos = target.find('&', spos))){
|
||||
value = target.substr(spos);
|
||||
}else{
|
||||
value = target.substr(spos, (epos - spos));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Returns the current date
|
||||
// in a format suitable for a HTTP request header.
|
||||
//
|
||||
//
// Current UTC time formatted for an HTTP Date request header,
// e.g. "Mon, 02 Jan 2006 15:04:05 GMT".
//
std::string get_date_rfc850()
{
    struct tm gmt;
    time_t now = time(nullptr);
    char datebuf[100];
    strftime(datebuf, sizeof(datebuf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&now, &gmt));
    return datebuf;
}
|
||||
|
||||
void get_date_sigv3(std::string& date, std::string& date8601)
|
||||
{
|
||||
time_t tm = time(nullptr);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
}
|
||||
|
||||
// Format a unix time as "YYYYMMDD" in UTC.
std::string get_date_string(time_t tm)
{
    struct tm gmt;
    char datebuf[100];
    strftime(datebuf, sizeof(datebuf), "%Y%m%d", gmtime_r(&tm, &gmt));
    return datebuf;
}
|
||||
|
||||
// Format a unix time as basic ISO 8601 "YYYYMMDDThhmmssZ" in UTC.
std::string get_date_iso8601(time_t tm)
{
    struct tm gmt;
    char datebuf[100];
    strftime(datebuf, sizeof(datebuf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &gmt));
    return datebuf;
}
|
||||
|
||||
//
// Parse an ISO 8601 timestamp ("YYYY-MM-DDThh:mm:ss...") into unix time.
// Returns false for a null pointer or when the string does not match the
// expected format at all.
//
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
{
    if(!pdate){
        return false;
    }

    struct tm tm;
    // s3fs_strptime returns its input pointer unchanged when nothing was
    // consumed, which signals a parse failure here.
    const char* prest = s3fs_strptime(pdate, "%Y-%m-%dT%T", &tm);
    if(prest == pdate){
        // wrong format
        return false;
    }
    // NOTE(review): mktime() interprets the parsed fields in the LOCAL
    // timezone, not UTC, even though ISO 8601 "...Z" timestamps are UTC —
    // confirm callers expect this (timegm() would be the UTC variant).
    unixtime = mktime(&tm);
    return true;
}
|
||||
|
||||
//
|
||||
// Convert to unixtime from std::string which formatted by following:
|
||||
// "12Y12M12D12h12m12s", "86400s", "9h30m", etc
|
||||
//
|
||||
//
// Convert a duration option string such as "12Y12M12D12h12m12s",
// "86400s" or "9h30m" into seconds.
// Units may each appear at most once and must appear in
// Y > M > D > h > m > s order, each preceded by a number; the string
// must end with a unit (a trailing bare number is rejected).
// Y and M use calendar averages (365 days/year, 30 days/month).
//
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
{
    if(!argv){
        return false;
    }
    unixtime = 0;

    time_t value         = 0;     // number currently being accumulated
    bool   pending_value = true;  // true while the last char was (part of) a number
    int    last_rank     = 0;     // rank of the most recent unit, to enforce ordering

    for(const char* pc = argv; *pc; ++pc){
        const char c = *pc;
        if('0' <= c && c <= '9'){
            value = value * 10 + static_cast<time_t>(c - '0');
            pending_value = true;
            continue;
        }
        if(!pending_value){
            return false; // unit character without a preceding number
        }
        int    rank;
        time_t seconds_per_unit;
        switch(c){
            case 'Y': rank = 1; seconds_per_unit = 60 * 60 * 24 * 365; break; // average year
            case 'M': rank = 2; seconds_per_unit = 60 * 60 * 24 * 30;  break; // average month
            case 'D': rank = 3; seconds_per_unit = 60 * 60 * 24;       break;
            case 'h': rank = 4; seconds_per_unit = 60 * 60;            break;
            case 'm': rank = 5; seconds_per_unit = 60;                 break;
            case 's': rank = 6; seconds_per_unit = 1;                  break;
            default:  return false; // unknown character
        }
        if(rank <= last_rank){
            return false; // out-of-order or repeated unit
        }
        unixtime += (value * seconds_per_unit);
        last_rank     = rank;
        value         = 0;
        pending_value = false;
    }
    // A trailing bare number (or an empty string) has no unit: reject.
    return !pending_value;
}
|
||||
|
||||
// Hex-encode a byte buffer using the supplied 16-character digit table
// (two output characters per input byte).
static std::string s3fs_hex(const unsigned char* input, size_t length, const char *hexAlphabet)
{
    std::string encoded;
    encoded.reserve(length * 2);
    for(size_t pos = 0; pos < length; ++pos){
        const unsigned char byte = input[pos];
        encoded += hexAlphabet[byte >> 4];
        encoded += hexAlphabet[byte & 0x0f];
    }
    return encoded;
}
|
||||
|
||||
std::string s3fs_hex_lower(const unsigned char* input, size_t length)
|
||||
{
|
||||
return s3fs_hex(input, length, "0123456789abcdef");
|
||||
}
|
||||
|
||||
std::string s3fs_hex_upper(const unsigned char* input, size_t length)
|
||||
{
|
||||
return s3fs_hex(input, length, "0123456789ABCDEF");
|
||||
}
|
||||
|
||||
// Base64-encode a byte buffer (with '=' padding).
std::string s3fs_base64(const unsigned char* input, size_t length)
{
    static constexpr char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";

    std::string encoded;
    encoded.reserve(((length + 2) / 3) * 4 + 1);

    for(size_t rpos = 0; rpos < length; rpos += 3){
        const bool have1 = (rpos + 1) < length;
        const bool have2 = (rpos + 2) < length;
        const unsigned char b0 = input[rpos];
        const unsigned char b1 = have1 ? input[rpos + 1] : 0x00;
        const unsigned char b2 = have2 ? input[rpos + 2] : 0x00;

        // Split up to 3 bytes into four 6-bit indexes; 0x40 (= 64)
        // selects the '=' padding character for missing bytes.
        encoded += base[(b0 & 0xfc) >> 2];
        encoded += base[((b0 & 0x03) << 4) | ((b1 & 0xf0) >> 4)];
        encoded += base[have1 ? (((b1 & 0x0f) << 2) | ((b2 & 0xc0) >> 6)) : 0x40];
        encoded += base[have2 ? (b2 & 0x3f) : 0x40];
    }

    return encoded;
}
|
||||
|
||||
// Map one base64 character to its 6-bit value; '=' (padding) maps to 64
// and any other non-alphabet character maps to UCHAR_MAX.
static constexpr unsigned char char_decode64(const char ch)
{
    return ('A' <= ch && ch <= 'Z') ? static_cast<unsigned char>(ch - 'A')      :
           ('a' <= ch && ch <= 'z') ? static_cast<unsigned char>(ch - 'a' + 26) :
           ('0' <= ch && ch <= '9') ? static_cast<unsigned char>(ch - '0' + 52) :
           ('+' == ch)              ? static_cast<unsigned char>(62)            :
           ('/' == ch)              ? static_cast<unsigned char>(63)            :
           ('=' == ch)              ? static_cast<unsigned char>(64)            :
                                      static_cast<unsigned char>(UCHAR_MAX);
}
|
||||
|
||||
std::string s3fs_decode64(const char* input, size_t input_len)
|
||||
{
|
||||
std::string result;
|
||||
result.reserve(input_len / 4 * 3);
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
for(rpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result += static_cast<char>(((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03));
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
}
|
||||
result += static_cast<char>(((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f));
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
}
|
||||
result += static_cast<char>(((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// detect and rewrite invalid utf8. We take invalid bytes
|
||||
// and encode them into a private region of the unicode
|
||||
// space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
// it is necessary because S3 validates the utf8 used for identifiers for
|
||||
// correctness, while some clients may provide invalid utf, notably
|
||||
// windows using cp1252.
|
||||
//
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8FF
// is a private use area; we use the start of this range.
static constexpr unsigned int escape_base = 0xe000;

// Encode bytes into "wobbly" UTF-8 (WTF-8): valid UTF-8 passes through
// unchanged, and every invalid byte is mapped into the private range
// [escape_base, escape_base + 0xff], which always becomes a three byte
// UTF-8 sequence.
// 'result' can be null (probe-only mode). Returns true if a transform
// was needed, i.e. the input was not already valid UTF-8.
bool s3fs_wtf8_encode(const char *s, std::string *result)
{
    bool invalid = false;

    // Pass valid utf8 code through
    for (; *s; s++) {
        const unsigned char c = *s;

        // single byte encoding
        if (c <= 0x7f) {
            if (result) {
                *result += c;
            }
            continue;
        }

        // otherwise, it must be one of the valid start bytes
        if ( c >= 0xc2 && c <= 0xf5 ) {
            // two byte encoding
            // don't need bounds check, std::string is zero terminated
            if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
                // all two byte encodings starting higher than c1 are valid
                if (result) {
                    *result += c;
                    *result += *(++s);
                }
                continue;
            }
            // three byte encoding
            if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
                const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
                // [FIX] reject the whole UTF-16 surrogate range U+D800..U+DFFF;
                // the previous upper bound 0xd8ff let U+D900..U+DFFF through
                // even though surrogates are never valid in UTF-8.
                if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xdfff)) {
                    // not overlong and not a surrogate
                    if (result) {
                        *result += c;
                        *result += *(++s);
                        *result += *(++s);
                    }
                    continue;
                }
            }
            // four byte encoding
            if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
                const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
                if (code >= 0x10'000 && code <= 0x10f'fff) {
                    // not overlong and in defined unicode space
                    if (result) {
                        *result += c;
                        *result += *(++s);
                        *result += *(++s);
                        *result += *(++s);
                    }
                    continue;
                }
            }
        }
        // Invalid utf8 byte: convert it to a three byte sequence in the
        // private area (0xE000 - 0xF8FF).
        invalid = true;
        if (result) {
            unsigned escape = escape_base + c;
            *result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
            *result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
            *result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
        }
    }
    return invalid;
}
|
||||
|
||||
std::string s3fs_wtf8_encode(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// The reverse operation, turn encoded bytes back into their original values
|
||||
// The code assumes that we map to a three-byte code point.
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result)
|
||||
{
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if(result){
|
||||
*result += static_cast<char>(code - escape_base);
|
||||
}
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
}
|
||||
return encoded;
|
||||
}
|
||||
|
||||
std::string s3fs_wtf8_decode(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// Encode only CR('\r'=0x0D) and it also encodes the '%' character accordingly.
|
||||
//
|
||||
// The xmlReadMemory() function in libxml2 replaces CR code with LF code('\n'=0x0A)
|
||||
// due to the XML specification.
|
||||
// s3fs uses libxml2 to parse the S3 response, and this automatic substitution
|
||||
// of libxml2 may change the object name(file/dir name). Therefore, before passing
|
||||
// the response to the xmlReadMemory() function, we need the string encoded by
|
||||
// this function.
|
||||
//
|
||||
// [NOTE]
|
||||
// Normally the quotes included in the XML content data are HTML encoded(""").
|
||||
// Encoding for CR can also be HTML encoded as binary code (ex, " "), but
|
||||
// if the same string content(as file name) as this encoded string exists, the
|
||||
// original string cannot be distinguished whichever encoded or not encoded.
|
||||
// Therefore, CR is encoded in the same manner as URL encoding("%0A").
|
||||
// And it is assumed that there is no CR code in the S3 response tag etc.(actually
|
||||
// it shouldn't exist)
|
||||
//
|
||||
//
// Encode CR ('\r') as "%0D" and '%' as "%45" so that libxml2's automatic
// CR -> LF substitution cannot corrupt object names embedded in an S3
// XML response. get_decoded_cr_code() is the inverse.
// A null pointer yields an empty string.
//
std::string get_encoded_cr_code(const char* pbase)
{
    std::string encoded;
    if(!pbase){
        return encoded;
    }
    const std::string base(pbase);
    const size_t      baselen = base.length();
    size_t            pos     = 0;
    size_t            hit;
    while(pos < baselen && std::string::npos != (hit = base.find_first_of("%\r", pos))){
        encoded.append(base, pos, hit - pos);
        // find_first_of only matches '%' or '\r'
        encoded += ('%' == base[hit]) ? "%45" : "%0D";
        pos = hit + 1;
    }
    if(pos < baselen){
        encoded += base.substr(pos);
    }
    return encoded;
}
|
||||
|
||||
//
|
||||
// Decode a string encoded with get_encoded_cr_code().
|
||||
//
|
||||
//
// Decode a string produced by get_encoded_cr_code():
// "%45" -> '%', "%0D" -> '\r', "%%" -> '%'; any other '%' is kept
// literally. A null pointer yields an empty string.
//
std::string get_decoded_cr_code(const char* pencode)
{
    std::string decoded;
    if(!pencode){
        return decoded;
    }
    const std::string encoded(pencode);
    const size_t      enclen = encoded.length();
    size_t            pos    = 0;
    size_t            hit;
    while(pos < enclen && std::string::npos != (hit = encoded.find('%', pos))){
        decoded.append(encoded, pos, hit - pos);
        if((hit + 2) < enclen && 0 == encoded.compare(hit, 3, "%45")){
            decoded += '%';
            pos = hit + 3;
        }else if((hit + 2) < enclen && 0 == encoded.compare(hit, 3, "%0D")){
            decoded += '\r';
            pos = hit + 3;
        }else if((hit + 1) < enclen && 0 == encoded.compare(hit, 2, "%%")){
            decoded += '%';
            pos = hit + 2;
        }else{
            // not a recognized escape: keep the '%' as-is
            decoded += '%';
            pos = hit + 1;
        }
    }
    if(pos < enclen){
        decoded += encoded.substr(pos);
    }
    return decoded;
}
|
||||
|
||||
/*
|
||||
@ -372,6 +668,6 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -17,47 +17,121 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_STRING_UTIL_H_
|
||||
#define S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
* A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <ctime>
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <strings.h>
|
||||
|
||||
#define SPACES " \t\r\n"
|
||||
#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
|
||||
//
|
||||
// A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
static constexpr char SPACES[] = " \t\r\n";
|
||||
|
||||
// Convert any streamable value to its std::string representation via
// std::stringstream (a pre-C++11 stand-in for std::to_string).
template<typename T> std::string str(T value) {
    std::stringstream s;
    s << value;
    return s.str();
}
|
||||
//-------------------------------------------------------------------
|
||||
// Inline functions
|
||||
//-------------------------------------------------------------------
|
||||
// Case-insensitive comparison helper for header names and similar tokens.
// Stores its own copy of the string: the previous implementation kept
// only the raw c_str() pointer, which dangled whenever the object was
// constructed from a temporary std::string.
class CaseInsensitiveStringView {
    public:
        explicit CaseInsensitiveStringView(const std::string &str) : str(str) {}
        // Case-insensitive equality against a C string.
        bool operator==(const char *other) const { return strcasecmp(str.c_str(), other) == 0; }
        // True when this string starts with prefix, ignoring case.
        bool is_prefix(const char *prefix) const { return strncasecmp(str.c_str(), prefix, strlen(prefix)) == 0; }
    private:
        std::string str;
};
|
||||
static inline bool is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
|
||||
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
|
||||
//-------------------------------------------------------------------
|
||||
// Macros(WTF8)
|
||||
//-------------------------------------------------------------------
|
||||
#define WTF8_ENCODE(ARG) \
|
||||
std::string ARG##_buf; \
|
||||
const char * ARG = _##ARG; \
|
||||
if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \
|
||||
s3fs_wtf8_encode( _##ARG, &ARG##_buf); \
|
||||
ARG = ARG##_buf.c_str(); \
|
||||
}
|
||||
|
||||
std::string trim_left(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim_right(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim(const std::string &s, const std::string &t = SPACES);
|
||||
//-------------------------------------------------------------------
|
||||
// Utilities
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: rename to to_string?
|
||||
std::string str(const struct timespec& value);
|
||||
|
||||
//
|
||||
// Cross-platform strptime
|
||||
//
|
||||
const char* s3fs_strptime(const char* s, const char* f, struct tm* tm);
|
||||
//
|
||||
// Convert string to off_t. Returns false on bad input.
|
||||
// Replacement for C++11 std::stoll.
|
||||
//
|
||||
bool s3fs_strtoofft(off_t* value, const char* str, int base = 0);
|
||||
//
|
||||
// This function returns 0 if a value that cannot be converted is specified.
|
||||
// Only call if 0 is considered an error and the operation can continue.
|
||||
//
|
||||
off_t cvt_strtoofft(const char* str, int base);
|
||||
|
||||
//
|
||||
// String Manipulation
|
||||
//
|
||||
std::string trim_left(std::string s, const char *t = SPACES);
|
||||
std::string trim_right(std::string s, const char *t = SPACES);
|
||||
std::string trim(std::string s, const char *t = SPACES);
|
||||
std::string lower(std::string s);
|
||||
std::string get_date_rfc850(void);
|
||||
std::string upper(std::string s);
|
||||
std::string peeloff(std::string s);
|
||||
|
||||
//
|
||||
// Date string
|
||||
//
|
||||
std::string get_date_rfc850();
|
||||
void get_date_sigv3(std::string& date, std::string& date8601);
|
||||
std::string get_date_string(time_t tm);
|
||||
std::string get_date_iso8601(time_t tm);
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
bool takeout_str_dquart(std::string& str);
|
||||
bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
//
|
||||
// For encoding
|
||||
//
|
||||
std::string urlEncodeGeneral(const std::string &s);
|
||||
std::string urlEncodePath(const std::string &s);
|
||||
std::string urlEncodeQuery(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
|
||||
bool takeout_str_dquart(std::string& str);
|
||||
bool get_keyword_value(const std::string& target, const char* keyword, std::string& value);
|
||||
|
||||
//
|
||||
// For binary string
|
||||
//
|
||||
std::string s3fs_hex_lower(const unsigned char* input, size_t length);
|
||||
std::string s3fs_hex_upper(const unsigned char* input, size_t length);
|
||||
std::string s3fs_base64(const unsigned char* input, size_t length);
|
||||
std::string s3fs_decode64(const char* input, size_t input_len);
|
||||
|
||||
//
|
||||
// WTF8
|
||||
//
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
//
|
||||
// For CR in XML
|
||||
//
|
||||
std::string get_encoded_cr_code(const char* pbase);
|
||||
std::string get_decoded_cr_code(const char* pencode);
|
||||
|
||||
#endif // S3FS_STRING_UTIL_H_
|
||||
|
||||
@ -66,6 +140,6 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: noet sw=4 ts=4 fdm=marker
|
||||
* vim<600: noet sw=4 ts=4
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
74
src/syncfiller.cpp
Normal file
74
src/syncfiller.cpp
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "syncfiller.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class SyncFiller
|
||||
//-------------------------------------------------------------------
|
||||
// Construct a filler bound to FUSE's readdir buffer and fill callback.
// Both parameters are mandatory: a null buffer or callback is a caller
// programming error, so it is logged as critical and the process aborts.
SyncFiller::SyncFiller(void* buff, fuse_fill_dir_t filler) : filler_buff(buff), filler_func(filler)
{
    if(!filler_buff || !filler_func){
        S3FS_PRN_CRIT("Internal error: SyncFiller constructor parameter is critical value.");
        abort();
    }
}
|
||||
|
||||
//
|
||||
// See. prototype fuse_fill_dir_t in fuse.h
|
||||
//
|
||||
int SyncFiller::Fill(const std::string& name, const struct stat *stbuf, off_t off)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(filler_lock);
|
||||
|
||||
int result = 0;
|
||||
if(filled.insert(name).second){
|
||||
result = filler_func(filler_buff, name.c_str(), stbuf, off);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int SyncFiller::SufficiencyFill(const std::vector<std::string>& pathlist)
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(filler_lock);
|
||||
|
||||
int result = 0;
|
||||
for(auto it = pathlist.cbegin(); it != pathlist.cend(); ++it) {
|
||||
if(filled.insert(*it).second){
|
||||
if(0 != filler_func(filler_buff, it->c_str(), nullptr, 0)){
|
||||
result = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
67
src/syncfiller.h
Normal file
67
src/syncfiller.h
Normal file
@ -0,0 +1,67 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef SYNCFILLER_H_
|
||||
#define SYNCFILLER_H_
|
||||
|
||||
#include <string>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
#include <set>
|
||||
|
||||
#include "s3fs.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// class SyncFiller
|
||||
//----------------------------------------------
|
||||
//
|
||||
// A synchronous class that calls the fuse_fill_dir_t
|
||||
// function that processes the readdir data
|
||||
//
|
||||
class SyncFiller
|
||||
{
|
||||
private:
|
||||
mutable std::mutex filler_lock;
|
||||
void* filler_buff;
|
||||
fuse_fill_dir_t filler_func;
|
||||
std::set<std::string> filled;
|
||||
|
||||
public:
|
||||
explicit SyncFiller(void* buff = nullptr, fuse_fill_dir_t filler = nullptr);
|
||||
~SyncFiller() = default;
|
||||
SyncFiller(const SyncFiller&) = delete;
|
||||
SyncFiller(SyncFiller&&) = delete;
|
||||
SyncFiller& operator=(const SyncFiller&) = delete;
|
||||
SyncFiller& operator=(SyncFiller&&) = delete;
|
||||
|
||||
int Fill(const std::string& name, const struct stat *stbuf, off_t off);
|
||||
int SufficiencyFill(const std::vector<std::string>& pathlist);
|
||||
};
|
||||
|
||||
#endif // SYNCFILLER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
167
src/test_curl_util.cpp
Normal file
167
src/test_curl_util.cpp
Normal file
@ -0,0 +1,167 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2020 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <string>
|
||||
#include <cstring>
|
||||
|
||||
#include "curl_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
//---------------------------------------------------------
|
||||
// S3fsCred Stub
|
||||
//
|
||||
// [NOTE]
|
||||
// This test program links curl_util.cpp just to use the
|
||||
// curl_slist_sort_insert function.
|
||||
// This file has a call to S3fsCred::GetBucket(), which
|
||||
// results in a link error. That method is not used in
|
||||
// this test file, so define a stub class. Linking all
|
||||
// implementation of the S3fsCred class or making all
|
||||
// stubs is not practical, so this is the best answer.
|
||||
//
|
||||
class S3fsCred
|
||||
{
|
||||
private:
|
||||
static std::string bucket_name;
|
||||
public:
|
||||
static const std::string& GetBucket();
|
||||
};
|
||||
|
||||
std::string S3fsCred::bucket_name;
|
||||
|
||||
const std::string& S3fsCred::GetBucket()
|
||||
{
|
||||
return S3fsCred::bucket_name;
|
||||
}
|
||||
//---------------------------------------------------------
|
||||
|
||||
#define ASSERT_IS_SORTED(x) assert_is_sorted((x), __FILE__, __LINE__)
|
||||
|
||||
void assert_is_sorted(const struct curl_slist* list, const char *file, int line)
|
||||
{
|
||||
for(; list != nullptr; list = list->next){
|
||||
std::string key1 = list->data;
|
||||
key1.erase(key1.find(':'));
|
||||
std::string key2 = list->data;
|
||||
key2.erase(key2.find(':'));
|
||||
std::cerr << "key1: " << key1 << " key2: " << key2 << std::endl;
|
||||
|
||||
if(strcasecmp(key1.c_str(), key2.c_str()) > 0){
|
||||
std::cerr << "not sorted: " << key1 << " " << key2 << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
}
|
||||
|
||||
size_t curl_slist_length(const struct curl_slist* list)
|
||||
{
|
||||
size_t len = 0;
|
||||
for(; list != nullptr; list = list->next){
|
||||
++len;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
void test_sort_insert()
|
||||
{
|
||||
struct curl_slist* list = nullptr;
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to head
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to tail
|
||||
list = curl_slist_sort_insert(list, "4", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add in between
|
||||
list = curl_slist_sort_insert(list, "3", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to head
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
ASSERT_STREQUALS("1: val", list->data);
|
||||
// replace head
|
||||
list = curl_slist_sort_insert(list, "1", "val2");
|
||||
ASSERT_IS_SORTED(list);
|
||||
ASSERT_EQUALS(static_cast<size_t>(4), curl_slist_length(list));
|
||||
ASSERT_STREQUALS("1: val2", list->data);
|
||||
curl_slist_free_all(list);
|
||||
}
|
||||
|
||||
void test_slist_remove()
|
||||
{
|
||||
struct curl_slist* list = nullptr;
|
||||
|
||||
// remove no elements
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
|
||||
// remove only element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(0), curl_slist_length(list));
|
||||
|
||||
// remove head element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "1");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
|
||||
// remove tail element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "2");
|
||||
ASSERT_EQUALS(static_cast<size_t>(1), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
|
||||
// remove middle element
|
||||
list = nullptr;
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
list = curl_slist_sort_insert(list, "3", "val");
|
||||
ASSERT_EQUALS(static_cast<size_t>(3), curl_slist_length(list));
|
||||
list = curl_slist_remove(list, "2");
|
||||
ASSERT_EQUALS(static_cast<size_t>(2), curl_slist_length(list));
|
||||
curl_slist_free_all(list);
|
||||
}
|
||||
|
||||
int main(int argc, const char *argv[])
|
||||
{
|
||||
test_sort_insert();
|
||||
test_slist_remove();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
80
src/test_page_list.cpp
Normal file
80
src/test_page_list.cpp
Normal file
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2021 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include "fdcache_page.h"
|
||||
#include "fdcache_stat.h"
|
||||
#include "test_util.h"
|
||||
|
||||
bool CacheFileStat::Open() { return false; } // NOLINT(readability-convert-member-functions-to-static)
|
||||
bool CacheFileStat::OverWriteFile(const std::string& strall) const { return false; } // NOLINT(readability-convert-member-functions-to-static)
|
||||
|
||||
void test_compress()
|
||||
{
|
||||
PageList list;
|
||||
ASSERT_EQUALS(off_t(0), list.Size());
|
||||
|
||||
list.Init(42, /*is_loaded=*/ false, /*is_modified=*/ false);
|
||||
ASSERT_EQUALS(off_t(42), list.Size());
|
||||
ASSERT_FALSE(list.IsPageLoaded(0, 1));
|
||||
|
||||
list.SetPageLoadedStatus(0, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
ASSERT_TRUE(list.IsPageLoaded(0, 1));
|
||||
ASSERT_FALSE(list.IsPageLoaded(0, 2));
|
||||
|
||||
off_t start = 0;
|
||||
off_t size = 0;
|
||||
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
|
||||
ASSERT_EQUALS(off_t(1), start);
|
||||
ASSERT_EQUALS(off_t(41), size);
|
||||
|
||||
// test adding subsequent page then compressing
|
||||
list.SetPageLoadedStatus(1, 3, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
ASSERT_TRUE(list.IsPageLoaded(0, 3));
|
||||
|
||||
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
|
||||
ASSERT_EQUALS(off_t(4), start);
|
||||
ASSERT_EQUALS(off_t(38), size);
|
||||
|
||||
// test adding non-contiguous page then compressing
|
||||
list.SetPageLoadedStatus(5, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
|
||||
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
|
||||
ASSERT_EQUALS(off_t(4), start);
|
||||
ASSERT_EQUALS(off_t(1), size);
|
||||
list.Dump();
|
||||
printf("\n");
|
||||
|
||||
// test adding page between two pages then compressing
|
||||
list.SetPageLoadedStatus(4, 1, /*pstatus=*/ PageList::page_status::LOADED);
|
||||
list.Compress();
|
||||
|
||||
list.Dump();
|
||||
ASSERT_TRUE(list.FindUnloadedPage(0, start, size));
|
||||
ASSERT_EQUALS(off_t(6), start);
|
||||
ASSERT_EQUALS(off_t(36), size);
|
||||
}
|
||||
|
||||
int main(int argc, const char *argv[])
|
||||
{
|
||||
test_compress();
|
||||
return 0;
|
||||
}
|
||||
@ -18,66 +18,205 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdlib>
|
||||
#include <limits>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "string_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables for test_string_util
|
||||
//-------------------------------------------------------------------
|
||||
bool foreground = false;
|
||||
std::string instance_name;
|
||||
|
||||
void test_trim()
|
||||
{
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234"));
|
||||
ASSERT_EQUALS("1234"s, trim(" 1234 "));
|
||||
ASSERT_EQUALS("1234"s, trim("1234 "));
|
||||
ASSERT_EQUALS("1234"s, trim(" 1234"));
|
||||
ASSERT_EQUALS("1234"s, trim("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left("1234"));
|
||||
ASSERT_EQUALS("1234 "s, trim_left(" 1234 "));
|
||||
ASSERT_EQUALS("1234 "s, trim_left("1234 "));
|
||||
ASSERT_EQUALS("1234"s, trim_left(" 1234"));
|
||||
ASSERT_EQUALS("1234"s, trim_left("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
|
||||
ASSERT_EQUALS(" 1234"s, trim_right(" 1234 "));
|
||||
ASSERT_EQUALS("1234"s, trim_right("1234 "));
|
||||
ASSERT_EQUALS(" 1234"s, trim_right(" 1234"));
|
||||
ASSERT_EQUALS("1234"s, trim_right("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("0"), str(0));
|
||||
ASSERT_EQUALS(std::string("1"), str(1));
|
||||
ASSERT_EQUALS(std::string("-1"), str(-1));
|
||||
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
|
||||
ASSERT_EQUALS("1234"s, peeloff("\"1234\"")); // "1234" -> 1234
|
||||
ASSERT_EQUALS("\"1234\""s, peeloff("\"\"1234\"\"")); // ""1234"" -> "1234"
|
||||
ASSERT_EQUALS("\"1234"s, peeloff("\"\"1234\"")); // ""1234" -> "1234
|
||||
ASSERT_EQUALS("1234\""s, peeloff("\"1234\"\"")); // "1234"" -> 1234"
|
||||
ASSERT_EQUALS("\"1234"s, peeloff("\"1234")); // "1234 -> "1234
|
||||
ASSERT_EQUALS("1234\""s, peeloff("1234\"")); // 1234" -> 1234"
|
||||
ASSERT_EQUALS(" \"1234\""s, peeloff(" \"1234\"")); // _"1234" -> _"1234"
|
||||
ASSERT_EQUALS("\"1234\" "s, peeloff("\"1234\" ")); // "1234"_ -> "1234"_
|
||||
}
|
||||
|
||||
void test_base64()
|
||||
{
|
||||
size_t len;
|
||||
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
|
||||
std::string buf;
|
||||
char tmpbuf = '\0';
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(1));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(2));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(3));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(4));
|
||||
ASSERT_EQUALS(s3fs_base64(nullptr, 0), ""s);
|
||||
buf = s3fs_decode64(nullptr, 0);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
|
||||
|
||||
// TODO: invalid input
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), ""s);
|
||||
buf = s3fs_decode64("", 0);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), &tmpbuf, 0);
|
||||
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ=="s);
|
||||
buf = s3fs_decode64("MQ==", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1", 1);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(1));
|
||||
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI="s);
|
||||
buf = s3fs_decode64("MTI=", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "12", 2);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(2));
|
||||
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz"s);
|
||||
buf = s3fs_decode64("MTIz", 4);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "123", 3);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(3));
|
||||
|
||||
ASSERT_EQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA=="s);
|
||||
buf = s3fs_decode64("MTIzNA==", 8);
|
||||
ASSERT_BUFEQUALS(buf.c_str(), buf.length(), "1234", 4);
|
||||
ASSERT_EQUALS(buf.length(), static_cast<size_t>(4));
|
||||
|
||||
// TODO: invalid input
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
void test_strtoofft()
|
||||
{
|
||||
test_trim();
|
||||
test_base64();
|
||||
return 0;
|
||||
off_t value;
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "0"));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(0L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "9"));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(9L));
|
||||
|
||||
ASSERT_FALSE(s3fs_strtoofft(&value, "A"));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "A", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(10L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "F", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(15L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "a", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(10L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "f", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(15L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "deadbeef", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(3'735'928'559L));
|
||||
}
|
||||
|
||||
void test_wtf8_encoding()
|
||||
{
|
||||
std::string ascii("normal std::string");
|
||||
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
|
||||
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
|
||||
std::string broken = utf8;
|
||||
broken[14] = '\x97';
|
||||
std::string mixed = ascii + utf8 + cp1252;
|
||||
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
}
|
||||
|
||||
void test_cr_encoding()
|
||||
{
|
||||
// bse strings
|
||||
std::string base_no("STR");
|
||||
|
||||
std::string base_end_cr1("STR\r");
|
||||
std::string base_mid_cr1("STR\rSTR");
|
||||
std::string base_end_cr2("STR\r\r");
|
||||
std::string base_mid_cr2("STR\r\rSTR");
|
||||
|
||||
std::string base_end_per1("STR%");
|
||||
std::string base_mid_per1("STR%STR");
|
||||
std::string base_end_per2("STR%%");
|
||||
std::string base_mid_per2("STR%%STR");
|
||||
|
||||
std::string base_end_crlf1("STR\r\n");
|
||||
std::string base_mid_crlf1("STR\r\nSTR");
|
||||
std::string base_end_crlf2("STR\r\n\r\n");
|
||||
std::string base_mid_crlf2("STR\r\n\r\nSTR");
|
||||
|
||||
std::string base_end_crper1("STR%\r");
|
||||
std::string base_mid_crper1("STR%\rSTR");
|
||||
std::string base_end_crper2("STR%\r%\r");
|
||||
std::string base_mid_crper2("STR%\r%\rSTR");
|
||||
|
||||
// encode->decode->compare
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_no.c_str()).c_str()), base_no);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_cr1.c_str()).c_str()), base_end_cr1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_cr1.c_str()).c_str()), base_mid_cr1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_cr2.c_str()).c_str()), base_end_cr2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_cr2.c_str()).c_str()), base_mid_cr2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_per1.c_str()).c_str()), base_end_per1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_per1.c_str()).c_str()), base_mid_per1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_per2.c_str()).c_str()), base_end_per2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_per2.c_str()).c_str()), base_mid_per2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crlf1.c_str()).c_str()), base_end_crlf1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crlf1.c_str()).c_str()), base_mid_crlf1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crlf2.c_str()).c_str()), base_end_crlf2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crlf2.c_str()).c_str()), base_mid_crlf2);
|
||||
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crper1.c_str()).c_str()), base_end_crper1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crper1.c_str()).c_str()), base_mid_crper1);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_end_crper2.c_str()).c_str()), base_end_crper2);
|
||||
ASSERT_EQUALS(get_decoded_cr_code(get_encoded_cr_code(base_mid_crper2.c_str()).c_str()), base_mid_crper2);
|
||||
}
|
||||
|
||||
int main(int argc, const char *argv[])
|
||||
{
|
||||
S3fsLog singletonLog;
|
||||
|
||||
test_trim();
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
test_cr_encoding();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -18,30 +18,90 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_TEST_UTIL_H_
|
||||
#define S3FS_TEST_UTIL_H_
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
|
||||
#include "string_util.h"
|
||||
|
||||
template <typename T> inline void assert_equals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
template <> inline void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if(x == NULL && y == NULL){
|
||||
return;
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T> inline void assert_nequals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
template <> inline void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex_lower(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
inline void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
{
|
||||
if(x == nullptr && y == nullptr){
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
} else if(x == nullptr || y == nullptr || strcmp(x, y) != 0){
|
||||
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
#define ASSERT_EQUALS(x, y) \
|
||||
assert_equals((x), (y), __FILE__, __LINE__)
|
||||
inline void assert_bufequals(const char *x, size_t len1, const char *y, size_t len2, const char *file, int line)
|
||||
{
|
||||
if(x == nullptr && y == nullptr){
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if(x == nullptr || y == nullptr || len1 != len2 || memcmp(x, y, len1) != 0){
|
||||
std::cerr << (x ? std::string(x, len1) : "null") << " != " << (y ? std::string(y, len2) : "null") << " at " << file << ":" << line << std::endl;
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
#define ASSERT_STREQUALS(x, y) \
|
||||
assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_TRUE(x) assert_equals((x), true, __FILE__, __LINE__)
|
||||
#define ASSERT_FALSE(x) assert_equals((x), false, __FILE__, __LINE__)
|
||||
#define ASSERT_EQUALS(x, y) assert_equals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_NEQUALS(x, y) assert_nequals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_STREQUALS(x, y) assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
#define ASSERT_BUFEQUALS(x, len1, y, len2) assert_bufequals((x), (len1), (y), (len2), __FILE__, __LINE__)
|
||||
|
||||
#endif // S3FS_TEST_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
296
src/threadpoolman.cpp
Normal file
296
src/threadpoolman.cpp
Normal file
@ -0,0 +1,296 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cerrno>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <future>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
|
||||
#include "s3fs_logger.h"
|
||||
#include "threadpoolman.h"
|
||||
#include "curl.h"
|
||||
#include "curl_share.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan class variables
|
||||
//------------------------------------------------
|
||||
int ThreadPoolMan::worker_count = 10; // default
|
||||
std::unique_ptr<ThreadPoolMan> ThreadPoolMan::singleton;
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan class methods
|
||||
//------------------------------------------------
|
||||
bool ThreadPoolMan::Initialize(int count)
|
||||
{
|
||||
if(ThreadPoolMan::singleton){
|
||||
S3FS_PRN_CRIT("Already singleton for Thread Manager exists.");
|
||||
abort();
|
||||
}
|
||||
if(-1 != count){
|
||||
ThreadPoolMan::SetWorkerCount(count);
|
||||
}
|
||||
ThreadPoolMan::singleton = std::make_unique<ThreadPoolMan>(ThreadPoolMan::worker_count);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ThreadPoolMan::Destroy()
|
||||
{
|
||||
ThreadPoolMan::singleton.reset();
|
||||
}
|
||||
|
||||
int ThreadPoolMan::SetWorkerCount(int count)
|
||||
{
|
||||
if(0 >= count){
|
||||
S3FS_PRN_ERR("Thread worker count(%d) must be positive number.", count);
|
||||
return -1;
|
||||
}
|
||||
if(count == ThreadPoolMan::worker_count){
|
||||
return ThreadPoolMan::worker_count;
|
||||
}
|
||||
|
||||
// [TODO]
|
||||
// If we need to dynamically change worker threads, this is
|
||||
// where we would terminate/add workers.
|
||||
//
|
||||
int old = ThreadPoolMan::worker_count;
|
||||
ThreadPoolMan::worker_count = count;
|
||||
return old;
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::Instruct(const thpoolman_param& param)
|
||||
{
|
||||
if(!ThreadPoolMan::singleton){
|
||||
S3FS_PRN_WARN("The singleton object is not initialized yet.");
|
||||
return false;
|
||||
}
|
||||
if(!param.psem){
|
||||
S3FS_PRN_ERR("Thread parameter Semaphore is null.");
|
||||
return false;
|
||||
}
|
||||
ThreadPoolMan::singleton->SetInstruction(param);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ThreadPoolMan::AwaitInstruct(const thpoolman_param& param)
|
||||
{
|
||||
if(!ThreadPoolMan::singleton){
|
||||
S3FS_PRN_WARN("The singleton object is not initialized yet.");
|
||||
return false;
|
||||
}
|
||||
if(param.psem){
|
||||
S3FS_PRN_ERR("Thread parameter Semaphore must be null.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Setup local thpoolman_param structure with local Semaphore
|
||||
thpoolman_param local_param;
|
||||
Semaphore await_sem(0);
|
||||
local_param.args = param.args;
|
||||
local_param.psem = &await_sem;
|
||||
local_param.pfunc = param.pfunc;
|
||||
|
||||
// Set parameters and run thread worker
|
||||
ThreadPoolMan::singleton->SetInstruction(local_param);
|
||||
|
||||
// wait until the thread is complete
|
||||
await_sem.acquire();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//
// Thread worker
//
// Entry point for every pool thread. Loops until the pool's exit flag
// is raised: blocks on thpoolman_sem (one release per queued
// instruction), pops one instruction under thread_list_lock, runs it,
// and releases the instruction's semaphore (if any) so waiters such as
// AwaitInstruct() can resume. The final status is reported through the
// promise paired with this thread's future (joined in StopThreads()).
//
void ThreadPoolMan::Worker(ThreadPoolMan* psingleton, std::promise<int> promise)
{
    if(!psingleton){
        S3FS_PRN_ERR("The parameter for worker thread is invalid.");
        promise.set_value(-EIO);
        return;
    }
    S3FS_PRN_INFO3("Start worker thread in ThreadPoolMan.");

    // The only object in this thread worker
    // (one S3fsCurl instance reused for every instruction on this thread)
    S3fsCurl s3fscurl(true);

    while(!psingleton->IsExit()){
        // wait for an instruction (or an exit wake-up from StopThreads)
        psingleton->thpoolman_sem.acquire();

        // re-check after waking: StopThreads releases the semaphore once
        // per worker purely to deliver the exit flag
        if(psingleton->IsExit()){
            break;
        }

        // reset curl handle
        if(!s3fscurl.CreateCurlHandle(true)){
            S3FS_PRN_ERR("Failed to re-create curl handle.");
            break;
        }

        // get instruction (pop the oldest entry under the shared lock)
        thpoolman_param param;
        {
            const std::lock_guard<std::mutex> lock(psingleton->thread_list_lock);

            if(psingleton->instruction_list.empty()){
                // possible when the semaphore was released more times
                // than instructions were queued (e.g. during shutdown)
                S3FS_PRN_DBG("Got a semaphore, but the instruction is empty.");
                continue;
            }else{
                param = psingleton->instruction_list.front();
                psingleton->instruction_list.pop_front();
            }
        }

        // run function; a non-null return value is treated as an error
        // code smuggled through the void* (logged, not propagated)
        void* retval;
        if(nullptr != (retval = param.pfunc(s3fscurl, param.args))){
            S3FS_PRN_WARN("The instruction function returned with something error code(%ld).", reinterpret_cast<long>(retval));
        }
        // signal completion to whoever queued this instruction
        if(param.psem){
            param.psem->release();
        }
    }

    // per-thread curl share cleanup; failure is non-fatal
    if(!S3fsCurlShare::DestroyCurlShareHandleForThread()){
        S3FS_PRN_WARN("Failed to destroy curl share handle for this thread, but continue...");
    }

    promise.set_value(0);
}
|
||||
|
||||
//------------------------------------------------
|
||||
// ThreadPoolMan methods
|
||||
//------------------------------------------------
|
||||
// Construct the pool and immediately start `count` worker threads.
// A constructor cannot report failure here, so invalid arguments,
// a second instance, or a thread-start failure all abort the process.
ThreadPoolMan::ThreadPoolMan(int count) : is_exit(false), thpoolman_sem(0)
{
    if(count < 1){
        S3FS_PRN_CRIT("Failed to creating singleton for Thread Manager, because thread count(%d) is under 1.", count);
        abort();
    }
    // only one instance may exist (held in ThreadPoolMan::singleton)
    if(ThreadPoolMan::singleton){
        S3FS_PRN_CRIT("Already singleton for Thread Manager exists.");
        abort();
    }

    // create threads
    if(!StartThreads(count)){
        S3FS_PRN_ERR("Failed starting threads at initializing.");
        abort();
    }
}
|
||||
|
||||
// Stop and join all worker threads before the members are destroyed.
ThreadPoolMan::~ThreadPoolMan()
{
    StopThreads();
}
|
||||
|
||||
// Whether workers have been asked to terminate (atomic flag, safe to
// read from any worker thread without holding a lock).
bool ThreadPoolMan::IsExit() const
{
    return is_exit;
}
|
||||
|
||||
// Raise/clear the termination flag observed by Worker() loops.
// Setting it alone does not wake sleeping workers; StopThreads()
// additionally releases the semaphore once per worker.
void ThreadPoolMan::SetExitFlag(bool exit_flag)
{
    is_exit = exit_flag;
}
|
||||
|
||||
bool ThreadPoolMan::StopThreads()
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(thread_list_lock);
|
||||
|
||||
if(thread_list.empty()){
|
||||
S3FS_PRN_INFO("Any threads are running now, then nothing to do.");
|
||||
return true;
|
||||
}
|
||||
|
||||
// all threads to exit
|
||||
SetExitFlag(true);
|
||||
for(size_t waitcnt = thread_list.size(); 0 < waitcnt; --waitcnt){
|
||||
thpoolman_sem.release();
|
||||
}
|
||||
|
||||
// wait for threads exiting
|
||||
for(auto& pair : thread_list){
|
||||
pair.first.join();
|
||||
long retval = pair.second.get();
|
||||
S3FS_PRN_DBG("join succeeded - return code(%ld)", reinterpret_cast<long>(retval));
|
||||
}
|
||||
thread_list.clear();
|
||||
|
||||
// reset semaphore(to zero)
|
||||
while(thpoolman_sem.try_acquire()){
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Start `count` worker threads, first stopping any that are already
// running. Each thread gets a promise whose future is kept alongside
// the std::thread so StopThreads() can join and read its exit status.
// Returns false on invalid count or if stopping old threads failed.
bool ThreadPoolMan::StartThreads(int count)
{
    if(count < 1){
        S3FS_PRN_ERR("Failed to creating threads, because thread count(%d) is under 1.", count);
        return false;
    }

    // stop all thread if they are running.
    // cppcheck-suppress unmatchedSuppression
    // cppcheck-suppress knownConditionTrueFalse
    if(!StopThreads()){
        S3FS_PRN_ERR("Failed to stop existed threads.");
        return false;
    }

    // create all threads
    SetExitFlag(false);
    for(int cnt = 0; cnt < count; ++cnt){
        // run thread
        std::promise<int> promise;
        std::future<int> future = promise.get_future();
        std::thread thread(ThreadPoolMan::Worker, this, std::move(promise));

        // lock only around the list append; the thread may already be
        // running by the time it is recorded here
        const std::lock_guard<std::mutex> lock(thread_list_lock);
        thread_list.emplace_back(std::move(thread), std::move(future));
    }
    return true;
}
|
||||
|
||||
void ThreadPoolMan::SetInstruction(const thpoolman_param& param)
|
||||
{
|
||||
// set parameter to list
|
||||
{
|
||||
const std::lock_guard<std::mutex> lock(thread_list_lock);
|
||||
instruction_list.push_back(param);
|
||||
}
|
||||
|
||||
// run thread
|
||||
thpoolman_sem.release();
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
111
src/threadpoolman.h
Normal file
111
src/threadpoolman.h
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_THREADPOOLMAN_H_
|
||||
#define S3FS_THREADPOOLMAN_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <future>
|
||||
#include <list>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "common.h"
|
||||
#include "psemaphore.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Typedefs for functions and structures
|
||||
//------------------------------------------------
|
||||
class S3fsCurl;

//
// Prototype function
//
// Signature of a task executed by a pool worker thread. The S3fsCurl
// reference is the worker's thread-local curl object; the void* is the
// caller-supplied argument. A non-null return value is logged by the
// worker as an error code.
//
typedef void* (*thpoolman_worker)(S3fsCurl&, void*);

//
// Parameter structure
//
// [NOTE]
// The args member is a value that is an argument of the worker function.
// The psem member is allowed nullptr. If it is not nullptr, the post() method is
// called when finishing the function.
//
struct thpoolman_param
{
    void*            args  = nullptr;   // opaque argument forwarded to pfunc
    Semaphore*       psem  = nullptr;   // released by the worker after pfunc returns (optional)
    thpoolman_worker pfunc = nullptr;   // task entry point
};

typedef std::list<thpoolman_param> thpoolman_params_t;
|
||||
|
||||
//------------------------------------------------
// Class ThreadPoolMan
//------------------------------------------------
// Singleton fixed-size worker thread pool. Tasks (thpoolman_param) are
// queued through the static Instruct()/AwaitInstruct() entry points and
// executed by Worker() threads created in StartThreads().
class ThreadPoolMan
{
    private:
        static int worker_count;                            // thread count used at pool (re)creation
        static std::unique_ptr<ThreadPoolMan> singleton;    // the one process-wide instance

        std::atomic<bool> is_exit;      // termination flag polled by worker loops
        Semaphore thpoolman_sem;        // counts queued instructions; workers block on it

        std::mutex thread_list_lock;    // guards both lists below
        std::vector<std::pair<std::thread, std::future<int>>> thread_list GUARDED_BY(thread_list_lock);     // workers and their exit-status futures
        thpoolman_params_t instruction_list GUARDED_BY(thread_list_lock);                                   // pending tasks (FIFO)

    private:
        // per-thread entry point; reports its exit status via the promise
        static void Worker(ThreadPoolMan* psingleton, std::promise<int> promise);

        bool IsExit() const;
        void SetExitFlag(bool exit_flag);

        bool StopThreads();
        bool StartThreads(int count);
        void SetInstruction(const thpoolman_param& pparam);

    public:
        explicit ThreadPoolMan(int count = 1);
        ~ThreadPoolMan();
        ThreadPoolMan(const ThreadPoolMan&) = delete;
        ThreadPoolMan(ThreadPoolMan&&) = delete;
        ThreadPoolMan& operator=(const ThreadPoolMan&) = delete;
        ThreadPoolMan& operator=(ThreadPoolMan&&) = delete;

        // count = -1 means "use the configured worker_count"
        // NOTE(review): Initialize() body is not in this chunk - confirm
        static bool Initialize(int count = -1);
        static void Destroy();
        static int SetWorkerCount(int count);
        static int GetWorkerCount() { return ThreadPoolMan::worker_count; }
        static bool Instruct(const thpoolman_param& pparam);
        static bool AwaitInstruct(const thpoolman_param& param);
};
|
||||
|
||||
#endif // S3FS_THREADPOOLMAN_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
468
src/types.h
Normal file
468
src/types.h
Normal file
@ -0,0 +1,468 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_TYPES_H_
|
||||
#define S3FS_TYPES_H_
|
||||
|
||||
#include <cstdlib>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <list>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
//
|
||||
// For extended attribute
|
||||
// (HAVE_XXX symbols are defined in config.h)
|
||||
//
|
||||
#ifdef HAVE_SYS_EXTATTR_H
|
||||
#include <sys/extattr.h>
|
||||
#elif HAVE_ATTR_XATTR_H
|
||||
#include <attr/xattr.h>
|
||||
#elif HAVE_SYS_XATTR_H
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// xattrs_t
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Header "x-amz-meta-xattr" is for extended attributes.
|
||||
// This header is url encoded string which is json formatted.
|
||||
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
|
||||
//
|
||||
typedef std::map<std::string, std::string> xattrs_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// acl_t
|
||||
//-------------------------------------------------------------------
|
||||
// Canned S3 ACL values (sent via the x-amz-acl request header).
enum class acl_t : uint8_t {
    PRIVATE,
    PUBLIC_READ,
    PUBLIC_READ_WRITE,
    AWS_EXEC_READ,
    AUTHENTICATED_READ,
    BUCKET_OWNER_READ,
    BUCKET_OWNER_FULL_CONTROL,
    LOG_DELIVERY_WRITE,
    UNKNOWN
};

// Canonical header string for an ACL value; nullptr for UNKNOWN.
// Aborts on a value outside the enumeration.
constexpr const char* str(acl_t value)
{
    switch(value){
        case acl_t::PRIVATE:
            return "private";
        case acl_t::PUBLIC_READ:
            return "public-read";
        case acl_t::PUBLIC_READ_WRITE:
            return "public-read-write";
        case acl_t::AWS_EXEC_READ:
            return "aws-exec-read";
        case acl_t::AUTHENTICATED_READ:
            return "authenticated-read";
        case acl_t::BUCKET_OWNER_READ:
            return "bucket-owner-read";
        case acl_t::BUCKET_OWNER_FULL_CONTROL:
            return "bucket-owner-full-control";
        case acl_t::LOG_DELIVERY_WRITE:
            return "log-delivery-write";
        case acl_t::UNKNOWN:
            return nullptr;
    }
    abort();
}

// Parse a canonical ACL name; unrecognized strings map to UNKNOWN.
inline acl_t to_acl(const char *acl)
{
    // Table-driven lookup instead of a strcmp if/else chain.
    struct acl_name_entry
    {
        const char* name;
        acl_t       value;
    };
    constexpr acl_name_entry acl_table[] = {
        {"private",                   acl_t::PRIVATE},
        {"public-read",               acl_t::PUBLIC_READ},
        {"public-read-write",         acl_t::PUBLIC_READ_WRITE},
        {"aws-exec-read",             acl_t::AWS_EXEC_READ},
        {"authenticated-read",        acl_t::AUTHENTICATED_READ},
        {"bucket-owner-read",         acl_t::BUCKET_OWNER_READ},
        {"bucket-owner-full-control", acl_t::BUCKET_OWNER_FULL_CONTROL},
        {"log-delivery-write",        acl_t::LOG_DELIVERY_WRITE}
    };
    for(const auto& entry : acl_table){
        if(0 == strcmp(acl, entry.name)){
            return entry.value;
        }
    }
    return acl_t::UNKNOWN;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// sse_type_t
|
||||
//-------------------------------------------------------------------
|
||||
enum class sse_type_t : uint8_t {
|
||||
SSE_DISABLE = 0, // not use server side encrypting
|
||||
SSE_S3, // server side encrypting by S3 key
|
||||
SSE_C, // server side encrypting by custom key
|
||||
SSE_KMS // server side encrypting by kms id
|
||||
};
|
||||
|
||||
enum class signature_type_t : uint8_t {
|
||||
V2_ONLY,
|
||||
V4_ONLY,
|
||||
V2_OR_V4
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// etaglist_t / filepart / untreatedpart
|
||||
//----------------------------------------------
|
||||
//
|
||||
// Etag string and part number pair
|
||||
//
|
||||
struct etagpair
{
    std::string etag;     // expected etag value
    int         part_num; // part number

    // A nullptr etag becomes the empty string.
    explicit etagpair(const char* petag = nullptr, int part = -1) : etag(nullptr != petag ? petag : ""), part_num(part) {}

    ~etagpair()
    {
        clear();
    }

    void clear()
    {
        part_num = -1;
        etag.clear();
    }
};

// Requires pointer stability and thus must be a list not a vector
typedef std::list<etagpair> etaglist_t;

// Owning pool of etagpair entries; add() hands out pointers that stay
// valid until clear()/destruction.
struct petagpool
{
    // Requires pointer stability and thus must be a list not a vector
    std::list<etagpair> petaglist;

    ~petagpool()
    {
        clear();
    }

    void clear()
    {
        petaglist.clear();
    }

    // Copy the entry into the pool and return a stable pointer to it
    // (std::list never relocates its nodes).
    etagpair* add(const etagpair& etag_entity)
    {
        petaglist.emplace_back(etag_entity);
        return &petaglist.back();
    }
};
|
||||
|
||||
//
|
||||
// Each part information for Multipart upload
|
||||
//
|
||||
struct filepart
|
||||
{
|
||||
bool uploaded = false; // does finish uploading
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) descriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
off_t size; // uploading size
|
||||
bool is_copy; // whether is copy multipart
|
||||
etagpair* petag; // use only parallel upload
|
||||
|
||||
explicit filepart(bool is_uploaded = false, int _fd = -1, off_t part_start = 0, off_t part_size = -1, bool is_copy_part = false, etagpair* petagpair = nullptr) : fd(_fd), startpos(part_start), size(part_size), is_copy(is_copy_part), petag(petagpair) {}
|
||||
|
||||
~filepart()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void clear()
|
||||
{
|
||||
uploaded = false;
|
||||
etag = "";
|
||||
fd = -1;
|
||||
startpos = 0;
|
||||
size = -1;
|
||||
is_copy = false;
|
||||
petag = nullptr;
|
||||
}
|
||||
|
||||
void add_etag_list(etaglist_t& list, int partnum = -1)
|
||||
{
|
||||
if(-1 == partnum){
|
||||
partnum = static_cast<int>(list.size()) + 1;
|
||||
}
|
||||
list.emplace_back(nullptr, partnum);
|
||||
petag = &list.back();
|
||||
}
|
||||
|
||||
void set_etag(etagpair* petagobj)
|
||||
{
|
||||
petag = petagobj;
|
||||
}
|
||||
|
||||
int get_part_number() const
|
||||
{
|
||||
if(!petag){
|
||||
return -1;
|
||||
}
|
||||
return petag->part_num;
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::vector<filepart> filepart_list_t;
|
||||
|
||||
//
|
||||
// Each part information for Untreated parts
|
||||
//
|
||||
struct untreatedpart
{
    off_t start;        // untreated start position
    off_t size;         // number of untreated bytes
    long untreated_tag; // untreated part tag

    // Invalid ranges (negative start or non-positive size) collapse to
    // the empty part rather than being stored.
    explicit untreatedpart(off_t part_start = 0, off_t part_size = 0, long part_untreated_tag = 0) : start(part_start), size(part_size), untreated_tag(part_untreated_tag)
    {
        if(0 > part_start || 0 >= part_size){
            clear(); // wrong parameter, so clear value.
        }
    }

    ~untreatedpart()
    {
        clear();
    }

    void clear()
    {
        untreated_tag = 0;
        size = 0;
        start = 0;
    }

    // [NOTE]
    // Check if the areas overlap
    // However, even if the areas do not overlap, this method returns true if areas are adjacent.
    //
    bool check_overlap(off_t chk_start, off_t chk_size) const
    {
        if(chk_start < 0 || chk_size <= 0){
            return false;                       // checked range is invalid
        }
        if(start < 0 || size <= 0){
            return false;                       // this part holds no valid area
        }
        if((chk_start + chk_size) < start){
            return false;                       // checked area ends before this part
        }
        if((start + size) < chk_start){
            return false;                       // checked area begins after this part
        }
        return true;
    }

    // Merge an overlapping/adjacent range into this part and take the
    // new tag; returns false (unchanged) when the ranges do not touch.
    bool stretch(off_t add_start, off_t add_size, long tag)
    {
        if(!check_overlap(add_start, add_size)){
            return false;
        }
        const off_t merged_start = std::min(start, add_start);
        const off_t merged_end   = std::max(start + size, add_start + add_size);

        start         = merged_start;
        size          = merged_end - merged_start;
        untreated_tag = tag;

        return true;
    }
};

typedef std::vector<untreatedpart> untreated_list_t;
|
||||
|
||||
//
|
||||
// Information on each part of multipart upload
|
||||
//
|
||||
struct mp_part
{
    off_t start;
    off_t size;
    int part_num; // Set only for information to upload

    explicit mp_part(off_t set_start = 0, off_t set_size = 0, int part = 0) : start(set_start), size(set_size), part_num(part) {}
};

typedef std::vector<struct mp_part> mp_part_list_t;

// Sum of the sizes of all parts in the list.
inline off_t total_mp_part_list(const mp_part_list_t& mplist)
{
    off_t total = 0;
    for(const auto& part : mplist){
        total += part.size;
    }
    return total;
}
|
||||
|
||||
//
|
||||
// Rename directory struct
|
||||
//
|
||||
struct mvnode
{
    // Takes both paths by value and moves them into the members.
    mvnode(std::string old_path, std::string new_path, bool is_dir, bool is_normdir)
        : old_path(std::move(old_path))
        , new_path(std::move(new_path))
        , is_dir(is_dir)
        , is_normdir(is_normdir)
    {}
    std::string old_path;   // source object path
    std::string new_path;   // destination object path
    bool is_dir;            // entry refers to a directory
    bool is_normdir;        // NOTE(review): presumably marks a "normal" directory object variant - confirm with callers
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// mimes_t
|
||||
//-------------------------------------------------------------------
|
||||
struct case_insensitive_compare_func
{
    // Strict weak ordering that ignores ASCII case (via strcasecmp),
    // so map lookups match keys regardless of capitalization.
    bool operator()(const std::string& a, const std::string& b) const {
        return strcasecmp(a.c_str(), b.c_str()) < 0;
    }
};
// String-to-string map with case-insensitive keys.
// NOTE(review): used for MIME type tables per the section banner - confirm key semantics with the loader.
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3 Object Type Enum : objtype_t
|
||||
//-------------------------------------------------------------------
|
||||
// The type defines what files, symlinks, and directories can be
|
||||
// represented in S3.
|
||||
// The stats cache has a negative cache, which also defines its type.
|
||||
// Directory objects can have multiple types depending on the client
|
||||
// that created them.
|
||||
// To accommodate these, this enumeration also defines the type of the
|
||||
// original object.
|
||||
//
|
||||
enum class objtype_t : int8_t {
    UNKNOWN = -1,
    FILE = 0,
    SYMLINK = 1,
    DIR_NORMAL = 2,
    DIR_NOT_TERMINATE_SLASH = 3,
    DIR_FOLDER_SUFFIX = 4,
    DIR_NOT_EXIST_OBJECT = 5,
    NEGATIVE = 6 // Negative type means an object does not exist in Stats cache.
};

// True only for plain file objects.
constexpr bool IS_FILE_OBJ(objtype_t type)
{
    return (type == objtype_t::FILE);
}

// True only for symlink objects.
constexpr bool IS_SYMLINK_OBJ(objtype_t type)
{
    return (type == objtype_t::SYMLINK);
}

// True only for the canonical directory representation.
constexpr bool IS_NORMALDIR_OBJ(objtype_t type)
{
    return (type == objtype_t::DIR_NORMAL);
}

// True for every directory representation variant.
constexpr bool IS_DIR_OBJ(objtype_t type)
{
    return (type == objtype_t::DIR_NORMAL || type == objtype_t::DIR_NOT_TERMINATE_SLASH || type == objtype_t::DIR_FOLDER_SUFFIX || type == objtype_t::DIR_NOT_EXIST_OBJECT);
}

// True for negative-cache entries (object known not to exist).
constexpr bool IS_NEGATIVE_OBJ(objtype_t type)
{
    return (type == objtype_t::NEGATIVE);
}

// Two types are "the same" when equal, or when both are directory
// variants (any directory representation is interchangeable here).
constexpr bool IS_SAME_OBJ(objtype_t type1, objtype_t type2)
{
    return (type1 == type2 || (IS_DIR_OBJ(type1) && IS_DIR_OBJ(type2)));
}

// Directory variants that should be replaced by the canonical form.
constexpr bool NEED_REPLACEDIR_OBJ(objtype_t type)
{
    return (type == objtype_t::DIR_NOT_TERMINATE_SLASH || type == objtype_t::DIR_FOLDER_SUFFIX || type == objtype_t::DIR_NOT_EXIST_OBJECT);
}

// Directory variants whose backing object must be removed first.
constexpr bool NEED_RMDIR_OBJ(objtype_t type)
{
    return (type == objtype_t::DIR_NOT_TERMINATE_SLASH || type == objtype_t::DIR_FOLDER_SUFFIX);
}

// Human-readable "NAME(value)" string for logging.
inline std::string STR_OBJTYPE(objtype_t type)
{
    const char* pname;
    switch(type){
        case objtype_t::UNKNOWN:                 pname = "UNKNOWN";                 break;
        case objtype_t::FILE:                    pname = "FILE";                    break;
        case objtype_t::SYMLINK:                 pname = "SYMLINK";                 break;
        case objtype_t::DIR_NORMAL:              pname = "DIR_NORMAL";              break;
        case objtype_t::DIR_NOT_TERMINATE_SLASH: pname = "DIR_NOT_TERMINATE_SLASH"; break;
        case objtype_t::DIR_FOLDER_SUFFIX:       pname = "DIR_FOLDER_SUFFIX";       break;
        case objtype_t::DIR_NOT_EXIST_OBJECT:    pname = "DIR_NOT_EXIST_OBJECT";    break;
        case objtype_t::NEGATIVE:                pname = "NEGATIVE";                break;
        default:                                 pname = "not defined value";       break;
    }
    return std::string(pname) + "(" + std::to_string(static_cast<int>(type)) + ")";
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Typedefs specialized for use
|
||||
//-------------------------------------------------------------------
|
||||
typedef std::vector<std::string> readline_t;
|
||||
typedef std::map<std::string, std::string> kvmap_t;
|
||||
typedef std::map<std::string, kvmap_t> bucketkvmap_t;
|
||||
|
||||
#endif // S3FS_TYPES_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -17,14 +17,45 @@
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
|
||||
TESTS=small-integration-test.sh
|
||||
|
||||
EXTRA_DIST = \
|
||||
integration-test-common.sh \
|
||||
require-root.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
integration-test-common.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
|
||||
testdir = test
|
||||
|
||||
noinst_PROGRAMS = \
|
||||
junk_data \
|
||||
write_multiblock \
|
||||
mknod_test \
|
||||
truncate_read_file \
|
||||
cr_filename
|
||||
|
||||
junk_data_SOURCES = junk_data.cc
|
||||
write_multiblock_SOURCES = write_multiblock.cc
|
||||
mknod_test_SOURCES = mknod_test.cc
|
||||
truncate_read_file_SOURCES = truncate_read_file.cc
|
||||
cr_filename_SOURCES = cr_filename.cc
|
||||
|
||||
clang-tidy:
|
||||
clang-tidy -extra-arg=-std=@CPP_VERSION@ \
|
||||
$(junk_data_SOURCES) \
|
||||
$(write_multiblock_SOURCES) \
|
||||
$(mknod_test_SOURCES) \
|
||||
$(truncate_read_file_SOURCES) \
|
||||
$(cr_filename_SOURCES) \
|
||||
-- $(DEPS_CFLAGS) $(CPPFLAGS)
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
2
test/chaos-http-proxy.conf
Normal file
2
test/chaos-http-proxy.conf
Normal file
@ -0,0 +1,2 @@
|
||||
com.bouncestorage.chaoshttpproxy.http_503=1
|
||||
com.bouncestorage.chaoshttpproxy.success=9
|
||||
63
test/compile_all_targets.sh
Executable file
63
test/compile_all_targets.sh
Executable file
@ -0,0 +1,63 @@
|
||||
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#

set -o errexit
set -o nounset
set -o pipefail

readonly COMMON_FLAGS='-O -Wall -Werror'

# Run one clean build.
#   $1         - compiler to use via CXX ('' = configure default)
#   $2         - CXXFLAGS value
#   remaining  - extra ./configure arguments
# [FIX-style] the seven copy-pasted clean/configure/make stanzas are
# collapsed into this helper; build order and flags are unchanged.
build_target()
{
    local cxx="$1"
    local cxxflags="$2"
    shift 2

    make clean
    if [ -n "${cxx}" ]; then
        CXX="${cxx}" CXXFLAGS="${cxxflags}" ./configure "$@"
    else
        CXXFLAGS="${cxxflags}" ./configure "$@"
    fi
    make --jobs "$(nproc)"
}

build_target '' "$COMMON_FLAGS" --with-gnutls
build_target '' "$COMMON_FLAGS" --with-gnutls --with-nettle
build_target '' "$COMMON_FLAGS" --with-nss
build_target '' "$COMMON_FLAGS" --with-openssl
build_target '' "$COMMON_FLAGS -std=c++23"
build_target '' "$COMMON_FLAGS -m32"
build_target clang++ "$COMMON_FLAGS -Wshorten-64-to-32"

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
||||
77
test/cr_filename.cc
Normal file
77
test/cr_filename.cc
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2021 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
// [NOTE]
|
||||
// This is a program used for file size inspection.
|
||||
// File size checking should be done by the caller of this program.
|
||||
// This program truncates the file and reads the file in another process
|
||||
// between truncate and flush(close file).
|
||||
//
|
||||
// Create a file whose name ends in a carriage return, stat it, then
// remove it. File size checking is done by the caller (see note above).
int main(int argc, const char *argv[])
{
    if(2 != argc){
        fprintf(stderr, "[ERROR] Wrong parameters\n");
        fprintf(stdout, "[Usage] cr_filename <base file path>\n");
        exit(EXIT_FAILURE);
    }

    // Build "<base path>\r" as the target filename.
    char filepath[4096];
    snprintf(filepath, sizeof(filepath), "%s\r", argv[1]);
    filepath[sizeof(filepath) - 1] = '\0'; // for safety

    // create empty file
    int fd = open(filepath, O_CREAT|O_RDWR, 0644);
    if(-1 == fd){
        fprintf(stderr, "[ERROR] Could not open file(%s)\n", filepath);
        exit(EXIT_FAILURE);
    }
    close(fd);

    // stat
    struct stat buf;
    if(0 != stat(filepath, &buf)){
        fprintf(stderr, "[ERROR] Could not get stat for file(%s)\n", filepath);
        exit(EXIT_FAILURE);
    }

    // remove file
    if(0 != unlink(filepath)){
        fprintf(stderr, "[ERROR] Could not remove file(%s)\n", filepath);
        exit(EXIT_FAILURE);
    }

    exit(EXIT_SUCCESS);
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
143
test/filter-suite-log.sh
Executable file
143
test/filter-suite-log.sh
Executable file
@ -0,0 +1,143 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
# Print the command line help for this script.
#   $1 : program name shown in the usage line
#
func_usage()
{
    printf '\n'
    printf 'Usage: %s [-h] <log file path>\n' "$1"
    printf '%s\n' " -h print help"
    printf '%s\n' " log file path path for test-suite.log"
    printf '\n'
}
|
||||
|
||||
# --- path / file configuration --------------------------------------------
PRGNAME=$(basename "$0")
SCRIPTDIR=$(dirname "$0")
S3FSDIR=$(cd "${SCRIPTDIR}"/.. || exit 1; pwd)
TOPDIR=$(cd "${S3FSDIR}"/test || exit 1; pwd)

# default log location; may be overridden by a command line argument
SUITELOG="${TOPDIR}/test-suite.log"

# [FIX] use mktemp instead of the fixed, world-shared path
# "/tmp/.lineno.tmp": a predictable name collides between concurrent runs
# and is open to symlink attacks in a shared /tmp.
TMP_LINENO_FILE=$(mktemp "${TMPDIR:-/tmp}/.lineno.XXXXXX")
|
||||
|
||||
#
# Parse the command line: any "-h" variant prints usage and exits; any
# other argument is taken as the path to the test-suite log file.
#
while [ $# -ne 0 ]; do
    case "$1" in
        "")
            break
            ;;
        -h | -H | --help | --HELP)
            func_usage "${PRGNAME}"
            exit 0
            ;;
        *)
            SUITELOG=$1
            ;;
    esac
    shift
done

# the log file must exist before we try to slice it up
if [ ! -f "${SUITELOG}" ]; then
    echo "[ERROR] not found ${SUITELOG} log file."
    exit 1
fi
|
||||
|
||||
#
# Extract, from the suite log, the line number and type of every keyword
# line.  The resulting file has one "<line number> <type>" pair per line,
# where <type> is:
#
# 0 : normal line
# 1 : start line for one small test (specified in integration-test-main.sh)
# 2 : passed line of end of one small test (specified in test-utils.sh)
# 3 : failed line of end of one small test (specified in test-utils.sh)
#
# grep -n prefixes each match with "<lineno>:"; the sed stages replace the
# ":<matched text>" suffix with the numeric type code.
#
grep -n -e 'test_.*: ".*"' -o -e 'test_.* passed' -o -e 'test_.* failed' "${SUITELOG}" 2>/dev/null | sed 's/:test_.*: ".*"/ 1/g' | sed 's/:test_.* passed/ 2/g' | sed 's/:test_.* failed/ 3/g' > "${TMP_LINENO_FILE}"
|
||||
|
||||
#
# Loop for printing result
#
# Walk the keyword list and print each slice of the suite log lying between
# consecutive keyword lines, filtering noise depending on the slice type.
#
prev_line_type=0
prev_line_number=1
while read -r line; do
    # line is "<line number> <line type>"
    #
    # shellcheck disable=SC2206
    number_type=(${line})

    # lines from the top of the log up to just before this keyword line
    head_line_cnt=$((number_type[0] - 1))
    # lines in the slice since the previous keyword line
    tail_line_cnt=$((number_type[0] - prev_line_number))

    # blank separator before each "passed" marker for readability
    if [ "${number_type[1]}" -eq 2 ]; then
        echo ""
    fi
    if [ "${prev_line_type}" -eq 1 ]; then
        if [ "${number_type[1]}" -eq 2 ]; then
            # if passed, cut s3fs information messages
            # NOTE(review): in "grep -v -e '^s3fs: ' -a -e '\[INF\]'" the
            # "-a" is GNU grep's --text flag, not an AND; the two -e
            # patterns are OR'ed, so lines matching either one are removed.
            # Confirm this matches the intent.
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        elif [ "${number_type[1]}" -eq 3 ]; then
            # if failed, print all
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
        else
            # there is a start keyword but no end keyword, so print all
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
        fi
    elif [ "${prev_line_type}" -eq 2 ] || [ "${prev_line_type}" -eq 3 ]; then
        if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
            # previous line was an end marker and this is another end marker
            # without a start keyword in between, so print all
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
        else
            # this area is not from start to end, cut s3fs information messages
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        fi
    else
        if [ "${number_type[1]}" -eq 2 ] || [ "${number_type[1]}" -eq 3 ]; then
            # previous line was normal but this is an end marker without a
            # start keyword, so print all
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%'
        else
            # this area is normal, cut s3fs information messages
            head "-${head_line_cnt}" "${SUITELOG}" | tail "-${tail_line_cnt}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        fi
    fi
    # blank separator after each "failed" marker
    if [ "${number_type[1]}" -eq 3 ]; then
        echo ""
    fi
    prev_line_type="${number_type[1]}"
    prev_line_number="${number_type[0]}"

done < "${TMP_LINENO_FILE}"
|
||||
|
||||
#
# Print rest lines
#
# Print whatever follows the last keyword line in the log, with the same
# filtering rules as the loop above.
#
file_line_cnt=$(wc -l < "${SUITELOG}")
tail_line_cnt=$((file_line_cnt - prev_line_number))

if [ "${prev_line_type}" -eq 1 ]; then
    # a test started but no end marker was seen: keep everything except
    # percent-progress lines
    tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%'
else
    # NOTE(review): "-a" here is GNU grep's --text flag, not an AND of the
    # two -e patterns (they are OR'ed) — confirm intent.
    tail "-${tail_line_cnt}" "${SUITELOG}" | grep -v -e '[0-9]\+%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi

#
# Remove temp file
#
rm -f "${TMP_LINENO_FILE}"

exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
@ -1,4 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
|
||||
@ -8,13 +27,19 @@
|
||||
# environment variables:
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
|
||||
# S3FS_PROFILE=name s3fs profile to use (overrides key file)
|
||||
# TEST_BUCKET_1=bucketname Name of bucket to use
|
||||
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
|
||||
# S3_URL="http://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
# S3_ENDPOINT="us-east-1" Specify region
|
||||
# TMPDIR="/var/tmp" Set to use a temporary directory different
|
||||
# from /var/tmp
|
||||
# CHAOS_HTTP_PROXY=1 Test proxy(environment) by CHAOS HTTP PROXY
|
||||
# CHAOS_HTTP_PROXY_OPT=1 Test proxy(option) by CHAOS HTTP PROXY
|
||||
#
|
||||
# Example of running against Amazon S3 using a bucket named "bucket:
|
||||
# Example of running against Amazon S3 using a bucket named "bucket":
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" ./small-integration-test.sh
|
||||
#
|
||||
# To change the s3fs-fuse debug level:
|
||||
#
|
||||
@ -27,7 +52,7 @@
|
||||
#
|
||||
# Run all of the tests from the makefile
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" make check
|
||||
#
|
||||
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
|
||||
# useful for poking around with plain old curl
|
||||
@ -38,55 +63,93 @@
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
S3FS=../src/s3fs
|
||||
|
||||
# Allow these defaulted values to be overridden
|
||||
: ${S3_URL:="http://127.0.0.1:8080"}
|
||||
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
|
||||
: ${TEST_BUCKET_1:="s3fs-integration-test"}
|
||||
#
|
||||
# [NOTE]
|
||||
# CHAOS HTTP PROXY does not support HTTPS.
|
||||
#
|
||||
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
|
||||
: "${S3_URL:="https://127.0.0.1:8080"}"
|
||||
else
|
||||
: "${S3_URL:="http://127.0.0.1:8080"}"
|
||||
fi
|
||||
: "${S3_ENDPOINT:="us-east-1"}"
|
||||
: "${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}"
|
||||
: "${TEST_BUCKET_1:="s3fs-integration-test"}"
|
||||
|
||||
export TEST_BUCKET_1
|
||||
export S3_URL
|
||||
export TEST_SCRIPT_DIR=`pwd`
|
||||
export S3_ENDPOINT
|
||||
export S3PROXY_CACERT_FILE
|
||||
TEST_SCRIPT_DIR=$(pwd)
|
||||
export TEST_SCRIPT_DIR
|
||||
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
|
||||
|
||||
S3PROXY_VERSION="1.5.2"
|
||||
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
|
||||
S3PROXY_VERSION="2.7.0"
|
||||
S3PROXY_HASH="1a13c27f78902b57db871a2e638f520f439811b1c98b2208ff71ba64b61c4f3f"
|
||||
S3PROXY_BINARY="${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}"
|
||||
|
||||
CHAOS_HTTP_PROXY_VERSION="1.1.0"
|
||||
CHAOS_HTTP_PROXY_HASH="9ad1b9ac6569e99b2db3e7edfdd78fae0ea5c83069beccdf6bceebc848add2e7"
|
||||
CHAOS_HTTP_PROXY_BINARY="chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}"
|
||||
|
||||
PJDFSTEST_HASH="c711b5f6b666579846afba399a998f74f60c488b"
|
||||
|
||||
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
|
||||
then
|
||||
echo "Missing credentials file: $S3FS_CREDENTIALS_FILE"
|
||||
echo "Missing credentials file: ${S3FS_CREDENTIALS_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
chmod 600 "$S3FS_CREDENTIALS_FILE"
|
||||
chmod 600 "${S3FS_CREDENTIALS_FILE}"
|
||||
|
||||
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
|
||||
then
|
||||
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
|
||||
if [ -z "${S3FS_PROFILE}" ]; then
|
||||
AWS_ACCESS_KEY_ID=$(cut -d: -f1 "${S3FS_CREDENTIALS_FILE}")
|
||||
export AWS_ACCESS_KEY_ID
|
||||
|
||||
AWS_SECRET_ACCESS_KEY=$(cut -d: -f2 "${S3FS_CREDENTIALS_FILE}")
|
||||
export AWS_SECRET_ACCESS_KEY
|
||||
fi
|
||||
|
||||
if [ ! -d "${TEST_BUCKET_MOUNT_POINT_1}" ]; then
|
||||
mkdir -p "${TEST_BUCKET_MOUNT_POINT_1}"
|
||||
fi
|
||||
|
||||
# [NOTE]
|
||||
# For the Github Actions macos-14 Runner,
|
||||
# Set variables for when stdbuf is used and when it is not.
|
||||
#
|
||||
if [ -n "${STDBUF_BIN}" ]; then
|
||||
STDBUF_COMMAND_LINE=("${STDBUF_BIN}" -oL -eL)
|
||||
else
|
||||
STDBUF_COMMAND_LINE=()
|
||||
fi
|
||||
|
||||
# This function execute the function parameters $1 times
|
||||
# before giving up, with 1 second delays.
|
||||
function retry {
|
||||
set +o errexit
|
||||
N=$1; shift;
|
||||
status=0
|
||||
for i in $(seq $N); do
|
||||
echo "Trying: $@"
|
||||
$@
|
||||
status=$?
|
||||
if [ $status == 0 ]; then
|
||||
local N="$1"
|
||||
shift
|
||||
rc=0
|
||||
for _ in $(seq "${N}"); do
|
||||
echo "Trying: $*"
|
||||
# shellcheck disable=SC2068,SC2294
|
||||
eval $@
|
||||
rc=$?
|
||||
if [ "${rc}" -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
echo "Retrying: $@"
|
||||
echo "Retrying: $*"
|
||||
done
|
||||
|
||||
if [ $status != 0 ]; then
|
||||
echo "timeout waiting for $@"
|
||||
if [ "${rc}" -ne 0 ]; then
|
||||
echo "timeout waiting for $*"
|
||||
fi
|
||||
set -o errexit
|
||||
return $status
|
||||
return "${rc}"
|
||||
}
|
||||
|
||||
# Proxy is not started if S3PROXY_BINARY is an empty string
|
||||
@ -95,73 +158,160 @@ function retry {
|
||||
#
|
||||
function start_s3proxy {
|
||||
if [ -n "${PUBLIC}" ]; then
|
||||
S3PROXY_CONFIG="s3proxy-noauth.conf"
|
||||
local S3PROXY_CONFIG="s3proxy-noauth.conf"
|
||||
else
|
||||
S3PROXY_CONFIG="s3proxy.conf"
|
||||
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
|
||||
local S3PROXY_CONFIG="s3proxy.conf"
|
||||
else
|
||||
local S3PROXY_CONFIG="s3proxy_http.conf"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "${S3PROXY_BINARY}" ]
|
||||
then
|
||||
if [ ! -e "${S3PROXY_BINARY}" ]; then
|
||||
wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
|
||||
--quiet -O "${S3PROXY_BINARY}"
|
||||
curl "https://github.com/gaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
|
||||
--fail --location --silent --output "/tmp/${S3PROXY_BINARY}"
|
||||
echo "$S3PROXY_HASH" "/tmp/${S3PROXY_BINARY}" | sha256sum --check
|
||||
mv "/tmp/${S3PROXY_BINARY}" "${S3PROXY_BINARY}"
|
||||
chmod +x "${S3PROXY_BINARY}"
|
||||
fi
|
||||
|
||||
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
|
||||
# generate self-signed SSL certificate
|
||||
#
|
||||
# [NOTE]
|
||||
# The PROXY test is HTTP only, so do not create CA certificates.
|
||||
#
|
||||
if [ -z "${CHAOS_HTTP_PROXY}" ] && [ -z "${CHAOS_HTTP_PROXY_OPT}" ]; then
|
||||
S3PROXY_CACERT_FILE="/tmp/keystore.pem"
|
||||
rm -f /tmp/keystore.jks "${S3PROXY_CACERT_FILE}"
|
||||
printf 'password\npassword\n\n\n\n\n\n\ny' | keytool -genkey -keystore /tmp/keystore.jks -keyalg RSA -keysize 2048 -validity 365 -ext SAN=IP:127.0.0.1
|
||||
echo password | keytool -exportcert -keystore /tmp/keystore.jks -rfc -file "${S3PROXY_CACERT_FILE}"
|
||||
else
|
||||
S3PROXY_CACERT_FILE=""
|
||||
fi
|
||||
|
||||
"${STDBUF_COMMAND_LINE[@]}" java -jar "${S3PROXY_BINARY}" --properties "${S3PROXY_CONFIG}" &
|
||||
S3PROXY_PID=$!
|
||||
|
||||
# wait for S3Proxy to start
|
||||
for i in $(seq 30);
|
||||
do
|
||||
if exec 3<>"/dev/tcp/127.0.0.1/8080";
|
||||
then
|
||||
exec 3<&- # Close for read
|
||||
exec 3>&- # Close for write
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
wait_for_port 8080
|
||||
fi
|
||||
|
||||
S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
|
||||
if [ -n "${CHAOS_HTTP_PROXY}" ] || [ -n "${CHAOS_HTTP_PROXY_OPT}" ]; then
|
||||
if [ ! -e "${CHAOS_HTTP_PROXY_BINARY}" ]; then
|
||||
curl "https://github.com/bouncestorage/chaos-http-proxy/releases/download/chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}/chaos-http-proxy" \
|
||||
--fail --location --silent --output "/tmp/${CHAOS_HTTP_PROXY_BINARY}"
|
||||
echo "$CHAOS_HTTP_PROXY_HASH" "/tmp/${CHAOS_HTTP_PROXY_BINARY}" | sha256sum --check
|
||||
mv "/tmp/${CHAOS_HTTP_PROXY_BINARY}" "${CHAOS_HTTP_PROXY_BINARY}"
|
||||
chmod +x "${CHAOS_HTTP_PROXY_BINARY}"
|
||||
fi
|
||||
|
||||
"${STDBUF_COMMAND_LINE[@]}" java -jar "${CHAOS_HTTP_PROXY_BINARY}" --properties chaos-http-proxy.conf &
|
||||
CHAOS_HTTP_PROXY_PID=$!
|
||||
|
||||
# wait for Chaos HTTP Proxy to start
|
||||
wait_for_port 1080
|
||||
fi
|
||||
|
||||
if [ ! -d "pjd-pjdfstest-${PJDFSTEST_HASH:0:7}" ]; then
|
||||
curl "https://api.github.com/repos/pjd/pjdfstest/tarball/${PJDFSTEST_HASH}" \
|
||||
--fail --location --silent --output /tmp/pjdfstest.tar.gz
|
||||
tar zxf /tmp/pjdfstest.tar.gz
|
||||
rm -f /tmp/pjdfstest.tar.gz
|
||||
|
||||
rm -f pjdfstest
|
||||
ln -s "pjd-pjdfstest-${PJDFSTEST_HASH:0:7}" pjdfstest
|
||||
|
||||
(cd pjdfstest && autoreconf -ifs && ./configure && make)
|
||||
fi
|
||||
}
|
||||
|
||||
function stop_s3proxy {
|
||||
if [ -n "${S3PROXY_PID}" ]
|
||||
then
|
||||
kill $S3PROXY_PID
|
||||
wait $S3PROXY_PID
|
||||
kill "${S3PROXY_PID}"
|
||||
fi
|
||||
|
||||
if [ -n "${CHAOS_HTTP_PROXY_PID}" ]
|
||||
then
|
||||
kill "${CHAOS_HTTP_PROXY_PID}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Mount the bucket, function arguments passed to s3fs in addition to
|
||||
# a set of common arguments.
|
||||
function start_s3fs {
|
||||
|
||||
# Public bucket if PUBLIC is set
|
||||
if [ -n "${PUBLIC}" ]; then
|
||||
AUTH_OPT="-o public_bucket=1"
|
||||
local AUTH_OPT="-o public_bucket=1"
|
||||
elif [ -n "${S3FS_PROFILE}" ]; then
|
||||
local AUTH_OPT="-o profile=${S3FS_PROFILE}"
|
||||
else
|
||||
AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
|
||||
local AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
|
||||
fi
|
||||
|
||||
# If VALGRIND is set, pass it as options to valgrind.
|
||||
# start valgrind-listener in another shell.
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
# Start valgind-listener (default port is 1500)
|
||||
# Start valgrind-listener (default port is 1500)
|
||||
if [ -n "${VALGRIND}" ]; then
|
||||
VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
|
||||
fi
|
||||
|
||||
# On OSX only, we need to specify the direct_io and auto_cache flag.
|
||||
#
|
||||
# And Turn off creation and reference of spotlight index.
|
||||
# (Leaving spotlight ON will result in a lot of wasted requests,
|
||||
# which will affect test execution time)
|
||||
#
|
||||
if [ "$(uname)" = "Darwin" ]; then
|
||||
local DIRECT_IO_OPT="-o direct_io -o auto_cache"
|
||||
|
||||
# disable spotlight
|
||||
sudo mdutil -a -i off
|
||||
else
|
||||
local DIRECT_IO_OPT=""
|
||||
fi
|
||||
|
||||
# Set environment variables or options for proxy.
|
||||
# And the PROXY test is HTTP only and does not set CA certificates.
|
||||
#
|
||||
if [ -n "${CHAOS_HTTP_PROXY}" ]; then
|
||||
export http_proxy="127.0.0.1:1080"
|
||||
S3FS_HTTP_PROXY_OPT=""
|
||||
elif [ -n "${CHAOS_HTTP_PROXY_OPT}" ]; then
|
||||
S3FS_HTTP_PROXY_OPT="-o proxy=http://127.0.0.1:1080"
|
||||
else
|
||||
S3FS_HTTP_PROXY_OPT=""
|
||||
fi
|
||||
|
||||
# [NOTE]
|
||||
# For macos fuse-t, we need to specify the "noattrcache" option to
|
||||
# disable NFS caching.
|
||||
#
|
||||
if [ "$(uname)" = "Darwin" ]; then
|
||||
local FUSE_T_ATTRCACHE_OPT="-o noattrcache"
|
||||
else
|
||||
local FUSE_T_ATTRCACHE_OPT=""
|
||||
fi
|
||||
|
||||
# [NOTE]
|
||||
# On macOS we may get a VERIFY error for the self-signed certificate used by s3proxy.
|
||||
# We can specify NO_CHECK_CERT=1 to avoid this.
|
||||
#
|
||||
if [ -n "${NO_CHECK_CERT}" ] && [ "${NO_CHECK_CERT}" -eq 1 ]; then
|
||||
local NO_CHECK_CERT_OPT="-o no_check_certificate"
|
||||
else
|
||||
local NO_CHECK_CERT_OPT=""
|
||||
fi
|
||||
|
||||
# Common s3fs options:
|
||||
#
|
||||
# TODO: Allow all these options to be overriden with env variables
|
||||
# TODO: Allow all these options to be overridden with env variables
|
||||
#
|
||||
# use_path_request_style
|
||||
# The test env doesn't have virtual hosts
|
||||
# createbucket
|
||||
# S3Proxy always starts with no buckets, this tests the s3fs-fuse
|
||||
# automatic bucket creation path.
|
||||
# $AUTH_OPT
|
||||
# Will be either "-o public_bucket=1"
|
||||
# or
|
||||
@ -173,27 +323,60 @@ function start_s3fs {
|
||||
#
|
||||
|
||||
# subshell with set -x to log exact invocation of s3fs-fuse
|
||||
# shellcheck disable=SC2086
|
||||
(
|
||||
set -x
|
||||
stdbuf -oL -eL \
|
||||
${VALGRIND_EXEC} ${S3FS} \
|
||||
$TEST_BUCKET_1 \
|
||||
$TEST_BUCKET_MOUNT_POINT_1 \
|
||||
CURL_CA_BUNDLE="${S3PROXY_CACERT_FILE}" \
|
||||
"${STDBUF_COMMAND_LINE[@]}" \
|
||||
${VALGRIND_EXEC} \
|
||||
${S3FS} \
|
||||
${TEST_BUCKET_1} \
|
||||
${TEST_BUCKET_MOUNT_POINT_1} \
|
||||
-o use_path_request_style \
|
||||
-o url=${S3_URL} \
|
||||
-o createbucket \
|
||||
-o url="${S3_URL}" \
|
||||
-o region="${S3_ENDPOINT}" \
|
||||
-o use_xattr=1 \
|
||||
-o enable_unsigned_payload \
|
||||
${AUTH_OPT} \
|
||||
-o dbglevel=${DBGLEVEL:=info} \
|
||||
${DIRECT_IO_OPT} \
|
||||
${S3FS_HTTP_PROXY_OPT} \
|
||||
${NO_CHECK_CERT_OPT} \
|
||||
${FUSE_T_ATTRCACHE_OPT} \
|
||||
-o stat_cache_expire=1 \
|
||||
-o stat_cache_interval_expire=1 \
|
||||
-o dbglevel="${DBGLEVEL:=info}" \
|
||||
-o no_time_stamp_msg \
|
||||
-o retries=3 \
|
||||
-f \
|
||||
${@} \
|
||||
|& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
|
||||
)
|
||||
"${@}" &
|
||||
echo $! >&3
|
||||
) 3>pid | "${STDBUF_COMMAND_LINE[@]}" "${SED_BIN}" "${SED_BUFFER_FLAG}" "s/^/s3fs: /" &
|
||||
sleep 1
|
||||
S3FS_PID=$(<pid)
|
||||
export S3FS_PID
|
||||
rm -f pid
|
||||
|
||||
retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
|
||||
if [ "$(uname)" = "Darwin" ]; then
|
||||
local TRYCOUNT=0
|
||||
while [ "${TRYCOUNT}" -le "${RETRIES:=20}" ]; do
|
||||
_DF_RESULT=$(df 2>/dev/null)
|
||||
if echo "${_DF_RESULT}" | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"; then
|
||||
break;
|
||||
fi
|
||||
sleep 1
|
||||
TRYCOUNT=$((TRYCOUNT + 1))
|
||||
done
|
||||
if [ "${TRYCOUNT}" -gt "${RETRIES}" ]; then
|
||||
echo "Waited ${TRYCOUNT} seconds, but it could not be mounted."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
retry "${RETRIES:=20}" grep -q "${TEST_BUCKET_MOUNT_POINT_1}" /proc/mounts || exit 1
|
||||
fi
|
||||
|
||||
# Quick way to start system up for manual testing with options under test
|
||||
if [[ -n ${INTERACT} ]]; then
|
||||
echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready"
|
||||
if [[ -n "${INTERACT}" ]]; then
|
||||
echo "Mountpoint ${TEST_BUCKET_MOUNT_POINT_1} is ready"
|
||||
echo "control-C to quit"
|
||||
sleep infinity
|
||||
exit 0
|
||||
@ -202,14 +385,29 @@ function start_s3fs {
|
||||
|
||||
function stop_s3fs {
|
||||
# Retry in case file system is in use
|
||||
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
|
||||
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
|
||||
if [ "$(uname)" = "Darwin" ]; then
|
||||
if df | grep -q "${TEST_BUCKET_MOUNT_POINT_1}"; then
|
||||
retry 10 df "|" grep -q "${TEST_BUCKET_MOUNT_POINT_1}" "&&" umount "${TEST_BUCKET_MOUNT_POINT_1}"
|
||||
fi
|
||||
else
|
||||
if grep -q "${TEST_BUCKET_MOUNT_POINT_1}" /proc/mounts; then
|
||||
retry 10 grep -q "${TEST_BUCKET_MOUNT_POINT_1}" /proc/mounts "&&" fusermount -u "${TEST_BUCKET_MOUNT_POINT_1}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
|
||||
function common_exit_handler {
|
||||
stop_s3proxy
|
||||
stop_s3fs
|
||||
stop_s3proxy
|
||||
}
|
||||
trap common_exit_handler EXIT
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user