Compare commits
600 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| bb20fc3c98 | |||
| 3e66e42ae5 | |||
| 0665d78550 | |||
| 55d670f22f | |||
| 32ae0d2c79 | |||
| 924eeb3587 | |||
| bc9126d774 | |||
| 4df50e7f85 | |||
| 4e26728cbf | |||
| 7135666060 | |||
| 018ccb9a11 | |||
| ee1d3a9057 | |||
| b762a0a85b | |||
| 9771be29b2 | |||
| 010a6b83ef | |||
| 87224b830b | |||
| 9e77650e8c | |||
| e0712f444d | |||
| 913b72fdaf | |||
| 39102608aa | |||
| 23945a0130 | |||
| bdfb9ee815 | |||
| 1a75a94253 | |||
| a9d527d517 | |||
| 94666f7754 | |||
| 41acbaa746 | |||
| d5042a73bd | |||
| f6756596b3 | |||
| a24f78f5a4 | |||
| 64d4b2c0b0 | |||
| cc4a307415 | |||
| a07a5336f6 | |||
| 9789ca1a4d | |||
| 4ec2d685e7 | |||
| dc62953040 | |||
| 0c42a74a8a | |||
| cf3e82d10a | |||
| 0e815c2fbc | |||
| 27a5536749 | |||
| db338b36b8 | |||
| 72b906255f | |||
| 2211678d91 | |||
| 80162c126b | |||
| 1db94a0b30 | |||
| b6349e9428 | |||
| bedd648d47 | |||
| 58b3cce320 | |||
| 81102a5963 | |||
| 42fb30852b | |||
| e51361cb94 | |||
| 80a9ed9d6a | |||
| e2129001eb | |||
| 805cc064af | |||
| 3c2279db39 | |||
| 412876ca33 | |||
| 461a346bf4 | |||
| ae4bcd405c | |||
| 0536dc1112 | |||
| 1c3507ede1 | |||
| 3f47037cc7 | |||
| d87321ef3c | |||
| 01ac815346 | |||
| 2daa1d53d9 | |||
| cc2eed84a5 | |||
| c644e4bef2 | |||
| a7d83df3eb | |||
| e1886b5343 | |||
| 873e376098 | |||
| 3c378a4a7a | |||
| ca7266fb76 | |||
| 4a0c23258e | |||
| ccc79ec139 | |||
| 5c4a0a862a | |||
| 2a779df4fd | |||
| f5bf41cf11 | |||
| f74c7407db | |||
| 433c04af26 | |||
| 2e51a339a9 | |||
| 0411872dda | |||
| d8f1aef7be | |||
| 14d3e12103 | |||
| fd13eb314b | |||
| daba563a1c | |||
| b79b0b1a92 | |||
| 01d4323b50 | |||
| dc85eda188 | |||
| ccf3e7bfa2 | |||
| d22acae9a3 | |||
| 7ecfba811e | |||
| 1e7330e499 | |||
| 68475e5fcf | |||
| 8cc008c501 | |||
| faaaf2ee3c | |||
| 3d42d0515d | |||
| 3d70e8966f | |||
| 6837cbfe06 | |||
| a5c20175a1 | |||
| 43d1439420 | |||
| d8cf26bd50 | |||
| c321c8c23f | |||
| 6227fce091 | |||
| aba8e6ccfa | |||
| f528a86219 | |||
| 5b15c7c4e9 | |||
| afd438d363 | |||
| 80972aa33d | |||
| 520995a7e8 | |||
| 5c3c6bff2f | |||
| fb937635f5 | |||
| 3ad1c95e86 | |||
| 2c4c78fd65 | |||
| 0afef077ed | |||
| 80f598f439 | |||
| bacd15714a | |||
| 5cb7a31c09 | |||
| 99aace4fc9 | |||
| c7f8f61d09 | |||
| 159cd2c682 | |||
| 513f41fddf | |||
| 543aed2a32 | |||
| 20ea96328c | |||
| 007edb1773 | |||
| f78bcc5229 | |||
| 43ec064fb9 | |||
| ffac4c8417 | |||
| 4adcd4a6c8 | |||
| e936854493 | |||
| 850a813171 | |||
| 5bbcd3b981 | |||
| a337c32840 | |||
| d39e4e4b1f | |||
| b51d60ef5e | |||
| 58037da061 | |||
| 1eb266588e | |||
| deb560067e | |||
| 4e351c59e3 | |||
| eb597289cb | |||
| 6fd42d9fe4 | |||
| efff9c01a6 | |||
| a83d5baa90 | |||
| 50d13255e4 | |||
| 5195fa93fa | |||
| e5e63d6ac3 | |||
| 7a65a414c3 | |||
| 4a192ffdf9 | |||
| 944d21cabb | |||
| d267212289 | |||
| 58d8e5586a | |||
| ce803daf4a | |||
| 9bf34e2fda | |||
| 52218d2ddb | |||
| 6bd1a7eac0 | |||
| 6177d7b096 | |||
| 3161bf4608 | |||
| 2349dafb98 | |||
| 1cd58d7828 | |||
| 8aa06d621a | |||
| ecf13a8cb9 | |||
| b8ff6a647e | |||
| 49110c671d | |||
| febaf6849f | |||
| 4893174652 | |||
| 5820c72092 | |||
| 4f23f38583 | |||
| bbfa91141a | |||
| f439c6382f | |||
| 21321a9d96 | |||
| f03b50fd13 | |||
| 15a870f9d9 | |||
| 9472ee4a01 | |||
| 1f1f824da7 | |||
| f02105c346 | |||
| c596441f58 | |||
| 455e29cbea | |||
| 511d223468 | |||
| 5324c1b588 | |||
| 554ea49294 | |||
| d7f77a6282 | |||
| 048aea1151 | |||
| f1ad626b46 | |||
| a78d8d1da4 | |||
| fbebc6fa57 | |||
| c18fc901c4 | |||
| 245f14c8c1 | |||
| d732eef776 | |||
| 56b184fd0c | |||
| 9e5eaad79b | |||
| 738eaadcbf | |||
| 1cf3d2452e | |||
| 670dce6f4a | |||
| 07cfdcf205 | |||
| 15b7450713 | |||
| 272e0d3d46 | |||
| 8d8a2a66e5 | |||
| befc2e9e6f | |||
| 059ab1f0f4 | |||
| f2fe1738cd | |||
| 0d4847596e | |||
| 8e86ef1634 | |||
| a32a05962e | |||
| a7e81fda9b | |||
| 9e4f9d4bdc | |||
| 0677a096a9 | |||
| 381835e2a9 | |||
| af070fa3de | |||
| f9cd43b684 | |||
| 7095787f1f | |||
| 4ca336aed0 | |||
| 8a18806a57 | |||
| e5e124b9aa | |||
| 090ac7a8a0 | |||
| 97af471aa6 | |||
| 0176fc712b | |||
| c426c896d0 | |||
| 0a99470369 | |||
| cd280d8702 | |||
| b1bade37d8 | |||
| 005c186e1b | |||
| 6f8ecb28c5 | |||
| 4c28eb2a78 | |||
| 042332bcec | |||
| 071cd0f849 | |||
| d7bb834bcb | |||
| 9b437fc1fe | |||
| 6f6a67807b | |||
| e5785d4690 | |||
| a4ce54d615 | |||
| ddbcec5c96 | |||
| 7cbb4c958b | |||
| 6c5adbb9af | |||
| 4db6e1a10a | |||
| ea517c80a4 | |||
| 9f6ed6c08e | |||
| b1ddb483a4 | |||
| 17352ef4fd | |||
| 71766039ff | |||
| c607c9be58 | |||
| df604e50fb | |||
| 876662ff89 | |||
| 058706014b | |||
| 99ec09f13a | |||
| 4a011d87e0 | |||
| c6edc2cd8f | |||
| cc196bfdf0 | |||
| 895d5006bb | |||
| 62dcda6a56 | |||
| cbf072bc55 | |||
| 1b4d2a32d2 | |||
| b71c90bbe1 | |||
| 80344aafd3 | |||
| b5ca400500 | |||
| 2e89439120 | |||
| 555410386c | |||
| 08b132ddb9 | |||
| 1e86cc643d | |||
| f53503438c | |||
| 0d43d070cc | |||
| 0791fdca2a | |||
| 6e8678d5e3 | |||
| 10d9f75366 | |||
| 77993e607e | |||
| 74d8671e54 | |||
| 4c41eac29c | |||
| 3c97c1b251 | |||
| 84c671a81a | |||
| f336bdebcc | |||
| e5b8377202 | |||
| 4f42f4ab0c | |||
| 11b385820d | |||
| f1a9eaee54 | |||
| ffee8d5f39 | |||
| eeb839242b | |||
| f7760976a5 | |||
| ca2d1d873d | |||
| 951761ee2c | |||
| 231fd001d9 | |||
| e00afa8128 | |||
| e9297f39ea | |||
| 314dc5a398 | |||
| e07cb020cc | |||
| 9f79b9e0da | |||
| e87e40b3b4 | |||
| f0f95478ec | |||
| bd66b57ad3 | |||
| a1d3ff9766 | |||
| 7f61a947c2 | |||
| 4d0bef1e90 | |||
| 960823fb40 | |||
| c04e8e7a9d | |||
| fb6debd986 | |||
| d8185a25aa | |||
| 53337a0a28 | |||
| ae51556d04 | |||
| b3de9195a7 | |||
| 055ecf6ea7 | |||
| c603680e02 | |||
| 814aadd7e3 | |||
| dce63d1529 | |||
| 8ff05d8e38 | |||
| dfa84b82a8 | |||
| 6ac8618381 | |||
| 8c527c3616 | |||
| 54a074647e | |||
| c5ebf5d328 | |||
| 43c6ef560e | |||
| 3076abc744 | |||
| 07636c8a8d | |||
| 35d55ee513 | |||
| a442e843be | |||
| c0cf90cf8b | |||
| 3b1cc3b197 | |||
| a0c1f30ae7 | |||
| 8822a86709 | |||
| 98f397de0e | |||
| fd4d23f8f7 | |||
| 4820f0a42b | |||
| 807a618cf7 | |||
| a93e500b44 | |||
| 92d3114584 | |||
| 5062d6fbd9 | |||
| 7d14ebaf09 | |||
| cd794a6985 | |||
| 84b421d6ef | |||
| 8316da5bbe | |||
| fa287aeef7 | |||
| caaf4cac55 | |||
| 010276ceab | |||
| f219817eb3 | |||
| d487348d21 | |||
| eb0b29708f | |||
| 877842a720 | |||
| 1fc25e8c3f | |||
| 61ecafd426 | |||
| 79bd3441eb | |||
| 5f5da4b2cb | |||
| dede19d8c0 | |||
| fada95f58e | |||
| 014b8c5982 | |||
| 46d79c5bc2 | |||
| 40ba3b44a1 | |||
| beadf95975 | |||
| 2887f8916b | |||
| 0c9a8932f7 | |||
| ac72431195 | |||
| 2a7877beff | |||
| 7a56459103 | |||
| 5292fa74d1 | |||
| f2184e34dd | |||
| 1d4867830b | |||
| 36a4903843 | |||
| c83a3e67c9 | |||
| 05014c49c8 | |||
| aa69107165 | |||
| d373b0eca3 | |||
| 6aa40b2747 | |||
| 34c3bfe408 | |||
| 6ac56e722d | |||
| 61dc7f0a70 | |||
| 9f000957dd | |||
| b2141313e2 | |||
| aa9bd1fa3c | |||
| 5a2dc03a1c | |||
| 508fafbe62 | |||
| e29548178b | |||
| ab2f36f202 | |||
| b8c9fcfd70 | |||
| 58ce544e83 | |||
| e98ce36301 | |||
| 6401b4ae92 | |||
| 25b49e1a2e | |||
| c7def35b54 | |||
| ddba1c63c5 | |||
| c512516e14 | |||
| 2c43b1e12b | |||
| b68d97c6bf | |||
| f1757e4343 | |||
| e2d5641d99 | |||
| 523fe1e309 | |||
| c985b5e4d0 | |||
| 786f1a8fc7 | |||
| 18cb2e2662 | |||
| 743c706b0a | |||
| 4ed0e5f35a | |||
| fd6b37d3da | |||
| 56e24de0d4 | |||
| 2780043a7d | |||
| 54c9e48bb7 | |||
| ed5795eead | |||
| 3d225163f8 | |||
| 0569cec3ea | |||
| a2f8ac535e | |||
| 29355d75b0 | |||
| d9e89deef6 | |||
| 6b051eac47 | |||
| da997de918 | |||
| d97094fb8d | |||
| b91fc5409e | |||
| 3c970646d1 | |||
| a92668ae78 | |||
| 88cd8feb05 | |||
| 91c16f826a | |||
| d4d60ff315 | |||
| e8033f96de | |||
| 5fba542a29 | |||
| 44de3ffa05 | |||
| 2efa6df028 | |||
| 9e530c86ae | |||
| 95857733a1 | |||
| 664f910083 | |||
| 735e4b0848 | |||
| e8d76a6f58 | |||
| 0a6926be54 | |||
| 830a971bde | |||
| 4779d14d7d | |||
| 8929a27a24 | |||
| eea624c171 | |||
| cdaf4a9674 | |||
| 6fe92d5ed6 | |||
| 8649a68766 | |||
| af005b6e5e | |||
| b19d2ae78f | |||
| 5634f9bdcd | |||
| c703fa15c0 | |||
| d9c106cfde | |||
| 203f78fdae | |||
| c5af62b023 | |||
| dcd70daf48 | |||
| 8263919b0e | |||
| 97488e603f | |||
| 41c23adb0e | |||
| a85183d42c | |||
| 45b67b9604 | |||
| c376efdd28 | |||
| 4c5f510207 | |||
| 06032aa661 | |||
| e8fb2aefb3 | |||
| 3cb6c5e161 | |||
| 7e0c53dfe9 | |||
| c2ca7e43b6 | |||
| ae47d5d349 | |||
| 35d3fce7a0 | |||
| 4177d8bd3b | |||
| ad5349a488 | |||
| 6b57a8c1fc | |||
| 92a4034c5e | |||
| 3e4002df0d | |||
| 1b9ec7f4fc | |||
| 4a7c4a9e9d | |||
| 0d3fb0658a | |||
| 73cf2ba95d | |||
| 5a481e6a01 | |||
| d8e12839af | |||
| 3bf05dabea | |||
| d4e86a17d1 | |||
| 6555e7ebb0 | |||
| ae9d8eb734 | |||
| e49d594db4 | |||
| 66bb0898db | |||
| b323312312 | |||
| 58e52bad4f | |||
| 57b2a60172 | |||
| 212bbbbdf0 | |||
| a0e62b5588 | |||
| e9831dd772 | |||
| da95afba8a | |||
| 0bd875eb9e | |||
| af63a42773 | |||
| ad9a374229 | |||
| 1b86e4d414 | |||
| 86b0921ac4 | |||
| dbe98dcbd2 | |||
| 4a72b60707 | |||
| 7a4696fc17 | |||
| e3de6ea458 | |||
| 1db4739ed8 | |||
| 25375a6b48 | |||
| ca87df7d44 | |||
| d052dc0b9d | |||
| 3f542e9cf5 | |||
| 04493de767 | |||
| 4fdab46617 | |||
| 1a23b880d5 | |||
| b3c376afbe | |||
| adcf5754ae | |||
| 0863672e27 | |||
| 0f503ced25 | |||
| 987a166bf4 | |||
| 57b6f0eeaf | |||
| f71a28f9b9 | |||
| 45c7ea9194 | |||
| c9f4312588 | |||
| 8b657eee41 | |||
| b9c9de7f97 | |||
| e559f05326 | |||
| 824124fedc | |||
| be9d407fa0 | |||
| c494e54320 | |||
| b52b6f3fc5 | |||
| 82c9733101 | |||
| a45ff6cdaa | |||
| 960d45c853 | |||
| 246b767b64 | |||
| 0edf056e95 | |||
| 88819af2d8 | |||
| b048c981ad | |||
| e1dafe76dd | |||
| 1a2e63ecff | |||
| a60b32cb80 | |||
| 6b58220009 | |||
| a841057679 | |||
| ee6abea956 | |||
| 8b0acd75e0 | |||
| cea7d44717 | |||
| 0da87e75fe | |||
| 566961c7a5 | |||
| ac65258d30 | |||
| 35261e6dba | |||
| 2818f23ba5 | |||
| 88f071ea22 | |||
| bd4bc0e7f1 | |||
| 890c1d53ff | |||
| 026260e7a1 | |||
| 99fe93b7f1 | |||
| b764c53020 | |||
| 11bd7128d2 | |||
| 7cda32664b | |||
| 4c73a0ae56 | |||
| 97fc845a6a | |||
| 7d9ac0163b | |||
| d903e064e0 | |||
| e1928288fe | |||
| 6ab6412dd3 | |||
| 30b7a69d3d | |||
| ccd0a446d8 | |||
| 0418e53b3c | |||
| bad48ab59a | |||
| bbad76bb71 | |||
| 6c1bd98c14 | |||
| b95e4acaeb | |||
| c238701d09 | |||
| 60d2ac3c7a | |||
| 967ef4d56b | |||
| ad57bdda6c | |||
| a0b69d1d3d | |||
| 5df94d7e33 | |||
| 1cbe9fb7a3 | |||
| 395f736753 | |||
| 065516c5f3 | |||
| 8660abaea2 | |||
| 366f0705a0 | |||
| ccea87ca68 | |||
| 5d54883e2f | |||
| 662f65c3c8 | |||
| 259f028490 | |||
| 5db550a298 | |||
| e3c77d2906 | |||
| ba00e79253 | |||
| c1791f920e | |||
| df3803c7b7 | |||
| 384b4cbafa | |||
| 40501a7a73 | |||
| ab89b4cd4a | |||
| 48e0d55c8e | |||
| 1eba27a50a | |||
| 41206fa0e2 | |||
| 21cf1d64e5 | |||
| ae91b6f673 | |||
| f4515b5cfa | |||
| 6c57cde7f9 | |||
| 5014c1827b | |||
| f531e6aff2 | |||
| c5c110137b | |||
| 5957d9ead0 | |||
| 5675df2a44 | |||
| 00bc9142c4 | |||
| 5653ab39fc | |||
| 473dd7c940 | |||
| ee824d52ba | |||
| 7c5fba9890 | |||
| f214cb03b2 | |||
| 416c51799b | |||
| cf6f665f03 | |||
| 20da0e4dd3 | |||
| fa8c417526 | |||
| 2c65aec6c8 | |||
| 96d8e6d823 | |||
| 62b8084300 | |||
| 907aff5de4 | |||
| bc09129ec5 | |||
| cd94f638e2 | |||
| b1fe419870 | |||
| 98b724391f | |||
| 620f6ec616 | |||
| 0c6a3882a2 | |||
| a08880ae15 | |||
| f48826dfe9 | |||
| 9c3551478e | |||
| cc94e1da26 | |||
| 2b7ea5813c | |||
| 185192be67 |
33
.clang-tidy
Normal file
33
.clang-tidy
Normal file
@ -0,0 +1,33 @@
|
||||
Checks: '
|
||||
-*,
|
||||
bugprone-*,
|
||||
-bugprone-branch-clone,
|
||||
-bugprone-macro-parentheses,
|
||||
google-*,
|
||||
-google-build-using-namespace,
|
||||
-google-readability-casting,
|
||||
-google-readability-function-size,
|
||||
-google-readability-todo,
|
||||
-google-runtime-int,
|
||||
-google-runtime-references,
|
||||
misc-*,
|
||||
-misc-redundant-expression,
|
||||
-misc-unused-parameters,
|
||||
modernize-*,
|
||||
-modernize-avoid-c-arrays,
|
||||
-modernize-deprecated-headers,
|
||||
-modernize-loop-convert,
|
||||
-modernize-use-auto,
|
||||
-modernize-use-nullptr,
|
||||
-modernize-use-trailing-return-type,
|
||||
-modernize-use-using,
|
||||
performance-*,
|
||||
portability-*,
|
||||
readability-*,
|
||||
-readability-else-after-return,
|
||||
-readability-function-size,
|
||||
-readability-implicit-bool-conversion,
|
||||
-readability-isolate-declaration,
|
||||
-readability-magic-numbers,
|
||||
-readability-named-parameter,
|
||||
-readability-simplify-boolean-expr'
|
||||
32
.gitattributes
vendored
Normal file
32
.gitattributes
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
* text eol=lf
|
||||
|
||||
*.png binary
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
27
.github/ISSUE_TEMPLATE.md
vendored
27
.github/ISSUE_TEMPLATE.md
vendored
@ -1,27 +1,28 @@
|
||||
#### Additional Information
|
||||
### Additional Information
|
||||
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
|
||||
_Keep in mind that the commands we provide to retrieve information are oriented to GNU/Linux Distributions, so you could need to use others if you use s3fs on macOS or BSD_
|
||||
|
||||
- Version of s3fs being used (s3fs --version)
|
||||
- _example: 1.0_
|
||||
#### Version of s3fs being used (s3fs --version)
|
||||
_example: 1.00_
|
||||
|
||||
- Version of fuse being used (pkg-config --modversion fuse)
|
||||
- _example: 2.9.4_
|
||||
#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)
|
||||
_example: 2.9.4_
|
||||
|
||||
- System information (uname -a)
|
||||
- _command result: uname -a_
|
||||
#### Kernel information (uname -r)
|
||||
_command result: uname -r_
|
||||
|
||||
- Distro (cat /etc/issue)
|
||||
- _command result: result_
|
||||
#### GNU/Linux Distribution, if applicable (cat /etc/os-release)
|
||||
_command result: cat /etc/os-release_
|
||||
|
||||
- s3fs command line used (if applicable)
|
||||
#### s3fs command line used, if applicable
|
||||
```
|
||||
```
|
||||
- /etc/fstab entry (if applicable):
|
||||
#### /etc/fstab entry, if applicable
|
||||
```
|
||||
```
|
||||
- s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
|
||||
#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)
|
||||
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
|
||||
```
|
||||
```
|
||||
#### Details about issue
|
||||
### Details about issue
|
||||
|
||||
|
||||
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -1,5 +1,5 @@
|
||||
#### Relevant Issue (if applicable)
|
||||
### Relevant Issue (if applicable)
|
||||
_If there are Issues related to this PullRequest, please list it._
|
||||
|
||||
#### Details
|
||||
### Details
|
||||
_Please describe the details of PullRequest._
|
||||
|
||||
115
.gitignore
vendored
115
.gitignore
vendored
@ -1,31 +1,86 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Compiled Object files
|
||||
#
|
||||
*.slo
|
||||
*.lo
|
||||
*.o
|
||||
/Makefile
|
||||
/Makefile.in
|
||||
/aclocal.m4
|
||||
/autom4te.cache/
|
||||
/config.guess
|
||||
/config.log
|
||||
/config.status
|
||||
/config.sub
|
||||
/stamp-h1
|
||||
/config.h
|
||||
/config.h.in
|
||||
/config.h.in~
|
||||
/configure
|
||||
/depcomp
|
||||
/test-driver
|
||||
/compile
|
||||
/doc/Makefile
|
||||
/doc/Makefile.in
|
||||
/install-sh
|
||||
/missing
|
||||
/src/.deps/
|
||||
/src/Makefile
|
||||
/src/Makefile.in
|
||||
/src/s3fs
|
||||
/src/test_*
|
||||
/test/.deps/
|
||||
/test/Makefile
|
||||
/test/Makefile.in
|
||||
/test/*.log
|
||||
/default_commit_hash
|
||||
*.Po
|
||||
*.Plo
|
||||
|
||||
#
|
||||
# autotools/automake
|
||||
#
|
||||
aclocal.m4
|
||||
autom4te.cache
|
||||
autoscan.log
|
||||
config.guess
|
||||
config.h
|
||||
config.h.in
|
||||
config.h.in~
|
||||
config.log
|
||||
config.status
|
||||
config.sub
|
||||
configure
|
||||
configure.scan
|
||||
depcomp
|
||||
install-sh
|
||||
libtool
|
||||
ltmain.sh
|
||||
m4
|
||||
m4/*
|
||||
missing
|
||||
stamp-h1
|
||||
Makefile
|
||||
Makefile.in
|
||||
test-driver
|
||||
compile
|
||||
missing
|
||||
|
||||
#
|
||||
# object directories
|
||||
#
|
||||
.deps
|
||||
.libs
|
||||
*/.deps
|
||||
*/.deps/*
|
||||
*/.libs
|
||||
*/.libs/*
|
||||
|
||||
#
|
||||
# each directories
|
||||
#
|
||||
*.log
|
||||
*.trs
|
||||
default_commit_hash
|
||||
src/s3fs
|
||||
src/test_*
|
||||
test/s3proxy-*
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
134
.travis.yml
134
.travis.yml
@ -1,17 +1,119 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
language: cpp
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- modprobe fuse
|
||||
- make check -C test
|
||||
- cat test/test-suite.log
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
|
||||
- sudo pip install --upgrade awscli
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- modprobe fuse
|
||||
- make check -C test
|
||||
- test/filter-suite-log.sh test/test-suite.log
|
||||
|
||||
- os: osx
|
||||
osx_image: xcode9.2
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/Library/Caches/Homebrew
|
||||
- /usr/local/Homebrew
|
||||
- $HOME/.osx_cache
|
||||
before_cache:
|
||||
- brew cleanup
|
||||
- cd /usr/local/Homebrew; find . \! -regex ".+\.git.+" -delete
|
||||
- mkdir -p $HOME/.osx_cache; touch $HOME/.osx_cache/cached
|
||||
before_install:
|
||||
- TAPS="$(brew --repository)/Library/Taps";
|
||||
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then
|
||||
rm -rf "$TAPS/caskroom/homebrew-cask";
|
||||
fi;
|
||||
if [ ! -f $HOME/.osx_cache/cached ]; then
|
||||
brew tap homebrew/homebrew-cask;
|
||||
else
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask;
|
||||
fi
|
||||
- HOMEBREW_NO_AUTO_UPDATE=1 brew cask install osxfuse
|
||||
- S3FS_BREW_PACKAGES='awscli cppcheck truncate';
|
||||
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do
|
||||
brew list | grep -q ${s3fs_brew_pkg};
|
||||
if [ $? -eq 0 ]; then
|
||||
brew outdated | grep -q ${s3fs_brew_pkg} && HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg};
|
||||
else
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg};
|
||||
fi;
|
||||
done
|
||||
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then
|
||||
sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs;
|
||||
elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then
|
||||
sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse;
|
||||
fi
|
||||
- sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
|
||||
- make check -C test
|
||||
- test/filter-suite-log.sh test/test-suite.log
|
||||
|
||||
|
||||
- os: linux-ppc64le
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:openjdk-r/ppa
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-ppc64el/jre/bin/java
|
||||
- sudo pip install --upgrade awscli
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- modprobe fuse
|
||||
- make check -C test
|
||||
- test/filter-suite-log.sh test/test-suite.log
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
4
AUTHORS
4
AUTHORS
@ -17,3 +17,7 @@ Bugfixes, performance and other improvements.
|
||||
5. Takeshi Nakatani <ggtakec@gmail.com>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
6. Andrew Gaul <gaul@gaul.org>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
29
COMPILATION.md
Normal file
29
COMPILATION.md
Normal file
@ -0,0 +1,29 @@
|
||||
# Compilation from source code
|
||||
|
||||
These are generic instructions should work on almost any GNU/Linux, macOS, BSD, or similar.
|
||||
|
||||
If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes).
|
||||
|
||||
Keep in mind using the pre-built packages when available.
|
||||
|
||||
1. Ensure your system satisfies build and runtime dependencies for:
|
||||
|
||||
* fuse >= 2.8.4
|
||||
* automake
|
||||
* gcc-c++
|
||||
* make
|
||||
* libcurl
|
||||
* libxml2
|
||||
* openssl
|
||||
* pkg-config (or your OS equivalent)
|
||||
|
||||
2. Then compile from master via the following commands:
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
255
ChangeLog
255
ChangeLog
@ -1,8 +1,187 @@
|
||||
ChangeLog for S3FS
|
||||
ChangeLog for S3FS
|
||||
------------------
|
||||
|
||||
Version 1.86 -- 04 Feb, 2020 (major changes only)
|
||||
#965 - enable various optimizations when using modern curl
|
||||
#1002 - allow SSE-C keys to have NUL bytes
|
||||
#1008 - add session token support
|
||||
#1039 - allow large files on 32-bit systems like Raspberry Pi
|
||||
#1049 - fix data corruption when external modification changes a cached object
|
||||
#1063 - fix data corruption when opening a second fd to an unflushed file
|
||||
#1066 - fix clock skew errors when writing large files
|
||||
#1081 - allow concurrent metadata queries during data operations
|
||||
#1098 - use server-side copy for partially modified files
|
||||
#1107 - #1108 - fix multiple concurrency issues
|
||||
#1199 - add requester_pays support
|
||||
#1209 - add symlink cache
|
||||
#1224 - add intelligent_ia storage tier
|
||||
|
||||
Version 1.85 -- 11 Mar, 2019
|
||||
#804 - add Backblaze B2
|
||||
#812 - Fix typo s/mutliple/multiple/
|
||||
#819 - #691: Made instructions for creating password file more obvious.
|
||||
#820 - Enable big writes if capable
|
||||
#826 - For RPM distributions fuse-libs is enough
|
||||
#831 - Add support for storage class ONEZONE_IA.
|
||||
#832 - Simplify hex conversion
|
||||
#833 - New installation instructions for Fedora >= 27 and CentOS7
|
||||
#834 - Improve template for issues
|
||||
#835 - Make the compilation instructions generic
|
||||
#840 - Replace all mentions to MacOS X to macOS
|
||||
#849 - Correct typo
|
||||
#851 - Correctly compare list_object_max_keys
|
||||
#852 - Allow credentials from ${HOME}/.aws/credentials
|
||||
#853 - Replace ~ with ${HOME} in examples
|
||||
#855 - Include StackOverflow in FAQs
|
||||
#856 - Add icon for s3fs
|
||||
#859 - Upload S3 parts without batching
|
||||
#861 - Add 'profile' option to command line help.
|
||||
#865 - fix multihead warning check
|
||||
#866 - Multi-arch support for ppc64le
|
||||
#870 - Correct typos in command-line parsing
|
||||
#874 - Address cppcheck 1.86 errors
|
||||
#877 - Check arguments and environment before .aws/creds
|
||||
#882 - [curl] Assume long encryption keys are base64 encoded
|
||||
#885 - Update s3fs_util.cpp for correspondence of Nextcloud contype
|
||||
#888 - Add Server Fault to FAQs
|
||||
#892 - Repair xattr tests
|
||||
#893 - Store and retrieve file change time
|
||||
#894 - Default uid/gid/mode when object lacks permissions
|
||||
#895 - Emit more friendly error for buckets with dots
|
||||
#898 - Flush file before renaming
|
||||
#899 - Tighten up HTTP response code check
|
||||
#900 - Plug memory leak
|
||||
#901 - Plug memory leaks
|
||||
#902 - Avoid pass-by-value when not necessary
|
||||
#903 - Prefer find(char) over find(const char *)
|
||||
#904 - Remove unnecessary calls to std::string::c_str
|
||||
#905 - Fix comparison in s3fs_strtoofft
|
||||
#906 - Prefer HTTPS links where possible
|
||||
#908 - Added an error message when HTTP 301 status
|
||||
#909 - Ignore after period character of floating point in x-amz-meta-mtime
|
||||
#910 - Added a missing extension to .gitignore, and formatted dot files
|
||||
#911 - Added detail error message when HTTP 301/307 status
|
||||
#912 - Automatic region change made possible other than us-east-1(default)
|
||||
#913 - Prefer abort over assert(false)
|
||||
#914 - Issue readdir HEAD requests without batching
|
||||
#917 - Reference better-known AWS CLI for compatibility
|
||||
#918 - Load tail range during overwrite
|
||||
#919 - Add test for mv non-empty directory
|
||||
#920 - Remove unnecessary string copies
|
||||
#921 - Remove redundant string initializations
|
||||
#923 - Reverted automatic region change and changed messages
|
||||
#924 - Prefer empty over size checks
|
||||
#925 - Remove redundant null checks before delete
|
||||
#926 - Accept paths with : in them
|
||||
#930 - Correct enable_content_md5 docs
|
||||
#931 - Correct sigv2 typo
|
||||
#932 - Prefer AutoLock for synchronization
|
||||
#933 - Remove mirror path when deleting cache
|
||||
#934 - Checked and corrected all typo
|
||||
#937 - Disable malloc_trim
|
||||
#938 - Remove unneeded void parameter
|
||||
#939 - Prefer specific [io]stringstream where possible
|
||||
#940 - Copy parts in parallel
|
||||
#942 - Ensure s3fs compiles with C++03
|
||||
#943 - Return not supported when hard linking
|
||||
#944 - Repair utility mode
|
||||
#946 - Simplify async request completion code
|
||||
#948 - Add logging for too many parts
|
||||
#949 - Implement exponential backoff for 503
|
||||
#950 - Added S3FS_MALLOC_TRIM build switch
|
||||
#951 - Added a non-interactive option to utility mode
|
||||
#952 - Automatically abort failed multipart requests
|
||||
#953 - Update s3ql link
|
||||
#954 - Clear containers instead of individual erases
|
||||
#955 - Address miscellaneous clang-tidy warnings
|
||||
#957 - Upgrade to S3Proxy 1.6.1
|
||||
#958 - Document lack of inotify support
|
||||
#959 - Fixed code for latest cppcheck error on OSX
|
||||
#960 - Wtf8
|
||||
#961 - Work around cppcheck warnings
|
||||
#965 - Improvement of curl session pool for multipart
|
||||
#967 - Increase FdEntity reference count when returning
|
||||
#969 - Fix lazy typo
|
||||
#970 - Remove from file from stat cache during rename
|
||||
#972 - Add instructions for Amazon Linux
|
||||
#974 - Changed the description order of man page options
|
||||
#975 - Fixed ref-count when error occurred.
|
||||
#977 - Make macOS instructions consistent with others
|
||||
|
||||
Version 1.84 -- Jul 8, 2018
|
||||
#704 - Update README.md with details about .passwd-s3fs
|
||||
#710 - add disk space reservation
|
||||
#712 - Added Cygwin build options
|
||||
#714 - reduce lock contention on file open
|
||||
#724 - don't fail multirequest on single thread error
|
||||
#726 - add an instance_name option for logging
|
||||
#727 - Fixed Travis CI error about cppcheck - #713
|
||||
#729 - FreeBSD build fixes
|
||||
#733 - More useful error message for dupe entries in passwd file
|
||||
#739 - cleanup curl handle state on retries
|
||||
#745 - don't fail mkdir when directory exists
|
||||
#753 - fix xpath selector in bucket listing
|
||||
#754 - Validate the URL format for http/https
|
||||
#755 - Added reset curl handle when returning to handle pool
|
||||
#756 - Optimize defaults
|
||||
#761 - Simplify installation for Ubuntu 16.04
|
||||
#762 - Upgrade to S3Proxy 1.6.0
|
||||
#763 - cleanup curl handles before curl share
|
||||
#764 - Remove false multihead warnings
|
||||
#765 - Add Debian installation instructions
|
||||
#766 - Remove s3fs-python
|
||||
#768 - Fixed memory leak
|
||||
#769 - Revert "enable FUSE read_sync by default"
|
||||
#774 - Option for IAM authentication endpoint
|
||||
#780 - gnutls_auth: initialize libgcrypt
|
||||
#781 - Fixed an error by cppcheck on OSX
|
||||
#786 - Log messages for 5xx and 4xx HTTP response code
|
||||
#789 - Instructions for SUSE and openSUSE prebuilt packages
|
||||
#793 - Added list_object_max_keys option based on #783 PR
|
||||
|
||||
Version 1.83 -- Dec 17, 2017
|
||||
#606 - Add Homebrew instructions
|
||||
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
|
||||
#609 - Group permission checks sometimes fail with large number of groups
|
||||
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
|
||||
#621 - Upgrade to S3Proxy 1.5.3
|
||||
#627 - Update README.md
|
||||
#630 - Added travis test on osx for #601
|
||||
#631 - Merged macosx branch into master branch #601
|
||||
#636 - Fix intermittent upload failures on macOS
|
||||
#637 - Add blurb about non-Amazon S3 implementations
|
||||
#638 - Minor fixes to README
|
||||
#639 - Update Homebrew instructions
|
||||
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
|
||||
#644 - Fixed with unnecessary equal in POST uploads url argument - #643
|
||||
#645 - Configure S3Proxy for SSL
|
||||
#646 - Simplify S3Proxy PID handling
|
||||
#652 - Fix s3fs_init message
|
||||
#659 - Do not fail updating directory when removing old-style object(ref #658)
|
||||
#660 - Refixed s3fs_init message(ref #652)
|
||||
#663 - Lock FdEntity when mutating orgmeta
|
||||
#664 - auth headers insertion refactoring
|
||||
#668 - Changed .travis.yml for fixing not found gpg2 on osx
|
||||
#669 - add IBM IAM authentication support
|
||||
#670 - Fixed a bug in S3fsCurl::LocateBundle
|
||||
#671 - Add support for ECS metadata endpoint
|
||||
#675 - Reduce use of preprocessor
|
||||
#676 - Move str definition from header to implementation
|
||||
#677 - Add s3proxy to .gitignore
|
||||
#679 - README.md Addition
|
||||
#681 - Changed functions about reading passwd file
|
||||
#684 - Correct signedness warning
|
||||
#686 - remove use of jsoncpp
|
||||
#688 - Improved use of temporary files - #678
|
||||
#690 - Added option ecs description to man page
|
||||
#692 - Updated template md files for issue and pr
|
||||
#695 - fix condition for parallel download
|
||||
#697 - Fixing race condition in FdEntity::GetStats
|
||||
#699 - Fix dbglevel usage
|
||||
|
||||
Version 1.82 -- May 13, 2017
|
||||
#597 - Not fallback to HTTP - #596
|
||||
#598 - Updated ChangeLog and configure.ac for release 1.82
|
||||
|
||||
Version 1.81 -- May 13, 2017
|
||||
#426 - Updated to correct ChangeLog
|
||||
@ -40,7 +219,7 @@ Version 1.81 -- May 13, 2017
|
||||
#540 - Address cppcheck 1.77 warnings
|
||||
#545 - Changed base cached time of stat_cache_expire option - #523
|
||||
#546 - Fixed double initialization of SSL library at foreground
|
||||
#550 - Add umount instruction for unplivileged user
|
||||
#550 - Add umount instruction for unprivileged user
|
||||
#551 - Updated stat_cache_expire option description - #545
|
||||
#552 - switch S3fsMultiCurl to use foreground threads
|
||||
#553 - add TLS cipher suites customization
|
||||
@ -93,7 +272,7 @@ Version 1.80 -- May 29, 2016
|
||||
#250 - s3fs can print version with short commit hash - #228
|
||||
#251 - Skip xattr tests if utilities are missing
|
||||
#252 - This fixes an issue with caching when the creation of a subdirectory …
|
||||
#253 - Added chacking cache dir perms at starting.
|
||||
#253 - Added checking cache dir perms at starting.
|
||||
#256 - Add no atomic rename to limitations
|
||||
#257 - Update README.md: Bugfix password file permissions errors
|
||||
#258 - Update README.md to better explain mount upon boot
|
||||
@ -121,7 +300,7 @@ Version 1.80 -- May 29, 2016
|
||||
#306 - Fix read concurrency to work in parallel count
|
||||
#307 - Fix pthread portability problem
|
||||
#308 - Changed ensure free disk space as additional change for #306
|
||||
#309 - Check pthread prtability in configure as additional change for #307
|
||||
#309 - Check pthread portability in configure as additional change for #307
|
||||
#310 - Update integration-test-main.sh as additional change for #300
|
||||
#311 - Change error log to debug log in s3fs_read()
|
||||
#313 - fix gitignore
|
||||
@ -133,14 +312,14 @@ Version 1.80 -- May 29, 2016
|
||||
#330 - Pass by const reference where possible
|
||||
#331 - Address various clang warnings
|
||||
#334 - Bucket host should include port and not path
|
||||
#336 - update REAME.md for fstab
|
||||
#336 - update README.md for fstab
|
||||
#338 - Fixed a bug about IAMCRED type could not be retried.
|
||||
#339 - Updated README.md for fstab example.
|
||||
#341 - Fix the memory leak issue in fdcache.
|
||||
#346 - Fix empty directory check against AWS S3
|
||||
#348 - Integration test summary, continue on error
|
||||
#350 - Changed cache out logic for stat - #340
|
||||
#351 - Check cache dirctory path and attributes - #347
|
||||
#351 - Check cache directory path and attributes - #347
|
||||
#352 - Remove stat file cache dir if specified del_cache - #337
|
||||
#354 - Supported regex type for additional header format - #343
|
||||
#355 - Fixed codes about clock_gettime for osx
|
||||
@ -217,7 +396,7 @@ issue #184 - Add usage information for multipart_size
|
||||
issue #185 - Correct obvious typos in usage and README
|
||||
issue #190 - Add a no_check_certificate option.
|
||||
issue #194 - Tilda in a file-name breaks things (EPERM)
|
||||
issue #198 - Disasble integration tests for Travis
|
||||
issue #198 - Disable integration tests for Travis
|
||||
issue #199 - Supported extended attributes(retry)
|
||||
issue #200 - fixed fallback to sigv2 for bucket create and GCS
|
||||
issue #202 - Specialize {set,get}xattr for OS X
|
||||
@ -254,97 +433,97 @@ issue #4 - Fix compilation error on MacOSX with missing const
|
||||
Version 1.74 -- Nov 24, 2013
|
||||
This version is initial version on Github, same as on GoogleCodes(s3fs).
|
||||
https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
|
||||
see more detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
|
||||
see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
|
||||
|
||||
Version 1.73 -- Aug 23, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
|
||||
|
||||
Version 1.72 -- Aug 10, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
|
||||
|
||||
Version 1.71 -- Jun 15, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
|
||||
|
||||
Version 1.70 -- Jun 01, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
|
||||
|
||||
Version 1.69 -- May 15, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
|
||||
|
||||
Version 1.68 -- Apr 30, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
|
||||
|
||||
Version 1.67 -- Apr 13, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
|
||||
|
||||
Version 1.66 -- Apr 06, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
|
||||
|
||||
Version 1.65 -- Mar 30, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
|
||||
|
||||
Version 1.64 -- Mar 23, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
|
||||
|
||||
Version 1.63 -- Feb 24, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
|
||||
|
||||
Version 1.62 -- Jan 27, 2013
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
|
||||
|
||||
Version 1.61 -- Aug 30, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
|
||||
|
||||
Version 1.60 -- Aug 29, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
|
||||
|
||||
Version 1.59 -- Jul 28, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
|
||||
|
||||
Version 1.58 -- Jul 19, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
|
||||
|
||||
Version 1.57 -- Jul 07, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
|
||||
|
||||
Version 1.56 -- Jul 07, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
|
||||
|
||||
Version 1.55 -- Jul 02, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
|
||||
|
||||
Version 1.54 -- Jun 25, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
|
||||
|
||||
Version 1.53 -- Jun 22, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
|
||||
|
||||
Version 1.40 -- Feb 11, 2011
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
|
||||
|
||||
Version 1.33 -- Dec 30, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
|
||||
|
||||
Version 1.25 -- Dec 16, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
|
||||
|
||||
Version 1.19 -- Dec 2, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
|
||||
|
||||
Version 1.16 -- Nov 22, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
|
||||
|
||||
Version 1.10 -- Nov 6, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
|
||||
|
||||
Version 1.02 -- Oct 29, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
|
||||
|
||||
Version 1.01 -- Oct 28, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
|
||||
|
||||
Version 1.0 -- Oct 24, 2010
|
||||
see detail on googlecodes: http://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
|
||||
|
||||
------
|
||||
Version 1.1 -- Mon Oct 18 2010
|
||||
|
||||
2
INSTALL
2
INSTALL
@ -124,7 +124,7 @@ architecture at a time in the source code directory. After you have
|
||||
installed the package for one architecture, use `make distclean' before
|
||||
reconfiguring for another architecture.
|
||||
|
||||
On MacOS X 10.5 and later systems, you can create libraries and
|
||||
On macOS 10.5 and later systems, you can create libraries and
|
||||
executables that work on multiple system types--known as "fat" or
|
||||
"universal" binaries--by specifying multiple `-arch' options to the
|
||||
compiler but only a single `-arch' option to the preprocessor. Like
|
||||
|
||||
10
Makefile.am
10
Makefile.am
@ -32,10 +32,14 @@ cppcheck:
|
||||
cppcheck --quiet --error-exitcode=1 \
|
||||
--inline-suppr \
|
||||
--std=c++03 \
|
||||
--xml \
|
||||
-D HAVE_ATTR_XATTR_H \
|
||||
-D HAVE_SYS_EXTATTR_H \
|
||||
-D HAVE_MALLOC_TRIM \
|
||||
-U CURLE_PEER_FAILED_VERIFICATION \
|
||||
-U P_tmpdir \
|
||||
--enable=all \
|
||||
-U ENOATTR \
|
||||
--enable=warning,style,information,missingInclude \
|
||||
--suppress=missingIncludeSystem \
|
||||
--suppress=unusedFunction \
|
||||
--suppress=variableScope \
|
||||
--suppress=unmatchedSuppression \
|
||||
src/ test/
|
||||
|
||||
167
README.md
167
README.md
@ -1,12 +1,11 @@
|
||||
s3fs
|
||||
====
|
||||
# s3fs
|
||||
|
||||
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
|
||||
s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other
|
||||
tools like [AWS CLI](https://github.com/aws/aws-cli).
|
||||
[](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
|
||||
|
||||
Features
|
||||
--------
|
||||
## Features
|
||||
|
||||
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
|
||||
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
|
||||
@ -19,110 +18,158 @@ Features
|
||||
* user-specified regions, including Amazon GovCloud
|
||||
* authenticate via v2 or v4 signatures
|
||||
|
||||
Installation
|
||||
------------
|
||||
## Installation
|
||||
|
||||
Ensure you have all the dependencies:
|
||||
Many systems provide pre-built packages:
|
||||
|
||||
On Ubuntu 14.04:
|
||||
* Amazon Linux via EPEL:
|
||||
|
||||
```
|
||||
sudo amazon-linux-extras install epel
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* Debian 9 and Ubuntu 16.04 or newer:
|
||||
|
||||
```
|
||||
sudo apt install s3fs
|
||||
```
|
||||
|
||||
* Fedora 27 or newer:
|
||||
|
||||
```
|
||||
sudo dnf install s3fs-fuse
|
||||
```
|
||||
|
||||
* Gentoo:
|
||||
|
||||
```
|
||||
sudo emerge net-fs/s3fs
|
||||
```
|
||||
|
||||
* RHEL and CentOS 7 or newer through via EPEL:
|
||||
|
||||
```
|
||||
sudo yum install epel-release
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* SUSE 12 and openSUSE 42.1 or newer:
|
||||
|
||||
```
|
||||
sudo zypper install s3fs
|
||||
```
|
||||
|
||||
* macOS via [Homebrew](https://brew.sh/):
|
||||
|
||||
```
|
||||
brew cask install osxfuse
|
||||
brew install s3fs
|
||||
```
|
||||
|
||||
Otherwise consult the [compilation instructions](COMPILATION.md).
|
||||
|
||||
## Examples
|
||||
|
||||
s3fs supports the standard
|
||||
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
|
||||
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
|
||||
|
||||
The default location for the s3fs password file can be created:
|
||||
|
||||
* using a .passwd-s3fs file in the users home directory (i.e. ${HOME}/.passwd-s3fs)
|
||||
* using the system-wide /etc/passwd-s3fs file
|
||||
|
||||
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
|
||||
owner-only permissions:
|
||||
|
||||
```
|
||||
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
|
||||
```
|
||||
|
||||
On CentOS 7:
|
||||
|
||||
```
|
||||
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
|
||||
```
|
||||
|
||||
Compile from master via the following commands:
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Enter your S3 identity and credential in a file `/path/to/passwd`:
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
|
||||
```
|
||||
|
||||
Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:
|
||||
|
||||
```
|
||||
chmod 600 /path/to/passwd
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
|
||||
chmod 600 ${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
If you encounter any errors, enable debug output:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
|
||||
```
|
||||
|
||||
You can also mount on boot by entering the following line to `/etc/fstab`:
|
||||
|
||||
```
|
||||
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
|
||||
```
|
||||
|
||||
If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
|
||||
```
|
||||
|
||||
or(fstab)
|
||||
|
||||
```
|
||||
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
|
||||
```
|
||||
|
||||
To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
|
||||
|
||||
```
|
||||
echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
|
||||
```
|
||||
|
||||
The Service Instance ID is only required when using the `-o create_bucket` option.
|
||||
|
||||
Note: You may also want to create the global credential file first
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
|
||||
chmod 600 /etc/passwd-s3fs
|
||||
```
|
||||
|
||||
Note2: You may also need to make sure `netfs` service is start on boot
|
||||
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
## Limitations
|
||||
|
||||
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
|
||||
|
||||
* random writes or appends to files require rewriting the entire file
|
||||
* metadata operations such as listing directories have poor performance due to network latency
|
||||
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
|
||||
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
|
||||
* no atomic renames of files or directories
|
||||
* no coordination between multiple clients mounting the same bucket
|
||||
* no hard links
|
||||
* inotify detects only local modifications, not external ones by other clients or tools
|
||||
|
||||
References
|
||||
----------
|
||||
## References
|
||||
|
||||
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
|
||||
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
|
||||
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
|
||||
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
|
||||
|
||||
Frequently Asked Questions
|
||||
--------------------------
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
## Frequently Asked Questions
|
||||
|
||||
License
|
||||
-------
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
|
||||
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)
|
||||
|
||||
## License
|
||||
|
||||
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
|
||||
|
||||
Licensed under the GNU GPL version 2
|
||||
|
||||
|
||||
66
configure.ac
66
configure.ac
@ -20,7 +20,7 @@
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
AC_PREREQ(2.59)
|
||||
AC_INIT(s3fs, 1.82)
|
||||
AC_INIT(s3fs, 1.86)
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
@ -33,12 +33,17 @@ AC_CHECK_HEADERS([sys/xattr.h])
|
||||
AC_CHECK_HEADERS([attr/xattr.h])
|
||||
AC_CHECK_HEADERS([sys/extattr.h])
|
||||
|
||||
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
|
||||
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl For OSX
|
||||
dnl For macOS
|
||||
dnl ----------------------------------------------
|
||||
case "$target" in
|
||||
*-cygwin* )
|
||||
# Do something specific for windows using winfsp
|
||||
CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
|
||||
min_fuse_version=2.8
|
||||
;;
|
||||
*-darwin* )
|
||||
# Do something specific for mac
|
||||
min_fuse_version=2.7.3
|
||||
@ -176,13 +181,13 @@ dnl
|
||||
dnl For PKG_CONFIG before checking nss/gnutls.
|
||||
dnl this is redundant checking, but we need checking before following.
|
||||
dnl
|
||||
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
|
||||
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])
|
||||
|
||||
AC_MSG_CHECKING([compile s3fs with])
|
||||
case "${auth_lib}" in
|
||||
openssl)
|
||||
AC_MSG_RESULT(OpenSSL)
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
|
||||
;;
|
||||
gnutls)
|
||||
AC_MSG_RESULT(GnuTLS-gcrypt)
|
||||
@ -232,7 +237,7 @@ dnl ----------------------------------------------
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS([malloc_trim])
|
||||
|
||||
dnl clock_gettime function(osx)
|
||||
dnl clock_gettime function(macos)
|
||||
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
|
||||
AC_CHECK_FUNCS([clock_gettime])
|
||||
|
||||
@ -259,6 +264,51 @@ AC_COMPILE_IFELSE(
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl check CURLoption
|
||||
dnl ----------------------------------------------
|
||||
dnl CURLOPT_TCP_KEEPALIVE (is supported by 7.25.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_TCP_KEEPALIVE])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_TCP_KEEPALIVE;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 1, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 0, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_SSL_ENABLE_ALPN (is supported by 7.36.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_SSL_ENABLE_ALPN])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_SSL_ENABLE_ALPN;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 1, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 0, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_KEEP_SENDING_ON_ERROR (is supported by 7.51.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_KEEP_SENDING_ON_ERROR])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_KEEP_SENDING_ON_ERROR;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 1, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 0, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl output files
|
||||
dnl ----------------------------------------------
|
||||
@ -268,10 +318,10 @@ dnl ----------------------------------------------
|
||||
dnl short commit hash
|
||||
dnl ----------------------------------------------
|
||||
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
|
||||
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])
|
||||
AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])
|
||||
|
||||
AC_MSG_CHECKING([github short commit hash])
|
||||
if test “x${GITCMD}” = “xyes” -a “x${DOTGITDIR}” = “xyes”; then
|
||||
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
|
||||
GITCOMMITHASH=`git rev-parse --short HEAD`
|
||||
elif test -f default_commit_hash; then
|
||||
GITCOMMITHASH=`cat default_commit_hash`
|
||||
|
||||
171
doc/man/s3fs.1
171
doc/man/s3fs.1
@ -6,7 +6,7 @@ S3FS \- FUSE-based file system backed by Amazon S3
|
||||
.TP
|
||||
\fBs3fs bucket[:/path] mountpoint \fP [options]
|
||||
.TP
|
||||
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
|
||||
\fBs3fs mountpoint \fP [options (must specify bucket= option)]
|
||||
.SS unmounting
|
||||
.TP
|
||||
\fBumount mountpoint
|
||||
@ -14,12 +14,16 @@ For root.
|
||||
.TP
|
||||
\fBfusermount -u mountpoint
|
||||
For unprivileged user.
|
||||
.SS utility mode ( remove interrupted multipart uploading objects )
|
||||
.SS utility mode (remove interrupted multipart uploading objects)
|
||||
.TP
|
||||
\fBs3fs \-u bucket
|
||||
\fBs3fs --incomplete-mpu-list (-u) bucket
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-abort[=all | =<expire date format>] bucket
|
||||
.SH DESCRIPTION
|
||||
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
|
||||
.SH AUTHENTICATION
|
||||
s3fs supports the standard AWS credentials file (https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
|
||||
Alternatively, s3fs supports a custom passwd file. Only AWS credentials file format can be used when AWS session token is required.
|
||||
The s3fs password file has this format (use this format if you have only one set of credentials):
|
||||
.RS 4
|
||||
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
@ -35,6 +39,8 @@ Password files can be stored in two locations:
|
||||
\fB/etc/passwd-s3fs\fP [0640]
|
||||
\fB$HOME/.passwd-s3fs\fP [0600]
|
||||
.RE
|
||||
.PP
|
||||
s3fs also recognizes the \fBAWSACCESSKEYID\fP and \fBAWSSECRETACCESSKEY\fP environment variables.
|
||||
.SH OPTIONS
|
||||
.SS "general options"
|
||||
.TP
|
||||
@ -55,14 +61,13 @@ All s3fs options must given in the form where "opt" is:
|
||||
<option_name>=<option_value>
|
||||
.TP
|
||||
\fB\-o\fR bucket
|
||||
if it is not specified bucket name(and path) in command line, must specify this option after \-o option for bucket name.
|
||||
if it is not specified bucket name (and path) in command line, must specify this option after \-o option for bucket name.
|
||||
.TP
|
||||
\fB\-o\fR default_acl (default="private")
|
||||
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
|
||||
empty string means do not send header.
|
||||
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
|
||||
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for the full list of canned acls.
|
||||
.TP
|
||||
\fB\-o\fR retries (default="2")
|
||||
\fB\-o\fR retries (default="5")
|
||||
number of times to retry a failed S3 transaction.
|
||||
.TP
|
||||
\fB\-o\fR use_cache (default="" which means disabled)
|
||||
@ -75,10 +80,10 @@ If this option is not specified, it will be created at runtime when the cache di
|
||||
\fB\-o\fR del_cache - delete local file cache
|
||||
delete local file cache when s3fs starts and exits.
|
||||
.TP
|
||||
\fB\-o\fR storage_class (default is standard)
|
||||
\fB\-o\fR storage_class (default="standard")
|
||||
store object with specified storage class.
|
||||
this option replaces the old option use_rrs.
|
||||
Possible values: standard, standard_ia, and reduced_redundancy.
|
||||
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, and intelligent_tiering.
|
||||
.TP
|
||||
\fB\-o\fR use_rrs (default is disable)
|
||||
use Amazon's Reduced Redundancy Storage.
|
||||
@ -89,19 +94,19 @@ this option has been replaced by new storage_class option.
|
||||
\fB\-o\fR use_sse (default is disable)
|
||||
Specify three type Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
|
||||
You can specify "use_sse" or "use_sse=1" enables SSE-S3 type (use_sse=1 is old type parameter).
|
||||
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>"(only <custom key file path> specified is old type parameter).
|
||||
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (only <custom key file path> specified is old type parameter).
|
||||
You can use "c" for short "custom".
|
||||
The custom key file must be 600 permission. The file can have some lines, each line is one SSE-C key.
|
||||
The first line in file is used as Customer-Provided Encryption Keys for uploading and changing headers etc.
|
||||
If there are some keys after first line, those are used downloading object which are encrypted by not first key.
|
||||
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
|
||||
If you specify "custom" ("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment. (AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
|
||||
This option is used to decide the SSE type.
|
||||
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
|
||||
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
|
||||
You can use "k" for short "kmsid".
|
||||
If you want to specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
|
||||
If you specify only "kmsid"("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
|
||||
If you want to specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
|
||||
If you specify only "kmsid" ("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
|
||||
You must be careful about that you can not use the KMS id which is not same EC2 region.
|
||||
.TP
|
||||
\fB\-o\fR load_sse_c - specify SSE-C keys
|
||||
@ -115,12 +120,12 @@ AWSSSECKEYS environment is as same as this file contents.
|
||||
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
|
||||
.TP
|
||||
\fB\-o\fR ahbe_conf (default="" which means disabled)
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file(object) extension.
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file (object) extension.
|
||||
The configuration file format is below:
|
||||
-----------
|
||||
line = [file suffix or regex] HTTP-header [HTTP-values]
|
||||
file suffix = file(object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
|
||||
regex = regular expression to match the file(object) path. this type starts with "reg:" prefix.
|
||||
file suffix = file (object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
|
||||
regex = regular expression to match the file (object) path. this type starts with "reg:" prefix.
|
||||
HTTP-header = additional HTTP header name
|
||||
HTTP-values = additional HTTP header value
|
||||
-----------
|
||||
@ -133,6 +138,10 @@ This option specifies the configuration file path which file is the additional H
|
||||
A sample configuration file is uploaded in "test" directory.
|
||||
If you specify this option for set "Content-Encoding" HTTP header, please take care for RFC 2616.
|
||||
.TP
|
||||
\fB\-o\fR profile (default="default")
|
||||
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
|
||||
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
|
||||
.TP
|
||||
\fB\-o\fR public_bucket (default="" which means disabled)
|
||||
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
|
||||
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
|
||||
@ -140,29 +149,35 @@ S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi
|
||||
\fB\-o\fR connect_timeout (default="300" seconds)
|
||||
time to wait for connection before giving up.
|
||||
.TP
|
||||
\fB\-o\fR readwrite_timeout (default="60" seconds)
|
||||
\fB\-o\fR readwrite_timeout (default="120" seconds)
|
||||
time to wait between read/write activity before giving up.
|
||||
.TP
|
||||
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
|
||||
maximum number of entries in the stat cache
|
||||
\fB\-o\fR list_object_max_keys (default="1000")
|
||||
specify the maximum number of keys returned by S3 list object API. The default is 1000. you can set this value to 1000 or more.
|
||||
.TP
|
||||
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
|
||||
maximum number of entries in the stat cache and symbolic link cache.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_expire (default is no expire)
|
||||
specify expire time(seconds) for entries in the stat cache. This expire time indicates the time since stat cached.
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time indicates the time since cached.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_interval_expire (default is no expire)
|
||||
specify expire time(seconds) for entries in the stat cache. This expire time is based on the time from the last access time of the stat cache.
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
|
||||
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
|
||||
.TP
|
||||
\fB\-o\fR enable_noobj_cache (default is disable)
|
||||
enable cache entries for the object which does not exist.
|
||||
s3fs always has to check whether file(or sub directory) exists under object(path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
|
||||
s3fs always has to check whether file (or sub directory) exists under object (path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
|
||||
It increases ListBucket request and makes performance bad.
|
||||
You can specify this option for performance, s3fs memorizes in stat cache that the object(file or directory) does not exist.
|
||||
You can specify this option for performance, s3fs memorizes in stat cache that the object (file or directory) does not exist.
|
||||
.TP
|
||||
\fB\-o\fR no_check_certificate (by default this option is disabled)
|
||||
do not check ssl certificate.
|
||||
server certificate won't be checked against the available certificate authorities.
|
||||
.TP
|
||||
\fB\-o\fR ssl_verify_hostname (default="2")
|
||||
When 0, do not verify the SSL certificate against the hostname.
|
||||
.TP
|
||||
\fB\-o\fR nodnscache - disable dns cache.
|
||||
s3fs is always using dns cache, this option make dns cache disable.
|
||||
.TP
|
||||
@ -174,53 +189,74 @@ maximum number of parallel request for listing objects.
|
||||
.TP
|
||||
\fB\-o\fR parallel_count (default="5")
|
||||
number of parallel request for uploading big objects.
|
||||
s3fs uploads large object(default:over 20MB) by multipart post request, and sends parallel requests.
|
||||
s3fs uploads large object (over 20MB) by multipart post request, and sends parallel requests.
|
||||
This option limits parallel request count which s3fs requests at once.
|
||||
It is necessary to set this value depending on a CPU and a network band.
|
||||
.TP
|
||||
\fB\-o\fR multipart_size(default="10"(10MB))
|
||||
number of one part size in multipart uploading request.
|
||||
The default size is 10MB(10485760byte), minimum value is 5MB(5242880byte).
|
||||
Specify number of MB and over 5(MB).
|
||||
\fB\-o\fR multipart_size (default="10")
|
||||
part size, in MB, for each multipart request.
|
||||
The minimum value is 5 MB and the maximum value is 5 GB.
|
||||
.TP
|
||||
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
|
||||
\fB\-o\fR ensure_diskfree (default 0)
|
||||
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
|
||||
s3fs makes file for downloading, and uploading and caching files.
|
||||
s3fs makes file for downloading, uploading and caching files.
|
||||
If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
|
||||
.TP
|
||||
\fB\-o\fR singlepart_copy_limit (default="512")
|
||||
maximum size, in MB, of a single-part copy before trying
|
||||
multipart copy.
|
||||
.TP
|
||||
\fB\-o\fR host (default="https://s3.amazonaws.com")
|
||||
Set a non-Amazon host, e.g., https://example.com.
|
||||
.TP
|
||||
\fB\-o\fR servicepath (default="/")
|
||||
Set a service path when the non-Amazon host requires a prefix.
|
||||
.TP
|
||||
\fB\-o\fR url (default="https://s3.amazonaws.com")
|
||||
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
|
||||
If you do not use https, please specify the URL with the url option.
|
||||
.TP
|
||||
\fB\-o\fR endpoint (default="us-east-1")
|
||||
sets the endpoint to use.
|
||||
sets the endpoint to use on signature version 4.
|
||||
If this option is not specified, s3fs uses "us-east-1" region as the default.
|
||||
If the s3fs could not connect to the region specified by this option, s3fs could not run.
|
||||
But if you do not specify this option, and if you can not connect with the default region, s3fs will retry to automatically connect to the other region.
|
||||
So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
|
||||
.TP
|
||||
\fB\-o\fR sigv2 (default is signature version 4)
|
||||
sets signing AWS requests by using Signature Version 2.
|
||||
sets signing AWS requests by using Signature Version 2.
|
||||
.TP
|
||||
\fB\-o\fR mp_umask (default is "0000")
|
||||
sets umask for the mount point directory.
|
||||
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
|
||||
In the opposite case s3fs allows access to all users as the default.
|
||||
But if you set the allow_other with this option, you can control permissions of the mount point by this option like umask.
|
||||
But if you set the allow_other with this option, you can control the permissions of the mount point by this option like umask.
|
||||
.TP
|
||||
\fB\-o\fR umask (default is "0000")
|
||||
sets umask for files under the mountpoint. This can allow
|
||||
users other than the mounting user to read and write to files
|
||||
that they did not create.
|
||||
.TP
|
||||
\fB\-o\fR nomultipart - disable multipart uploads
|
||||
.TP
|
||||
\fB\-o\fR enable_content_md5 ( default is disable )
|
||||
verifying uploaded data without multipart by content-md5 header.
|
||||
Enable to send "Content-MD5" header when uploading a object without multipart posting.
|
||||
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
|
||||
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
|
||||
\fB\-o\fR enable_content_md5 (default is disable)
|
||||
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
|
||||
This can add CPU overhead to transfers.
|
||||
.TP
|
||||
\fB\-o\fR iam_role ( default is no IAM role )
|
||||
\fB\-o\fR ecs (default is disable)
|
||||
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
|
||||
.TP
|
||||
\fB\-o\fR iam_role (default is no IAM role)
|
||||
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
|
||||
.TP
|
||||
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
|
||||
Enable to handle the extended attribute(xattrs).
|
||||
\fB\-o\fR ibm_iam_auth (default is not using IBM IAM authentication)
|
||||
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
|
||||
.TP
|
||||
\fB\-o\fR ibm_iam_endpoint (default is https://iam.bluemix.net)
|
||||
Sets the URL to use for IBM IAM authentication.
|
||||
.TP
|
||||
\fB\-o\fR use_xattr (default is not handling the extended attribute)
|
||||
Enable to handle the extended attribute (xattrs).
|
||||
If you set this option, you can use the extended attribute.
|
||||
For example, encfs and ecryptfs need to support the extended attribute.
|
||||
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
|
||||
@ -229,13 +265,18 @@ Notice: if s3fs handles the extended attribute, s3fs can not work to copy comman
|
||||
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
|
||||
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
|
||||
.TP
|
||||
\fB\-o\fR nomixupload - disable copy in multipart uploads.
|
||||
Disable to use PUT (copy api) when multipart uploading large size objects.
|
||||
By default, when doing multipart upload, the range of unchanged data will use PUT (copy api) whenever possible.
|
||||
When nocopyapi or norenameapi is specified, use of PUT (copy api) is invalidated even if this option is not specified.
|
||||
.TP
|
||||
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source" (copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
.TP
|
||||
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command (ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command (ex. mv).
|
||||
If this option is specified with nocopyapi, then s3fs ignores it.
|
||||
.TP
|
||||
\fB\-o\fR use_path_request_style (use legacy API calling style)
|
||||
@ -246,10 +287,14 @@ Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <
|
||||
If this option is specified, s3fs suppresses the output of the User-Agent.
|
||||
.TP
|
||||
\fB\-o\fR cipher_suites
|
||||
Customize TLS cipher suite list. Expects a colon separated list of cipher suite names.
|
||||
Customize the list of TLS cipher suites. Expects a colon separated list of cipher suite names.
|
||||
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
|
||||
https://curl.haxx.se/docs/ssl-ciphers.html
|
||||
.TP
|
||||
\fB\-o\fR instance_name
|
||||
The instance name of the current s3fs mountpoint.
|
||||
This name will be added to logging messages and user agent headers sent by s3fs.
|
||||
.TP
|
||||
\fB\-o\fR complement_stat (complement lack of file/directory mode)
|
||||
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
|
||||
As default, s3fs does not complements stat information for a object, then the object will not be able to be allowed to list/modify.
|
||||
@ -265,13 +310,41 @@ However, if there is a directory object other than "dir/" in the bucket, specify
|
||||
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
|
||||
Please use this option when the directory in the bucket is only "dir/" object.
|
||||
.TP
|
||||
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
|
||||
S3 requires all object names to be valid utf-8. But some
|
||||
clients, notably Windows NFS clients, use their own encoding.
|
||||
This option re-encodes invalid utf-8 object names into valid
|
||||
utf-8 by mapping offending codes into a 'private' codepage of the
|
||||
Unicode set.
|
||||
Useful on clients not using utf-8 as their file system encoding.
|
||||
.TP
|
||||
\fB\-o\fR use_session_token - indicate that session token should be provided.
|
||||
If credentials are provided by environment variables this switch
|
||||
forces presence check of AWSSESSIONTOKEN variable.
|
||||
Otherwise an error is returned.
|
||||
.TP
|
||||
\fB\-o\fR requester_pays (default is disable)
|
||||
This option instructs s3fs to enable requests involving Requester Pays buckets (It includes the 'x-amz-request-payer=requester' entry in the request header).
|
||||
.TP
|
||||
\fB\-o\fR dbglevel (default="crit")
|
||||
Set the debug message level. set value as crit(critical), err(error), warn(warning), info(information) to debug level. default debug level is critical.
|
||||
Set the debug message level. set value as crit (critical), err (error), warn (warning), info (information) to debug level. default debug level is critical.
|
||||
If s3fs run with "-d" option, the debug level is set information.
|
||||
When s3fs catch the signal SIGUSR2, the debug level is bumpup.
|
||||
.TP
|
||||
\fB\-o\fR curldbg - put curl debug message
|
||||
Put the debug message from libcurl when this option is specified.
|
||||
.SS "utility mode options"
|
||||
.TP
|
||||
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
|
||||
Lists multipart incomplete objects uploaded to the specified bucket.
|
||||
.TP
|
||||
\fB\-\-incomplete\-mpu\-abort\fR all or date format (default="24H")
|
||||
Delete the multipart incomplete object uploaded to the specified bucket.
|
||||
If "all" is specified for this option, all multipart incomplete objects will be deleted.
|
||||
If you specify no argument as an option, objects older than 24 hours (24H) will be deleted (This is the default value).
|
||||
You can specify an optional date format.
|
||||
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
|
||||
For example, "1Y6M10D12h30m30s".
|
||||
.SH FUSE/MOUNT OPTIONS
|
||||
.TP
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
@ -279,7 +352,7 @@ Most of the generic mount options described in 'man mount' are supported (ro, rw
|
||||
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
|
||||
.SH NOTES
|
||||
.TP
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
.TP
|
||||
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
|
||||
.TP
|
||||
@ -288,6 +361,8 @@ The folder specified by use_cache is just a local cache. It can be deleted at an
|
||||
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
|
||||
.TP
|
||||
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
|
||||
.SH SEE ALSO
|
||||
fuse(8), mount(8), fusermount(1), fstab(5)
|
||||
.SH BUGS
|
||||
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail. Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads. Create and read enough files and you will eventually encounter this failure. This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around. The retries option does not address this issue. Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
|
||||
.SH AUTHOR
|
||||
|
||||
BIN
doc/s3fs.png
Normal file
BIN
doc/s3fs.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 5.3 KiB |
@ -24,7 +24,15 @@ if USE_GNUTLS_NETTLE
|
||||
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
|
||||
endif
|
||||
|
||||
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
|
||||
s3fs_SOURCES = \
|
||||
s3fs.cpp \
|
||||
curl.cpp \
|
||||
cache.cpp \
|
||||
string_util.cpp \
|
||||
s3fs_util.cpp \
|
||||
fdcache.cpp \
|
||||
common_auth.cpp \
|
||||
addhead.cpp
|
||||
if USE_SSL_OPENSSL
|
||||
s3fs_SOURCES += openssl_auth.cpp
|
||||
endif
|
||||
@ -39,6 +47,9 @@ s3fs_LDADD = $(DEPS_LIBS)
|
||||
|
||||
noinst_PROGRAMS = test_string_util
|
||||
|
||||
test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h
|
||||
test_string_util_SOURCES = string_util.cpp test_string_util.cpp
|
||||
|
||||
TESTS = test_string_util
|
||||
|
||||
clang-tidy:
|
||||
clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)
|
||||
|
||||
@ -18,11 +18,10 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <syslog.h>
|
||||
#include <assert.h>
|
||||
#include <curl/curl.h>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
@ -56,7 +55,7 @@ AdditionalHeader::AdditionalHeader()
|
||||
if(this == AdditionalHeader::get()){
|
||||
is_enable = false;
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -65,7 +64,7 @@ AdditionalHeader::~AdditionalHeader()
|
||||
if(this == AdditionalHeader::get()){
|
||||
Unload();
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -85,19 +84,19 @@ bool AdditionalHeader::Load(const char* file)
|
||||
|
||||
// read file
|
||||
string line;
|
||||
PADDHEAD paddhead;
|
||||
ADDHEAD *paddhead;
|
||||
while(getline(AH, line)){
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
if(0 == line.size()){
|
||||
if(line.empty()){
|
||||
continue;
|
||||
}
|
||||
// load a line
|
||||
stringstream ss(line);
|
||||
string key(""); // suffix(key)
|
||||
string head; // additional HTTP header
|
||||
string value; // header value
|
||||
istringstream ss(line);
|
||||
string key; // suffix(key)
|
||||
string head; // additional HTTP header
|
||||
string value; // header value
|
||||
if(0 == isblank(line[0])){
|
||||
ss >> key;
|
||||
}
|
||||
@ -109,8 +108,8 @@ bool AdditionalHeader::Load(const char* file)
|
||||
}
|
||||
|
||||
// check it
|
||||
if(0 == head.size()){
|
||||
if(0 == key.size()){
|
||||
if(head.empty()){
|
||||
if(key.empty()){
|
||||
continue;
|
||||
}
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
|
||||
@ -123,6 +122,7 @@ bool AdditionalHeader::Load(const char* file)
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
key = key.substr(strlen(ADD_HEAD_REGEX));
|
||||
@ -130,8 +130,8 @@ bool AdditionalHeader::Load(const char* file)
|
||||
// compile
|
||||
regex_t* preg = new regex_t;
|
||||
int result;
|
||||
char errbuf[256];
|
||||
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
char errbuf[256];
|
||||
regerror(result, preg, errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
delete preg;
|
||||
@ -164,12 +164,12 @@ bool AdditionalHeader::Load(const char* file)
|
||||
return true;
|
||||
}
|
||||
|
||||
void AdditionalHeader::Unload(void)
|
||||
void AdditionalHeader::Unload()
|
||||
{
|
||||
is_enable = false;
|
||||
|
||||
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
|
||||
PADDHEAD paddhead = *iter;
|
||||
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
ADDHEAD *paddhead = *iter;
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
regfree(paddhead->pregex);
|
||||
@ -178,6 +178,7 @@ void AdditionalHeader::Unload(void)
|
||||
delete paddhead;
|
||||
}
|
||||
}
|
||||
addheadlist.clear();
|
||||
}
|
||||
|
||||
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
@ -198,7 +199,7 @@ bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
// Because to allow duplicate key, and then scanning the entire table.
|
||||
//
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
const PADDHEAD paddhead = *iter;
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
if(!paddhead){
|
||||
continue;
|
||||
}
|
||||
@ -239,19 +240,19 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch
|
||||
return list;
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Dump(void) const
|
||||
bool AdditionalHeader::Dump() const
|
||||
{
|
||||
if(!IS_S3FS_LOG_DBG()){
|
||||
return true;
|
||||
}
|
||||
|
||||
stringstream ssdbg;
|
||||
int cnt = 1;
|
||||
ostringstream ssdbg;
|
||||
int cnt = 1;
|
||||
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
|
||||
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
|
||||
const PADDHEAD paddhead = *iter;
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
|
||||
ssdbg << " [" << cnt << "] = {" << endl;
|
||||
|
||||
|
||||
@ -31,9 +31,9 @@ typedef struct add_header{
|
||||
std::string basestring;
|
||||
std::string headkey;
|
||||
std::string headvalue;
|
||||
}ADDHEAD, *PADDHEAD;
|
||||
}ADDHEAD;
|
||||
|
||||
typedef std::vector<PADDHEAD> addheadlist_t;
|
||||
typedef std::vector<ADDHEAD *> addheadlist_t;
|
||||
|
||||
class AdditionalHeader
|
||||
{
|
||||
|
||||
346
src/cache.cpp
346
src/cache.cpp
@ -18,7 +18,7 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <cstdio>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#ifndef HAVE_CLOCK_GETTIME
|
||||
@ -27,8 +27,7 @@
|
||||
#include <unistd.h>
|
||||
#include <stdint.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <cstring>
|
||||
#include <syslog.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
@ -59,7 +58,7 @@ using namespace std;
|
||||
#ifdef HAVE_CLOCK_GETTIME
|
||||
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
|
||||
{
|
||||
return clock_gettime(clk_id, ts);
|
||||
return clock_gettime(static_cast<clockid_t>(clk_id), ts);
|
||||
}
|
||||
#else
|
||||
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
|
||||
@ -88,7 +87,7 @@ inline void InitStatCacheTime(struct timespec& ts)
|
||||
ts.tv_nsec = 0;
|
||||
}
|
||||
|
||||
inline int CompareStatCacheTime(struct timespec& ts1, struct timespec& ts2)
|
||||
inline int CompareStatCacheTime(const struct timespec& ts1, const struct timespec& ts2)
|
||||
{
|
||||
// return -1: ts1 < ts2
|
||||
// 0: ts1 == ts2
|
||||
@ -115,7 +114,7 @@ inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expir
|
||||
}
|
||||
|
||||
//
|
||||
// For cache out
|
||||
// For stats cache out
|
||||
//
|
||||
typedef std::vector<stat_cache_t::iterator> statiterlist_t;
|
||||
|
||||
@ -133,6 +132,25 @@ struct sort_statiterlist{
|
||||
}
|
||||
};
|
||||
|
||||
//
|
||||
// For symbolic link cache out
|
||||
//
|
||||
typedef std::vector<symlink_cache_t::iterator> symlinkiterlist_t;
|
||||
|
||||
struct sort_symlinkiterlist{
|
||||
// ascending order
|
||||
bool operator()(const symlink_cache_t::iterator& src1, const symlink_cache_t::iterator& src2) const
|
||||
{
|
||||
int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date); // use the same as Stats
|
||||
if(0 == result){
|
||||
if(src1->second->hit_count < src2->second->hit_count){
|
||||
result = -1;
|
||||
}
|
||||
}
|
||||
return (result < 0);
|
||||
}
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Static
|
||||
//-------------------------------------------------------------------
|
||||
@ -142,13 +160,18 @@ pthread_mutex_t StatCache::stat_cache_lock;
|
||||
//-------------------------------------------------------------------
|
||||
// Constructor/Destructor
|
||||
//-------------------------------------------------------------------
|
||||
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
|
||||
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(100000), IsCacheNoObject(false)
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
stat_cache.clear();
|
||||
pthread_mutex_init(&(StatCache::stat_cache_lock), NULL);
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
pthread_mutex_init(&StatCache::stat_cache_lock, &attr);
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -156,16 +179,16 @@ StatCache::~StatCache()
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
Clear();
|
||||
pthread_mutex_destroy(&(StatCache::stat_cache_lock));
|
||||
pthread_mutex_destroy(&StatCache::stat_cache_lock);
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods
|
||||
//-------------------------------------------------------------------
|
||||
unsigned long StatCache::GetCacheSize(void) const
|
||||
unsigned long StatCache::GetCacheSize() const
|
||||
{
|
||||
return CacheSize;
|
||||
}
|
||||
@ -177,7 +200,7 @@ unsigned long StatCache::SetCacheSize(unsigned long size)
|
||||
return old;
|
||||
}
|
||||
|
||||
time_t StatCache::GetExpireTime(void) const
|
||||
time_t StatCache::GetExpireTime() const
|
||||
{
|
||||
return (IsExpireTime ? ExpireTime : (-1));
|
||||
}
|
||||
@ -191,7 +214,7 @@ time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
|
||||
return old;
|
||||
}
|
||||
|
||||
time_t StatCache::UnsetExpireTime(void)
|
||||
time_t StatCache::UnsetExpireTime()
|
||||
{
|
||||
time_t old = IsExpireTime ? ExpireTime : (-1);
|
||||
ExpireTime = 0;
|
||||
@ -207,45 +230,41 @@ bool StatCache::SetCacheNoObject(bool flag)
|
||||
return old;
|
||||
}
|
||||
|
||||
void StatCache::Clear(void)
|
||||
void StatCache::Clear()
|
||||
{
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); stat_cache.erase(iter++)){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
|
||||
delete (*iter).second;
|
||||
}
|
||||
stat_cache.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
}
|
||||
|
||||
bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
|
||||
bool StatCache::GetStat(const string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
|
||||
{
|
||||
bool is_delete_cache = false;
|
||||
string strpath = key;
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.end();
|
||||
if(overcheck && '/' != strpath[strpath.length() - 1]){
|
||||
strpath += "/";
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
if(iter == stat_cache.end()){
|
||||
strpath = key;
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second){
|
||||
stat_cache_entry* ent = (*iter).second;
|
||||
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
|
||||
if(ent->noobjcache){
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
if(!IsCacheNoObject){
|
||||
// need to delete this cache.
|
||||
DelStat(strpath);
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
}else{
|
||||
// noobjcache = true means no object.
|
||||
}
|
||||
@ -255,10 +274,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
string stretag;
|
||||
if(petag){
|
||||
// find & check ETag
|
||||
for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
|
||||
string tag = lower(iter->first);
|
||||
for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){
|
||||
string tag = lower(hiter->first);
|
||||
if(tag == "etag"){
|
||||
stretag = iter->second;
|
||||
stretag = hiter->second;
|
||||
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
|
||||
is_delete_cache = true;
|
||||
}
|
||||
@ -268,12 +287,12 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
}
|
||||
if(is_delete_cache){
|
||||
// not hit by different ETag
|
||||
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
|
||||
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
|
||||
S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%lld.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
|
||||
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
|
||||
}else{
|
||||
// hit
|
||||
S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd.%09ld][hit count=%lu]",
|
||||
strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
|
||||
S3FS_PRN_DBG("stat cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
|
||||
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
|
||||
|
||||
if(pst!= NULL){
|
||||
*pst= ent->stbuf;
|
||||
@ -289,7 +308,6 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
if(IsExpireIntervalType){
|
||||
SetStatCacheTime(ent->cache_date);
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -298,15 +316,14 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
is_delete_cache = true;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath);
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
bool StatCache::IsNoObjectCache(const string& key, bool overcheck)
|
||||
{
|
||||
bool is_delete_cache = false;
|
||||
string strpath = key;
|
||||
@ -315,16 +332,16 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
return false;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.end();
|
||||
if(overcheck && '/' != strpath[strpath.length() - 1]){
|
||||
strpath += "/";
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
if(iter == stat_cache.end()){
|
||||
strpath = key;
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second) {
|
||||
@ -332,7 +349,6 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
if((*iter).second->noobjcache){
|
||||
// noobjcache = true means no object.
|
||||
SetStatCacheTime((*iter).second->cache_date);
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
}else{
|
||||
@ -340,27 +356,27 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
is_delete_cache = true;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath);
|
||||
DelStat(strpath, /*lock_already_held=*/ true);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
|
||||
bool StatCache::AddStat(const std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
|
||||
{
|
||||
if(!no_truncate && CacheSize< 1){
|
||||
return true;
|
||||
}
|
||||
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
bool found = stat_cache.end() != stat_cache.find(key);
|
||||
bool do_truncate = stat_cache.size() > CacheSize;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
@ -402,23 +418,27 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool n
|
||||
}
|
||||
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
|
||||
if(stat_cache.end() != iter){
|
||||
if(iter->second){
|
||||
delete iter->second;
|
||||
}
|
||||
delete iter->second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
stat_cache[key] = ent;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
// check symbolic link cache
|
||||
if(!S_ISLNK(ent->stbuf.st_mode)){
|
||||
if(symlink_cache.end() != symlink_cache.find(key)){
|
||||
// if symbolic link cache has key, thus remove it.
|
||||
DelSymlink(key.c_str(), true);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::AddNoObjectCache(string& key)
|
||||
bool StatCache::AddNoObjectCache(const string& key)
|
||||
{
|
||||
if(!IsCacheNoObject){
|
||||
return true; // pretend successful
|
||||
@ -428,12 +448,13 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
}
|
||||
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
bool found = stat_cache.end() != stat_cache.find(key);
|
||||
bool do_truncate = stat_cache.size() > CacheSize;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
@ -456,26 +477,27 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
SetStatCacheTime(ent->cache_date); // Set time.
|
||||
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
|
||||
if(stat_cache.end() != iter){
|
||||
if(iter->second){
|
||||
delete iter->second;
|
||||
}
|
||||
delete iter->second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
stat_cache[key] = ent;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
// check symbolic link cache
|
||||
if(symlink_cache.end() != symlink_cache.find(key)){
|
||||
// if symbolic link cache has key, thus remove it.
|
||||
DelSymlink(key.c_str(), true);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
|
||||
void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate)
|
||||
{
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
stat_cache_t::iterator iter = stat_cache.find(key);
|
||||
|
||||
if(stat_cache.end() != iter){
|
||||
@ -490,25 +512,22 @@ void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
|
||||
}
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
}
|
||||
|
||||
bool StatCache::TruncateCache(void)
|
||||
bool StatCache::TruncateCache()
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(stat_cache.empty()){
|
||||
return true;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
// 1) erase over expire time
|
||||
if(IsExpireTime){
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
|
||||
stat_cache_entry* entry = iter->second;
|
||||
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
|
||||
if(entry){
|
||||
delete entry;
|
||||
}
|
||||
delete entry;
|
||||
stat_cache.erase(iter++);
|
||||
}else{
|
||||
++iter;
|
||||
@ -518,7 +537,6 @@ bool StatCache::TruncateCache(void)
|
||||
|
||||
// 2) check stat cache count
|
||||
if(stat_cache.size() < CacheSize){
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -545,32 +563,26 @@ bool StatCache::TruncateCache(void)
|
||||
stat_cache_t::iterator siter = *iiter;
|
||||
|
||||
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
|
||||
if(siter->second){
|
||||
delete siter->second;
|
||||
}
|
||||
delete siter->second;
|
||||
stat_cache.erase(siter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::DelStat(const char* key)
|
||||
bool StatCache::DelStat(const char* key, bool lock_already_held)
|
||||
{
|
||||
if(!key){
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
|
||||
stat_cache_t::iterator iter;
|
||||
if(stat_cache.end() != (iter = stat_cache.find(string(key)))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
delete (*iter).second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
if(0 < strlen(key) && 0 != strcmp(key, "/")){
|
||||
@ -582,16 +594,157 @@ bool StatCache::DelStat(const char* key)
|
||||
// If there is "path/" cache, delete it.
|
||||
strpath += "/";
|
||||
}
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath.c_str()))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath))){
|
||||
delete (*iter).second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::GetSymlink(const string& key, string& value)
|
||||
{
|
||||
bool is_delete_cache = false;
|
||||
string strpath = key;
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
symlink_cache_t::iterator iter = symlink_cache.find(strpath);
|
||||
if(iter != symlink_cache.end() && iter->second){
|
||||
symlink_cache_entry* ent = iter->second;
|
||||
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){ // use the same as Stats
|
||||
// found
|
||||
S3FS_PRN_DBG("symbolic link cache hit [path=%s][time=%lld.%09ld][hit count=%lu]",
|
||||
strpath.c_str(), static_cast<long long>(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);
|
||||
|
||||
value = ent->link;
|
||||
|
||||
ent->hit_count++;
|
||||
if(IsExpireIntervalType){
|
||||
SetStatCacheTime(ent->cache_date);
|
||||
}
|
||||
return true;
|
||||
}else{
|
||||
// timeout
|
||||
is_delete_cache = true;
|
||||
}
|
||||
}
|
||||
|
||||
if(is_delete_cache){
|
||||
DelSymlink(strpath.c_str(), /*lock_already_held=*/ true);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool StatCache::AddSymlink(const string& key, const string& value)
|
||||
{
|
||||
if(CacheSize< 1){
|
||||
return true;
|
||||
}
|
||||
S3FS_PRN_INFO3("add symbolic link cache entry[path=%s, value=%s]", key.c_str(), value.c_str());
|
||||
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = symlink_cache.end() != symlink_cache.find(key);
|
||||
do_truncate = symlink_cache.size() > CacheSize;
|
||||
}
|
||||
|
||||
if(found){
|
||||
DelSymlink(key.c_str());
|
||||
}else{
|
||||
if(do_truncate){
|
||||
if(!TruncateSymlink()){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make new
|
||||
symlink_cache_entry* ent = new symlink_cache_entry();
|
||||
ent->link = value;
|
||||
ent->hit_count = 0;
|
||||
SetStatCacheTime(ent->cache_date); // Set time(use the same as Stats).
|
||||
|
||||
// add
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
symlink_cache_t::iterator iter = symlink_cache.find(key); // recheck for same key exists
|
||||
if(symlink_cache.end() != iter){
|
||||
delete iter->second;
|
||||
symlink_cache.erase(iter);
|
||||
}
|
||||
symlink_cache[key] = ent;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::TruncateSymlink()
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(symlink_cache.empty()){
|
||||
return true;
|
||||
}
|
||||
|
||||
// 1) erase over expire time
|
||||
if(IsExpireTime){
|
||||
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ){
|
||||
symlink_cache_entry* entry = iter->second;
|
||||
if(!entry || IsExpireStatCacheTime(entry->cache_date, ExpireTime)){ // use the same as Stats
|
||||
delete entry;
|
||||
symlink_cache.erase(iter++);
|
||||
}else{
|
||||
++iter;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2) check stat cache count
|
||||
if(symlink_cache.size() < CacheSize){
|
||||
return true;
|
||||
}
|
||||
|
||||
// 3) erase from the old cache in order
|
||||
size_t erase_count= symlink_cache.size() - CacheSize + 1;
|
||||
symlinkiterlist_t erase_iters;
|
||||
for(symlink_cache_t::iterator iter = symlink_cache.begin(); iter != symlink_cache.end(); ++iter){
|
||||
erase_iters.push_back(iter);
|
||||
sort(erase_iters.begin(), erase_iters.end(), sort_symlinkiterlist());
|
||||
if(erase_count < erase_iters.size()){
|
||||
erase_iters.pop_back();
|
||||
}
|
||||
}
|
||||
for(symlinkiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
|
||||
symlink_cache_t::iterator siter = *iiter;
|
||||
|
||||
S3FS_PRN_DBG("truncate symbolic link cache[path=%s]", siter->first.c_str());
|
||||
delete siter->second;
|
||||
symlink_cache.erase(siter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StatCache::DelSymlink(const char* key, bool lock_already_held)
|
||||
{
|
||||
if(!key){
|
||||
return false;
|
||||
}
|
||||
S3FS_PRN_INFO3("delete symbolic link cache entry[path=%s]", key);
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock, lock_already_held ? AutoLock::ALREADY_LOCKED : AutoLock::NONE);
|
||||
|
||||
symlink_cache_t::iterator iter;
|
||||
if(symlink_cache.end() != (iter = symlink_cache.find(string(key)))){
|
||||
delete iter->second;
|
||||
symlink_cache.erase(iter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -620,6 +773,9 @@ bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst,
|
||||
// mtime
|
||||
pst->st_mtime = get_mtime(meta);
|
||||
|
||||
// ctime
|
||||
pst->st_ctime = get_ctime(meta);
|
||||
|
||||
// size
|
||||
pst->st_size = get_size(meta);
|
||||
|
||||
|
||||
73
src/cache.h
73
src/cache.h
@ -24,7 +24,7 @@
|
||||
#include "common.h"
|
||||
|
||||
//
|
||||
// Struct
|
||||
// Struct for stats cache
|
||||
//
|
||||
struct stat_cache_entry {
|
||||
struct stat stbuf;
|
||||
@ -45,29 +45,57 @@ struct stat_cache_entry {
|
||||
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
|
||||
//
|
||||
// Struct for symbolic link cache
|
||||
//
|
||||
struct symlink_cache_entry {
|
||||
std::string link;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date; // The function that operates timespec uses the same as Stats
|
||||
|
||||
symlink_cache_entry() : link(""), hit_count(0) {
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
|
||||
|
||||
//
|
||||
// Class
|
||||
//
|
||||
// [NOTE] About Symbolic link cache
|
||||
// The Stats cache class now also has a symbolic link cache.
|
||||
// It is possible to take out the Symbolic link cache in another class,
|
||||
// but the cache out etc. should be synchronized with the Stats cache
|
||||
// and implemented in this class.
|
||||
// Symbolic link cache size and timeout use the same settings as Stats
|
||||
// cache. This simplifies user configuration, and from a user perspective,
|
||||
// the symbolic link cache appears to be included in the Stats cache.
|
||||
//
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
symlink_cache_t symlink_cache;
|
||||
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
|
||||
void Clear(void);
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
// Truncate symbolic link cache
|
||||
bool TruncateSymlink(void);
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
@ -93,37 +121,42 @@ class StatCache
|
||||
}
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(std::string& key, struct stat* pst, bool overcheck = true) {
|
||||
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true) {
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(std::string& key, headers_t* meta, bool overcheck = true) {
|
||||
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true) {
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, bool overcheck = true) {
|
||||
bool HasStat(const std::string& key, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, const char* etag, bool overcheck = true) {
|
||||
bool HasStat(const std::string& key, const char* etag, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(std::string& key);
|
||||
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(const std::string& key);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key);
|
||||
bool DelStat(std::string& key) {
|
||||
return DelStat(key.c_str());
|
||||
bool DelStat(const char* key, bool lock_already_held = false);
|
||||
bool DelStat(std::string& key, bool lock_already_held = false) {
|
||||
return DelStat(key.c_str(), lock_already_held);
|
||||
}
|
||||
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const std::string& value);
|
||||
bool DelSymlink(const char* key, bool lock_already_held = false);
|
||||
};
|
||||
|
||||
//
|
||||
|
||||
37
src/common.h
37
src/common.h
@ -21,6 +21,7 @@
|
||||
#ifndef S3FS_COMMON_H_
|
||||
#define S3FS_COMMON_H_
|
||||
|
||||
#include <stdlib.h>
|
||||
#include "../config.h"
|
||||
|
||||
//
|
||||
@ -37,7 +38,7 @@
|
||||
//
|
||||
// Macro
|
||||
//
|
||||
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
|
||||
static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
|
||||
|
||||
//
|
||||
// Debug level
|
||||
@ -75,30 +76,46 @@ enum s3fs_log_level{
|
||||
#define S3FS_LOG_NEST(nest) (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])
|
||||
|
||||
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
|
||||
do{ \
|
||||
if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
}else{ \
|
||||
fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
|
||||
}
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// Special macro for init message
|
||||
#define S3FS_PRN_INIT_INFO(fmt, ...) \
|
||||
do{ \
|
||||
if(foreground){ \
|
||||
fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
|
||||
}else{ \
|
||||
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// small trick for VA_ARGS
|
||||
@ -138,9 +155,7 @@ typedef struct xattr_value{
|
||||
explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
|
||||
~xattr_value()
|
||||
{
|
||||
if(pvalue){
|
||||
free(pvalue);
|
||||
}
|
||||
delete[] pvalue;
|
||||
}
|
||||
}XATTRVAL, *PXATTRVAL;
|
||||
|
||||
@ -149,6 +164,7 @@ typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
//
|
||||
// Global variables
|
||||
//
|
||||
// TODO: namespace these
|
||||
extern bool foreground;
|
||||
extern bool nomultipart;
|
||||
extern bool pathrequeststyle;
|
||||
@ -160,6 +176,7 @@ extern std::string bucket;
|
||||
extern std::string mount_prefix;
|
||||
extern std::string endpoint;
|
||||
extern std::string cipher_suites;
|
||||
extern std::string instance_name;
|
||||
extern s3fs_log_level debug_level;
|
||||
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
|
||||
|
||||
|
||||
@ -18,10 +18,10 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <climits>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
|
||||
#include "s3fs_auth.h"
|
||||
@ -44,10 +44,10 @@ string s3fs_get_content_md5(int fd)
|
||||
if(NULL == (base64 = s3fs_base64(md5hex, get_md5_digest_length()))){
|
||||
return string(""); // ENOMEM
|
||||
}
|
||||
free(md5hex);
|
||||
delete[] md5hex;
|
||||
|
||||
Signature = base64;
|
||||
free(base64);
|
||||
delete[] base64;
|
||||
|
||||
return Signature;
|
||||
}
|
||||
@ -62,7 +62,7 @@ string s3fs_md5sum(int fd, off_t start, ssize_t size)
|
||||
}
|
||||
|
||||
std::string md5 = s3fs_hex(md5hex, digestlen);
|
||||
free(md5hex);
|
||||
delete[] md5hex;
|
||||
|
||||
return md5;
|
||||
}
|
||||
@ -71,7 +71,6 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
size_t digestlen = get_sha256_digest_length();
|
||||
char sha256[2 * digestlen + 1];
|
||||
char hexbuf[3];
|
||||
unsigned char* sha256hex;
|
||||
|
||||
if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
|
||||
@ -80,10 +79,9 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
|
||||
memset(sha256, 0, 2 * digestlen + 1);
|
||||
for(size_t pos = 0; pos < digestlen; pos++){
|
||||
snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
|
||||
strncat(sha256, hexbuf, 2);
|
||||
snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]);
|
||||
}
|
||||
free(sha256hex);
|
||||
delete[] sha256hex;
|
||||
|
||||
return string(sha256);
|
||||
}
|
||||
|
||||
2483
src/curl.cpp
2483
src/curl.cpp
File diff suppressed because it is too large
Load Diff
194
src/curl.h
194
src/curl.h
@ -23,10 +23,45 @@
|
||||
|
||||
#include <cassert>
|
||||
|
||||
#include "psemaphore.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Avoid dependency on libcurl version
|
||||
//----------------------------------------------
|
||||
// [NOTE]
|
||||
// The following symbols (enum) depend on the version of libcurl.
|
||||
// CURLOPT_TCP_KEEPALIVE 7.25.0 and later
|
||||
// CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later
|
||||
// CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later
|
||||
//
|
||||
// s3fs uses these, if you build s3fs with the old libcurl,
|
||||
// substitute the following symbols to avoid errors.
|
||||
// If the version of libcurl linked at runtime is old,
|
||||
// curl_easy_setopt results in an error(CURLE_UNKNOWN_OPTION) and
|
||||
// a message is output.
|
||||
//
|
||||
#if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1)
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
|
||||
#else
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1)
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
|
||||
#else
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1)
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
|
||||
#else
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
|
||||
#endif
|
||||
|
||||
//----------------------------------------------
|
||||
// Symbols
|
||||
//----------------------------------------------
|
||||
#define MIN_MULTIPART_SIZE 5242880 // 5MB
|
||||
static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;
|
||||
|
||||
//----------------------------------------------
|
||||
// class BodyData
|
||||
@ -75,7 +110,7 @@ struct filepart
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) descriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
ssize_t size; // uploading size
|
||||
off_t size; // uploading size
|
||||
etaglist_t* etaglist; // use only parallel upload
|
||||
int etagpos; // use only parallel upload
|
||||
|
||||
@ -126,14 +161,12 @@ class S3fsMultiCurl;
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
typedef std::list<CURL*> hcurllist_t;
|
||||
|
||||
class CurlHandlerPool
|
||||
{
|
||||
public:
|
||||
explicit CurlHandlerPool(int maxHandlers)
|
||||
: mMaxHandlers(maxHandlers)
|
||||
, mHandlers(NULL)
|
||||
, mIndex(-1)
|
||||
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
|
||||
{
|
||||
assert(maxHandlers > 0);
|
||||
}
|
||||
@ -141,20 +174,24 @@ public:
|
||||
bool Init();
|
||||
bool Destroy();
|
||||
|
||||
CURL* GetHandler();
|
||||
void ReturnHandler(CURL* h);
|
||||
CURL* GetHandler(bool only_pool);
|
||||
void ReturnHandler(CURL* hCurl, bool restore_pool);
|
||||
|
||||
private:
|
||||
int mMaxHandlers;
|
||||
|
||||
int mMaxHandlers;
|
||||
pthread_mutex_t mLock;
|
||||
CURL** mHandlers;
|
||||
int mIndex;
|
||||
hcurllist_t mPool;
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
class PageList;
|
||||
class S3fsCurl;
|
||||
|
||||
// Prototype function for lazy setup options for curl handle
|
||||
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
|
||||
|
||||
typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
typedef std::map<std::string, std::string> sseckeymap_t;
|
||||
typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
@ -163,7 +200,21 @@ typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
enum storage_class_t {
|
||||
STANDARD,
|
||||
STANDARD_IA,
|
||||
REDUCED_REDUNDANCY
|
||||
ONEZONE_IA,
|
||||
REDUCED_REDUNDANCY,
|
||||
INTELLIGENT_TIERING
|
||||
};
|
||||
|
||||
enum acl_t {
|
||||
PRIVATE,
|
||||
PUBLIC_READ,
|
||||
PUBLIC_READ_WRITE,
|
||||
AWS_EXEC_READ,
|
||||
AUTHENTICATED_READ,
|
||||
BUCKET_OWNER_READ,
|
||||
BUCKET_OWNER_FULL_CONTROL,
|
||||
LOG_DELIVERY_WRITE,
|
||||
INVALID_ACL
|
||||
};
|
||||
|
||||
// sse type
|
||||
@ -175,9 +226,11 @@ enum sse_type_t {
|
||||
};
|
||||
|
||||
// share
|
||||
#define SHARE_MUTEX_DNS 0
|
||||
#define SHARE_MUTEX_SSL_SESSION 1
|
||||
#define SHARE_MUTEX_MAX 2
|
||||
enum {
|
||||
SHARE_MUTEX_DNS = 0,
|
||||
SHARE_MUTEX_SSL_SESSION = 1,
|
||||
SHARE_MUTEX_MAX = 2,
|
||||
};
|
||||
|
||||
// Class for lapping curl
|
||||
//
|
||||
@ -219,7 +272,7 @@ class S3fsCurl
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static std::string default_acl; // TODO: to enum
|
||||
static acl_t default_acl;
|
||||
static storage_class_t storage_class;
|
||||
static sseckeylist_t sseckeys;
|
||||
static std::string ssekmsid;
|
||||
@ -230,16 +283,26 @@ class S3fsCurl
|
||||
static std::string AWSSecretAccessKey;
|
||||
static std::string AWSAccessToken;
|
||||
static time_t AWSAccessTokenExpire;
|
||||
static bool is_ecs;
|
||||
static bool is_use_session_token;
|
||||
static bool is_ibm_iam_auth;
|
||||
static std::string IAM_cred_url;
|
||||
static size_t IAM_field_count;
|
||||
static std::string IAM_token_field;
|
||||
static std::string IAM_expiry_field;
|
||||
static std::string IAM_role;
|
||||
static long ssl_verify_hostname;
|
||||
static curltime_t curl_times;
|
||||
static curlprogress_t curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static std::string userAgent;
|
||||
static int max_parallel_cnt;
|
||||
static int max_multireq;
|
||||
static off_t multipart_size;
|
||||
static bool is_sigv4;
|
||||
static bool is_ua; // User-Agent
|
||||
static bool requester_pays;
|
||||
|
||||
// variables
|
||||
CURL* hCurl;
|
||||
@ -250,9 +313,9 @@ class S3fsCurl
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
BodyData* bodydata; // body data by WriteMemoryCallback
|
||||
BodyData* headdata; // header data by WriteMemoryCallback
|
||||
long LastResponseCode;
|
||||
BodyData bodydata; // body data by WriteMemoryCallback
|
||||
BodyData headdata; // header data by WriteMemoryCallback
|
||||
volatile long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
int postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
@ -266,6 +329,14 @@ class S3fsCurl
|
||||
int b_ssekey_pos; // backup for retrying
|
||||
std::string b_ssevalue; // backup for retrying
|
||||
sse_type_t b_ssetype; // backup for retrying
|
||||
std::string b_from; // backup for retrying(for copy request)
|
||||
headers_t b_meta; // backup for retrying(for copy request)
|
||||
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
|
||||
std::string query_string; // request query string
|
||||
Semaphore *sem;
|
||||
pthread_mutex_t *completed_tids_lock;
|
||||
std::vector<pthread_t> *completed_tids;
|
||||
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
|
||||
|
||||
public:
|
||||
// constructor/destructor
|
||||
@ -293,9 +364,19 @@ class S3fsCurl
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
|
||||
// lazy functions for set curl options
|
||||
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
@ -311,21 +392,26 @@ class S3fsCurl
|
||||
bool ResetHandle(void);
|
||||
bool RemakeHandle(void);
|
||||
bool ClearInternalData(void);
|
||||
void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
|
||||
void insertV4Headers();
|
||||
void insertV2Headers();
|
||||
void insertIBMIAMHeaders();
|
||||
void insertAuthHeaders();
|
||||
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
|
||||
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
|
||||
bool GetUploadId(std::string& upload_id);
|
||||
int GetIAMCredentials(void);
|
||||
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
|
||||
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
|
||||
bool UploadMultipartPostComplete();
|
||||
bool CopyMultipartPostComplete();
|
||||
bool MixMultipartPostComplete();
|
||||
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl(const char* MimeFile = NULL);
|
||||
static bool DestroyS3fsCurl(void);
|
||||
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
|
||||
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const PageList& pagelist);
|
||||
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
|
||||
static bool CheckIAMCredentialUpdate(void);
|
||||
|
||||
@ -340,7 +426,8 @@ class S3fsCurl
|
||||
static int SetRetries(int count);
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
|
||||
static std::string SetDefaultAcl(const char* acl);
|
||||
static acl_t SetDefaultAcl(acl_t acl);
|
||||
static acl_t GetDefaultAcl();
|
||||
static storage_class_t SetStorageClass(storage_class_t storage_class);
|
||||
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
|
||||
static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
|
||||
@ -362,13 +449,27 @@ class S3fsCurl
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
|
||||
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
|
||||
static bool IsSetAccessKeyId(void){
|
||||
return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken);
|
||||
static bool IsSetAccessKeyID(void){
|
||||
return (0 < S3fsCurl::AWSAccessKeyId.size());
|
||||
}
|
||||
static bool IsSetAccessKeys(void){
|
||||
return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
|
||||
// maximum parallel GET and PUT requests
|
||||
static int SetMaxParallelCount(int value);
|
||||
static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
|
||||
// maximum parallel HEAD requests
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; }
|
||||
static bool SetIsECS(bool flag);
|
||||
static bool SetIsIBMIAMAuth(bool flag);
|
||||
static size_t SetIAMFieldCount(size_t field_count);
|
||||
static std::string SetIAMCredentialsURL(const char* url);
|
||||
static std::string SetIAMTokenField(const char* token_field);
|
||||
static std::string SetIAMExpiryField(const char* expiry_field);
|
||||
static std::string SetIAMRole(const char* role);
|
||||
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
|
||||
static bool SetMultipartSize(off_t size);
|
||||
@ -377,15 +478,18 @@ class S3fsCurl
|
||||
static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
|
||||
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
|
||||
static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
|
||||
static void InitUserAgent(void);
|
||||
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
|
||||
static bool IsRequesterPays(void) { return S3fsCurl::requester_pays; }
|
||||
|
||||
// methods
|
||||
bool CreateCurlHandle(bool force = false);
|
||||
bool DestroyCurlHandle(void);
|
||||
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
|
||||
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
|
||||
|
||||
bool LoadIAMRoleFromMetaData(void);
|
||||
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
|
||||
bool GetResponseCode(long& responseCode);
|
||||
int RequestPerform(void);
|
||||
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
|
||||
int RequestPerform(bool dontAddAuthHeaders=false);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
|
||||
bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath, int ssekey_pos = -1) {
|
||||
@ -399,13 +503,13 @@ class S3fsCurl
|
||||
int CheckBucket(void);
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
|
||||
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
|
||||
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
|
||||
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
|
||||
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
|
||||
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
|
||||
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
|
||||
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
|
||||
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list);
|
||||
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
|
||||
|
||||
// methods(variables)
|
||||
@ -414,9 +518,10 @@ class S3fsCurl
|
||||
std::string GetBasePath(void) const { return base_path; }
|
||||
std::string GetSpacialSavedPath(void) const { return saved_path; }
|
||||
std::string GetUrl(void) const { return url; }
|
||||
std::string GetOp(void) const { return op; }
|
||||
headers_t* GetResponseHeaders(void) { return &responseHeaders; }
|
||||
BodyData* GetBodyData(void) const { return bodydata; }
|
||||
BodyData* GetHeadData(void) const { return headdata; }
|
||||
BodyData* GetBodyData(void) { return &bodydata; }
|
||||
BodyData* GetHeadData(void) { return &headdata; }
|
||||
long GetLastResponseCode(void) const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe(void) { return SetUseAhbe(true); }
|
||||
@ -433,21 +538,24 @@ class S3fsCurl
|
||||
//----------------------------------------------
|
||||
// Class for lapping multi curl
|
||||
//
|
||||
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
|
||||
typedef std::vector<S3fsCurl*> s3fscurllist_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
|
||||
|
||||
class S3fsMultiCurl
|
||||
{
|
||||
private:
|
||||
static int max_multireq;
|
||||
const int maxParallelism;
|
||||
|
||||
s3fscurlmap_t cMap_all; // all of curl requests
|
||||
s3fscurlmap_t cMap_req; // curl requests are sent
|
||||
s3fscurllist_t clist_all; // all of curl requests
|
||||
s3fscurllist_t clist_req; // curl requests are sent
|
||||
|
||||
S3fsMultiSuccessCallback SuccessCallback;
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
pthread_mutex_t completed_tids_lock;
|
||||
std::vector<pthread_t> completed_tids;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
@ -456,11 +564,10 @@ class S3fsMultiCurl
|
||||
static void* RequestPerformWrapper(void* arg);
|
||||
|
||||
public:
|
||||
S3fsMultiCurl();
|
||||
explicit S3fsMultiCurl(int maxParallelism);
|
||||
~S3fsMultiCurl();
|
||||
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsMultiCurl::max_multireq; }
|
||||
int GetMaxParallelism() { return maxParallelism; }
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
@ -479,9 +586,12 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* d
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
const char *acl_to_string(acl_t acl);
|
||||
acl_t string_to_acl(const char *acl);
|
||||
|
||||
#endif // S3FS_CURL_H_
|
||||
|
||||
|
||||
1393
src/fdcache.cpp
1393
src/fdcache.cpp
File diff suppressed because it is too large
Load Diff
108
src/fdcache.h
108
src/fdcache.h
@ -39,6 +39,7 @@ class CacheFileStat
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
static bool CheckCacheFileStatTopDir(void);
|
||||
static bool DeleteCacheFileStatDirectory(void);
|
||||
static bool RenameCacheFileStat(const char* oldpath, const char* newpath);
|
||||
|
||||
explicit CacheFileStat(const char* tpath = NULL);
|
||||
~CacheFileStat();
|
||||
@ -56,22 +57,24 @@ class CacheFileStat
|
||||
struct fdpage
|
||||
{
|
||||
off_t offset;
|
||||
size_t bytes;
|
||||
off_t bytes;
|
||||
bool loaded;
|
||||
bool modified;
|
||||
|
||||
fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
|
||||
: offset(start), bytes(size), loaded(is_loaded) {}
|
||||
fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false)
|
||||
: offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}
|
||||
|
||||
off_t next(void) const { return (offset + bytes); }
|
||||
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
|
||||
};
|
||||
typedef std::list<struct fdpage*> fdpage_list_t;
|
||||
typedef std::list<struct fdpage> fdpage_list_t;
|
||||
|
||||
class FdEntity;
|
||||
|
||||
//
|
||||
// Management of loading area/modifying
|
||||
//
|
||||
// cppcheck-suppress copyCtorAndEqOperator
|
||||
class PageList
|
||||
{
|
||||
friend class FdEntity; // only one method access directly pages.
|
||||
@ -79,26 +82,41 @@ class PageList
|
||||
private:
|
||||
fdpage_list_t pages;
|
||||
|
||||
public:
|
||||
enum page_status{
|
||||
PAGE_NOT_LOAD_MODIFIED = 0,
|
||||
PAGE_LOADED,
|
||||
PAGE_MODIFIED,
|
||||
PAGE_LOAD_MODIFIED
|
||||
};
|
||||
|
||||
private:
|
||||
void Clear(void);
|
||||
bool Compress(void);
|
||||
bool Compress(bool force_modified = false);
|
||||
bool Parse(off_t new_pos);
|
||||
bool RawGetUnloadPageList(fdpage_list_t& dlpages, off_t offset, off_t size);
|
||||
|
||||
public:
|
||||
static void FreeList(fdpage_list_t& list);
|
||||
|
||||
explicit PageList(size_t size = 0, bool is_loaded = false);
|
||||
explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false);
|
||||
explicit PageList(const PageList& other);
|
||||
~PageList();
|
||||
|
||||
bool Init(size_t size, bool is_loaded);
|
||||
size_t Size(void) const;
|
||||
bool Resize(size_t size, bool is_loaded);
|
||||
bool Init(off_t size, bool is_loaded, bool is_modified);
|
||||
off_t Size(void) const;
|
||||
bool Resize(off_t size, bool is_loaded, bool is_modified);
|
||||
|
||||
bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
|
||||
bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
|
||||
bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
|
||||
size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
|
||||
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
|
||||
bool IsPageLoaded(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
|
||||
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
|
||||
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
|
||||
bool GetLoadPageListForMultipartUpload(fdpage_list_t& dlpages);
|
||||
bool GetMultipartSizeList(fdpage_list_t& mplist, off_t partsize) const;
|
||||
|
||||
bool IsModified(void) const;
|
||||
bool ClearAllModified(void);
|
||||
|
||||
bool Serialize(CacheFileStat& file, bool is_output);
|
||||
void Dump(void);
|
||||
@ -110,27 +128,29 @@ class PageList
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
|
||||
pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
PageList pagelist;
|
||||
int refcnt; // reference count
|
||||
std::string path; // object path
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
int fd; // file descriptor(tmp file or cache file)
|
||||
FILE* pfile; // file pointer(tmp file or cache file)
|
||||
bool is_modify; // if file is changed, this flag is true
|
||||
headers_t orgmeta; // original headers at opening
|
||||
size_t size_orgmeta; // original file size in original headers
|
||||
off_t size_orgmeta; // original file size in original headers
|
||||
|
||||
pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
PageList pagelist;
|
||||
std::string upload_id; // for no cached multipart uploading when no disk space
|
||||
etaglist_t etaglist; // for no cached multipart uploading when no disk space
|
||||
off_t mp_start; // start position for no cached multipart(write method only)
|
||||
size_t mp_size; // size for no cached multipart(write method only)
|
||||
off_t mp_size; // size for no cached multipart(write method only)
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
|
||||
private:
|
||||
static int FillFile(int fd, unsigned char byte, size_t size, off_t start);
|
||||
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
|
||||
|
||||
void Clear(void);
|
||||
int OpenMirrorFile(void);
|
||||
@ -139,32 +159,37 @@ class FdEntity
|
||||
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
|
||||
|
||||
public:
|
||||
static bool SetNoMixMultipart(void);
|
||||
|
||||
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
~FdEntity();
|
||||
|
||||
void Close(void);
|
||||
bool IsOpen(void) const { return (-1 != fd); }
|
||||
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
|
||||
bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
|
||||
int Dup(bool no_fd_lock_wait = false);
|
||||
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
|
||||
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
|
||||
int Dup(bool lock_already_held = false);
|
||||
|
||||
const char* GetPath(void) const { return path.c_str(); }
|
||||
void SetPath(const std::string &newpath) { path = newpath; }
|
||||
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
|
||||
int GetFd(void) const { return fd; }
|
||||
bool IsModified(void) const { return pagelist.IsModified(); }
|
||||
|
||||
bool GetStats(struct stat& st);
|
||||
int SetMtime(time_t time);
|
||||
bool GetStats(struct stat& st, bool lock_already_held = false);
|
||||
int SetCtime(time_t time);
|
||||
int SetMtime(time_t time, bool lock_already_held = false);
|
||||
bool UpdateCtime(void);
|
||||
bool UpdateMtime(void);
|
||||
bool GetSize(size_t& size);
|
||||
bool GetSize(off_t& size);
|
||||
bool SetMode(mode_t mode);
|
||||
bool SetUId(uid_t uid);
|
||||
bool SetGId(gid_t gid);
|
||||
bool SetContentType(const char* path);
|
||||
|
||||
int Load(off_t start = 0, size_t size = 0); // size=0 means loading to end
|
||||
int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end
|
||||
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false); // size=0 means loading to end
|
||||
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
|
||||
int NoCachePreMultipartPost(void);
|
||||
int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
|
||||
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
|
||||
int NoCacheCompleteMultipartPost(void);
|
||||
|
||||
int RowFlush(const char* tpath, bool force_sync = false);
|
||||
@ -173,7 +198,7 @@ class FdEntity
|
||||
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(const char* bytes, off_t start, size_t size);
|
||||
|
||||
void CleanupCache();
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
};
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
|
||||
@ -186,15 +211,16 @@ class FdManager
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static pthread_mutex_t cache_cleanup_lock;
|
||||
static pthread_mutex_t reserved_diskspace_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static size_t free_disk_space; // limit free disk space
|
||||
static off_t free_disk_space; // limit free disk space
|
||||
|
||||
fdent_map_t fent;
|
||||
|
||||
private:
|
||||
static fsblkcnt_t GetFreeDiskSpace(const char* path);
|
||||
static off_t GetFreeDiskSpace(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
|
||||
public:
|
||||
@ -215,13 +241,15 @@ class FdManager
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist(void);
|
||||
|
||||
static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
|
||||
static size_t SetEnsureFreeDiskSpace(size_t size);
|
||||
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
|
||||
static bool IsSafeDiskSpace(const char* path, size_t size);
|
||||
static off_t GetEnsureFreeDiskSpace();
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent);
|
||||
|
||||
@ -57,7 +57,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
|
||||
@ -69,15 +69,20 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
}
|
||||
#ifndef USE_GNUTLS_NETTLE
|
||||
if(NULL == gcry_check_version(NULL)){
|
||||
return false;
|
||||
}
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
@ -86,12 +91,12 @@ bool s3fs_destroy_global_ssl(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@ -107,9 +112,7 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
|
||||
return false;
|
||||
}
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(SHA1_DIGEST_SIZE))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[SHA1_DIGEST_SIZE];
|
||||
|
||||
struct hmac_sha1_ctx ctx_hmac;
|
||||
hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
@ -126,9 +129,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
return false;
|
||||
}
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[SHA256_DIGEST_SIZE];
|
||||
|
||||
struct hmac_sha256_ctx ctx_hmac;
|
||||
hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
|
||||
@ -150,11 +151,9 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
|
||||
free(*digest);
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
@ -170,11 +169,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
|
||||
return false;
|
||||
}
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen + 1];
|
||||
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
|
||||
free(*digest);
|
||||
delete[] *digest;
|
||||
*digest = NULL;
|
||||
return false;
|
||||
}
|
||||
@ -186,11 +183,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
#define MD5_DIGEST_LENGTH 16
|
||||
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
return 16;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
@ -201,17 +196,12 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5_init(&ctx_md5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
@ -223,16 +213,9 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
md5_update(&ctx_md5, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
md5_digest(&ctx_md5, get_md5_digest_length(), result);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -254,11 +237,6 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
|
||||
S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
@ -267,29 +245,23 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_md5);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_md5, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
|
||||
gcry_md_close(ctx_md5);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -298,20 +270,16 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
#define SHA256_DIGEST_LENGTH 32
|
||||
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
return 32;
|
||||
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
struct sha256_ctx ctx_sha256;
|
||||
sha256_init(&ctx_sha256);
|
||||
@ -328,17 +296,12 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
ssize_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
@ -350,16 +313,9 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -367,16 +323,14 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[len];
|
||||
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
free(*digest);
|
||||
delete[] *digest;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
@ -402,11 +356,6 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
@ -415,29 +364,23 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
gcry_md_close(ctx_sha256);
|
||||
return NULL;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@ -42,7 +42,7 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "NSS";
|
||||
|
||||
@ -52,14 +52,18 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
NSS_Init(NULL);
|
||||
NSS_NoDB_Init(NULL);
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
|
||||
if(SECSuccess != NSS_NoDB_Init(NULL)){
|
||||
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
@ -70,12 +74,12 @@ bool s3fs_destroy_global_ssl(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@ -92,7 +96,6 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
SECStatus SecStatus;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
@ -111,9 +114,9 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
}
|
||||
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != (SecStatus = PK11_DigestBegin(Context)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestOp(Context, data, datalen)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest))) )
|
||||
if(SECSuccess != PK11_DigestBegin(Context) ||
|
||||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
|
||||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
|
||||
{
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
@ -124,9 +127,7 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
PK11_FreeSymKey(pKey);
|
||||
PK11_FreeSlot(Slot);
|
||||
|
||||
if(NULL == (*digest = (unsigned char*)malloc(*digestlen))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen];
|
||||
memcpy(*digest, tmpdigest, *digestlen);
|
||||
|
||||
return true;
|
||||
@ -145,7 +146,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_LENGTH;
|
||||
}
|
||||
@ -166,47 +167,35 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_LENGTH;
|
||||
}
|
||||
@ -214,9 +203,7 @@ size_t get_sha256_digest_length(void)
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
@ -246,17 +233,12 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
@ -269,18 +251,10 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@ -18,15 +18,15 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <cstring>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
#include <openssl/evp.h>
|
||||
@ -46,7 +46,7 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "OpenSSL";
|
||||
|
||||
@ -56,7 +56,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
ERR_load_crypto_strings();
|
||||
ERR_load_BIO_strings();
|
||||
@ -64,7 +64,7 @@ bool s3fs_init_global_ssl(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
@ -82,6 +82,7 @@ struct CRYPTO_dynlock_value
|
||||
|
||||
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
@ -93,25 +94,28 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long s3fs_crypt_get_threadid(void)
|
||||
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
|
||||
static unsigned long s3fs_crypt_get_threadid()
|
||||
{
|
||||
// For FreeBSD etc, some system's pthread_t is structure pointer.
|
||||
// Then we use cast like C style(not C++) instead of ifdef.
|
||||
return (unsigned long)(pthread_self());
|
||||
}
|
||||
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
|
||||
{
|
||||
struct CRYPTO_dynlock_value* dyndata;
|
||||
|
||||
if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
|
||||
S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
|
||||
return NULL;
|
||||
}
|
||||
pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
|
||||
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
pthread_mutex_init(&(dyndata->dyn_mutex), &attr);
|
||||
return dyndata;
|
||||
}
|
||||
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
@ -123,15 +127,16 @@ static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyn
|
||||
}
|
||||
}
|
||||
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
pthread_mutex_destroy(&(dyndata->dyn_mutex));
|
||||
free(dyndata);
|
||||
delete dyndata;
|
||||
}
|
||||
}
|
||||
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
@ -140,12 +145,14 @@ bool s3fs_init_crypt_mutex(void)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
|
||||
S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
|
||||
return false;
|
||||
}
|
||||
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
pthread_mutex_init(&s3fs_crypt_mutex[cnt], NULL);
|
||||
pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
|
||||
}
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
@ -158,7 +165,7 @@ bool s3fs_init_crypt_mutex(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
if(!s3fs_crypt_mutex){
|
||||
return true;
|
||||
@ -174,7 +181,7 @@ bool s3fs_destroy_crypt_mutex(void)
|
||||
pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
|
||||
}
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
free(s3fs_crypt_mutex);
|
||||
delete[] s3fs_crypt_mutex;
|
||||
s3fs_crypt_mutex = NULL;
|
||||
|
||||
return true;
|
||||
@ -189,9 +196,7 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
return false;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
if(NULL == ((*digest) = (unsigned char*)malloc(*digestlen))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen];
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}else{
|
||||
@ -214,7 +219,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
}
|
||||
@ -234,17 +239,12 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(buf, 0, 512);
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
@ -257,23 +257,16 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
|
||||
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
MD5_Final(result, &md5ctx);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
}
|
||||
@ -281,9 +274,7 @@ size_t get_sha256_digest_length(void)
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
return false;
|
||||
}
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
@ -311,18 +302,13 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
size = static_cast<ssize_t>(st.st_size);
|
||||
}
|
||||
|
||||
// seek to top of file.
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
|
||||
memset(buf, 0, 512);
|
||||
for(ssize_t total = 0; total < size; total += bytes){
|
||||
bytes = 512 < (size - total) ? 512 : (size - total);
|
||||
bytes = read(fd, buf, bytes);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
@ -335,17 +321,10 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
memset(buf, 0, 512);
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
|
||||
if(-1 == lseek(fd, start, SEEK_SET)){
|
||||
free(result);
|
||||
return NULL;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
75
src/psemaphore.h
Normal file
75
src/psemaphore.h
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SEMAPHORE_H_
|
||||
#define S3FS_SEMAPHORE_H_
|
||||
|
||||
// portability wrapper for sem_t since macOS does not implement it
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <dispatch/dispatch.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
|
||||
~Semaphore() {
|
||||
// macOS cannot destroy a semaphore with posts less than the initializer
|
||||
for(int i = 0; i < get_value(); ++i){
|
||||
post();
|
||||
}
|
||||
dispatch_release(sem);
|
||||
}
|
||||
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
|
||||
void post() { dispatch_semaphore_signal(sem); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
dispatch_semaphore_t sem;
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
#include <errno.h>
|
||||
#include <semaphore.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
|
||||
~Semaphore() { sem_destroy(&mutex); }
|
||||
void wait()
|
||||
{
|
||||
int r;
|
||||
do {
|
||||
r = sem_wait(&mutex);
|
||||
} while (r == -1 && errno == EINTR);
|
||||
}
|
||||
void post() { sem_post(&mutex); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
sem_t mutex;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif // S3FS_SEMAPHORE_H_
|
||||
1662
src/s3fs.cpp
1662
src/s3fs.cpp
File diff suppressed because it is too large
Load Diff
71
src/s3fs.h
71
src/s3fs.h
@ -21,68 +21,65 @@
|
||||
#define S3FS_S3_H_
|
||||
|
||||
#define FUSE_USE_VERSION 26
|
||||
#define FIVE_GB 5368709120LL
|
||||
|
||||
static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
|
||||
|
||||
#include <fuse.h>
|
||||
|
||||
#define S3FS_FUSE_EXIT() { \
|
||||
#define S3FS_FUSE_EXIT() \
|
||||
do{ \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// s3fs use many small allocated chunk in heap area for stats
|
||||
// cache and parsing xml, etc. The OS may decide that giving
|
||||
// this little memory back to the kernel will cause too much
|
||||
// overhead and delay the operation.
|
||||
// Address of gratitude, this workaround quotes a document of
|
||||
// libxml2.( http://xmlsoft.org/xmlmem.html )
|
||||
//
|
||||
// s3fs use many small allocated chunk in heap area for
|
||||
// stats cache and parsing xml, etc. The OS may decide
|
||||
// that giving this little memory back to the kernel
|
||||
// will cause too much overhead and delay the operation.
|
||||
// So s3fs calls malloc_trim function to really get the
|
||||
// memory back. Following macros is prepared for that
|
||||
// your system does not have it.
|
||||
//
|
||||
// Address of gratitude, this workaround quotes a document
|
||||
// of libxml2.
|
||||
// http://xmlsoft.org/xmlmem.html
|
||||
// When valgrind is used to test memory leak of s3fs, a large
|
||||
// amount of chunk may be reported. You can check the memory
|
||||
// release accurately by defining the S3FS_MALLOC_TRIM flag
|
||||
// and building it. Also, when executing s3fs, you can define
|
||||
// the MMAP_THRESHOLD environment variable and check more
|
||||
// accurate memory leak.( see, man 3 free )
|
||||
//
|
||||
#ifdef S3FS_MALLOC_TRIM
|
||||
#ifdef HAVE_MALLOC_TRIM
|
||||
|
||||
#include <malloc.h>
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
#else // S3FS_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // S3FS_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str)
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlFreeDoc(doc); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLFREE(ptr) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlFree(ptr); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlXPathFreeContext(ctx); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlXPathFreeObject(obj); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str) \
|
||||
fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
|
||||
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)
|
||||
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
}while(0)
|
||||
|
||||
#endif // S3FS_S3_H_
|
||||
|
||||
|
||||
@ -18,11 +18,11 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <cerrno>
|
||||
#include <libgen.h>
|
||||
#include <sys/stat.h>
|
||||
#include <pwd.h>
|
||||
@ -31,6 +31,9 @@
|
||||
#include <pthread.h>
|
||||
#include <sys/types.h>
|
||||
#include <dirent.h>
|
||||
#include <libxml/xpath.h>
|
||||
#include <libxml/xpathInternals.h>
|
||||
#include <libxml/tree.h>
|
||||
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
@ -48,7 +51,10 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
std::string mount_prefix = "";
|
||||
std::string mount_prefix;
|
||||
|
||||
static size_t max_password_size;
|
||||
static size_t max_group_name_length;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility
|
||||
@ -150,10 +156,10 @@ bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool
|
||||
s3obj_t::iterator iter;
|
||||
if(objects.end() != (iter = objects.find(name))){
|
||||
// found name --> over write
|
||||
(*iter).second.orgname.erase();
|
||||
(*iter).second.etag.erase();
|
||||
(*iter).second.normalname = normalized;
|
||||
(*iter).second.is_dir = is_dir;
|
||||
iter->second.orgname.erase();
|
||||
iter->second.etag.erase();
|
||||
iter->second.normalname = normalized;
|
||||
iter->second.is_dir = is_dir;
|
||||
}else{
|
||||
// not found --> add new object
|
||||
s3obj_entry newobject;
|
||||
@ -259,7 +265,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
|
||||
}
|
||||
string name = (*iter).first;
|
||||
if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){
|
||||
// only "/" string is skio this.
|
||||
// only "/" string is skipped this.
|
||||
name = name.substr(0, name.length() - 1);
|
||||
}
|
||||
list.push_back(name);
|
||||
@ -283,7 +289,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
|
||||
h_map[strtmp] = true;
|
||||
|
||||
// check hierarchized directory
|
||||
for(string::size_type pos = strtmp.find_last_of("/"); string::npos != pos; pos = strtmp.find_last_of("/")){
|
||||
for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){
|
||||
strtmp = strtmp.substr(0, pos);
|
||||
if(0 == strtmp.length() || "/" == strtmp){
|
||||
break;
|
||||
@ -318,22 +324,17 @@ MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, b
|
||||
char *p_old_path;
|
||||
char *p_new_path;
|
||||
|
||||
p = (MVNODE *) malloc(sizeof(MVNODE));
|
||||
if (p == NULL) {
|
||||
printf("create_mvnode: could not allocation memory for p\n");
|
||||
S3FS_FUSE_EXIT();
|
||||
return NULL;
|
||||
}
|
||||
p = new MVNODE();
|
||||
|
||||
if(NULL == (p_old_path = strdup(old_path))){
|
||||
free(p);
|
||||
delete p;
|
||||
printf("create_mvnode: could not allocation memory for p_old_path\n");
|
||||
S3FS_FUSE_EXIT();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(NULL == (p_new_path = strdup(new_path))){
|
||||
free(p);
|
||||
delete p;
|
||||
free(p_old_path);
|
||||
printf("create_mvnode: could not allocation memory for p_new_path\n");
|
||||
S3FS_FUSE_EXIT();
|
||||
@ -417,20 +418,35 @@ void free_mvnodes(MVNODE *head)
|
||||
next = my_head->next;
|
||||
free(my_head->old_path);
|
||||
free(my_head->new_path);
|
||||
free(my_head);
|
||||
delete my_head;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AutoLock
|
||||
//-------------------------------------------------------------------
|
||||
AutoLock::AutoLock(pthread_mutex_t* pmutex, bool no_wait) : auto_mutex(pmutex)
|
||||
AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex)
|
||||
{
|
||||
if (no_wait) {
|
||||
is_lock_acquired = pthread_mutex_trylock(auto_mutex) == 0;
|
||||
if (type == ALREADY_LOCKED) {
|
||||
is_lock_acquired = false;
|
||||
} else if (type == NO_WAIT) {
|
||||
int res = pthread_mutex_trylock(auto_mutex);
|
||||
if(res == 0){
|
||||
is_lock_acquired = true;
|
||||
}else if(res == EBUSY){
|
||||
is_lock_acquired = false;
|
||||
}else{
|
||||
S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
} else {
|
||||
is_lock_acquired = pthread_mutex_lock(auto_mutex) == 0;
|
||||
int res = pthread_mutex_lock(auto_mutex);
|
||||
if(res == 0){
|
||||
is_lock_acquired = true;
|
||||
}else{
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -446,80 +462,99 @@ AutoLock::~AutoLock()
|
||||
}
|
||||
}
|
||||
|
||||
void init_sysconf_vars()
|
||||
{
|
||||
// SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and
|
||||
// _SC_GETPW_R_SIZE_MAX:
|
||||
// Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if
|
||||
// there is no hard limit on the size of the buffer needed to
|
||||
// store all the groups returned.
|
||||
|
||||
long res = sysconf(_SC_GETPW_R_SIZE_MAX);
|
||||
if(0 > res){
|
||||
if (errno != 0){
|
||||
S3FS_PRN_WARN("could not get max pw length.");
|
||||
abort();
|
||||
}
|
||||
res = 1024; // default initial length
|
||||
}
|
||||
max_password_size = res;
|
||||
|
||||
res = sysconf(_SC_GETGR_R_SIZE_MAX);
|
||||
if(0 > res) {
|
||||
if (errno != 0) {
|
||||
S3FS_PRN_ERR("could not get max name length.");
|
||||
abort();
|
||||
}
|
||||
res = 1024; // default initial length
|
||||
}
|
||||
max_group_name_length = res;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility for UID/GID
|
||||
//-------------------------------------------------------------------
|
||||
// get user name from uid
|
||||
string get_username(uid_t uid)
|
||||
{
|
||||
static size_t maxlen = 0; // set once
|
||||
size_t maxlen = max_password_size;
|
||||
int result;
|
||||
char* pbuf;
|
||||
struct passwd pwinfo;
|
||||
struct passwd* ppwinfo = NULL;
|
||||
|
||||
// make buffer
|
||||
if(0 == maxlen){
|
||||
long res = sysconf(_SC_GETPW_R_SIZE_MAX);
|
||||
if(0 > res){
|
||||
S3FS_PRN_WARN("could not get max pw length.");
|
||||
maxlen = 0;
|
||||
return string("");
|
||||
}
|
||||
maxlen = res;
|
||||
pbuf = new char[maxlen];
|
||||
// get pw information
|
||||
while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
|
||||
delete[] pbuf;
|
||||
maxlen *= 2;
|
||||
pbuf = new char[maxlen];
|
||||
}
|
||||
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
|
||||
S3FS_PRN_CRIT("failed to allocate memory.");
|
||||
return string("");
|
||||
}
|
||||
// get group information
|
||||
if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){
|
||||
S3FS_PRN_WARN("could not get pw information.");
|
||||
free(pbuf);
|
||||
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("could not get pw information(%d).", result);
|
||||
delete[] pbuf;
|
||||
return string("");
|
||||
}
|
||||
|
||||
// check pw
|
||||
if(NULL == ppwinfo){
|
||||
free(pbuf);
|
||||
delete[] pbuf;
|
||||
return string("");
|
||||
}
|
||||
string name = SAFESTRPTR(ppwinfo->pw_name);
|
||||
free(pbuf);
|
||||
delete[] pbuf;
|
||||
return name;
|
||||
}
|
||||
|
||||
int is_uid_include_group(uid_t uid, gid_t gid)
|
||||
{
|
||||
static size_t maxlen = 0; // set once
|
||||
size_t maxlen = max_group_name_length;
|
||||
int result;
|
||||
char* pbuf;
|
||||
struct group ginfo;
|
||||
struct group* pginfo = NULL;
|
||||
|
||||
// make buffer
|
||||
if(0 == maxlen){
|
||||
long res = sysconf(_SC_GETGR_R_SIZE_MAX);
|
||||
if(0 > res){
|
||||
S3FS_PRN_ERR("could not get max name length.");
|
||||
maxlen = 0;
|
||||
return -ERANGE;
|
||||
}
|
||||
maxlen = res;
|
||||
}
|
||||
if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
|
||||
S3FS_PRN_CRIT("failed to allocate memory.");
|
||||
return -ENOMEM;
|
||||
}
|
||||
pbuf = new char[maxlen];
|
||||
// get group information
|
||||
if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
|
||||
S3FS_PRN_ERR("could not get group information.");
|
||||
free(pbuf);
|
||||
while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
|
||||
delete[] pbuf;
|
||||
maxlen *= 2;
|
||||
pbuf = new char[maxlen];
|
||||
}
|
||||
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("could not get group information(%d).", result);
|
||||
delete[] pbuf;
|
||||
return -result;
|
||||
}
|
||||
|
||||
// check group
|
||||
if(NULL == pginfo){
|
||||
// there is not gid in group.
|
||||
free(pbuf);
|
||||
delete[] pbuf;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -529,11 +564,11 @@ int is_uid_include_group(uid_t uid, gid_t gid)
|
||||
for(ppgr_mem = pginfo->gr_mem; ppgr_mem && *ppgr_mem; ppgr_mem++){
|
||||
if(username == *ppgr_mem){
|
||||
// Found username in group.
|
||||
free(pbuf);
|
||||
delete[] pbuf;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
free(pbuf);
|
||||
delete[] pbuf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -550,7 +585,7 @@ string mydirname(const char* path)
|
||||
return mydirname(string(path));
|
||||
}
|
||||
|
||||
string mydirname(string path)
|
||||
string mydirname(const string& path)
|
||||
{
|
||||
return string(dirname((char*)path.c_str()));
|
||||
}
|
||||
@ -565,7 +600,7 @@ string mybasename(const char* path)
|
||||
return mybasename(string(path));
|
||||
}
|
||||
|
||||
string mybasename(string path)
|
||||
string mybasename(const string& path)
|
||||
{
|
||||
return string(basename((char*)path.c_str()));
|
||||
}
|
||||
@ -573,9 +608,9 @@ string mybasename(string path)
|
||||
// mkdir --parents
|
||||
int mkdirp(const string& path, mode_t mode)
|
||||
{
|
||||
string base;
|
||||
string component;
|
||||
stringstream ss(path);
|
||||
string base;
|
||||
string component;
|
||||
istringstream ss(path);
|
||||
while (getline(ss, component, '/')) {
|
||||
base += "/" + component;
|
||||
|
||||
@ -585,7 +620,7 @@ int mkdirp(const string& path, mode_t mode)
|
||||
return EPERM;
|
||||
}
|
||||
}else{
|
||||
if(0 != mkdir(base.c_str(), mode)){
|
||||
if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){
|
||||
return errno;
|
||||
}
|
||||
}
|
||||
@ -596,10 +631,10 @@ int mkdirp(const string& path, mode_t mode)
|
||||
// get existed directory path
|
||||
string get_exist_directory_path(const string& path)
|
||||
{
|
||||
string existed("/"); // "/" is existed.
|
||||
string base;
|
||||
string component;
|
||||
stringstream ss(path);
|
||||
string existed("/"); // "/" is existed.
|
||||
string base;
|
||||
string component;
|
||||
istringstream ss(path);
|
||||
while (getline(ss, component, '/')) {
|
||||
if(base != "/"){
|
||||
base += "/";
|
||||
@ -625,7 +660,7 @@ bool check_exist_dir_permission(const char* dirpath)
|
||||
struct stat st;
|
||||
if(0 != stat(dirpath, &st)){
|
||||
if(ENOENT == errno){
|
||||
// dir does not exitst
|
||||
// dir does not exist
|
||||
return true;
|
||||
}
|
||||
if(EACCES == errno){
|
||||
@ -712,15 +747,29 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
time_t get_mtime(const char *s)
|
||||
time_t get_mtime(const char *str)
|
||||
{
|
||||
return static_cast<time_t>(s3fs_strtoofft(s));
|
||||
// [NOTE]
|
||||
// In rclone, there are cases where ns is set to x-amz-meta-mtime
|
||||
// with floating point number. s3fs uses x-amz-meta-mtime by
|
||||
// truncating the floating point or less (in seconds or less) to
|
||||
// correspond to this.
|
||||
//
|
||||
string strmtime;
|
||||
if(str && '\0' != *str){
|
||||
strmtime = str;
|
||||
string::size_type pos = strmtime.find('.', 0);
|
||||
if(string::npos != pos){
|
||||
strmtime = strmtime.substr(0, pos);
|
||||
}
|
||||
}
|
||||
return static_cast<time_t>(s3fs_strtoofft(strmtime.c_str()));
|
||||
}
|
||||
|
||||
time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
static time_t get_time(headers_t& meta, bool overcheck, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-mtime"))){
|
||||
if(meta.end() == (iter = meta.find(header))){
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
@ -729,6 +778,16 @@ time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
return get_mtime((*iter).second.c_str());
|
||||
}
|
||||
|
||||
time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
{
|
||||
return get_time(meta, overcheck, "x-amz-meta-mtime");
|
||||
}
|
||||
|
||||
time_t get_ctime(headers_t& meta, bool overcheck)
|
||||
{
|
||||
return get_time(meta, overcheck, "x-amz-meta-ctime");
|
||||
}
|
||||
|
||||
off_t get_size(const char *s)
|
||||
{
|
||||
return s3fs_strtoofft(s);
|
||||
@ -756,11 +815,13 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}else{
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}
|
||||
// If another tool creates an object without permissions, default to owner
|
||||
// read-write and group readable.
|
||||
mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
|
||||
}
|
||||
// Checking the bitmask, if the last 3 bits are all zero then process as a regular
|
||||
// file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO,
|
||||
@ -774,18 +835,19 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
if(meta.end() != (iter = meta.find("Content-Type"))){
|
||||
string strConType = (*iter).second;
|
||||
// Leave just the mime type, remove any optional parameters (eg charset)
|
||||
string::size_type pos = strConType.find(";");
|
||||
string::size_type pos = strConType.find(';');
|
||||
if(string::npos != pos){
|
||||
strConType = strConType.substr(0, pos);
|
||||
}
|
||||
if(strConType == "application/x-directory"){
|
||||
if(strConType == "application/x-directory"
|
||||
|| strConType == "httpd/unix-directory"){ // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
|
||||
mode |= S_IFDIR;
|
||||
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
|
||||
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(complement_stat){
|
||||
// If complement lack stat mode, when the object has '/' charactor at end of name
|
||||
// If complement lack stat mode, when the object has '/' character at end of name
|
||||
// and content type is text/plain and the object's size is 0 or 1, it should be
|
||||
// directory.
|
||||
off_t size = get_size(meta);
|
||||
@ -830,12 +892,13 @@ uid_t get_uid(const char *s)
|
||||
uid_t get_uid(headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-uid"))){
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return 0;
|
||||
}
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else{
|
||||
return geteuid();
|
||||
}
|
||||
return get_uid((*iter).second.c_str());
|
||||
}
|
||||
|
||||
gid_t get_gid(const char *s)
|
||||
@ -846,12 +909,13 @@ gid_t get_gid(const char *s)
|
||||
gid_t get_gid(headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-gid"))){
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return 0;
|
||||
}
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else{
|
||||
return getegid();
|
||||
}
|
||||
return get_gid((*iter).second.c_str());
|
||||
}
|
||||
|
||||
blkcnt_t get_blocks(off_t size)
|
||||
@ -925,16 +989,60 @@ bool is_need_check_obj_detail(headers_t& meta)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
if(!data || !key){
|
||||
return false;
|
||||
}
|
||||
value.clear();
|
||||
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(NULL == doc->children){
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
return false;
|
||||
}
|
||||
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
||||
// For DEBUG
|
||||
// string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
|
||||
// printf("cur_node_name: %s\n", cur_node_name.c_str());
|
||||
|
||||
if(XML_ELEMENT_NODE == cur_node->type){
|
||||
string elementName = reinterpret_cast<const char*>(cur_node->name);
|
||||
// For DEBUG
|
||||
// printf("elementName: %s\n", elementName.c_str());
|
||||
|
||||
if(cur_node->children){
|
||||
if(XML_TEXT_NODE == cur_node->children->type){
|
||||
if(elementName == key) {
|
||||
value = reinterpret_cast<const char *>(cur_node->children->content);
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Help
|
||||
//-------------------------------------------------------------------
|
||||
void show_usage (void)
|
||||
void show_usage ()
|
||||
{
|
||||
printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n",
|
||||
program_name.c_str());
|
||||
}
|
||||
|
||||
void show_help (void)
|
||||
void show_help ()
|
||||
{
|
||||
show_usage();
|
||||
printf(
|
||||
@ -944,18 +1052,19 @@ void show_help (void)
|
||||
"Usage:\n"
|
||||
" mounting\n"
|
||||
" s3fs bucket[:/path] mountpoint [options]\n"
|
||||
" s3fs mountpoint [options(must specify bucket= option)]\n"
|
||||
" s3fs mountpoint [options (must specify bucket= option)]\n"
|
||||
"\n"
|
||||
" umounting\n"
|
||||
" unmounting\n"
|
||||
" umount mountpoint\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs -u bucket\n"
|
||||
"\n"
|
||||
" General forms for s3fs and FUSE/mount options:\n"
|
||||
" -o opt[,opt...]\n"
|
||||
" -o opt [-o opt] ...\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs --incomplete-mpu-list (-u) bucket\n"
|
||||
" s3fs --incomplete-mpu-abort[=all | =<date format>] bucket\n"
|
||||
"\n"
|
||||
"s3fs Options:\n"
|
||||
"\n"
|
||||
" Most s3fs options are given in the form where \"opt\" is:\n"
|
||||
@ -963,24 +1072,24 @@ void show_help (void)
|
||||
" <option_name>=<option_value>\n"
|
||||
"\n"
|
||||
" bucket\n"
|
||||
" - if it is not specified bucket name(and path) in command line,\n"
|
||||
" - if it is not specified bucket name (and path) in command line,\n"
|
||||
" must specify this option after -o option for bucket name.\n"
|
||||
"\n"
|
||||
" default_acl (default=\"private\")\n"
|
||||
" - the default canned acl to apply to all written s3 objects,\n"
|
||||
" e.g., private, public-read. empty string means do not send\n"
|
||||
" header. see http://aws.amazon.com/documentation/s3/ for the\n"
|
||||
" full list of canned acls\n"
|
||||
" e.g., private, public-read. see\n"
|
||||
" https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n"
|
||||
" for the full list of canned acls\n"
|
||||
"\n"
|
||||
" retries (default=\"2\")\n"
|
||||
" - number of times to retry a failed s3 transaction\n"
|
||||
" retries (default=\"5\")\n"
|
||||
" - number of times to retry a failed S3 transaction\n"
|
||||
"\n"
|
||||
" use_cache (default=\"\" which means disabled)\n"
|
||||
" - local folder to use for local file cache\n"
|
||||
"\n"
|
||||
" check_cache_dir_exist (default is disable)\n"
|
||||
" - if use_cache is set, check if the cache directory exists.\n"
|
||||
" if this option is not specified, it will be created at runtime\n"
|
||||
" If this option is not specified, it will be created at runtime\n"
|
||||
" when the cache directory does not exist.\n"
|
||||
"\n"
|
||||
" del_cache (delete local file cache)\n"
|
||||
@ -988,7 +1097,13 @@ void show_help (void)
|
||||
"\n"
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, and reduced_redundancy.\n"
|
||||
" standard, standard_ia, onezone_ia, reduced_redundancy and intelligent_tiering.\n"
|
||||
"\n"
|
||||
" use_rrs (default is disable)\n"
|
||||
" - use Amazon's Reduced Redundancy Storage.\n"
|
||||
" this option can not be specified with use_sse.\n"
|
||||
" (can specify use_rrs=1 for old version)\n"
|
||||
" this option has been replaced by new storage_class option.\n"
|
||||
"\n"
|
||||
" use_sse (default is disable)\n"
|
||||
" - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n"
|
||||
@ -996,10 +1111,10 @@ void show_help (void)
|
||||
" keys, SSE-C uses customer-provided encryption keys, and\n"
|
||||
" SSE-KMS uses the master key which you manage in AWS KMS.\n"
|
||||
" You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n"
|
||||
" type(use_sse=1 is old type parameter).\n"
|
||||
" type (use_sse=1 is old type parameter).\n"
|
||||
" Case of setting SSE-C, you can specify \"use_sse=custom\",\n"
|
||||
" \"use_sse=custom:<custom key file path>\" or\n"
|
||||
" \"use_sse=<custom key file path>\"(only <custom key file path>\n"
|
||||
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
|
||||
" specified is old type parameter). You can use \"c\" for\n"
|
||||
" short \"custom\".\n"
|
||||
" The custom key file must be 600 permission. The file can\n"
|
||||
@ -1009,9 +1124,9 @@ void show_help (void)
|
||||
" after first line, those are used downloading object which\n"
|
||||
" are encrypted by not first key. So that, you can keep all\n"
|
||||
" SSE-C keys in file, that is SSE-C key history.\n"
|
||||
" If you specify \"custom\"(\"c\") without file path, you\n"
|
||||
" If you specify \"custom\" (\"c\") without file path, you\n"
|
||||
" need to set custom key by load_sse_c option or AWSSSECKEYS\n"
|
||||
" environment.(AWSSSECKEYS environment has some SSE-C keys\n"
|
||||
" environment. (AWSSSECKEYS environment has some SSE-C keys\n"
|
||||
" with \":\" separator.) This option is used to decide the\n"
|
||||
" SSE type. So that if you do not want to encrypt a object\n"
|
||||
" object at uploading, but you need to decrypt encrypted\n"
|
||||
@ -1020,8 +1135,8 @@ void show_help (void)
|
||||
" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
|
||||
" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
|
||||
" If you san specify SSE-KMS type with your <kms id> in AWS\n"
|
||||
" KMS, you can set it after \"kmsid:\"(or \"k:\"). If you\n"
|
||||
" specify only \"kmsid\"(\"k\"), you need to set AWSSSEKMSID\n"
|
||||
" KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n"
|
||||
" specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n"
|
||||
" environment which value is <kms id>. You must be careful\n"
|
||||
" about that you can not use the KMS id which is not same EC2\n"
|
||||
" region.\n"
|
||||
@ -1047,13 +1162,13 @@ void show_help (void)
|
||||
"\n"
|
||||
" ahbe_conf (default=\"\" which means disabled)\n"
|
||||
" - This option specifies the configuration file path which\n"
|
||||
" file is the additional HTTP header by file(object) extension.\n"
|
||||
" file is the additional HTTP header by file (object) extension.\n"
|
||||
" The configuration file format is below:\n"
|
||||
" -----------\n"
|
||||
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
|
||||
" file suffix = file(object) suffix, if this field is empty,\n"
|
||||
" file suffix = file (object) suffix, if this field is empty,\n"
|
||||
" it means \"reg:(.*)\".(=all object).\n"
|
||||
" regex = regular expression to match the file(object) path.\n"
|
||||
" regex = regular expression to match the file (object) path.\n"
|
||||
" this type starts with \"reg:\" prefix.\n"
|
||||
" HTTP-header = additional HTTP header name\n"
|
||||
" HTTP-values = additional HTTP header value\n"
|
||||
@ -1068,33 +1183,54 @@ void show_help (void)
|
||||
" If you specify this option for set \"Content-Encoding\" HTTP \n"
|
||||
" header, please take care for RFC 2616.\n"
|
||||
"\n"
|
||||
" profile (default=\"default\")\n"
|
||||
" - Choose a profile from ${HOME}/.aws/credentials to authenticate\n"
|
||||
" against S3. Note that this format matches the AWS CLI format and\n"
|
||||
" differs from the s3fs passwd format.\n"
|
||||
"\n"
|
||||
" connect_timeout (default=\"300\" seconds)\n"
|
||||
" - time to wait for connection before giving up\n"
|
||||
"\n"
|
||||
" readwrite_timeout (default=\"60\" seconds)\n"
|
||||
" readwrite_timeout (default=\"120\" seconds)\n"
|
||||
" - time to wait between read/write activity before giving up\n"
|
||||
"\n"
|
||||
" max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
|
||||
" - maximum number of entries in the stat cache\n"
|
||||
" list_object_max_keys (default=\"1000\")\n"
|
||||
" - specify the maximum number of keys returned by S3 list object\n"
|
||||
" API. The default is 1000. you can set this value to 1000 or more.\n"
|
||||
"\n"
|
||||
" max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
|
||||
" - maximum number of entries in the stat cache, and this maximum is\n"
|
||||
" also treated as the number of symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_expire (default is no expire)\n"
|
||||
" - specify expire time(seconds) for entries in the stat cache.\n"
|
||||
" This expire time indicates the time since stat cached.\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache.\n"
|
||||
" This expire time indicates the time since stat cached. and this\n"
|
||||
" is also set to the expire time of the symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_interval_expire (default is no expire)\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache(and\n"
|
||||
" symbolic link cache).\n"
|
||||
" This expire time is based on the time from the last access time\n"
|
||||
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
|
||||
" and is left for compatibility with older versions.\n"
|
||||
"\n"
|
||||
" enable_noobj_cache (default is disable)\n"
|
||||
" - enable cache entries for the object which does not exist.\n"
|
||||
" s3fs always has to check whether file(or sub directory) exists \n"
|
||||
" under object(path) when s3fs does some command, since s3fs has \n"
|
||||
" s3fs always has to check whether file (or sub directory) exists \n"
|
||||
" under object (path) when s3fs does some command, since s3fs has \n"
|
||||
" recognized a directory which does not exist and has files or \n"
|
||||
" sub directories under itself. It increases ListBucket request \n"
|
||||
" and makes performance bad.\n"
|
||||
" You can specify this option for performance, s3fs memorizes \n"
|
||||
" in stat cache that the object(file or directory) does not exist.\n"
|
||||
" in stat cache that the object (file or directory) does not exist.\n"
|
||||
"\n"
|
||||
" no_check_certificate\n"
|
||||
" - server certificate won't be checked against the available \n"
|
||||
" certificate authorities.\n"
|
||||
"\n"
|
||||
" ssl_verify_hostname (default=\"2\")\n"
|
||||
" - When 0, do not verify the SSL certificate against the hostname.\n"
|
||||
"\n"
|
||||
" nodnscache (disable dns cache)\n"
|
||||
" - s3fs is always using dns cache, this option make dns cache disable.\n"
|
||||
"\n"
|
||||
@ -1107,7 +1243,7 @@ void show_help (void)
|
||||
"\n"
|
||||
" parallel_count (default=\"5\")\n"
|
||||
" - number of parallel request for uploading big objects.\n"
|
||||
" s3fs uploads large object(over 20MB) by multipart post request, \n"
|
||||
" s3fs uploads large object (over 20MB) by multipart post request, \n"
|
||||
" and sends parallel requests.\n"
|
||||
" This option limits parallel request count which s3fs requests \n"
|
||||
" at once. It is necessary to set this value depending on a CPU \n"
|
||||
@ -1115,17 +1251,26 @@ void show_help (void)
|
||||
"\n"
|
||||
" multipart_size (default=\"10\")\n"
|
||||
" - part size, in MB, for each multipart request.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
"\n"
|
||||
" ensure_diskfree (default same multipart_size value)\n"
|
||||
" - sets MB to ensure disk free space. s3fs makes file for\n"
|
||||
" ensure_diskfree (default 0)\n"
|
||||
" - sets MB to ensure disk free space. This option means the\n"
|
||||
" threshold of free space size on disk which is used for the\n"
|
||||
" cache file by s3fs. s3fs makes file for\n"
|
||||
" downloading, uploading and caching files. If the disk free\n"
|
||||
" space is smaller than this value, s3fs do not use diskspace\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" singlepart_copy_limit (default=\"5120\")\n"
|
||||
" singlepart_copy_limit (default=\"512\")\n"
|
||||
" - maximum size, in MB, of a single-part copy before trying \n"
|
||||
" multipart copy.\n"
|
||||
"\n"
|
||||
" host (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - Set a non-Amazon host, e.g., https://example.com.\n"
|
||||
"\n"
|
||||
" servicepath (default=\"/\")\n"
|
||||
" - Set a service path when the non-Amazon host requires a prefix.\n"
|
||||
"\n"
|
||||
" url (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
|
||||
" then you can set \"url=http://s3.amazonaws.com\".\n"
|
||||
@ -1143,7 +1288,7 @@ void show_help (void)
|
||||
" error from the S3 server.\n"
|
||||
"\n"
|
||||
" sigv2 (default is signature version 4)\n"
|
||||
" - sets signing AWS requests by sing Signature Version 2\n"
|
||||
" - sets signing AWS requests by using Signature Version 2\n"
|
||||
"\n"
|
||||
" mp_umask (default is \"0000\")\n"
|
||||
" - sets umask for the mount point directory.\n"
|
||||
@ -1153,10 +1298,20 @@ void show_help (void)
|
||||
" this option, you can control the permissions of the\n"
|
||||
" mount point by this option like umask.\n"
|
||||
"\n"
|
||||
" umask (default is \"0000\")\n"
|
||||
" - sets umask for files under the mountpoint. This can allow\n"
|
||||
" users other than the mounting user to read and write to files\n"
|
||||
" that they did not create.\n"
|
||||
"\n"
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" - ensure data integrity during writes with MD5 hash.\n"
|
||||
" Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" ecs (default is disable)\n"
|
||||
" - This option instructs s3fs to query the ECS container credential\n"
|
||||
" metadata address instead of the instance metadata address.\n"
|
||||
"\n"
|
||||
" iam_role (default is no IAM role)\n"
|
||||
" - This option requires the IAM role name or \"auto\". If you specify\n"
|
||||
@ -1164,8 +1319,16 @@ void show_help (void)
|
||||
" to an instance. If you specify this option without any argument, it\n"
|
||||
" is the same as that you have specified the \"auto\".\n"
|
||||
"\n"
|
||||
" ibm_iam_auth (default is not using IBM IAM authentication)\n"
|
||||
" - This option instructs s3fs to use IBM IAM authentication.\n"
|
||||
" In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
|
||||
" IBM's Service-Instance-ID and APIKey, respectively.\n"
|
||||
"\n"
|
||||
" ibm_iam_endpoint (default is https://iam.bluemix.net)\n"
|
||||
" - sets the URL to use for IBM IAM authentication.\n"
|
||||
"\n"
|
||||
" use_xattr (default is not handling the extended attribute)\n"
|
||||
" Enable to handle the extended attribute(xattrs).\n"
|
||||
" Enable to handle the extended attribute (xattrs).\n"
|
||||
" If you set this option, you can use the extended attribute.\n"
|
||||
" For example, encfs and ecryptfs need to support the extended attribute.\n"
|
||||
" Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
|
||||
@ -1178,20 +1341,27 @@ void show_help (void)
|
||||
" This option should not be specified now, because s3fs looks up\n"
|
||||
" xmlns automatically after v1.66.\n"
|
||||
"\n"
|
||||
" nomixupload (disable copy in multipart uploads)\n"
|
||||
" Disable to use PUT (copy api) when multipart uploading large size objects.\n"
|
||||
" By default, when doing multipart upload, the range of unchanged data\n"
|
||||
" will use PUT (copy api) whenever possible.\n"
|
||||
" When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n"
|
||||
" invalidated even if this option is not specified.\n"
|
||||
"\n"
|
||||
" nocopyapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT(copy api).\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" If you set this option, s3fs do not use PUT with \n"
|
||||
" \"x-amz-copy-source\"(copy api). Because traffic is increased\n"
|
||||
" \"x-amz-copy-source\" (copy api). Because traffic is increased\n"
|
||||
" 2-3 times by this option, we do not recommend this.\n"
|
||||
"\n"
|
||||
" norenameapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT(copy api).\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" This option is a subset of nocopyapi option. The nocopyapi\n"
|
||||
" option does not use copy-api for all command(ex. chmod, chown,\n"
|
||||
" option does not use copy-api for all command (ex. chmod, chown,\n"
|
||||
" touch, mv, etc), but this option does not use copy-api for\n"
|
||||
" only rename command(ex. mv). If this option is specified with\n"
|
||||
" only rename command (ex. mv). If this option is specified with\n"
|
||||
" nocopyapi, then s3fs ignores it.\n"
|
||||
"\n"
|
||||
" use_path_request_style (use legacy API calling style)\n"
|
||||
@ -1205,23 +1375,16 @@ void show_help (void)
|
||||
" If this option is specified, s3fs suppresses the output of the\n"
|
||||
" User-Agent.\n"
|
||||
"\n"
|
||||
" dbglevel (default=\"crit\")\n"
|
||||
" Set the debug message level. set value as crit(critical), err\n"
|
||||
" (error), warn(warning), info(information) to debug level.\n"
|
||||
" default debug level is critical. If s3fs run with \"-d\" option,\n"
|
||||
" the debug level is set information. When s3fs catch the signal\n"
|
||||
" SIGUSR2, the debug level is bumpup.\n"
|
||||
"\n"
|
||||
" curldbg - put curl debug message\n"
|
||||
" Put the debug message from libcurl when this option is specified.\n"
|
||||
"\n"
|
||||
" cipher_suites - customize TLS cipher suite list\n"
|
||||
" cipher_suites\n"
|
||||
" Customize the list of TLS cipher suites.\n"
|
||||
" Expects a colon separated list of cipher suite names.\n"
|
||||
" A list of available cipher suites, depending on your TLS engine,\n"
|
||||
" can be found on the CURL library documentation:\n"
|
||||
" https://curl.haxx.se/docs/ssl-ciphers.html\n"
|
||||
"\n"
|
||||
" instance_name - The instance name of the current s3fs mountpoint.\n"
|
||||
" This name will be added to logging messages and user agent headers sent by s3fs.\n"
|
||||
"\n"
|
||||
" complement_stat (complement lack of file/directory mode)\n"
|
||||
" s3fs complements lack of information about file/directory mode\n"
|
||||
" if a file or a directory object does not have x-amz-meta-mode\n"
|
||||
@ -1247,6 +1410,35 @@ void show_help (void)
|
||||
" Please use this option when the directory in the bucket is\n"
|
||||
" only \"dir/\" object.\n"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid utf-8. But some\n"
|
||||
" clients, notably Windows NFS clients, use their own encoding.\n"
|
||||
" This option re-encodes invalid utf-8 object names into valid\n"
|
||||
" utf-8 by mapping offending codes into a 'private' codepage of the\n"
|
||||
" Unicode set.\n"
|
||||
" Useful on clients not using utf-8 as their file system encoding.\n"
|
||||
"\n"
|
||||
" use_session_token - indicate that session token should be provided.\n"
|
||||
" If credentials are provided by environment variables this switch\n"
|
||||
" forces presence check of AWSSESSIONTOKEN variable.\n"
|
||||
" Otherwise an error is returned."
|
||||
"\n"
|
||||
" requester_pays (default is disable)\n"
|
||||
" This option instructs s3fs to enable requests involving\n"
|
||||
" Requester Pays buckets.\n"
|
||||
" It includes the 'x-amz-request-payer=requester' entry in the\n"
|
||||
" request header."
|
||||
"\n"
|
||||
" dbglevel (default=\"crit\")\n"
|
||||
" Set the debug message level. set value as crit (critical), err\n"
|
||||
" (error), warn (warning), info (information) to debug level.\n"
|
||||
" default debug level is critical. If s3fs run with \"-d\" option,\n"
|
||||
" the debug level is set information. When s3fs catch the signal\n"
|
||||
" SIGUSR2, the debug level is bumpup.\n"
|
||||
"\n"
|
||||
" curldbg - put curl debug message\n"
|
||||
" Put the debug message from libcurl when this option is specified.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
@ -1258,6 +1450,22 @@ void show_help (void)
|
||||
" There are many FUSE specific mount options that can be specified.\n"
|
||||
" e.g. allow_other See the FUSE's README for the full set.\n"
|
||||
"\n"
|
||||
"Utility mode Options:\n"
|
||||
"\n"
|
||||
" -u, --incomplete-mpu-list\n"
|
||||
" Lists multipart incomplete objects uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" --incomplete-mpu-abort (=all or =<date format>)\n"
|
||||
" Delete the multipart incomplete object uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" If \"all\" is specified for this option, all multipart incomplete\n"
|
||||
" objects will be deleted. If you specify no argument as an option,\n"
|
||||
" objects older than 24 hours (24H) will be deleted (This is the\n"
|
||||
" default value). You can specify an optional date format. It can\n"
|
||||
" be specified as year, month, day, hour, minute, second, and it is\n"
|
||||
" expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n"
|
||||
" For example, \"1Y6M10D12h30m30s\".\n"
|
||||
"\n"
|
||||
"Miscellaneous Options:\n"
|
||||
"\n"
|
||||
" -h, --help Output this help.\n"
|
||||
@ -1271,19 +1479,17 @@ void show_help (void)
|
||||
"\n"
|
||||
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
void show_version(void)
|
||||
void show_version()
|
||||
{
|
||||
printf(
|
||||
"Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
|
||||
"Amazon Simple Storage Service File System V%s (commit:%s) with %s\n"
|
||||
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
|
||||
"License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
|
||||
"License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
|
||||
"This is free software: you are free to change and redistribute it.\n"
|
||||
"There is NO WARRANTY, to the extent permitted by law.\n",
|
||||
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -86,14 +86,19 @@ typedef struct mvnode {
|
||||
|
||||
class AutoLock
|
||||
{
|
||||
private:
|
||||
pthread_mutex_t* auto_mutex;
|
||||
bool is_lock_acquired;
|
||||
|
||||
public:
|
||||
explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
|
||||
enum Type {
|
||||
NO_WAIT = 1,
|
||||
ALREADY_LOCKED = 2,
|
||||
NONE = 0
|
||||
};
|
||||
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
|
||||
bool isLockAcquired() const;
|
||||
~AutoLock();
|
||||
|
||||
private:
|
||||
pthread_mutex_t* const auto_mutex;
|
||||
bool is_lock_acquired;
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -105,13 +110,14 @@ MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, b
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
void free_mvnodes(MVNODE *head);
|
||||
|
||||
void init_sysconf_vars();
|
||||
std::string get_username(uid_t uid);
|
||||
int is_uid_include_group(uid_t uid, gid_t gid);
|
||||
|
||||
std::string mydirname(const char* path);
|
||||
std::string mydirname(std::string path);
|
||||
std::string mydirname(const std::string& path);
|
||||
std::string mybasename(const char* path);
|
||||
std::string mybasename(std::string path);
|
||||
std::string mybasename(const std::string& path);
|
||||
int mkdirp(const std::string& path, mode_t mode);
|
||||
std::string get_exist_directory_path(const std::string& path);
|
||||
bool check_exist_dir_permission(const char* dirpath);
|
||||
@ -119,6 +125,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own);
|
||||
|
||||
time_t get_mtime(const char *s);
|
||||
time_t get_mtime(headers_t& meta, bool overcheck = true);
|
||||
time_t get_ctime(headers_t& meta, bool overcheck = true);
|
||||
off_t get_size(const char *s);
|
||||
off_t get_size(headers_t& meta);
|
||||
mode_t get_mode(const char *s);
|
||||
@ -132,6 +139,7 @@ time_t cvtIAMExpireStringToTime(const char* s);
|
||||
time_t get_lastmodified(const char* s);
|
||||
time_t get_lastmodified(headers_t& meta);
|
||||
bool is_need_check_obj_detail(headers_t& meta);
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
void show_usage(void);
|
||||
void show_help(void);
|
||||
|
||||
@ -17,12 +17,15 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#include <limits.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <syslog.h>
|
||||
#include <ctime>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
@ -32,47 +35,35 @@
|
||||
|
||||
using namespace std;
|
||||
|
||||
template <class T> std::string str(T value) {
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
|
||||
template std::string str(short value);
|
||||
template std::string str(unsigned short value);
|
||||
template std::string str(int value);
|
||||
template std::string str(unsigned int value);
|
||||
template std::string str(long value);
|
||||
template std::string str(unsigned long value);
|
||||
template std::string str(long long value);
|
||||
template std::string str(unsigned long long value);
|
||||
|
||||
static const char hexAlphabet[] = "0123456789ABCDEF";
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16)
|
||||
// replacement for C++11 std::stoll
|
||||
off_t s3fs_strtoofft(const char* str, int base)
|
||||
{
|
||||
if(!str || '\0' == *str){
|
||||
return 0;
|
||||
errno = 0;
|
||||
char *temp;
|
||||
long long result = strtoll(str, &temp, base);
|
||||
|
||||
if(temp == str || *temp != '\0'){
|
||||
throw std::invalid_argument("s3fs_strtoofft");
|
||||
}
|
||||
off_t result;
|
||||
bool chk_space;
|
||||
bool chk_base16_prefix;
|
||||
for(result = 0, chk_space = false, chk_base16_prefix = false; '\0' != *str; str++){
|
||||
// check head space
|
||||
if(!chk_space && isspace(*str)){
|
||||
continue;
|
||||
}else if(!chk_space){
|
||||
chk_space = true;
|
||||
}
|
||||
// check prefix for base 16
|
||||
if(!chk_base16_prefix){
|
||||
chk_base16_prefix = true;
|
||||
if('0' == *str && ('x' == str[1] || 'X' == str[1])){
|
||||
is_base_16 = true;
|
||||
str++;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// check like isalnum and set data
|
||||
result *= (is_base_16 ? 16 : 10);
|
||||
if('0' <= *str || '9' < *str){
|
||||
result += static_cast<off_t>(*str - '0');
|
||||
}else if(is_base_16){
|
||||
if('A' <= *str && *str <= 'F'){
|
||||
result += static_cast<off_t>(*str - 'A' + 0x0a);
|
||||
}else if('a' <= *str && *str <= 'f'){
|
||||
result += static_cast<off_t>(*str - 'a' + 0x0a);
|
||||
}else{
|
||||
return 0;
|
||||
}
|
||||
}else{
|
||||
return 0;
|
||||
}
|
||||
if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){
|
||||
throw std::out_of_range("s3fs_strtoofft");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@ -80,7 +71,7 @@ off_t s3fs_strtoofft(const char* str, bool is_base_16)
|
||||
string lower(string s)
|
||||
{
|
||||
// change each character of the string to lower case
|
||||
for(unsigned int i = 0; i < s.length(); i++){
|
||||
for(size_t i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
@ -105,8 +96,7 @@ string trim_right(const string &s, const string &t /* = SPACES */)
|
||||
|
||||
string trim(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return trim_left(trim_right(d, t), t);
|
||||
return trim_left(trim_right(s, t), t);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -117,7 +107,7 @@ string trim(const string &s, const string &t /* = SPACES */)
|
||||
string urlEncode(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (unsigned i = 0; i < s.length(); ++i) {
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
@ -145,7 +135,7 @@ string urlEncode(const string &s)
|
||||
string urlEncode2(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (unsigned i = 0; i < s.length(); ++i) {
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
@ -170,11 +160,11 @@ string urlEncode2(const string &s)
|
||||
string urlDecode(const string& s)
|
||||
{
|
||||
string result;
|
||||
for(unsigned i = 0; i < s.length(); ++i){
|
||||
for(size_t i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
char ch = 0;
|
||||
int ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
@ -184,7 +174,7 @@ string urlDecode(const string& s)
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += ch;
|
||||
result += static_cast<char>(ch);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@ -195,15 +185,15 @@ bool takeout_str_dquart(string& str)
|
||||
size_t pos;
|
||||
|
||||
// '"' for start
|
||||
if(string::npos != (pos = str.find_first_of("\""))){
|
||||
if(string::npos != (pos = str.find_first_of('\"'))){
|
||||
str = str.substr(pos + 1);
|
||||
|
||||
// '"' for end
|
||||
if(string::npos == (pos = str.find_last_of("\""))){
|
||||
if(string::npos == (pos = str.find_last_of('\"'))){
|
||||
return false;
|
||||
}
|
||||
str = str.substr(0, pos);
|
||||
if(string::npos != str.find_first_of("\"")){
|
||||
if(string::npos != str.find_first_of('\"')){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -244,7 +234,8 @@ string get_date_rfc850()
|
||||
{
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t));
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
@ -258,17 +249,88 @@ void get_date_sigv3(string& date, string& date8601)
|
||||
string get_date_string(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime(&tm));
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
string get_date_iso8601(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime(&tm));
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
{
|
||||
if(!pdate){
|
||||
return false;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
}
|
||||
unixtime = mktime(&tm);
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Convert to unixtime from string which formatted by following:
|
||||
// "12Y12M12D12h12m12s", "86400s", "9h30m", etc
|
||||
//
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
|
||||
{
|
||||
if(!argv){
|
||||
return false;
|
||||
}
|
||||
unixtime = 0;
|
||||
const char* ptmp;
|
||||
int last_unit_type = 0; // unit flag.
|
||||
bool is_last_number;
|
||||
time_t tmptime;
|
||||
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
|
||||
if('0' <= *ptmp && *ptmp <= '9'){
|
||||
tmptime *= 10;
|
||||
tmptime += static_cast<time_t>(*ptmp - '0');
|
||||
is_last_number = true;
|
||||
}else if(is_last_number){
|
||||
if('Y' == *ptmp && 1 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
|
||||
last_unit_type = 1;
|
||||
}else if('M' == *ptmp && 2 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
|
||||
last_unit_type = 2;
|
||||
}else if('D' == *ptmp && 3 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24));
|
||||
last_unit_type = 3;
|
||||
}else if('h' == *ptmp && 4 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60));
|
||||
last_unit_type = 4;
|
||||
}else if('m' == *ptmp && 5 > last_unit_type){
|
||||
unixtime += (tmptime * 60);
|
||||
last_unit_type = 5;
|
||||
}else if('s' == *ptmp && 6 > last_unit_type){
|
||||
unixtime += tmptime;
|
||||
last_unit_type = 6;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
tmptime = 0;
|
||||
is_last_number = false;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(is_last_number){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length)
|
||||
{
|
||||
std::string hex;
|
||||
@ -285,12 +347,10 @@ char* s3fs_base64(const unsigned char* input, size_t length)
|
||||
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
|
||||
if(!input || 0 >= length){
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
|
||||
return NULL; // ENOMEM
|
||||
}
|
||||
result = new char[((length / 3) + 1) * 4 + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
@ -338,9 +398,7 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
if(!input || 0 == strlen(input) || !plength){
|
||||
return NULL;
|
||||
}
|
||||
if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
|
||||
return NULL; // ENOMEM
|
||||
}
|
||||
result = new unsigned char[strlen(input) + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t input_len = strlen(input);
|
||||
@ -367,6 +425,135 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* detect and rewrite invalid utf8. We take invalid bytes
|
||||
* and encode them into a private region of the unicode
|
||||
* space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
* it is necessary because S3 validates the utf8 used for identifiers for
|
||||
* correctness, while some clients may provide invalid utf, notably
|
||||
* windows using cp1252.
|
||||
*/
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8ff
|
||||
// is a private range, se use the start of this range.
|
||||
static unsigned int escape_base = 0xe000;
|
||||
|
||||
// encode bytes into wobbly utf8.
|
||||
// 'result' can be null. returns true if transform was needed.
|
||||
bool s3fs_wtf8_encode(const char *s, string *result)
|
||||
{
|
||||
bool invalid = false;
|
||||
|
||||
// Pass valid utf8 code through
|
||||
for (; *s; s++) {
|
||||
const unsigned char c = *s;
|
||||
|
||||
// single byte encoding
|
||||
if (c <= 0x7f) {
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// otherwise, it must be one of the valid start bytes
|
||||
if ( c >= 0xc2 && c <= 0xf5 ) {
|
||||
|
||||
// two byte encoding
|
||||
// don't need bounds check, string is zero terminated
|
||||
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
|
||||
// all two byte encodings starting higher than c1 are valid
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// three byte encoding
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
|
||||
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xd8ff)) {
|
||||
// not overlong and not a surrogate pair
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// four byte encoding
|
||||
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
|
||||
if (code >= 0x10000 && code <= 0x10ffff) {
|
||||
// not overlong and in defined unicode space
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// printf("invalid %02x at %d\n", c, i);
|
||||
// Invalid utf8 code. Convert it to a private two byte area of unicode
|
||||
// e.g. the e000 - f8ff area. This will be a three byte encoding
|
||||
invalid = true;
|
||||
if (result) {
|
||||
unsigned escape = escape_base + c;
|
||||
*result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
|
||||
}
|
||||
}
|
||||
return invalid;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_encode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// The reverse operation, turn encoded bytes back into their original values
|
||||
// The code assumes that we map to a three-byte code point.
|
||||
bool s3fs_wtf8_decode(const char *s, string *result)
|
||||
{
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if(result){
|
||||
*result += static_cast<char>(code - escape_base);
|
||||
}
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
}
|
||||
return encoded;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_decode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
|
||||
@ -28,18 +28,15 @@
|
||||
#include <sys/types.h>
|
||||
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
|
||||
#define SPACES " \t\r\n"
|
||||
#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
|
||||
static const std::string SPACES = " \t\r\n";
|
||||
|
||||
template<typename T> std::string str(T value) {
|
||||
std::stringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
|
||||
template <class T> std::string str(T value);
|
||||
|
||||
// Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input.
|
||||
off_t s3fs_strtoofft(const char* str, int base = 0);
|
||||
|
||||
std::string trim_left(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim_right(const std::string &s, const std::string &t = SPACES);
|
||||
@ -49,6 +46,8 @@ std::string get_date_rfc850(void);
|
||||
void get_date_sigv3(std::string& date, std::string& date8601);
|
||||
std::string get_date_string(time_t tm);
|
||||
std::string get_date_iso8601(time_t tm);
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
@ -59,6 +58,11 @@ std::string s3fs_hex(const unsigned char* input, size_t length);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
#endif // S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
|
||||
@ -75,9 +75,52 @@ void test_base64()
|
||||
// TODO: invalid input
|
||||
}
|
||||
|
||||
void test_strtoofft()
|
||||
{
|
||||
ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast<off_t>(0L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast<off_t>(9L));
|
||||
try{
|
||||
s3fs_strtoofft("A");
|
||||
abort();
|
||||
}catch(std::exception &e){
|
||||
// expected
|
||||
}
|
||||
ASSERT_EQUALS(s3fs_strtoofft("A", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("F", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("a", /*base=*/ 16), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("f", /*base=*/ 16), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*base=*/ 16), static_cast<off_t>(3735928559L));
|
||||
}
|
||||
|
||||
void test_wtf8_encoding()
|
||||
{
|
||||
std::string ascii("normal string");
|
||||
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
|
||||
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
|
||||
std::string broken = utf8;
|
||||
broken[14] = 0x97;
|
||||
std::string mixed = ascii + utf8 + cp1252;
|
||||
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
test_trim();
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -20,11 +20,50 @@
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
@ -34,8 +73,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
if(x == NULL && y == NULL){
|
||||
return;
|
||||
// cppcheck-suppress nullPointerRedundantCheck
|
||||
} else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
} else if(x == NULL || y == NULL || strcmp(x, y) != 0){
|
||||
std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
@ -43,5 +82,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
#define ASSERT_EQUALS(x, y) \
|
||||
assert_equals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_NEQUALS(x, y) \
|
||||
assert_nequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_STREQUALS(x, y) \
|
||||
assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
141
test/filter-suite-log.sh
Executable file
141
test/filter-suite-log.sh
Executable file
@ -0,0 +1,141 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
func_usage()
|
||||
{
|
||||
echo ""
|
||||
echo "Usage: $1 [-h] <log file path>"
|
||||
echo " -h print help"
|
||||
echo " log file path path for test-suite.log"
|
||||
echo ""
|
||||
}
|
||||
|
||||
PRGNAME=`basename $0`
|
||||
SCRIPTDIR=`dirname $0`
|
||||
S3FSDIR=`cd ${SCRIPTDIR}/..; pwd`
|
||||
TOPDIR=`cd ${S3FSDIR}/test; pwd`
|
||||
SUITELOG="${TOPDIR}/test-suite.log"
|
||||
TMP_LINENO_FILE="/tmp/.lineno.tmp"
|
||||
|
||||
while [ $# -ne 0 ]; do
|
||||
if [ "X$1" = "X" ]; then
|
||||
break
|
||||
elif [ "X$1" = "X-h" -o "X$1" = "X-H" -o "X$1" = "X--help" -o "X$1" = "X--HELP" ]; then
|
||||
func_usage ${PRGNAME}
|
||||
exit 0
|
||||
else
|
||||
SUITELOG=$1
|
||||
fi
|
||||
shift
|
||||
done
|
||||
if [ ! -f ${SUITELOG} ]; then
|
||||
echo "[ERROR] not found ${SUITELOG} log file."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#
|
||||
# Extract keyword line numbers and types
|
||||
#
|
||||
# 0 : normal line
|
||||
# 1 : start line for one small test(specified in integration-test-main.sh)
|
||||
# 2 : passed line of end of one small test(specified in test-utils.sh)
|
||||
# 3 : failed line of end of one small test(specified in test-utils.sh)
|
||||
#
|
||||
grep -n -e 'test_.*: ".*"' -o -e 'test_.* passed' -o -e 'test_.* failed' ${SUITELOG} 2>/dev/null | sed 's/:test_.*: ".*"/ 1/g' | sed 's/:test_.* passed/ 2/g' | sed 's/:test_.* failed/ 3/g' > ${TMP_LINENO_FILE}
|
||||
|
||||
#
|
||||
# Loop for printing result
|
||||
#
|
||||
prev_line_type=0
|
||||
prev_line_number=1
|
||||
while read line; do
|
||||
# line is "<line number> <line type>"
|
||||
number_type=($line)
|
||||
|
||||
head_line_cnt=`expr ${number_type[0]} - 1`
|
||||
tail_line_cnt=`expr ${number_type[0]} - ${prev_line_number}`
|
||||
|
||||
if [ ${number_type[1]} -eq 2 ]; then
|
||||
echo ""
|
||||
fi
|
||||
if [ ${prev_line_type} -eq 1 ]; then
|
||||
if [ ${number_type[1]} -eq 2 ]; then
|
||||
# if passed, cut s3fs information messages
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
|
||||
elif [ ${number_type[1]} -eq 3 ]; then
|
||||
# if failed, print all
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
|
||||
else
|
||||
# there is start keyword but not end keyword, so print all
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
|
||||
fi
|
||||
elif [ ${prev_line_type} -eq 2 -o ${prev_line_type} -eq 3 ]; then
|
||||
if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
|
||||
# previous is end of chmpx, but this type is end of chmpx without start keyword. then print all
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
|
||||
else
|
||||
# this area is not from start to end, cut s3fs information messages
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
|
||||
fi
|
||||
else
|
||||
if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
|
||||
# previous is normal, but this type is end of chmpx without start keyword. then print all
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
|
||||
else
|
||||
# this area is normal, cut s3fs information messages
|
||||
head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
|
||||
fi
|
||||
fi
|
||||
if [ ${number_type[1]} -eq 3 ]; then
|
||||
echo ""
|
||||
fi
|
||||
prev_line_type=${number_type[1]}
|
||||
prev_line_number=${number_type[0]}
|
||||
|
||||
done < ${TMP_LINENO_FILE}
|
||||
|
||||
#
|
||||
# Print rest lines
|
||||
#
|
||||
file_line_cnt=`wc -l ${SUITELOG} | awk '{print $1}'`
|
||||
tail_line_cnt=`expr ${file_line_cnt} - ${prev_line_number}`
|
||||
|
||||
if [ ${prev_line_type} -eq 1 ]; then
|
||||
tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%'
|
||||
else
|
||||
tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
|
||||
fi
|
||||
|
||||
#
|
||||
# Remove temp file
|
||||
#
|
||||
rm -f ${TMP_LINENO_FILE}
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
@ -10,11 +10,11 @@
|
||||
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
|
||||
# TEST_BUCKET_1=bucketname Name of bucket to use
|
||||
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
|
||||
# S3_URL="http://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
#
|
||||
# Example of running against Amazon S3 using a bucket named "bucket:
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" ./small-integration-test.sh
|
||||
#
|
||||
# To change the s3fs-fuse debug level:
|
||||
#
|
||||
@ -27,7 +27,7 @@
|
||||
#
|
||||
# Run all of the tests from the makefile
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" make check
|
||||
#
|
||||
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
|
||||
# useful for poking around with plain old curl
|
||||
@ -38,10 +38,12 @@
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
S3FS=../src/s3fs
|
||||
|
||||
# Allow these defaulted values to be overridden
|
||||
: ${S3_URL:="http://127.0.0.1:8080"}
|
||||
: ${S3_URL:="https://127.0.0.1:8080"}
|
||||
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
|
||||
: ${TEST_BUCKET_1:="s3fs-integration-test"}
|
||||
|
||||
@ -50,7 +52,7 @@ export S3_URL
|
||||
export TEST_SCRIPT_DIR=`pwd`
|
||||
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
|
||||
|
||||
S3PROXY_VERSION="1.5.2"
|
||||
S3PROXY_VERSION="1.7.0"
|
||||
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
|
||||
|
||||
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
|
||||
@ -72,18 +74,18 @@ function retry {
|
||||
N=$1; shift;
|
||||
status=0
|
||||
for i in $(seq $N); do
|
||||
echo "Trying: $@"
|
||||
$@
|
||||
echo "Trying: $*"
|
||||
"$@"
|
||||
status=$?
|
||||
if [ $status == 0 ]; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
echo "Retrying: $@"
|
||||
echo "Retrying: $*"
|
||||
done
|
||||
|
||||
if [ $status != 0 ]; then
|
||||
echo "timeout waiting for $@"
|
||||
echo "timeout waiting for $*"
|
||||
fi
|
||||
set -o errexit
|
||||
return $status
|
||||
@ -108,7 +110,8 @@ function start_s3proxy {
|
||||
chmod +x "${S3PROXY_BINARY}"
|
||||
fi
|
||||
|
||||
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
|
||||
stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
|
||||
S3PROXY_PID=$!
|
||||
|
||||
# wait for S3Proxy to start
|
||||
for i in $(seq 30);
|
||||
@ -121,8 +124,6 @@ function start_s3proxy {
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
|
||||
fi
|
||||
}
|
||||
|
||||
@ -130,14 +131,12 @@ function stop_s3proxy {
|
||||
if [ -n "${S3PROXY_PID}" ]
|
||||
then
|
||||
kill $S3PROXY_PID
|
||||
wait $S3PROXY_PID
|
||||
fi
|
||||
}
|
||||
|
||||
# Mount the bucket, function arguments passed to s3fs in addition to
|
||||
# a set of common arguments.
|
||||
function start_s3fs {
|
||||
|
||||
# Public bucket if PUBLIC is set
|
||||
if [ -n "${PUBLIC}" ]; then
|
||||
AUTH_OPT="-o public_bucket=1"
|
||||
@ -148,14 +147,21 @@ function start_s3fs {
|
||||
# If VALGRIND is set, pass it as options to valgrind.
|
||||
# start valgrind-listener in another shell.
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
# Start valgind-listener (default port is 1500)
|
||||
# Start valgrind-listener (default port is 1500)
|
||||
if [ -n "${VALGRIND}" ]; then
|
||||
VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
|
||||
fi
|
||||
|
||||
# On OSX only, we need to specify the direct_io and auto_cache flag.
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
DIRECT_IO_OPT="-o direct_io -o auto_cache"
|
||||
else
|
||||
DIRECT_IO_OPT=""
|
||||
fi
|
||||
|
||||
# Common s3fs options:
|
||||
#
|
||||
# TODO: Allow all these options to be overriden with env variables
|
||||
# TODO: Allow all these options to be overridden with env variables
|
||||
#
|
||||
# use_path_request_style
|
||||
# The test env doesn't have virtual hosts
|
||||
@ -181,15 +187,38 @@ function start_s3fs {
|
||||
$TEST_BUCKET_MOUNT_POINT_1 \
|
||||
-o use_path_request_style \
|
||||
-o url=${S3_URL} \
|
||||
-o no_check_certificate \
|
||||
-o ssl_verify_hostname=0 \
|
||||
-o use_xattr=1 \
|
||||
-o createbucket \
|
||||
${AUTH_OPT} \
|
||||
${DIRECT_IO_OPT} \
|
||||
-o stat_cache_expire=1 \
|
||||
-o stat_cache_interval_expire=1 \
|
||||
-o dbglevel=${DBGLEVEL:=info} \
|
||||
-o retries=3 \
|
||||
-f \
|
||||
${@} \
|
||||
|& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
|
||||
"${@}" | stdbuf -oL -eL sed $SED_BUFFER_FLAG "s/^/s3fs: /" &
|
||||
)
|
||||
|
||||
retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
set +o errexit
|
||||
TRYCOUNT=0
|
||||
while [ $TRYCOUNT -le 20 ]; do
|
||||
df | grep -q $TEST_BUCKET_MOUNT_POINT_1
|
||||
if [ $? -eq 0 ]; then
|
||||
break;
|
||||
fi
|
||||
sleep 1
|
||||
TRYCOUNT=`expr ${TRYCOUNT} + 1`
|
||||
done
|
||||
if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
set -o errexit
|
||||
else
|
||||
retry 20 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
|
||||
fi
|
||||
|
||||
# Quick way to start system up for manual testing with options under test
|
||||
if [[ -n ${INTERACT} ]]; then
|
||||
@ -202,14 +231,20 @@ function start_s3fs {
|
||||
|
||||
function stop_s3fs {
|
||||
# Retry in case file system is in use
|
||||
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
|
||||
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
if df | grep -q $TEST_BUCKET_MOUNT_POINT_1; then
|
||||
retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1
|
||||
fi
|
||||
else
|
||||
if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
|
||||
retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
|
||||
function common_exit_handler {
|
||||
stop_s3proxy
|
||||
stop_s3fs
|
||||
stop_s3proxy
|
||||
}
|
||||
trap common_exit_handler EXIT
|
||||
|
||||
@ -1,25 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
source test-utils.sh
|
||||
|
||||
function test_append_file {
|
||||
describe "Testing append to file ..."
|
||||
TEST_INPUT="echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
|
||||
|
||||
# Write a small test file
|
||||
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
|
||||
do
|
||||
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
|
||||
echo $TEST_INPUT
|
||||
done > ${TEST_TEXT_FILE}
|
||||
|
||||
# Verify contents of file
|
||||
echo "Verifying length of test file"
|
||||
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
|
||||
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
|
||||
then
|
||||
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
|
||||
return 1
|
||||
fi
|
||||
check_file_size "${TEST_TEXT_FILE}" $(($TEST_TEXT_FILE_LENGTH * $(echo $TEST_INPUT | wc -c)))
|
||||
|
||||
rm_test_file
|
||||
}
|
||||
@ -32,12 +28,8 @@ function test_truncate_file {
|
||||
# Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...)
|
||||
: > ${TEST_TEXT_FILE}
|
||||
|
||||
# Verify file is zero length
|
||||
if [ -s ${TEST_TEXT_FILE} ]
|
||||
then
|
||||
echo "error: expected ${TEST_TEXT_FILE} to be zero length"
|
||||
return 1
|
||||
fi
|
||||
check_file_size "${TEST_TEXT_FILE}" 0
|
||||
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
@ -50,13 +42,8 @@ function test_truncate_empty_file {
|
||||
t_size=1024
|
||||
truncate ${TEST_TEXT_FILE} -s $t_size
|
||||
|
||||
# Verify file is zero length
|
||||
size=$(stat -c %s ${TEST_TEXT_FILE})
|
||||
if [ $t_size -ne $size ]
|
||||
then
|
||||
echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
|
||||
return 1
|
||||
fi
|
||||
check_file_size "${TEST_TEXT_FILE}" $t_size
|
||||
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
@ -77,6 +64,9 @@ function test_mv_file {
|
||||
# create the test file again
|
||||
mk_test_file
|
||||
|
||||
# save file length
|
||||
ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
|
||||
|
||||
#rename the test file
|
||||
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
|
||||
if [ ! -e $ALT_TEST_TEXT_FILE ]
|
||||
@ -84,9 +74,14 @@ function test_mv_file {
|
||||
echo "Could not move file"
|
||||
return 1
|
||||
fi
|
||||
|
||||
#check the renamed file content-type
|
||||
if [ -f "/etc/mime.types" ]
|
||||
then
|
||||
check_content_type "$1/$ALT_TEST_TEXT_FILE" "text/plain"
|
||||
fi
|
||||
|
||||
# Check the contents of the alt file
|
||||
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
|
||||
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
|
||||
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
|
||||
then
|
||||
@ -98,7 +93,7 @@ function test_mv_file {
|
||||
rm_test_file $ALT_TEST_TEXT_FILE
|
||||
}
|
||||
|
||||
function test_mv_directory {
|
||||
function test_mv_empty_directory {
|
||||
describe "Testing mv directory function ..."
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
|
||||
@ -108,7 +103,6 @@ function test_mv_directory {
|
||||
mk_test_dir
|
||||
|
||||
mv ${TEST_DIR} ${TEST_DIR}_rename
|
||||
|
||||
if [ ! -d "${TEST_DIR}_rename" ]; then
|
||||
echo "Directory ${TEST_DIR} was not renamed"
|
||||
return 1
|
||||
@ -121,6 +115,30 @@ function test_mv_directory {
|
||||
fi
|
||||
}
|
||||
|
||||
function test_mv_nonempty_directory {
|
||||
describe "Testing mv directory function ..."
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
mk_test_dir
|
||||
|
||||
touch ${TEST_DIR}/file
|
||||
|
||||
mv ${TEST_DIR} ${TEST_DIR}_rename
|
||||
if [ ! -d "${TEST_DIR}_rename" ]; then
|
||||
echo "Directory ${TEST_DIR} was not renamed"
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm -r ${TEST_DIR}_rename
|
||||
if [ -e "${TEST_DIR}_rename" ]; then
|
||||
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
function test_redirects {
|
||||
describe "Testing redirects ..."
|
||||
|
||||
@ -179,12 +197,21 @@ function test_chmod {
|
||||
# create the test file again
|
||||
mk_test_file
|
||||
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
|
||||
else
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
|
||||
fi
|
||||
|
||||
chmod 777 $TEST_TEXT_FILE;
|
||||
|
||||
# if they're the same, we have a problem.
|
||||
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
|
||||
else
|
||||
CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
|
||||
fi
|
||||
if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
|
||||
then
|
||||
echo "Could not modify $TEST_TEXT_FILE permissions"
|
||||
return 1
|
||||
@ -200,12 +227,28 @@ function test_chown {
|
||||
# create the test file again
|
||||
mk_test_file
|
||||
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
|
||||
else
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
|
||||
fi
|
||||
|
||||
chown 1000:1000 $TEST_TEXT_FILE;
|
||||
# [NOTE]
|
||||
# Prevents test interruptions due to permission errors, etc.
|
||||
# If the chown command fails, an error will occur with the
|
||||
# following judgment statement. So skip the chown command error.
|
||||
# '|| true' was added due to a problem with Travis CI and MacOS
|
||||
# and ensure_diskfree option.
|
||||
#
|
||||
chown 1000:1000 $TEST_TEXT_FILE || true
|
||||
|
||||
# if they're the same, we have a problem.
|
||||
if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
|
||||
else
|
||||
CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
|
||||
fi
|
||||
if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
|
||||
then
|
||||
if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
|
||||
then
|
||||
@ -239,11 +282,33 @@ function test_remove_nonempty_directory {
|
||||
describe "Testing removing a non-empty directory"
|
||||
mk_test_dir
|
||||
touch "${TEST_DIR}/file"
|
||||
rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
|
||||
(
|
||||
set +o pipefail
|
||||
rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
|
||||
)
|
||||
rm "${TEST_DIR}/file"
|
||||
rm_test_dir
|
||||
}
|
||||
|
||||
function test_external_modification {
|
||||
describe "Test external modification to an object"
|
||||
echo "old" > ${TEST_TEXT_FILE}
|
||||
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
|
||||
sleep 2
|
||||
echo "new new" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
|
||||
cmp ${TEST_TEXT_FILE} <(echo "new new")
|
||||
rm -f ${TEST_TEXT_FILE}
|
||||
}
|
||||
|
||||
function test_read_external_object() {
|
||||
describe "create objects via aws CLI and read via s3fs"
|
||||
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
|
||||
sleep 3
|
||||
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
|
||||
cmp ${TEST_TEXT_FILE} <(echo "test")
|
||||
rm -f ${TEST_TEXT_FILE}
|
||||
}
|
||||
|
||||
function test_rename_before_close {
|
||||
describe "Testing rename before close ..."
|
||||
(
|
||||
@ -262,6 +327,7 @@ function test_rename_before_close {
|
||||
|
||||
function test_multipart_upload {
|
||||
describe "Testing multi-part upload ..."
|
||||
|
||||
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
|
||||
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
|
||||
|
||||
@ -278,6 +344,7 @@ function test_multipart_upload {
|
||||
|
||||
function test_multipart_copy {
|
||||
describe "Testing multi-part copy ..."
|
||||
|
||||
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
|
||||
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
|
||||
mv "${BIG_FILE}" "${BIG_FILE}-copy"
|
||||
@ -289,18 +356,109 @@ function test_multipart_copy {
|
||||
return 1
|
||||
fi
|
||||
|
||||
#check the renamed file content-type
|
||||
check_content_type "$1/${BIG_FILE}-copy" "application/octet-stream"
|
||||
|
||||
rm -f "/tmp/${BIG_FILE}"
|
||||
rm_test_file "${BIG_FILE}-copy"
|
||||
}
|
||||
|
||||
function test_multipart_mix {
|
||||
describe "Testing multi-part mix ..."
|
||||
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
cat /dev/null > $BIG_FILE
|
||||
fi
|
||||
dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1
|
||||
dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH seek=0 count=1
|
||||
|
||||
# (1) Edit the middle of an existing file
|
||||
# modify directly(seek 7.5MB offset)
|
||||
# In the case of nomultipart and nocopyapi,
|
||||
# it makes no sense, but copying files is because it leaves no cache.
|
||||
#
|
||||
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
|
||||
cp ${BIG_FILE} ${BIG_FILE}-mix
|
||||
|
||||
MODIFY_START_BLOCK=$((15*1024*1024/2/4))
|
||||
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
|
||||
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
|
||||
|
||||
# Verify contents of file
|
||||
echo "Comparing test file (1)"
|
||||
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# (2) Write to an area larger than the size of the existing file
|
||||
# modify directly(over file end offset)
|
||||
#
|
||||
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
|
||||
cp ${BIG_FILE} ${BIG_FILE}-mix
|
||||
|
||||
OVER_FILE_BLOCK_POS=$((26*1024*1024/4))
|
||||
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc
|
||||
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$OVER_FILE_BLOCK_POS conv=notrunc
|
||||
|
||||
# Verify contents of file
|
||||
echo "Comparing test file (2)"
|
||||
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# (3) Writing from the 0th byte
|
||||
#
|
||||
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
|
||||
cp ${BIG_FILE} ${BIG_FILE}-mix
|
||||
|
||||
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
|
||||
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=0 conv=notrunc
|
||||
|
||||
# Verify contents of file
|
||||
echo "Comparing test file (3)"
|
||||
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# (4) Write to the area within 5MB from the top
|
||||
# modify directly(seek 1MB offset)
|
||||
#
|
||||
cp /tmp/${BIG_FILE} /tmp/${BIG_FILE}-mix
|
||||
cp ${BIG_FILE} ${BIG_FILE}-mix
|
||||
|
||||
MODIFY_START_BLOCK=$((1*1024*1024))
|
||||
echo -n "0123456789ABCDEF" | dd of="${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
|
||||
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}-mix" bs=4 count=4 seek=$MODIFY_START_BLOCK conv=notrunc
|
||||
|
||||
# Verify contents of file
|
||||
echo "Comparing test file (4)"
|
||||
if ! cmp "/tmp/${BIG_FILE}-mix" "${BIG_FILE}-mix"
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm -f "/tmp/${BIG_FILE}"
|
||||
rm -f "/tmp/${BIG_FILE}-mix"
|
||||
rm_test_file "${BIG_FILE}"
|
||||
rm_test_file "${BIG_FILE}-mix"
|
||||
}
|
||||
|
||||
function test_special_characters {
|
||||
describe "Testing special characters ..."
|
||||
|
||||
ls 'special' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special?' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special*' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special~' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
|
||||
(
|
||||
set +o pipefail
|
||||
ls 'special' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special?' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special*' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'special~' 2>&1 | grep -q 'No such file or directory'
|
||||
ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
|
||||
)
|
||||
|
||||
mkdir "TOYOTA TRUCK 8.2.2"
|
||||
}
|
||||
|
||||
function test_symlink {
|
||||
@ -317,30 +475,31 @@ function test_symlink {
|
||||
|
||||
[ -L $ALT_TEST_TEXT_FILE ]
|
||||
[ ! -f $ALT_TEST_TEXT_FILE ]
|
||||
|
||||
rm -f $ALT_TEST_TEXT_FILE
|
||||
}
|
||||
|
||||
function test_extended_attributes {
|
||||
command -v setfattr >/dev/null 2>&1 || \
|
||||
{ echo "Skipping extended attribute tests" ; return; }
|
||||
|
||||
describe "Testing extended attributes ..."
|
||||
|
||||
rm -f $TEST_TEXT_FILE
|
||||
touch $TEST_TEXT_FILE
|
||||
|
||||
# set value
|
||||
setfattr -n key1 -v value1 $TEST_TEXT_FILE
|
||||
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
|
||||
set_xattr key1 value1 $TEST_TEXT_FILE
|
||||
get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$'
|
||||
|
||||
# append value
|
||||
setfattr -n key2 -v value2 $TEST_TEXT_FILE
|
||||
getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
|
||||
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
|
||||
set_xattr key2 value2 $TEST_TEXT_FILE
|
||||
get_xattr key1 $TEST_TEXT_FILE | grep -q '^value1$'
|
||||
get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$'
|
||||
|
||||
# remove value
|
||||
setfattr -x key1 $TEST_TEXT_FILE
|
||||
! getfattr -n key1 --only-values $TEST_TEXT_FILE
|
||||
getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
|
||||
del_xattr key1 $TEST_TEXT_FILE
|
||||
! get_xattr key1 $TEST_TEXT_FILE
|
||||
get_xattr key2 $TEST_TEXT_FILE | grep -q '^value2$'
|
||||
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
function test_mtime_file {
|
||||
@ -364,13 +523,67 @@ function test_mtime_file {
|
||||
|
||||
#copy the test file with preserve mode
|
||||
cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
|
||||
testmtime=`stat -c %Y $TEST_TEXT_FILE`
|
||||
altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
|
||||
testmtime=`get_mtime $TEST_TEXT_FILE`
|
||||
altmtime=`get_mtime $ALT_TEST_TEXT_FILE`
|
||||
if [ "$testmtime" -ne "$altmtime" ]
|
||||
then
|
||||
echo "File times do not match: $testmtime != $altmtime"
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm_test_file
|
||||
rm_test_file $ALT_TEST_TEXT_FILE
|
||||
}
|
||||
|
||||
function test_update_time() {
|
||||
describe "Testing update time function ..."
|
||||
|
||||
# create the test
|
||||
mk_test_file
|
||||
mtime=`get_ctime $TEST_TEXT_FILE`
|
||||
ctime=`get_mtime $TEST_TEXT_FILE`
|
||||
|
||||
sleep 2
|
||||
chmod +x $TEST_TEXT_FILE
|
||||
|
||||
ctime2=`get_ctime $TEST_TEXT_FILE`
|
||||
mtime2=`get_mtime $TEST_TEXT_FILE`
|
||||
if [ $ctime -eq $ctime2 -o $mtime -ne $mtime2 ]; then
|
||||
echo "Expected updated ctime: $ctime != $ctime2 and same mtime: $mtime == $mtime2"
|
||||
return 1
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
chown $UID:$UID $TEST_TEXT_FILE;
|
||||
|
||||
ctime3=`get_ctime $TEST_TEXT_FILE`
|
||||
mtime3=`get_mtime $TEST_TEXT_FILE`
|
||||
if [ $ctime2 -eq $ctime3 -o $mtime2 -ne $mtime3 ]; then
|
||||
echo "Expected updated ctime: $ctime2 != $ctime3 and same mtime: $mtime2 == $mtime3"
|
||||
return 1
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
set_xattr key value $TEST_TEXT_FILE
|
||||
|
||||
ctime4=`get_ctime $TEST_TEXT_FILE`
|
||||
mtime4=`get_mtime $TEST_TEXT_FILE`
|
||||
if [ $ctime3 -eq $ctime4 -o $mtime3 -ne $mtime4 ]; then
|
||||
echo "Expected updated ctime: $ctime3 != $ctime4 and same mtime: $mtime3 == $mtime4"
|
||||
return 1
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
echo foo >> $TEST_TEXT_FILE
|
||||
|
||||
ctime5=`get_ctime $TEST_TEXT_FILE`
|
||||
mtime5=`get_mtime $TEST_TEXT_FILE`
|
||||
if [ $ctime4 -eq $ctime5 -o $mtime4 -eq $mtime5 ]; then
|
||||
echo "Expected updated ctime: $ctime4 != $ctime5 and updated mtime: $mtime4 != $mtime5"
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
function test_rm_rf_dir {
|
||||
@ -390,35 +603,187 @@ function test_rm_rf_dir {
|
||||
fi
|
||||
}
|
||||
|
||||
function test_copy_file {
|
||||
describe "Test simple copy"
|
||||
|
||||
dd if=/dev/urandom of=/tmp/simple_file bs=1024 count=1
|
||||
cp /tmp/simple_file copied_simple_file
|
||||
cmp /tmp/simple_file copied_simple_file
|
||||
|
||||
rm_test_file /tmp/simple_file
|
||||
rm_test_file copied_simple_file
|
||||
}
|
||||
|
||||
function test_write_after_seek_ahead {
|
||||
describe "Test writes succeed after a seek ahead"
|
||||
dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
|
||||
rm testfile
|
||||
rm_test_file testfile
|
||||
}
|
||||
|
||||
function test_overwrite_existing_file_range {
|
||||
describe "Test overwrite range succeeds"
|
||||
dd if=<(seq 1000) of=${TEST_TEXT_FILE}
|
||||
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=1 count=1 bs=1024 conv=notrunc
|
||||
cmp ${TEST_TEXT_FILE} <(
|
||||
seq 1000 | head -c 1024
|
||||
dd if=/dev/zero count=1 bs=1024
|
||||
seq 1000 | tail -c +2049
|
||||
)
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
function test_concurrency {
|
||||
describe "Test concurrent updates to a directory"
|
||||
for i in `seq 5`; do echo foo > $i; done
|
||||
for process in `seq 10`; do
|
||||
for i in `seq 5`; do
|
||||
file=$(ls `seq 5` | sed -n "$(($RANDOM % 5 + 1))p")
|
||||
cat $file >/dev/null || true
|
||||
rm -f $file
|
||||
echo foo > $file || true
|
||||
done &
|
||||
done
|
||||
wait
|
||||
rm -f `seq 5`
|
||||
}
|
||||
|
||||
function test_concurrent_writes {
|
||||
describe "Test concurrent updates to a file"
|
||||
dd if=/dev/urandom of=${TEST_TEXT_FILE} bs=$BIG_FILE_LENGTH count=1
|
||||
for process in `seq 10`; do
|
||||
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=$(($RANDOM % $BIG_FILE_LENGTH)) count=1 bs=1024 conv=notrunc &
|
||||
done
|
||||
wait
|
||||
rm_test_file
|
||||
}
|
||||
|
||||
function test_open_second_fd {
|
||||
describe "read from an open fd"
|
||||
rm_test_file second_fd_file
|
||||
RESULT=$( (echo foo ; wc -c < second_fd_file >&2) 2>& 1>second_fd_file)
|
||||
if [ "$RESULT" -ne 4 ]; then
|
||||
echo "size mismatch, expected: 4, was: ${RESULT}"
|
||||
return 1
|
||||
fi
|
||||
rm_test_file second_fd_file
|
||||
}
|
||||
|
||||
function test_write_multiple_offsets {
|
||||
describe "test writing to multiple offsets"
|
||||
../../write_multiple_offsets.py ${TEST_TEXT_FILE}
|
||||
rm_test_file ${TEST_TEXT_FILE}
|
||||
}
|
||||
|
||||
function test_clean_up_cache() {
|
||||
describe "Test clean up cache"
|
||||
|
||||
dir="many_files"
|
||||
count=25
|
||||
mkdir -p $dir
|
||||
|
||||
for x in $(seq $count); do
|
||||
dd if=/dev/urandom of=$dir/file-$x bs=10485760 count=1
|
||||
done
|
||||
|
||||
file_cnt=$(ls $dir | wc -l)
|
||||
if [ $file_cnt != $count ]; then
|
||||
echo "Expected $count files but got $file_cnt"
|
||||
rm -rf $dir
|
||||
return 1
|
||||
fi
|
||||
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
|
||||
if [ "$CACHE_DISK_AVAIL_SIZE" -lt "$ENSURE_DISKFREE_SIZE" ];then
|
||||
echo "Cache disk avail size:$CACHE_DISK_AVAIL_SIZE less than ensure_diskfree size:$ENSURE_DISKFREE_SIZE"
|
||||
rm -rf $dir
|
||||
return 1
|
||||
fi
|
||||
rm -rf $dir
|
||||
}
|
||||
|
||||
# Verify that the object at key $1 was stored with a Content-Type matching $2.
# On MacOS, s3fs stores application/octet-stream for every regular file, so
# the expectation is overridden there; directory objects keep their
# application/x-directory type on all platforms.
function _check_object_content_type() {
    local key="$1"
    local expected="$2"
    local suffix=""
    local content_type
    content_type=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${key}" | grep "ContentType")
    if [ `uname` = "Darwin" ] && [ "${expected}" != "application/x-directory" ]; then
        expected="application/octet-stream"
        suffix="(MacOS)"
    fi
    if ! echo $content_type | grep -q "${expected}"; then
        echo "Unexpected Content-Type${suffix}: $content_type"
        return 1
    fi
}

# Create objects with various extensions and check the Content-Type that
# s3fs detected for each (previously four copy-pasted stanzas; the per-case
# logic now lives in _check_object_content_type).
function test_content_type() {
    describe "Test Content-Type detection"

    DIR_NAME="$(basename $PWD)"

    touch "test.txt"
    _check_object_content_type "${DIR_NAME}/test.txt" "text/plain" || return 1

    touch "test.jpg"
    _check_object_content_type "${DIR_NAME}/test.jpg" "image/jpeg" || return 1

    # Unknown extensions fall back to the generic binary type everywhere.
    touch "test.bin"
    _check_object_content_type "${DIR_NAME}/test.bin" "application/octet-stream" || return 1

    # Directory objects are stored under a trailing-slash key.
    mkdir "test.dir"
    _check_object_content_type "${DIR_NAME}/test.dir/" "application/x-directory" || return 1
}
|
||||
|
||||
function add_all_tests {
    # The cache-cleanup test only applies when s3fs was started with the
    # ensure_diskfree option, and it is skipped on MacOS.
    if `ps -ef | grep -v grep | grep s3fs | grep -q ensure_diskfree` && ! `uname | grep -q Darwin`; then
        add_tests test_clean_up_cache
    fi

    # Register the suite in a single list to keep ordering obvious.
    # (test_rename_before_close was once disabled as broken —
    # https://github.com/s3fs-fuse/s3fs-fuse/issues/145 — but is enabled here.)
    local t
    for t in \
        test_append_file \
        test_truncate_file \
        test_truncate_empty_file \
        test_mv_file \
        test_mv_directory \
        test_mv_empty_directory \
        test_mv_nonempty_directory \
        test_redirects \
        test_mkdir_rmdir \
        test_chmod \
        test_chown \
        test_list \
        test_remove_nonempty_directory \
        test_external_modification \
        test_read_external_object \
        test_rename_before_close \
        test_multipart_upload \
        test_multipart_copy \
        test_multipart_mix \
        test_special_characters \
        test_symlink \
        test_extended_attributes \
        test_mtime_file \
        test_update_time \
        test_rm_rf_dir \
        test_copy_file \
        test_write_after_seek_ahead \
        test_overwrite_existing_file_range \
        test_concurrency \
        test_concurrent_writes \
        test_open_second_fd \
        test_write_multiple_offsets \
        test_content_type
    do
        add_tests "$t"
    done
}
|
||||
|
||||
# Initialize suite bookkeeping before tests are registered
# (presumably resets TEST_LIST; defined in test-utils.sh — verify).
init_suite
|
||||
|
||||
BIN
test/keystore.jks
Normal file
BIN
test/keystore.jks
Normal file
Binary file not shown.
@ -7,12 +7,12 @@
|
||||
###
|
||||
### UsageFunction <program name>
|
||||
###
|
||||
UsageFuntion()
|
||||
UsageFunction()
|
||||
{
|
||||
echo "Usage: $1 [-h] [-y] [-all] <base directory>"
|
||||
echo " -h print usage"
|
||||
echo " -y no confirm"
|
||||
echo " -all force all directoris"
|
||||
echo " -all force all directories"
|
||||
echo " There is no -all option is only to merge for other S3 client."
|
||||
echo " If -all is specified, this shell script merge all directory"
|
||||
echo " for s3fs old version."
|
||||
@ -28,7 +28,7 @@ DIRPARAM=""
|
||||
|
||||
while [ "$1" != "" ]; do
|
||||
if [ "X$1" = "X-help" -o "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 0
|
||||
elif [ "X$1" = "X-y" -o "X$1" = "X-Y" ]; then
|
||||
AUTOYES="yes"
|
||||
@ -38,7 +38,7 @@ while [ "$1" != "" ]; do
|
||||
if [ "X$DIRPARAM" != "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
DIRPARAM=$1
|
||||
@ -48,7 +48,7 @@ done
|
||||
if [ "X$DIRPARAM" = "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -62,7 +62,7 @@ fi
|
||||
echo "#############################################################################"
|
||||
echo "[CAUTION]"
|
||||
echo "This program merges a directory made in s3fs which is older than version 1.64."
|
||||
echo "And made in other S3 client appilication."
|
||||
echo "And made in other S3 client application."
|
||||
echo "This program may be have bugs which are not fixed yet."
|
||||
echo "Please execute this program by responsibility of your own."
|
||||
echo "#############################################################################"
|
||||
@ -104,7 +104,7 @@ for DIR in $DIRLIST; do
|
||||
if [ "$ALLYES" = "no" ]; then
|
||||
### Skip "d---------" directories.
|
||||
### Other clients make directory object "dir/" which don't have
|
||||
### "x-amz-meta-mode" attribyte.
|
||||
### "x-amz-meta-mode" attribute.
|
||||
### Then these directories is "d---------", it is target directory.
|
||||
DIRPERMIT=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $1}'`
|
||||
if [ "$DIRPERMIT" != "d---------" ]; then
|
||||
@ -112,7 +112,7 @@ for DIR in $DIRLIST; do
|
||||
fi
|
||||
fi
|
||||
|
||||
### Comfirm
|
||||
### Confirm
|
||||
ANSWER=""
|
||||
if [ "$AUTOYES" = "yes" ]; then
|
||||
ANSWER="y"
|
||||
|
||||
@ -1,7 +1,9 @@
|
||||
s3proxy.endpoint=http://127.0.0.1:8080
|
||||
s3proxy.authorization=aws-v4
|
||||
s3proxy.secure-endpoint=https://127.0.0.1:8080
|
||||
s3proxy.authorization=aws-v2-or-v4
|
||||
s3proxy.identity=local-identity
|
||||
s3proxy.credential=local-credential
|
||||
s3proxy.keystore-path=keystore.jks
|
||||
s3proxy.keystore-password=password
|
||||
|
||||
jclouds.provider=transient
|
||||
jclouds.identity=remote-identity
|
||||
|
||||
@ -34,60 +34,59 @@ if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
fi
|
||||
if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then
|
||||
func_usage $PRGNAME
|
||||
exit -1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUCKET=$1
|
||||
CDIR=$2
|
||||
CDIR="$2"
|
||||
LIMIT=$3
|
||||
SILENT=0
|
||||
if [ "X$4" = "X-silent" ]; then
|
||||
SILENT=1
|
||||
fi
|
||||
FILES_CDIR=$CDIR/$BUCKET
|
||||
STATS_CDIR=$CDIR/\.$BUCKET\.stat
|
||||
|
||||
FILES_CDIR="${CDIR}/${BUCKET}"
|
||||
STATS_CDIR="${CDIR}/.${BUCKET}.stat"
|
||||
CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
#
|
||||
# Check total size
|
||||
#
|
||||
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
|
||||
if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "$FILES_CDIR is below allowed $LIMIT"
|
||||
echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
#
|
||||
# Make file list by sorted access time
|
||||
#
|
||||
ALL_STATS_ATIMELIST=`find $STATS_CDIR -type f -exec echo -n {} \; -exec echo -n " " \; -exec stat -c %X {} \; | awk '{print $2":"$1}' | sort`
|
||||
|
||||
#
|
||||
# Remove loop
|
||||
#
|
||||
TMP_ATIME=0
|
||||
TMP_STATS=""
|
||||
TMP_CFILE=""
|
||||
for part in $ALL_STATS_ATIMELIST; do
|
||||
TMP_ATIME=`echo $part | sed 's/\:/ /' | awk '{print $1}'`
|
||||
TMP_STATS=`echo $part | sed 's/\:/ /' | awk '{print $2}'`
|
||||
TMP_CFILE=`echo $TMP_STATS | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
|
||||
if [ `stat -c %X $TMP_STATS` -eq $TMP_ATIME ]; then
|
||||
rm -f $TMP_STATS $TMP_CFILE > /dev/null 2>&1
|
||||
#
|
||||
# Make file list by sorted access time
|
||||
#
|
||||
find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part
|
||||
do
|
||||
echo Looking at $part
|
||||
TMP_ATIME=`echo "$part" | cut -d: -f1`
|
||||
TMP_STATS="`echo "$part" | cut -d: -f2`"
|
||||
TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
|
||||
if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then
|
||||
rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
|
||||
fi
|
||||
exit -1
|
||||
exit 1
|
||||
else
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "remove file: $TMP_CFILE $TMP_STATS"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
|
||||
if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "finish removing files"
|
||||
fi
|
||||
@ -96,7 +95,7 @@ for part in $ALL_STATS_ATIMELIST; do
|
||||
done
|
||||
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
TOTAL_SIZE=`du -sb $FILES_CDIR | awk '{print $1}'`
|
||||
TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
|
||||
fi
|
||||
|
||||
|
||||
@ -5,6 +5,7 @@
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
# Require root
|
||||
REQUIRE_ROOT=require-root.sh
|
||||
@ -12,19 +13,52 @@ REQUIRE_ROOT=require-root.sh
|
||||
|
||||
source integration-test-common.sh
|
||||
|
||||
CACHE_DIR="/tmp/s3fs-cache"
|
||||
rm -rf "${CACHE_DIR}"
|
||||
mkdir "${CACHE_DIR}"
|
||||
|
||||
#reserve 200MB for data cache
|
||||
source test-utils.sh
|
||||
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
# [FIXME]
|
||||
# Only on MacOS, there are cases where process or system
|
||||
# other than the s3fs cache uses disk space.
|
||||
# We can imagine that this is caused by Timemachine, but
|
||||
# there is no workaround, so s3fs cache size is set +1gb
|
||||
# for error bypass.
|
||||
#
|
||||
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 1200))
|
||||
else
|
||||
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200))
|
||||
fi
|
||||
|
||||
export CACHE_DIR
|
||||
export ENSURE_DISKFREE_SIZE
|
||||
FLAGS=(
|
||||
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
|
||||
enable_content_md5
|
||||
enable_noobj_cache
|
||||
nocopyapi
|
||||
nomultipart
|
||||
notsup_compat_dir
|
||||
sigv2
|
||||
singlepart_copy_limit=$((10 * 1024)) # limit size to exercise multipart code paths
|
||||
#use_sse # TODO: S3Proxy does not support SSE
|
||||
)
|
||||
|
||||
start_s3proxy
|
||||
|
||||
#
|
||||
# enable_content_md5
|
||||
# Causes s3fs to validate file contents. This isn't included in the common
|
||||
# options used by start_s3fs because tests may be performance tests
|
||||
# singlepart_copy_limit
|
||||
# Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy
|
||||
# TODO: github archaeology to see why it was added.
|
||||
#
|
||||
start_s3fs -o enable_content_md5 \
|
||||
-o singlepart_copy_limit=$((10 * 1024))
|
||||
for flag in "${FLAGS[@]}"; do
|
||||
echo "testing s3fs flag: $flag"
|
||||
|
||||
./integration-test-main.sh
|
||||
start_s3fs -o $flag
|
||||
|
||||
./integration-test-main.sh
|
||||
|
||||
stop_s3fs
|
||||
done
|
||||
|
||||
stop_s3proxy
|
||||
|
||||
echo "$0: tests complete."
|
||||
|
||||
@ -1,6 +1,9 @@
|
||||
#!/bin/bash
|
||||
|
||||
#### Test utils
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
# Configuration
|
||||
TEST_TEXT="HELLO WORLD"
|
||||
@ -12,6 +15,65 @@ BIG_FILE=big-file-s3fs.txt
|
||||
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
|
||||
export RUN_DIR
|
||||
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
export SED_BUFFER_FLAG="-l"
|
||||
else
|
||||
export SED_BUFFER_FLAG="--unbuffered"
|
||||
fi
|
||||
|
||||
function get_xattr() {
    # Print the value of extended attribute $1 on file $2,
    # papering over the BSD (xattr) vs Linux (getfattr) tool split.
    case `uname` in
        Darwin) xattr -p "$1" "$2";;
        *)      getfattr -n "$1" --only-values "$2";;
    esac
}
|
||||
|
||||
function set_xattr() {
    # Set extended attribute $1 to value $2 on file $3,
    # using whichever xattr tool this platform ships.
    case `uname` in
        Darwin) xattr -w "$1" "$2" "$3";;
        *)      setfattr -n "$1" -v "$2" "$3";;
    esac
}
|
||||
|
||||
function del_xattr() {
    # Remove extended attribute $1 from file $2 (BSD vs Linux tooling).
    case `uname` in
        Darwin) xattr -d "$1" "$2";;
        *)      setfattr -x "$1" "$2";;
    esac
}
|
||||
|
||||
function get_size() {
    # Print the size of file $1 in bytes (BSD stat -f vs GNU stat -c).
    case `uname` in
        Darwin) stat -f "%z" "$1";;
        *)      stat -c %s "$1";;
    esac
}
|
||||
|
||||
# Verify that file $1 is exactly $2 bytes long, checked two ways:
# via filesystem metadata (stat) and via the actual readable data (wc -c).
# Returns 1 on mismatch.
function check_file_size() {
    FILE_NAME="$1"
    EXPECTED_SIZE="$2"

    # Verify expected size via metadata.
    # (The old error message hard-coded "zero length" even though any
    # expected size can be passed in — report the real numbers instead.)
    size=$(get_size ${FILE_NAME})
    if [ $size -ne $EXPECTED_SIZE ]
    then
        echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
        return 1
    fi

    # Verify expected size via data
    size=$(cat ${FILE_NAME} | wc -c)
    if [ $size -ne $EXPECTED_SIZE ]
    then
        echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
        return 1
    fi
}
|
||||
|
||||
function mk_test_file {
|
||||
if [ $# == 0 ]; then
|
||||
TEXT=$TEST_TEXT
|
||||
@ -24,6 +86,21 @@ function mk_test_file {
|
||||
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# wait & check
|
||||
BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
|
||||
TRY_COUNT=10
|
||||
while true; do
|
||||
MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
|
||||
if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
|
||||
break
|
||||
fi
|
||||
TRY_COUNT=`expr $TRY_COUNT - 1`
|
||||
if [ $TRY_COUNT -le 0 ]; then
|
||||
echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
function rm_test_file {
|
||||
@ -65,9 +142,10 @@ function cd_run_dir {
|
||||
echo "TEST_BUCKET_MOUNT_POINT variable not set"
|
||||
exit 1
|
||||
fi
|
||||
RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
|
||||
RUN_DIR=${TEST_BUCKET_MOUNT_POINT_1}/${1}
|
||||
mkdir -p ${RUN_DIR}
|
||||
cd ${RUN_DIR}
|
||||
}
|
||||
}
|
||||
|
||||
function clean_run_dir {
|
||||
if [ -d ${RUN_DIR} ]; then
|
||||
@ -105,7 +183,7 @@ function add_tests {
|
||||
# Log test name and description
|
||||
# describe [DESCRIPTION]
|
||||
function describe {
|
||||
echo "${FUNCNAME[1]}: "$@""
|
||||
echo "${FUNCNAME[1]}: \"$*\""
|
||||
}
|
||||
|
||||
# Runs each test in a suite and summarizes results. The list of
|
||||
@ -114,7 +192,8 @@ function describe {
|
||||
# made after the test run.
|
||||
function run_suite {
|
||||
orig_dir=$PWD
|
||||
cd_run_dir
|
||||
key_prefix="testrun-$RANDOM"
|
||||
cd_run_dir $key_prefix
|
||||
for t in "${TEST_LIST[@]}"; do
|
||||
# The following sequence runs tests in a subshell to allow continuation
|
||||
# on test failure, but still allowing errexit to be in effect during
|
||||
@ -125,7 +204,7 @@ function run_suite {
|
||||
# Other ways of trying to capture the return value will also disable
|
||||
# errexit in the function due to bash... compliance with POSIX?
|
||||
set +o errexit
|
||||
(set -o errexit; $t)
|
||||
(set -o errexit; $t $key_prefix)
|
||||
if [[ $? == 0 ]]; then
|
||||
report_pass $t
|
||||
else
|
||||
@ -154,3 +233,36 @@ function run_suite {
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
function get_ctime() {
    # Print the inode-change time (epoch seconds) of file $1.
    case `uname` in
        Darwin) stat -f "%c" "$1";;
        *)      stat -c %Z "$1";;
    esac
}
|
||||
|
||||
function get_mtime() {
    # Print the modification time (epoch seconds) of file $1.
    case `uname` in
        Darwin) stat -f "%m" "$1";;
        *)      stat -c %Y "$1";;
    esac
}
|
||||
# Assert that the object at key $1 has metadata containing string $2
# (used after a move/copy to confirm Content-Type survived).
# NOTE(review): on mismatch this exits the whole script instead of
# returning 1 like the other check_* helpers — confirm callers rely on it.
function check_content_type() {
    INFO_STR=`aws_cli s3api head-object --bucket ${TEST_BUCKET_1} --key $1`
    if [[ "${INFO_STR}" != *"$2"* ]]
    then
        echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}"
        exit 1
    fi
}
|
||||
|
||||
# Print the available space on the filesystem containing $1, in MiB
# (BLOCKSIZE=1MiB is honored by both GNU and BSD df).
# Assumes the "Avail" figure is column 4 of df's last output line —
# true for default df output; verify if df flags change.
function get_disk_avail_size() {
    DISK_AVAIL_SIZE=`BLOCKSIZE=$((1024 * 1024)) df $1 | awk '{print $4}' | tail -n 1`
    echo ${DISK_AVAIL_SIZE}
}
||||
|
||||
# Run the AWS CLI against the local S3Proxy endpoint with test credentials.
# Uses "$@" (not $*): $* re-splits arguments on whitespace, so any key or
# path containing spaces would be mangled; "$@" preserves word boundaries.
function aws_cli() {
    AWS_ACCESS_KEY_ID=local-identity AWS_SECRET_ACCESS_KEY=local-credential aws "$@" --endpoint-url "${S3_URL}" --no-verify-ssl
}
|
||||
|
||||
18
test/write_multiple_offsets.py
Executable file
18
test/write_multiple_offsets.py
Executable file
@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env python3
"""Write single bytes at sparse offsets and verify the resulting file size."""

import os
import sys

target = sys.argv[1]
payload = b'a'

# Offsets spread far apart to exercise sparse/multi-part write paths.
offsets = (1024, 16 * 1024 * 1024, 18 * 1024 * 1024)

descriptor = os.open(target, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
try:
    for offset in offsets:
        os.pwrite(descriptor, payload, offset)
finally:
    os.close(descriptor)

# The file must end exactly one byte past the largest offset written.
assert os.lstat(target).st_size == offsets[-1] + 1
|
||||
Reference in New Issue
Block a user