Compare commits
1412 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 8c58ba8ac0 | |||
| a19d223434 | |||
| fc06419549 | |||
| 032fcf2a47 | |||
| 5b5bc3114a | |||
| 81e267d421 | |||
| 4fc92d59f3 | |||
| a0f347b10f | |||
| 5debf523b0 | |||
| 1c8aadafd1 | |||
| d33f252404 | |||
| 4da56acdcc | |||
| 493802a605 | |||
| 22b0ae9d51 | |||
| 134a54b32f | |||
| 7f6fbb0021 | |||
| ec8bae9827 | |||
| a4d916af13 | |||
| 32f096fa3f | |||
| c692093921 | |||
| 1a6d0826b5 | |||
| f2c5e38724 | |||
| bf33fe7f55 | |||
| ff9d6a75c7 | |||
| 4c6690f5f0 | |||
| 67b9381825 | |||
| 4b53d4bf6b | |||
| ed85b72bf5 | |||
| 17fda89ae9 | |||
| 1987bcbea3 | |||
| d019dda4f7 | |||
| dc9255bc5f | |||
| b0e8758b63 | |||
| 6d65e30dd5 | |||
| b70f8db037 | |||
| 4d833a4fb9 | |||
| a6563211af | |||
| 8d66b0e4a8 | |||
| 555c1dde3d | |||
| b04bca37a5 | |||
| 168e588ac7 | |||
| 7158e50ee2 | |||
| bd0fadbe5f | |||
| d1c638ab7a | |||
| 51f65d7b14 | |||
| a16d00d673 | |||
| 4d0daddad4 | |||
| d246b9e8bf | |||
| ef3d4e506d | |||
| 462b37b0bb | |||
| 4341291cc2 | |||
| c589886ba5 | |||
| d0363b118e | |||
| 533322859d | |||
| f8d5b76edb | |||
| 834862f8a4 | |||
| d9f6469b7b | |||
| 852e6ee4c6 | |||
| ecb24c9c26 | |||
| 543231c9f2 | |||
| d96a08d4ad | |||
| f2f930300a | |||
| 81ad3ce0ae | |||
| 38e1eaa8a3 | |||
| 6aa786b886 | |||
| 58750cc441 | |||
| 2188fb067e | |||
| 910255745e | |||
| cf86fa51b0 | |||
| a4e4ce8aea | |||
| 7f43b7fa53 | |||
| 0492f75197 | |||
| 493cf20f95 | |||
| 3553fb65a0 | |||
| 059cc57ba6 | |||
| 4df4ffe06f | |||
| 462347256d | |||
| 133feb67c3 | |||
| be308e9d11 | |||
| 2cf195741c | |||
| f61baada46 | |||
| 6e1e0d1d31 | |||
| 66419e7292 | |||
| 9e998877e9 | |||
| 5ebd4039e6 | |||
| 3628b9d1e2 | |||
| 8d2bd874d7 | |||
| 7e27c6cf7d | |||
| b8ff4ede49 | |||
| 081d6c1245 | |||
| eb8004c355 | |||
| b3bf9f8f54 | |||
| 503c86bb8a | |||
| 757f4caee8 | |||
| 95fabd1f3a | |||
| 0b42e08636 | |||
| d5e4f99e72 | |||
| 781d4dd857 | |||
| f35fe850c0 | |||
| 7102b9eb74 | |||
| 9a55c9fd9f | |||
| 041b4ec05c | |||
| 2438066d52 | |||
| 3c5b35b3b9 | |||
| e98827ec6f | |||
| 864941d4d5 | |||
| 05863a3178 | |||
| 075d161bb1 | |||
| f8b5c911ed | |||
| 6f40503328 | |||
| 3440c3348c | |||
| 853be26612 | |||
| 89b1c32b24 | |||
| 44d2cc15f7 | |||
| 76d88f2291 | |||
| 72340cfbd9 | |||
| 81805715bd | |||
| ebcbb02d16 | |||
| 8c29b60129 | |||
| 93cf67c65d | |||
| 211cc0f5f2 | |||
| 8fb70c5e4a | |||
| c58c91fc4f | |||
| 78e2345c19 | |||
| 3bc565b986 | |||
| b7187352e1 | |||
| 1520ca6220 | |||
| f7a63d5c97 | |||
| 3958450c05 | |||
| 5121c73ed1 | |||
| 44eaac8471 | |||
| 77501c3600 | |||
| 8205607716 | |||
| c7132b7f56 | |||
| 1043e08dfa | |||
| e5f4f9b69e | |||
| 1e3c10d803 | |||
| 6fa4477673 | |||
| 6d1e704e34 | |||
| ffc33a447f | |||
| 39ec8e242e | |||
| 0d4e39ad1c | |||
| 6112eb6a49 | |||
| 986fab8738 | |||
| b5ffd419d8 | |||
| c6e23212bb | |||
| e75c11956c | |||
| 1ec8528502 | |||
| 892e7129c5 | |||
| bdea2ee5c8 | |||
| a5186c73c2 | |||
| 4580e6ff93 | |||
| 60d456a993 | |||
| bcf6838e86 | |||
| 41a66d9706 | |||
| 958ad83a4b | |||
| ba61470bae | |||
| 0e895f60a0 | |||
| 8210a1b2f2 | |||
| 2feefeec47 | |||
| 55cb8920d5 | |||
| 46acbf10ba | |||
| f28e3bd89e | |||
| 2c0408b95a | |||
| 057da86d87 | |||
| 8de6cb3504 | |||
| 2bb745cdd7 | |||
| 35090ba4d5 | |||
| 132a1bebbb | |||
| c8e13300e1 | |||
| a23d02923c | |||
| 194262c0ef | |||
| e2fbcb4d30 | |||
| 0c1bc0f803 | |||
| 83361e7905 | |||
| 19abd9ffaf | |||
| cbd925c56f | |||
| 63bbb47378 | |||
| 0fbd0eac80 | |||
| e5231fa3c7 | |||
| ad1961417d | |||
| 4154e539ea | |||
| e0a38adaf6 | |||
| c3e711da58 | |||
| 700e288718 | |||
| e72a64785b | |||
| 5ace2b70fc | |||
| 62c8be85d6 | |||
| 3f6b5ae6a5 | |||
| dc365b65a0 | |||
| 9c88ec2128 | |||
| 3dd9832f61 | |||
| 4d1f5c899f | |||
| 1f796d432d | |||
| 35006e318f | |||
| 7d0c66e08a | |||
| 9dc4148743 | |||
| f324d8e04f | |||
| f16ee96d7e | |||
| 0d849b38c2 | |||
| 8ed020610f | |||
| d8766b2051 | |||
| 9db70bab63 | |||
| 8a7548a9d4 | |||
| 0cb057dadd | |||
| 0f5db0d1bf | |||
| 94e67c9c58 | |||
| 274321524c | |||
| 40f7007263 | |||
| 66597ec5f2 | |||
| 75e72385cc | |||
| eb58460175 | |||
| 0852521a7e | |||
| 56ed6bb97f | |||
| 73098220bf | |||
| ca7756fa77 | |||
| 8b15db6dcb | |||
| 0b60aa81eb | |||
| da70cb92a8 | |||
| 746a027e98 | |||
| 80c11b6c12 | |||
| b76226a06d | |||
| 8945e98d8b | |||
| 97c249d5b9 | |||
| 6e134a23f9 | |||
| a4803543a1 | |||
| 2cc88b933f | |||
| ce1221c867 | |||
| 005a684600 | |||
| 3af17c3019 | |||
| f26a0aa71d | |||
| 2b4619842d | |||
| cf529e0af7 | |||
| 4da02d023b | |||
| fe0677651e | |||
| 811ea0cb85 | |||
| a5f84535f3 | |||
| 84bf460f99 | |||
| 538fbed302 | |||
| feafb44bae | |||
| a44fc1103d | |||
| 48a872e285 | |||
| c44a60f3f5 | |||
| f373df9682 | |||
| 9e01d5b8d1 | |||
| 7fbda230f5 | |||
| 56141557dc | |||
| fe2b269b6e | |||
| eb6fe69af2 | |||
| 6489c5d394 | |||
| 854a8a8356 | |||
| d34475d6a1 | |||
| b72f4b43a4 | |||
| 34e797d6f5 | |||
| bb20fc3c98 | |||
| 3e66e42ae5 | |||
| 0665d78550 | |||
| 55d670f22f | |||
| 32ae0d2c79 | |||
| 924eeb3587 | |||
| bc9126d774 | |||
| 4df50e7f85 | |||
| 4e26728cbf | |||
| 7135666060 | |||
| 018ccb9a11 | |||
| ee1d3a9057 | |||
| b762a0a85b | |||
| 9771be29b2 | |||
| 010a6b83ef | |||
| 87224b830b | |||
| 9e77650e8c | |||
| e0712f444d | |||
| 913b72fdaf | |||
| 39102608aa | |||
| 23945a0130 | |||
| bdfb9ee815 | |||
| 1a75a94253 | |||
| a9d527d517 | |||
| 94666f7754 | |||
| 41acbaa746 | |||
| d5042a73bd | |||
| f6756596b3 | |||
| a24f78f5a4 | |||
| 64d4b2c0b0 | |||
| cc4a307415 | |||
| a07a5336f6 | |||
| 9789ca1a4d | |||
| 4ec2d685e7 | |||
| dc62953040 | |||
| 0c42a74a8a | |||
| cf3e82d10a | |||
| 0e815c2fbc | |||
| 27a5536749 | |||
| db338b36b8 | |||
| 72b906255f | |||
| 2211678d91 | |||
| 80162c126b | |||
| 1db94a0b30 | |||
| b6349e9428 | |||
| bedd648d47 | |||
| 58b3cce320 | |||
| 81102a5963 | |||
| 42fb30852b | |||
| e51361cb94 | |||
| 80a9ed9d6a | |||
| e2129001eb | |||
| 805cc064af | |||
| 3c2279db39 | |||
| 412876ca33 | |||
| 461a346bf4 | |||
| ae4bcd405c | |||
| 0536dc1112 | |||
| 1c3507ede1 | |||
| 3f47037cc7 | |||
| d87321ef3c | |||
| 01ac815346 | |||
| 2daa1d53d9 | |||
| cc2eed84a5 | |||
| c644e4bef2 | |||
| a7d83df3eb | |||
| e1886b5343 | |||
| 873e376098 | |||
| 3c378a4a7a | |||
| ca7266fb76 | |||
| 4a0c23258e | |||
| ccc79ec139 | |||
| 5c4a0a862a | |||
| 2a779df4fd | |||
| f5bf41cf11 | |||
| f74c7407db | |||
| 433c04af26 | |||
| 2e51a339a9 | |||
| 0411872dda | |||
| d8f1aef7be | |||
| 14d3e12103 | |||
| fd13eb314b | |||
| daba563a1c | |||
| b79b0b1a92 | |||
| 01d4323b50 | |||
| dc85eda188 | |||
| ccf3e7bfa2 | |||
| d22acae9a3 | |||
| 7ecfba811e | |||
| 1e7330e499 | |||
| 68475e5fcf | |||
| 8cc008c501 | |||
| faaaf2ee3c | |||
| 3d42d0515d | |||
| 3d70e8966f | |||
| 6837cbfe06 | |||
| a5c20175a1 | |||
| 43d1439420 | |||
| d8cf26bd50 | |||
| c321c8c23f | |||
| 6227fce091 | |||
| aba8e6ccfa | |||
| f528a86219 | |||
| 5b15c7c4e9 | |||
| afd438d363 | |||
| 80972aa33d | |||
| 520995a7e8 | |||
| 5c3c6bff2f | |||
| fb937635f5 | |||
| 3ad1c95e86 | |||
| 2c4c78fd65 | |||
| 0afef077ed | |||
| 80f598f439 | |||
| bacd15714a | |||
| 5cb7a31c09 | |||
| 99aace4fc9 | |||
| c7f8f61d09 | |||
| 159cd2c682 | |||
| 513f41fddf | |||
| 543aed2a32 | |||
| 20ea96328c | |||
| 007edb1773 | |||
| f78bcc5229 | |||
| 43ec064fb9 | |||
| ffac4c8417 | |||
| 4adcd4a6c8 | |||
| e936854493 | |||
| 850a813171 | |||
| 5bbcd3b981 | |||
| a337c32840 | |||
| d39e4e4b1f | |||
| b51d60ef5e | |||
| 58037da061 | |||
| 1eb266588e | |||
| deb560067e | |||
| 4e351c59e3 | |||
| eb597289cb | |||
| 6fd42d9fe4 | |||
| efff9c01a6 | |||
| a83d5baa90 | |||
| 50d13255e4 | |||
| 5195fa93fa | |||
| e5e63d6ac3 | |||
| 7a65a414c3 | |||
| 4a192ffdf9 | |||
| 944d21cabb | |||
| d267212289 | |||
| 58d8e5586a | |||
| ce803daf4a | |||
| 9bf34e2fda | |||
| 52218d2ddb | |||
| 6bd1a7eac0 | |||
| 6177d7b096 | |||
| 3161bf4608 | |||
| 2349dafb98 | |||
| 1cd58d7828 | |||
| 8aa06d621a | |||
| ecf13a8cb9 | |||
| b8ff6a647e | |||
| 49110c671d | |||
| febaf6849f | |||
| 4893174652 | |||
| 5820c72092 | |||
| 4f23f38583 | |||
| bbfa91141a | |||
| f439c6382f | |||
| 21321a9d96 | |||
| f03b50fd13 | |||
| 15a870f9d9 | |||
| 9472ee4a01 | |||
| 1f1f824da7 | |||
| f02105c346 | |||
| c596441f58 | |||
| 455e29cbea | |||
| 511d223468 | |||
| 5324c1b588 | |||
| 554ea49294 | |||
| d7f77a6282 | |||
| 048aea1151 | |||
| f1ad626b46 | |||
| a78d8d1da4 | |||
| fbebc6fa57 | |||
| c18fc901c4 | |||
| 245f14c8c1 | |||
| d732eef776 | |||
| 56b184fd0c | |||
| 9e5eaad79b | |||
| 738eaadcbf | |||
| 1cf3d2452e | |||
| 670dce6f4a | |||
| 07cfdcf205 | |||
| 15b7450713 | |||
| 272e0d3d46 | |||
| 8d8a2a66e5 | |||
| befc2e9e6f | |||
| 059ab1f0f4 | |||
| f2fe1738cd | |||
| 0d4847596e | |||
| 8e86ef1634 | |||
| a32a05962e | |||
| a7e81fda9b | |||
| 9e4f9d4bdc | |||
| 0677a096a9 | |||
| 381835e2a9 | |||
| af070fa3de | |||
| f9cd43b684 | |||
| 7095787f1f | |||
| 4ca336aed0 | |||
| 8a18806a57 | |||
| e5e124b9aa | |||
| 090ac7a8a0 | |||
| 97af471aa6 | |||
| 0176fc712b | |||
| c426c896d0 | |||
| 0a99470369 | |||
| cd280d8702 | |||
| b1bade37d8 | |||
| 005c186e1b | |||
| 6f8ecb28c5 | |||
| 4c28eb2a78 | |||
| 042332bcec | |||
| 071cd0f849 | |||
| d7bb834bcb | |||
| 9b437fc1fe | |||
| 6f6a67807b | |||
| e5785d4690 | |||
| a4ce54d615 | |||
| ddbcec5c96 | |||
| 7cbb4c958b | |||
| 6c5adbb9af | |||
| 4db6e1a10a | |||
| ea517c80a4 | |||
| 9f6ed6c08e | |||
| b1ddb483a4 | |||
| 17352ef4fd | |||
| 71766039ff | |||
| c607c9be58 | |||
| df604e50fb | |||
| 876662ff89 | |||
| 058706014b | |||
| 99ec09f13a | |||
| 4a011d87e0 | |||
| c6edc2cd8f | |||
| cc196bfdf0 | |||
| 895d5006bb | |||
| 62dcda6a56 | |||
| cbf072bc55 | |||
| 1b4d2a32d2 | |||
| b71c90bbe1 | |||
| 80344aafd3 | |||
| b5ca400500 | |||
| 2e89439120 | |||
| 555410386c | |||
| 08b132ddb9 | |||
| 1e86cc643d | |||
| f53503438c | |||
| 0d43d070cc | |||
| 0791fdca2a | |||
| 6e8678d5e3 | |||
| 10d9f75366 | |||
| 77993e607e | |||
| 74d8671e54 | |||
| 4c41eac29c | |||
| 3c97c1b251 | |||
| 84c671a81a | |||
| f336bdebcc | |||
| e5b8377202 | |||
| 4f42f4ab0c | |||
| 11b385820d | |||
| f1a9eaee54 | |||
| ffee8d5f39 | |||
| eeb839242b | |||
| f7760976a5 | |||
| ca2d1d873d | |||
| 951761ee2c | |||
| 231fd001d9 | |||
| e00afa8128 | |||
| e9297f39ea | |||
| 314dc5a398 | |||
| e07cb020cc | |||
| 9f79b9e0da | |||
| e87e40b3b4 | |||
| f0f95478ec | |||
| bd66b57ad3 | |||
| a1d3ff9766 | |||
| 7f61a947c2 | |||
| 4d0bef1e90 | |||
| 960823fb40 | |||
| c04e8e7a9d | |||
| fb6debd986 | |||
| d8185a25aa | |||
| 53337a0a28 | |||
| ae51556d04 | |||
| b3de9195a7 | |||
| 055ecf6ea7 | |||
| c603680e02 | |||
| 814aadd7e3 | |||
| dce63d1529 | |||
| 8ff05d8e38 | |||
| dfa84b82a8 | |||
| 6ac8618381 | |||
| 8c527c3616 | |||
| 54a074647e | |||
| c5ebf5d328 | |||
| 43c6ef560e | |||
| 3076abc744 | |||
| 07636c8a8d | |||
| 35d55ee513 | |||
| a442e843be | |||
| c0cf90cf8b | |||
| 3b1cc3b197 | |||
| a0c1f30ae7 | |||
| 8822a86709 | |||
| 98f397de0e | |||
| fd4d23f8f7 | |||
| 4820f0a42b | |||
| 807a618cf7 | |||
| a93e500b44 | |||
| 92d3114584 | |||
| 5062d6fbd9 | |||
| 7d14ebaf09 | |||
| cd794a6985 | |||
| 84b421d6ef | |||
| 8316da5bbe | |||
| fa287aeef7 | |||
| caaf4cac55 | |||
| 010276ceab | |||
| f219817eb3 | |||
| d487348d21 | |||
| eb0b29708f | |||
| 877842a720 | |||
| 1fc25e8c3f | |||
| 61ecafd426 | |||
| 79bd3441eb | |||
| 5f5da4b2cb | |||
| dede19d8c0 | |||
| fada95f58e | |||
| 014b8c5982 | |||
| 46d79c5bc2 | |||
| 40ba3b44a1 | |||
| beadf95975 | |||
| 2887f8916b | |||
| 0c9a8932f7 | |||
| ac72431195 | |||
| 2a7877beff | |||
| 7a56459103 | |||
| 5292fa74d1 | |||
| f2184e34dd | |||
| 1d4867830b | |||
| 36a4903843 | |||
| c83a3e67c9 | |||
| 05014c49c8 | |||
| aa69107165 | |||
| d373b0eca3 | |||
| 6aa40b2747 | |||
| 34c3bfe408 | |||
| 6ac56e722d | |||
| 61dc7f0a70 | |||
| 9f000957dd | |||
| b2141313e2 | |||
| aa9bd1fa3c | |||
| 5a2dc03a1c | |||
| 508fafbe62 | |||
| e29548178b | |||
| ab2f36f202 | |||
| b8c9fcfd70 | |||
| 58ce544e83 | |||
| e98ce36301 | |||
| 6401b4ae92 | |||
| 25b49e1a2e | |||
| c7def35b54 | |||
| ddba1c63c5 | |||
| c512516e14 | |||
| 2c43b1e12b | |||
| b68d97c6bf | |||
| f1757e4343 | |||
| e2d5641d99 | |||
| 523fe1e309 | |||
| c985b5e4d0 | |||
| 786f1a8fc7 | |||
| 18cb2e2662 | |||
| 743c706b0a | |||
| 4ed0e5f35a | |||
| fd6b37d3da | |||
| 56e24de0d4 | |||
| 2780043a7d | |||
| 54c9e48bb7 | |||
| ed5795eead | |||
| 3d225163f8 | |||
| 0569cec3ea | |||
| a2f8ac535e | |||
| 29355d75b0 | |||
| d9e89deef6 | |||
| 6b051eac47 | |||
| da997de918 | |||
| d97094fb8d | |||
| b91fc5409e | |||
| 3c970646d1 | |||
| a92668ae78 | |||
| 88cd8feb05 | |||
| 91c16f826a | |||
| d4d60ff315 | |||
| e8033f96de | |||
| 5fba542a29 | |||
| 44de3ffa05 | |||
| 2efa6df028 | |||
| 9e530c86ae | |||
| 95857733a1 | |||
| 664f910083 | |||
| 735e4b0848 | |||
| e8d76a6f58 | |||
| 0a6926be54 | |||
| 830a971bde | |||
| 4779d14d7d | |||
| 8929a27a24 | |||
| eea624c171 | |||
| cdaf4a9674 | |||
| 6fe92d5ed6 | |||
| 8649a68766 | |||
| af005b6e5e | |||
| b19d2ae78f | |||
| 5634f9bdcd | |||
| c703fa15c0 | |||
| d9c106cfde | |||
| 203f78fdae | |||
| c5af62b023 | |||
| dcd70daf48 | |||
| 8263919b0e | |||
| 97488e603f | |||
| 41c23adb0e | |||
| a85183d42c | |||
| 45b67b9604 | |||
| c376efdd28 | |||
| 4c5f510207 | |||
| 06032aa661 | |||
| e8fb2aefb3 | |||
| 3cb6c5e161 | |||
| 7e0c53dfe9 | |||
| c2ca7e43b6 | |||
| ae47d5d349 | |||
| 35d3fce7a0 | |||
| 4177d8bd3b | |||
| ad5349a488 | |||
| 6b57a8c1fc | |||
| 92a4034c5e | |||
| 3e4002df0d | |||
| 1b9ec7f4fc | |||
| 4a7c4a9e9d | |||
| 0d3fb0658a | |||
| 73cf2ba95d | |||
| 5a481e6a01 | |||
| d8e12839af | |||
| 3bf05dabea | |||
| d4e86a17d1 | |||
| 6555e7ebb0 | |||
| ae9d8eb734 | |||
| e49d594db4 | |||
| 66bb0898db | |||
| b323312312 | |||
| 58e52bad4f | |||
| 57b2a60172 | |||
| 212bbbbdf0 | |||
| a0e62b5588 | |||
| e9831dd772 | |||
| da95afba8a | |||
| 0bd875eb9e | |||
| af63a42773 | |||
| ad9a374229 | |||
| 1b86e4d414 | |||
| 86b0921ac4 | |||
| dbe98dcbd2 | |||
| 4a72b60707 | |||
| 7a4696fc17 | |||
| e3de6ea458 | |||
| 1db4739ed8 | |||
| 25375a6b48 | |||
| ca87df7d44 | |||
| d052dc0b9d | |||
| 3f542e9cf5 | |||
| 04493de767 | |||
| 4fdab46617 | |||
| 1a23b880d5 | |||
| b3c376afbe | |||
| adcf5754ae | |||
| 0863672e27 | |||
| 0f503ced25 | |||
| 987a166bf4 | |||
| 57b6f0eeaf | |||
| f71a28f9b9 | |||
| 45c7ea9194 | |||
| c9f4312588 | |||
| 8b657eee41 | |||
| b9c9de7f97 | |||
| e559f05326 | |||
| 824124fedc | |||
| be9d407fa0 | |||
| c494e54320 | |||
| b52b6f3fc5 | |||
| 82c9733101 | |||
| a45ff6cdaa | |||
| 960d45c853 | |||
| 246b767b64 | |||
| 0edf056e95 | |||
| 88819af2d8 | |||
| b048c981ad | |||
| e1dafe76dd | |||
| 1a2e63ecff | |||
| a60b32cb80 | |||
| 6b58220009 | |||
| a841057679 | |||
| ee6abea956 | |||
| 8b0acd75e0 | |||
| cea7d44717 | |||
| 0da87e75fe | |||
| 566961c7a5 | |||
| ac65258d30 | |||
| 35261e6dba | |||
| 2818f23ba5 | |||
| 88f071ea22 | |||
| bd4bc0e7f1 | |||
| 890c1d53ff | |||
| 026260e7a1 | |||
| 99fe93b7f1 | |||
| b764c53020 | |||
| 11bd7128d2 | |||
| 7cda32664b | |||
| 4c73a0ae56 | |||
| 97fc845a6a | |||
| 7d9ac0163b | |||
| d903e064e0 | |||
| e1928288fe | |||
| 6ab6412dd3 | |||
| 30b7a69d3d | |||
| ccd0a446d8 | |||
| 0418e53b3c | |||
| bad48ab59a | |||
| bbad76bb71 | |||
| 6c1bd98c14 | |||
| b95e4acaeb | |||
| c238701d09 | |||
| 60d2ac3c7a | |||
| 967ef4d56b | |||
| ad57bdda6c | |||
| a0b69d1d3d | |||
| 5df94d7e33 | |||
| 1cbe9fb7a3 | |||
| 395f736753 | |||
| 065516c5f3 | |||
| 8660abaea2 | |||
| 366f0705a0 | |||
| ccea87ca68 | |||
| 5d54883e2f | |||
| 662f65c3c8 | |||
| 259f028490 | |||
| 5db550a298 | |||
| e3c77d2906 | |||
| ba00e79253 | |||
| c1791f920e | |||
| df3803c7b7 | |||
| 384b4cbafa | |||
| 40501a7a73 | |||
| ab89b4cd4a | |||
| 48e0d55c8e | |||
| 1eba27a50a | |||
| 41206fa0e2 | |||
| 21cf1d64e5 | |||
| ae91b6f673 | |||
| f4515b5cfa | |||
| 6c57cde7f9 | |||
| 5014c1827b | |||
| f531e6aff2 | |||
| c5c110137b | |||
| 5957d9ead0 | |||
| 5675df2a44 | |||
| 00bc9142c4 | |||
| 5653ab39fc | |||
| 473dd7c940 | |||
| ee824d52ba | |||
| 7c5fba9890 | |||
| f214cb03b2 | |||
| 416c51799b | |||
| cf6f665f03 | |||
| 20da0e4dd3 | |||
| fa8c417526 | |||
| 2c65aec6c8 | |||
| 96d8e6d823 | |||
| 62b8084300 | |||
| 907aff5de4 | |||
| bc09129ec5 | |||
| cd94f638e2 | |||
| b1fe419870 | |||
| 98b724391f | |||
| 620f6ec616 | |||
| 0c6a3882a2 | |||
| a08880ae15 | |||
| f48826dfe9 | |||
| 9c3551478e | |||
| cc94e1da26 | |||
| 2b7ea5813c | |||
| 185192be67 | |||
| ae4caa96a0 | |||
| af13ae82c1 | |||
| 13503c063b | |||
| 337da59368 | |||
| b0681246b9 | |||
| 52853f6b47 | |||
| f6eb841a24 | |||
| caea087aec | |||
| d2ae14d8b7 | |||
| 7115835834 | |||
| 551c6acf67 | |||
| 24df69f688 | |||
| 23a10dd644 | |||
| 034042f511 | |||
| 465c15ef40 | |||
| a22675bafd | |||
| 0e0ae38f6d | |||
| 7b30d5d15b | |||
| 4a5c9bef89 | |||
| 9d10a5aa70 | |||
| 107757f11d | |||
| a12e0d5ec4 | |||
| 42cdcbc2dc | |||
| eef549dac7 | |||
| c8ee132813 | |||
| d07c3f38b7 | |||
| 73da168b93 | |||
| 1fe0334c08 | |||
| 7d09914f1f | |||
| 3ac39d61f8 | |||
| c5677b4726 | |||
| 67685c3d49 | |||
| 864e20e1f2 | |||
| 51b3183cba | |||
| f02b1bc352 | |||
| 758b92e823 | |||
| df0ff3a2fd | |||
| edcf4c6218 | |||
| 28efff5986 | |||
| efba9bcbc1 | |||
| 6bd179c92b | |||
| 96764b7410 | |||
| ff3eb1971f | |||
| 94ddcb8d4f | |||
| b4c90d6957 | |||
| 75b59a7c16 | |||
| 3bcca75a88 | |||
| 79ea1a1561 | |||
| f0f61b3b55 | |||
| b955391621 | |||
| 8de992d42d | |||
| fef3fbc225 | |||
| acb61880b9 | |||
| 8ee95ff7ab | |||
| 95578cad43 | |||
| 465bbd3729 | |||
| 0fa895594e | |||
| 15573cd21e | |||
| 43df94719b | |||
| 980ba398bc | |||
| 0d59ac51c1 | |||
| 523043a2aa | |||
| 277da2c64a | |||
| 03217baa99 | |||
| 6affefff5b | |||
| 2506fe73fa | |||
| 25a03c370a | |||
| d40da2c68b | |||
| 7d6312ac78 | |||
| e26c69a327 | |||
| ff196e4257 | |||
| 19f0d498aa | |||
| 97a806447e | |||
| a00af2385b | |||
| 6fc972972f | |||
| 989d403b1f | |||
| 7b307601b5 | |||
| d731ab3a8e | |||
| 174d934d52 | |||
| b428f68acf | |||
| 5350e03147 | |||
| 28c7888a50 | |||
| 915a1321c7 | |||
| 8a11d7bc2f | |||
| 7aae4782d9 | |||
| aba9e29471 | |||
| d375bca0d0 | |||
| cd0c8599cc | |||
| 20878a1618 | |||
| edd0a11fb5 | |||
| 5e4bafeab7 | |||
| 67a836223a | |||
| 7e2d6a3eed | |||
| 1ee5a468f4 | |||
| 81e209bdd1 | |||
| 90eda81624 | |||
| cafe6015e3 | |||
| 2492dc60ce | |||
| 6f688770fd | |||
| 8c0b1d9c5b | |||
| efde0ec9de | |||
| 632495374b | |||
| 15b797f3ee | |||
| a7a64d954a | |||
| cca217f613 | |||
| 1a9cf6f66d | |||
| 02d7296210 | |||
| a688df813e | |||
| 164424bc89 | |||
| f38aaa3d0e | |||
| 7fabd18b1f | |||
| 5db369d67e | |||
| dba32fdf78 | |||
| 716baada22 | |||
| 1a93897e85 | |||
| 9fd1368611 | |||
| 9f174d7614 | |||
| 65d52506c4 | |||
| a56fe0ea28 | |||
| ec110bb0f3 | |||
| 232befb52a | |||
| f363c21ff5 | |||
| 1a96f40a10 | |||
| 6be3236b28 | |||
| ccefd835d0 | |||
| 1ddc14d59d | |||
| 87f617374a | |||
| b76fc350b0 | |||
| 4deb6fdd84 | |||
| 2d5be2157a | |||
| a19206cf0f | |||
| 0f9428ad5a | |||
| d748b333ee | |||
| e8a8019a71 | |||
| e8680b485d | |||
| ab4b92074c | |||
| d57c12d3c3 | |||
| 676b2090fb | |||
| 6005929a96 | |||
| 49ffaa1d94 | |||
| 9fb3fd1a4d | |||
| 28b2b5cac3 | |||
| 320b8e1171 | |||
| 95cb5d201f | |||
| 880708ab5f | |||
| 36917f7780 | |||
| fe44f81ef2 | |||
| a81a2091c3 | |||
| 88d6c20cde | |||
| 4ff41f2ebf | |||
| a7d2148c60 | |||
| 980c0f81dd | |||
| 775e493b0a | |||
| 584ea488bf | |||
| 594c9ca7d2 | |||
| c2b7a7e453 | |||
| 34b604cdfe | |||
| d16d616f34 | |||
| 50f1ad51c8 | |||
| fe253c3d22 | |||
| 6cc30eea44 | |||
| 6be264a17f | |||
| 1ddbd4d6bb | |||
| 845fdb43f2 | |||
| 72f6c4d2dc | |||
| cf23dc78ab | |||
| b78adb4bb0 | |||
| 115bd51f3f | |||
| b979d40778 | |||
| 10589a9497 | |||
| 2f5973c02b | |||
| 090c37a1c1 | |||
| d048f380c1 | |||
| fff40bbff3 | |||
| daef00e38b | |||
| 4ca1b90d00 | |||
| c5691b6c7c | |||
| fb2ee7cc02 | |||
| 136ec654c2 | |||
| 4e583583cd | |||
| 91861e7fcd | |||
| ded4faf2e4 | |||
| cf56b35766 | |||
| 98d55582eb | |||
| 84bdd51021 | |||
| fbd8959d69 | |||
| 67efc11d94 | |||
| d6e6eebb95 | |||
| 4c65c09f4d | |||
| b281328ff4 | |||
| e9d2b38726 | |||
| f4aac111a4 | |||
| 230991782b | |||
| ac99df5c09 | |||
| f81e6103cb | |||
| cd04cb0875 | |||
| 0755c6f60c | |||
| 1c9d7a9ea9 | |||
| e01ded9e27 | |||
| bf056b213a | |||
| 1af7aaeccb | |||
| c7cf86c2ef | |||
| 6472eedddc | |||
| 938554e569 | |||
| 150b83f61e | |||
| 87faed0d04 | |||
| c5a94cfc0c | |||
| f548e8ad5e | |||
| 203df6b58a | |||
| 0ac2f7cded | |||
| b90b51f2c5 | |||
| 8b457133da | |||
| 7bfaa24d25 | |||
| 4eff6b4dd1 | |||
| e3765ad497 | |||
| dd9f3aed36 | |||
| ccfa13f295 | |||
| 540c04e6cc | |||
| 4b40727644 | |||
| 83937700dd | |||
| 2c156ceea2 | |||
| 0615338592 | |||
| b847872622 | |||
| e932583309 | |||
| 7410b7525f | |||
| 88a4f04217 | |||
| ff607e1a2d | |||
| 4bfbfa3621 | |||
| 43b91d3235 | |||
| 9fa205f1c3 | |||
| e003732f18 | |||
| b946b59522 | |||
| ea6b287d1a | |||
| a6455ef1bc | |||
| 8e5e44bfce | |||
| ea151a70c4 | |||
| 1e1f2a66de | |||
| 163daa5de1 | |||
| b581290c30 | |||
| 1927ccfe0a | |||
| 8162d4925d | |||
| 2b3ece467b | |||
| c2f9b38a95 | |||
| 8e688816d4 | |||
| 8dbd5a3f65 | |||
| 4bd5ffb0fa | |||
| 7b2e963636 | |||
| 87d04acb2f | |||
| 759b44135a | |||
| 8b53e0d931 | |||
| 7db23f9d03 | |||
| 3e655bad3b | |||
| 5e97cb0f48 | |||
| ef90e0deed | |||
| f44b61c403 | |||
| 6067af6ef1 | |||
| d7a4fc2927 | |||
| 7b62de80f6 | |||
| 8ffff5ba96 | |||
| e804441234 | |||
| 9cc0fd2240 | |||
| fff2952d5f | |||
| b85bd53336 | |||
| e1de134d94 | |||
| 5af6d4bd82 | |||
| c673d9d935 | |||
| 0fdda61fb5 | |||
| 331b8456a0 | |||
| 63b6f3635b | |||
| c04bcce206 | |||
| dd7d9268f2 | |||
| a3ef5c820d | |||
| e4da5c59b6 | |||
| ad2a406205 | |||
| 001206f7c1 | |||
| 2ef7f497f6 | |||
| 497b108109 | |||
| 86f95b05bf | |||
| 70db77af38 | |||
| 8dd234dd8f | |||
| 83d46ef8c6 | |||
| 1b323a6252 | |||
| d102eb752d | |||
| 4252fab685 | |||
| 94e3dbb2dc | |||
| 8f115078cd | |||
| f51ad1f33e | |||
| e29069b8dc | |||
| 92e52dadd4 | |||
| a4b00897c1 | |||
| f1b7f5ea95 | |||
| 6a9082f126 | |||
| 48f0a6f811 | |||
| 1b39b2d450 | |||
| 785ed642ba | |||
| 3d5b8a7672 | |||
| 0aef0cf765 | |||
| 489f9edec7 | |||
| 718db57ade | |||
| 639dcf19b0 | |||
| 53bc960224 | |||
| ead346c6d3 | |||
| 375059d9f8 | |||
| 6b21d9d424 | |||
| dac9844765 | |||
| 849e66f6a1 | |||
| 6a8a2e4800 | |||
| 0358908910 | |||
| 32ce1a7267 | |||
| 9ea8da839c | |||
| 39cec488d2 | |||
| 96436df18d | |||
| 3aabb5616c | |||
| 8e55f45818 | |||
| ec4135c9ed | |||
| cfdfecb4d1 | |||
| 97b8b34aab | |||
| ce66430fac | |||
| 1fc56e6665 | |||
| d7d96907cf | |||
| eb97054f49 | |||
| 7280ca6a69 | |||
| 30b2a833a8 | |||
| 8f8e52b91a | |||
| 751c868769 | |||
| c3a47c26ec | |||
| 632578f328 | |||
| 5a4240b18d | |||
| 236aeb9dfd | |||
| bcfadbe1a8 | |||
| b5c027f15d | |||
| 15db80b459 | |||
| 76c0ef86e4 | |||
| a3e820e733 | |||
| a3568a1419 | |||
| 4ad57bdea5 | |||
| 085733d7c9 | |||
| fcb58aec3c | |||
| 402c609316 | |||
| 026a9f2bdc | |||
| 1918d6fa2d | |||
| fd04b9a437 | |||
| ea99603b58 | |||
| 036612dbb0 | |||
| 67d1576dfb | |||
| 2850fe731b | |||
| a157ac59ca | |||
| 20f425fe15 | |||
| 32520fd1fb | |||
| c0b21d8808 | |||
| 17d223b542 | |||
| 9c5bf0bb66 | |||
| dfa63345ed | |||
| 3f59b8da01 | |||
| 0ea88a73c7 | |||
| 2e344bb48f | |||
| c91a645782 | |||
| 96f63a17c0 | |||
| 756d1e5e81 | |||
| 2482aada43 | |||
| 64146f69a4 | |||
| edb3c78fe9 | |||
| 49e32967ec | |||
| 5655cffd32 | |||
| 09dff484e1 | |||
| deb0e9eec3 | |||
| 5d1c8a7eda | |||
| ff8a0c2eea | |||
| cbf7777f41 | |||
| fcb55c2109 | |||
| b6fa2deb9f | |||
| 801ca0c2d3 | |||
| 5f792a9a2b | |||
| 8ee71caabb | |||
| ed70f7763a | |||
| 730262f000 | |||
| cbc057bca7 | |||
| 6442642656 | |||
| 07a5a36b6a | |||
| 912bc58df0 | |||
| 13a91a52e8 | |||
| 4190130194 | |||
| d9b124f91e | |||
| 9b3c87ec97 | |||
| 8f85e5e543 | |||
| 966d229787 | |||
| 4d49ace06b | |||
| ad8c64104e | |||
| d59eff4288 | |||
| 219b155037 | |||
| fe3abed9f0 | |||
| 0ecf4aa6b4 | |||
| 477573265a | |||
| 4e03acf17a | |||
| 84fb3d83d8 | |||
| 3522e5eda3 | |||
| 3056644969 | |||
| 91587ad2c8 | |||
| 8a73d9fff0 | |||
| 28ee9f27b9 | |||
| 7ac58a1c69 | |||
| 3914281f1b | |||
| 3d734ad3e3 | |||
| bb4075d7b9 | |||
| 5b11ac0f4c | |||
| 7bc5f0ca13 | |||
| 14ce061215 | |||
| adb5a35097 | |||
| b0a12bcac1 | |||
| 39d4715b82 | |||
| aac92bd6c0 | |||
| f258a14070 | |||
| 3701f1c16b | |||
| 92fcee824b | |||
| 00f8e1d0ba | |||
| 43191eea53 | |||
| 490ed8f689 | |||
| 30152284cc | |||
| 70097709b2 | |||
| 07e007052a | |||
| bd27294ab0 | |||
| 5e5c20757b | |||
| 6231ae208a | |||
| 42a4f5fd95 | |||
| 6e0a302f7d | |||
| 98af055d8b | |||
| fa5c7ff4df | |||
| d7327df885 | |||
| 0f13c8fe97 | |||
| 44d740080b | |||
| 2fc3a4e91e | |||
| 66e0233410 | |||
| a04bec85b2 | |||
| f861b11a91 | |||
| 37f9bbd231 | |||
| af004576f1 | |||
| 26453c4874 | |||
| 4e18bf0bc2 | |||
| 7c298e94f5 | |||
| 761d2399f2 | |||
| 1210cf8c6c | |||
| 524e005b5c | |||
| d06b6d7d41 | |||
| e66e5d1dfc | |||
| 114966e7c0 | |||
| d2246297bd | |||
| 8ec5decbce | |||
| 0f7d77d599 | |||
| 699e3b3d79 | |||
| 2f8ad7ace8 | |||
| 6b6567ec9b | |||
| c8c71650eb | |||
| a07e804f57 | |||
| e9656810e3 | |||
| 4ee32d7559 | |||
| 53083202ba | |||
| 574a48f81f | |||
| 1b1cf2d4bd | |||
| e811ae1104 | |||
| d65bf4128d | |||
| be5735edb8 | |||
| 5bf2b46fa3 | |||
| cf2b0cca22 | |||
| 4ae5043534 | |||
| 1424f87754 | |||
| 4f953f9bd7 | |||
| 0d2f3e2dc4 | |||
| bb1f1d3faa | |||
| 98daf16681 | |||
| 939ba2b4b3 | |||
| d0b82428d5 | |||
| 902911765e | |||
| 03d84a07d1 | |||
| 1f686d93ff | |||
| d95b9ef1ac | |||
| 045f1e7906 | |||
| 69ef7fbefb | |||
| a56b8db410 | |||
| 082eb24c12 | |||
| f04b659f5e | |||
| eedc621637 | |||
| b31ec5c4af | |||
| 651e8c3158 | |||
| 77d4d066b5 | |||
| 1e97e99aa0 | |||
| 7212072ff0 | |||
| 8bcab645e1 | |||
| 9013917d58 | |||
| 1eddf92c35 | |||
| 28d82c9ccd | |||
| 2f90a04513 | |||
| 2724728476 | |||
| ed8f424c1a | |||
| 50137fe026 | |||
| 9237d07226 | |||
| 8c2be4aa85 | |||
| ccaed9a91c | |||
| a1ca8b7124 | |||
| 6633366218 | |||
| 22ea65f02c | |||
| 3d69ee0c30 | |||
| c88a5f38be | |||
| 38e6857824 | |||
| ca72b9a6d0 | |||
| 741831344a | |||
| 7a7c7572ea | |||
| 4c32bc0aa5 | |||
| 0e9cfeb808 | |||
| ae4ae88b6d | |||
| f0c33f8ef2 | |||
| e3a33343b9 | |||
| 20b1c207be | |||
| f1ca5d0340 | |||
| cbec8da9a3 | |||
| 7a55eab399 | |||
| 95f8cab139 | |||
| c1a6d76fc3 | |||
| 08929696f7 | |||
| ba34ba181a | |||
| d2c887a371 | |||
| d5113c0501 | |||
| 29a37645dd | |||
| 601482eff5 | |||
| f141bbd4b4 | |||
| 61020370d5 | |||
| f1f7e76be5 | |||
| 160196798b | |||
| edad91186f | |||
| cd27f0aa54 | |||
| c7665f80ab | |||
| 1a4065b0fb | |||
| a4465105f7 | |||
| 8bba566774 | |||
| 157612e7e7 | |||
| 6148415b4b | |||
| d475e22774 | |||
| 4762e53b5d | |||
| e23ea87953 | |||
| d7563309a2 | |||
| c003076053 | |||
| 74fb29d9fb | |||
| b35f8ded46 | |||
| 16487c4e26 | |||
| 39402696ce | |||
| 52d56d15e4 | |||
| 36509351f0 | |||
| 00792a6555 | |||
| fdabb7cbbe | |||
| 775d8758ef | |||
| 31c979b290 | |||
| 5fd33405af | |||
| 1d1e8f3e7d | |||
| 654c58c90a | |||
| db3bd7c366 | |||
| 26187b954e | |||
| 33ec3739e2 | |||
| d9f13dbdcb | |||
| d5626fe595 |
37
.clang-tidy
Normal file
37
.clang-tidy
Normal file
@ -0,0 +1,37 @@
|
||||
Checks: '
|
||||
-*,
|
||||
bugprone-*,
|
||||
-bugprone-branch-clone,
|
||||
-bugprone-macro-parentheses,
|
||||
-bugprone-unhandled-self-assignment,
|
||||
google-*,
|
||||
-google-build-using-namespace,
|
||||
-google-readability-casting,
|
||||
-google-readability-function-size,
|
||||
-google-readability-todo,
|
||||
-google-runtime-int,
|
||||
-google-runtime-references,
|
||||
misc-*,
|
||||
-misc-no-recursion,
|
||||
-misc-redundant-expression,
|
||||
-misc-unused-parameters,
|
||||
modernize-*,
|
||||
-modernize-avoid-c-arrays,
|
||||
-modernize-deprecated-headers,
|
||||
-modernize-loop-convert,
|
||||
-modernize-use-auto,
|
||||
-modernize-use-nullptr,
|
||||
-modernize-use-trailing-return-type,
|
||||
-modernize-use-using,
|
||||
performance-*,
|
||||
-performance-inefficient-string-concatenation,
|
||||
portability-*,
|
||||
readability-*,
|
||||
-readability-else-after-return,
|
||||
-readability-function-size,
|
||||
-readability-implicit-bool-conversion,
|
||||
-readability-inconsistent-declaration-parameter-name,
|
||||
-readability-isolate-declaration,
|
||||
-readability-magic-numbers,
|
||||
-readability-named-parameter,
|
||||
-readability-simplify-boolean-expr'
|
||||
32
.gitattributes
vendored
Normal file
32
.gitattributes
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
* text eol=lf
|
||||
|
||||
*.png binary
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
28
.github/ISSUE_TEMPLATE.md
vendored
Normal file
28
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
### Additional Information
|
||||
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
|
||||
_Keep in mind that the commands we provide to retrieve information are oriented to GNU/Linux Distributions, so you could need to use others if you use s3fs on macOS or BSD_
|
||||
|
||||
#### Version of s3fs being used (s3fs --version)
|
||||
_example: 1.00_
|
||||
|
||||
#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)
|
||||
_example: 2.9.4_
|
||||
|
||||
#### Kernel information (uname -r)
|
||||
_command result: uname -r_
|
||||
|
||||
#### GNU/Linux Distribution, if applicable (cat /etc/os-release)
|
||||
_command result: cat /etc/os-release_
|
||||
|
||||
#### s3fs command line used, if applicable
|
||||
```
|
||||
```
|
||||
#### /etc/fstab entry, if applicable
|
||||
```
|
||||
```
|
||||
#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)
|
||||
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
|
||||
```
|
||||
```
|
||||
### Details about issue
|
||||
|
||||
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
### Relevant Issue (if applicable)
|
||||
_If there are Issues related to this PullRequest, please list it._
|
||||
|
||||
### Details
|
||||
_Please describe the details of PullRequest._
|
||||
189
.github/workflows/ci.yml
vendored
Normal file
189
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,189 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
name: s3fs-fuse CI
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
#
|
||||
# The CRON event fires every Sunday (UTC).
|
||||
#
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
|
||||
#
|
||||
# Jobs
|
||||
#
|
||||
# [NOTE]
|
||||
# Some tests using awscli may output a python warning.
|
||||
# The warning is about HTTPS connections using self-signed certificates.
|
||||
# That's why the PYTHONWARNINGS environment variable disables the
|
||||
# "Unverified HTTPS request" warning.
|
||||
#
|
||||
jobs:
|
||||
Linux:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
#
|
||||
# build matrix for containers
|
||||
#
|
||||
strategy:
|
||||
#
|
||||
# do not stop jobs automatically if any of the jobs fail
|
||||
#
|
||||
fail-fast: false
|
||||
|
||||
#
|
||||
# matrix for containers
|
||||
#
|
||||
matrix:
|
||||
container:
|
||||
- ubuntu:20.04
|
||||
- ubuntu:18.04
|
||||
- ubuntu:16.04
|
||||
- debian:buster
|
||||
- debian:stretch
|
||||
- centos:centos8
|
||||
- centos:centos7
|
||||
- fedora:32
|
||||
- fedora:31
|
||||
- opensuse/leap:15
|
||||
|
||||
container:
|
||||
image: ${{ matrix.container }}
|
||||
|
||||
options: "--privileged --cap-add SYS_ADMIN --device /dev/fuse"
|
||||
|
||||
env:
|
||||
# [NOTE]
|
||||
# Installation special environment variables for debian and ubuntu.
|
||||
#
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
|
||||
# [NOTE]
|
||||
# Since we use a self-signed certificate and have not registered a certificate authority,
|
||||
# we get a warning in python, so we suppress it(by PYTHONWARNINGS).
|
||||
#
|
||||
PYTHONWARNINGS: "ignore:Unverified HTTPS request"
|
||||
|
||||
steps:
|
||||
# [NOTE]
|
||||
# On openSUSE, tar and gzip must be installed before action/checkout.
|
||||
#
|
||||
- name: Install packages before checkout
|
||||
run: |
|
||||
if [ "${{ matrix.container }}" = "opensuse/leap:15" ]; then zypper install -y tar gzip; fi
|
||||
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# [NOTE]
|
||||
# Matters that depend on OS:VERSION are determined and executed in the following script.
|
||||
# Please note that the option to configure (CONFIGURE_OPTIONS) is set in the environment variable.
|
||||
#
|
||||
- name: Install packages
|
||||
run: |
|
||||
.github/workflows/linux-ci-helper.sh ${{ matrix.container }}
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
/bin/sh -c "./configure ${CONFIGURE_OPTIONS}"
|
||||
make
|
||||
|
||||
- name: Cppcheck
|
||||
run: |
|
||||
make cppcheck
|
||||
|
||||
- name: Test suite
|
||||
run: |
|
||||
make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
|
||||
|
||||
|
||||
# [NOTE]
|
||||
# A case of "runs-on: macos-11.0" does not work,
|
||||
# because load_osxfuse returns exit code = 1.
|
||||
# Maybe it needs to reboot. Apple said
|
||||
# "Installing a new kernel extension requires signing in as an Admin user. You must also restart your Mac to load the extension".
|
||||
# Then we do not use macos 11 on Github Actions now.
|
||||
#
|
||||
macos10:
|
||||
runs-on: macos-10.15
|
||||
|
||||
env:
|
||||
# [NOTE]
|
||||
# Since we use a self-signed certificate and have not registered a certificate authority,
|
||||
# we get a warning in python, so we suppress it(by PYTHONWARNINGS).
|
||||
#
|
||||
PYTHONWARNINGS: "ignore:Unverified HTTPS request"
|
||||
|
||||
steps:
|
||||
- name: Checkout source code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Brew tap
|
||||
run: |
|
||||
TAPS="$(brew --repository)/Library/Taps";
|
||||
if [ -e "$TAPS/caskroom/homebrew-cask" ]; then rm -rf "$TAPS/caskroom/homebrew-cask"; fi;
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask
|
||||
|
||||
- name: Install osxfuse
|
||||
run: |
|
||||
HOMEBREW_NO_AUTO_UPDATE=1 brew install osxfuse
|
||||
|
||||
- name: Install brew other packages
|
||||
run: |
|
||||
S3FS_BREW_PACKAGES='automake cppcheck python3 coreutils gnu-sed';
|
||||
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do if brew list | grep -q ${s3fs_brew_pkg}; then if brew outdated | grep -q ${s3fs_brew_pkg}; then HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg}; fi; else HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg}; fi; done;
|
||||
|
||||
- name: Install awscli
|
||||
run: |
|
||||
if pip3 --version; then pip3 install awscli; else curl https://bootstrap.pypa.io/get-pip.py | sudo python; pip install awscli --ignore-installed matplotlib; fi
|
||||
|
||||
- name: Check osxfuse permission
|
||||
run: |
|
||||
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
./autogen.sh
|
||||
PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
|
||||
make
|
||||
|
||||
- name: Cppcheck
|
||||
run: |
|
||||
make cppcheck
|
||||
|
||||
- name: Test suite
|
||||
run: |
|
||||
make check -C src
|
||||
echo "user_allow_other" | sudo tee -a /etc/fuse.conf >/dev/null
|
||||
if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse; else exit 1; fi
|
||||
make check -C test || (test/filter-suite-log.sh test/test-suite.log; exit 1)
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
218
.github/workflows/linux-ci-helper.sh
vendored
Executable file
218
.github/workflows/linux-ci-helper.sh
vendored
Executable file
@ -0,0 +1,218 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
echo "${PRGNAME} [INFO] Start Linux helper for installing packages."
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Common variables
|
||||
#-----------------------------------------------------------
|
||||
PRGNAME=`basename $0`
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Parameter check
|
||||
#-----------------------------------------------------------
|
||||
#
|
||||
# Usage: ${PRGNAME} "OS:VERSION"
|
||||
#
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "${PRGNAME} [ERROR] No container name options specified."
|
||||
fi
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Container OS variables
|
||||
#-----------------------------------------------------------
|
||||
CONTAINER_FULLNAME=$1
|
||||
CONTAINER_OSNAME=`echo ${CONTAINER_FULLNAME} | sed 's/:/ /g' | awk '{print $1}'`
|
||||
CONTAINER_OSVERSION=`echo ${CONTAINER_FULLNAME} | sed 's/:/ /g' | awk '{print $2}'`
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Common variables for pip
|
||||
#-----------------------------------------------------------
|
||||
PIP_BIN="pip3"
|
||||
PIP_OPTIONS="--upgrade"
|
||||
INSTALL_AWSCLI_PACKAGES="awscli"
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Parameters for configure(set environments)
|
||||
#-----------------------------------------------------------
|
||||
CONFIGURE_OPTIONS="CXXFLAGS='-std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1' --prefix=/usr --with-openssl"
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# OS dependent variables
|
||||
#-----------------------------------------------------------
|
||||
if [ "${CONTAINER_FULLNAME}" = "ubuntu:20.04" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="autoconf autotools-dev fuse libfuse-dev libcurl4-openssl-dev libxml2-dev mime-support libtool pkg-config libssl-dev attr wget python2 python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="openjdk-8-jdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:18.04" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="autoconf autotools-dev fuse libfuse-dev libcurl4-openssl-dev libxml2-dev mime-support libtool pkg-config libssl-dev attr wget python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="openjdk-8-jdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "ubuntu:16.04" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="autoconf autotools-dev fuse libfuse-dev libcurl4-openssl-dev libxml2-dev mime-support libtool pkg-config libssl-dev attr wget python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="openjdk-8-jdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "debian:buster" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="autoconf autotools-dev fuse libfuse-dev libcurl4-openssl-dev libxml2-dev mime-support libtool pkg-config libssl-dev attr wget python2 procps python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="adoptopenjdk-8-hotspot"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "debian:stretch" ]; then
|
||||
PACKAGE_MANAGER_BIN="apt-get"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="autoconf autotools-dev fuse libfuse-dev libcurl4-openssl-dev libxml2-dev mime-support libtool pkg-config libssl-dev attr wget procps python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="openjdk-8-jdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "centos:centos8" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="gcc libstdc++-devel gcc-c++ fuse fuse-devel curl-devel libxml2-devel mailcap git automake make openssl-devel attr diffutils wget python2 python3"
|
||||
INSTALL_CPPCHECK_OPTIONS="--enablerepo=powertools"
|
||||
INSTALL_JDK_PACKAGES="java-1.8.0-openjdk"
|
||||
|
||||
# [NOTE]
|
||||
# Add -O2 to prevent the warning '_FORTIFY_SOURCE requires compiling with optimization(-O)'.
|
||||
#
|
||||
CONFIGURE_OPTIONS="CXXFLAGS='-O2 -std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1' --prefix=/usr --with-openssl"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "centos:centos7" ]; then
|
||||
PACKAGE_MANAGER_BIN="yum"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y"
|
||||
|
||||
INSTALL_PACKAGES="gcc libstdc++-devel gcc-c++ fuse fuse-devel curl-devel libxml2-devel mailcap git automake make openssl-devel attr wget python3 epel-release"
|
||||
INSTALL_CPPCHECK_OPTIONS="--enablerepo=epel"
|
||||
INSTALL_JDK_PACKAGES="java-1.8.0-openjdk"
|
||||
|
||||
# [NOTE]
|
||||
# Add -O2 to prevent the warning '_FORTIFY_SOURCE requires compiling with optimization(-O)'.
|
||||
#
|
||||
CONFIGURE_OPTIONS="CXXFLAGS='-O2 -std=c++11 -DS3FS_PTHREAD_ERRORCHECK=1' --prefix=/usr --with-openssl"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "fedora:32" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="gcc libstdc++-devel gcc-c++ fuse fuse-devel curl-devel libxml2-devel mailcap git automake make openssl-devel wget attr diffutils python2 procps python3-pip"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="java-1.8.0-openjdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "fedora:31" ]; then
|
||||
PACKAGE_MANAGER_BIN="dnf"
|
||||
PACKAGE_UPDATE_OPTIONS="update -y -qq"
|
||||
|
||||
INSTALL_PACKAGES="gcc libstdc++-devel gcc-c++ fuse fuse-devel curl-devel libxml2-devel mailcap git automake make openssl-devel wget attr python2 procps"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="java-1.8.0-openjdk"
|
||||
|
||||
elif [ "${CONTAINER_FULLNAME}" = "opensuse/leap:15" ]; then
|
||||
PACKAGE_MANAGER_BIN="zypper"
|
||||
PACKAGE_UPDATE_OPTIONS="refresh"
|
||||
|
||||
INSTALL_PACKAGES="automake curl-devel fuse fuse-devel gcc-c++ libxml2-devel make openssl-devel python3-pip wget attr"
|
||||
INSTALL_CPPCHECK_OPTIONS=""
|
||||
INSTALL_JDK_PACKAGES="java-1_8_0-openjdk"
|
||||
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Install
|
||||
#-----------------------------------------------------------
|
||||
#
|
||||
# Update packages (ex. apt-get update -y -qq)
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Updates."
|
||||
${PACKAGE_MANAGER_BIN} ${PACKAGE_UPDATE_OPTIONS}
|
||||
|
||||
#
|
||||
# Install packages (with cppcheck)
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Install packages."
|
||||
${PACKAGE_MANAGER_BIN} install -y ${INSTALL_PACKAGES}
|
||||
|
||||
echo "${PRGNAME} [INFO] Install cppcheck package."
|
||||
${PACKAGE_MANAGER_BIN} ${INSTALL_CPPCHECK_OPTIONS} install -y cppcheck
|
||||
|
||||
#
|
||||
# Install JDK 1.8
|
||||
#
|
||||
# [NOTE]
|
||||
# Now, the previous Java LTS version 8 is not available in the official Debian Buster repositories.
|
||||
# This script enables the AdoptOpenJDK repository, which provides prebuilt OpenJDK packages.
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Install JDK 1.8 package."
|
||||
if [ "${CONTAINER_FULLNAME}" != "debian:buster" ]; then
|
||||
${PACKAGE_MANAGER_BIN} install -y ${INSTALL_JDK_PACKAGES}
|
||||
else
|
||||
# [NOTE]
|
||||
# Debian Buster is special case for installing JDK.
|
||||
#
|
||||
${PACKAGE_MANAGER_BIN} install -y apt-transport-https ca-certificates dirmngr gnupg software-properties-common
|
||||
wget -qO - https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public | apt-key add -
|
||||
add-apt-repository --yes https://adoptopenjdk.jfrog.io/adoptopenjdk/deb/
|
||||
${PACKAGE_MANAGER_BIN} ${PACKAGE_UPDATE_OPTIONS}
|
||||
${PACKAGE_MANAGER_BIN} install -y ${INSTALL_JDK_PACKAGES}
|
||||
fi
|
||||
java -version
|
||||
|
||||
#
|
||||
# Install awscli
|
||||
#
|
||||
echo "${PRGNAME} [INFO] Install awscli package."
|
||||
${PIP_BIN} install ${PIP_OPTIONS} ${INSTALL_AWSCLI_PACKAGES}
|
||||
${PIP_BIN} install ${PIP_OPTIONS} rsa
|
||||
|
||||
#-----------------------------------------------------------
|
||||
# Set environment for configure
|
||||
#-----------------------------------------------------------
|
||||
echo "${PRGNAME} [INFO] Set environment for configure options"
|
||||
export CONFIGURE_OPTIONS
|
||||
|
||||
echo "${PRGNAME} [INFO] Finish Linux helper for installing packages."
|
||||
exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
87
.gitignore
vendored
87
.gitignore
vendored
@ -1,21 +1,88 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Compiled Object files
|
||||
#
|
||||
*.slo
|
||||
*.lo
|
||||
*.o
|
||||
Makefile
|
||||
Makefile.in
|
||||
*.Po
|
||||
*.Plo
|
||||
|
||||
#
|
||||
# autotools/automake
|
||||
#
|
||||
aclocal.m4
|
||||
autom4te.cache/
|
||||
autom4te.cache
|
||||
autoscan.log
|
||||
config.guess
|
||||
config.h
|
||||
config.h.in
|
||||
config.h.in~
|
||||
config.log
|
||||
config.status
|
||||
config.sub
|
||||
configure
|
||||
configure.scan
|
||||
depcomp
|
||||
doc/Makefile
|
||||
doc/Makefile.in
|
||||
install-sh
|
||||
libtool
|
||||
ltmain.sh
|
||||
m4
|
||||
m4/*
|
||||
missing
|
||||
src/.deps/
|
||||
src/Makefile
|
||||
src/Makefile.in
|
||||
stamp-h1
|
||||
Makefile
|
||||
Makefile.in
|
||||
test-driver
|
||||
compile
|
||||
missing
|
||||
|
||||
#
|
||||
# object directories
|
||||
#
|
||||
.deps
|
||||
.libs
|
||||
*/.deps
|
||||
*/.deps/*
|
||||
*/.libs
|
||||
*/.libs/*
|
||||
|
||||
#
|
||||
# each directories
|
||||
#
|
||||
*.log
|
||||
*.trs
|
||||
default_commit_hash
|
||||
src/s3fs
|
||||
test/Makefile
|
||||
test/Makefile.in
|
||||
src/test_curl_util
|
||||
src/test_string_util
|
||||
test/chaos-http-proxy-*
|
||||
test/s3proxy-*
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
7
.mailmap
Normal file
7
.mailmap
Normal file
@ -0,0 +1,7 @@
|
||||
Adrian Petrescu <apetresc@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
Adrian Petrescu <apetresc@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
Ben Lemasurier <ben.lemasurier@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
Dan Moore <mooredan@suncup.net@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
Randy Rizun <rrizun@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
Randy Rizun <rrizun@rrizun-ThinkPad-T530.(none)>
|
||||
Takeshi Nakatani <ggtakec@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
|
||||
4
AUTHORS
4
AUTHORS
@ -17,3 +17,7 @@ Bugfixes, performance and other improvements.
|
||||
5. Takeshi Nakatani <ggtakec@gmail.com>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
6. Andrew Gaul <gaul@gaul.org>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
34
COMPILATION.md
Normal file
34
COMPILATION.md
Normal file
@ -0,0 +1,34 @@
|
||||
# Compilation from source code
|
||||
|
||||
These are generic instructions that should work on almost any GNU/Linux, macOS, BSD, or similar system.
|
||||
|
||||
If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes).
|
||||
|
||||
Keep in mind that pre-built packages are preferable when available.
|
||||
|
||||
1. Ensure your system satisfies build and runtime dependencies for:
|
||||
|
||||
* fuse >= 2.8.4
|
||||
* automake
|
||||
* gcc-c++
|
||||
* make
|
||||
* libcurl
|
||||
* libxml2
|
||||
* openssl
|
||||
* mime.types (which package provides it depends on the OS)
|
||||
* s3fs tries to detect `/etc/mime.types` as default regardless of the OS
|
||||
* Else s3fs tries to detect `/etc/apache2/mime.types` if OS is macOS
|
||||
* s3fs exits with an error if these files do not exist
|
||||
* Alternatively, you can set mime.types file path with `mime` option without detecting these default files
|
||||
* pkg-config (or your OS equivalent)
|
||||
|
||||
2. Then compile from master via the following commands:
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
577
ChangeLog
577
ChangeLog
@ -1,10 +1,583 @@
|
||||
ChangeLog for S3FS
|
||||
------------------
|
||||
|
||||
Version 1.1 -- Mon Oct 18 2010
|
||||
Version 1.89 -- 22 Feb, 2021 (major changes only)
|
||||
#1520 - #1525 - #1534 - #1549 - Propagate S3 errors to errno more accurately
|
||||
#1546 - #1559 - Allow writing > 5 GB single-part objects supported by some non-AWS S3
|
||||
#1553 - #1555 - Allow configuration of multipart copy size and limit to 5 GB
|
||||
#1562 - Allow configuration of multipart upload threshold and reduce default to 25 MB
|
||||
#1565 - Set default stat timeout to 900 seconds correctly
|
||||
#1579 - #1582 - Fix data corruption while updating metadata with use_cache
|
||||
|
||||
Version 1.88 -- 4 Jan, 2021 (major changes only)
|
||||
#1349 - Fixed a bug about move file over limit of ensure space
|
||||
#1363 - #1366 - #1439 - Fix multiple race conditions
|
||||
#1365 - Dynamically determine whether lseek extended options are supported
|
||||
#1374 - Add support for deep archive storage class
|
||||
#1385 - Plug FdEntity leaks
|
||||
#1388 - Fix use_session_token option parsing
|
||||
#1392 - Allow 32-bit platforms to upload single-part objects > 2 GB
|
||||
#1404 - Fix dead lock in disk insufficient and optimize code
|
||||
#1408 - Ensure environment variable is set when using ECS
|
||||
#1413 - not call put headers if not exist pending meta
|
||||
#1425 - Do not send SSE headers during bucket creation
|
||||
#1432 - Add sigv4 only option
|
||||
#1437 - Add atime and correct atime/mtime/ctime operations
|
||||
#1447 - Fixed a bug that symlink could not be read after restarting s3fs
|
||||
#1448 - #1467 - Periodically flush written data to reduce temporary local storage
|
||||
#1449 - Added logfile option for non-syslog logging
|
||||
#1462 - Add AWS IMDSv2 support
|
||||
#1502 - #1503 - #1505 - Fix multiple issues when retrying requests
|
||||
|
||||
Version 1.87 -- 10 Aug, 2020 (major changes only)
|
||||
#1244 - use correct content-type when complete multipart upload
|
||||
#1265 - Fixed a bug of stats cache compression
|
||||
#1271 - Fixed the truncation bug of stat file for cache file
|
||||
#1274 - Improved strictness of cache file stats(file)
|
||||
#1277 - Fixed insufficient upload size for mix multipart upload
|
||||
#1282 - Warn about missing MIME types instead of exiting
|
||||
#1285 - Not abort process by exception threw from s3fs_strtoofft
|
||||
#1286 - Support Google Cloud Storage headers
|
||||
#1295 - Added a parameter to output body to curldbg option
|
||||
#1302 - Fix renames of open files with nocopyapi option
|
||||
#1303 - Relink cache stats file atomically via rename
|
||||
#1305 - Ignore case when comparing ETags
|
||||
#1306 - Retry with exponential backoff during 500 error
|
||||
#1312 - Fixed a bug about serializing from cache file
|
||||
#1313 - Fixed about ParallelMixMultipartUpload
|
||||
#1316 - Add support for glacier storage class
|
||||
#1319 - Fixed upload error about mixuploading sparse file and truncating file
|
||||
#1334 - Added SIGUSR1 option for cache file integrity test
|
||||
#1341 - Change default stat_cache_expire
|
||||
|
||||
Version 1.86 -- 04 Feb, 2020 (major changes only)
|
||||
#965 - enable various optimizations when using modern curl
|
||||
#1002 - allow SSE-C keys to have NUL bytes
|
||||
#1008 - add session token support
|
||||
#1039 - allow large files on 32-bit systems like Raspberry Pi
|
||||
#1049 - fix data corruption when external modification changes a cached object
|
||||
#1063 - fix data corruption when opening a second fd to an unflushed file
|
||||
#1066 - fix clock skew errors when writing large files
|
||||
#1081 - allow concurrent metadata queries during data operations
|
||||
#1098 - use server-side copy for partially modified files
|
||||
#1107 - #1108 - fix multiple concurrency issues
|
||||
#1199 - add requester_pays support
|
||||
#1209 - add symlink cache
|
||||
#1224 - add intelligent_ia storage tier
|
||||
|
||||
Version 1.85 -- 11 Mar, 2019
|
||||
#804 - add Backblaze B2
|
||||
#812 - Fix typo s/mutliple/multiple/
|
||||
#819 - #691: Made instructions for creating password file more obvious.
|
||||
#820 - Enable big writes if capable
|
||||
#826 - For RPM distributions fuse-libs is enough
|
||||
#831 - Add support for storage class ONEZONE_IA.
|
||||
#832 - Simplify hex conversion
|
||||
#833 - New installation instructions for Fedora >= 27 and CentOS7
|
||||
#834 - Improve template for issues
|
||||
#835 - Make the compilation instructions generic
|
||||
#840 - Replace all mentions to MacOS X to macOS
|
||||
#849 - Correct typo
|
||||
#851 - Correctly compare list_object_max_keys
|
||||
#852 - Allow credentials from ${HOME}/.aws/credentials
|
||||
#853 - Replace ~ with ${HOME} in examples
|
||||
#855 - Include StackOverflow in FAQs
|
||||
#856 - Add icon for s3fs
|
||||
#859 - Upload S3 parts without batching
|
||||
#861 - Add 'profile' option to command line help.
|
||||
#865 - fix multihead warning check
|
||||
#866 - Multi-arch support for ppc64le
|
||||
#870 - Correct typos in command-line parsing
|
||||
#874 - Address cppcheck 1.86 errors
|
||||
#877 - Check arguments and environment before .aws/creds
|
||||
#882 - [curl] Assume long encryption keys are base64 encoded
|
||||
#885 - Update s3fs_util.cpp for correspondence of Nextcloud contype
|
||||
#888 - Add Server Fault to FAQs
|
||||
#892 - Repair xattr tests
|
||||
#893 - Store and retrieve file change time
|
||||
#894 - Default uid/gid/mode when object lacks permissions
|
||||
#895 - Emit more friendly error for buckets with dots
|
||||
#898 - Flush file before renaming
|
||||
#899 - Tighten up HTTP response code check
|
||||
#900 - Plug memory leak
|
||||
#901 - Plug memory leaks
|
||||
#902 - Avoid pass-by-value when not necessary
|
||||
#903 - Prefer find(char) over find(const char *)
|
||||
#904 - Remove unnecessary calls to std::string::c_str
|
||||
#905 - Fix comparison in s3fs_strtoofft
|
||||
#906 - Prefer HTTPS links where possible
|
||||
#908 - Added an error message when HTTP 301 status
|
||||
#909 - Ignore after period character of floating point in x-amz-meta-mtime
|
||||
#910 - Added a missing extension to .gitignore, and formatted dot files
|
||||
#911 - Added detail error message when HTTP 301/307 status
|
||||
#912 - Automatic region change made possible other than us-east-1(default)
|
||||
#913 - Prefer abort over assert(false)
|
||||
#914 - Issue readdir HEAD requests without batching
|
||||
#917 - Reference better-known AWS CLI for compatibility
|
||||
#918 - Load tail range during overwrite
|
||||
#919 - Add test for mv non-empty directory
|
||||
#920 - Remove unnecessary string copies
|
||||
#921 - Remove redundant string initializations
|
||||
#923 - Reverted automatic region change and changed messages
|
||||
#924 - Prefer empty over size checks
|
||||
#925 - Remove redundant null checks before delete
|
||||
#926 - Accept paths with : in them
|
||||
#930 - Correct enable_content_md5 docs
|
||||
#931 - Correct sigv2 typo
|
||||
#932 - Prefer AutoLock for synchronization
|
||||
#933 - Remove mirror path when deleting cache
|
||||
#934 - Checked and corrected all typo
|
||||
#937 - Disable malloc_trim
|
||||
#938 - Remove unneeded void parameter
|
||||
#939 - Prefer specific [io]stringstream where possible
|
||||
#940 - Copy parts in parallel
|
||||
#942 - Ensure s3fs compiles with C++03
|
||||
#943 - Return not supported when hard linking
|
||||
#944 - Repair utility mode
|
||||
#946 - Simplify async request completion code
|
||||
#948 - Add logging for too many parts
|
||||
#949 - Implement exponential backoff for 503
|
||||
#950 - Added S3FS_MALLOC_TRIM build switch
|
||||
#951 - Added a non-interactive option to utility mode
|
||||
#952 - Automatically abort failed multipart requests
|
||||
#953 - Update s3ql link
|
||||
#954 - Clear containers instead of individual erases
|
||||
#955 - Address miscellaneous clang-tidy warnings
|
||||
#957 - Upgrade to S3Proxy 1.6.1
|
||||
#958 - Document lack of inotify support
|
||||
#959 - Fixed code for latest cppcheck error on OSX
|
||||
#960 - Wtf8
|
||||
#961 - Work around cppcheck warnings
|
||||
#965 - Improvement of curl session pool for multipart
|
||||
#967 - Increase FdEntity reference count when returning
|
||||
#969 - Fix lazy typo
|
||||
#970 - Remove from file from stat cache during rename
|
||||
#972 - Add instructions for Amazon Linux
|
||||
#974 - Changed the description order of man page options
|
||||
#975 - Fixed ref-count when error occurred.
|
||||
#977 - Make macOS instructions consistent with others
|
||||
|
||||
Version 1.84 -- Jul 8, 2018
|
||||
#704 - Update README.md with details about .passwd-s3fs
|
||||
#710 - add disk space reservation
|
||||
#712 - Added Cygwin build options
|
||||
#714 - reduce lock contention on file open
|
||||
#724 - don't fail multirequest on single thread error
|
||||
#726 - add an instance_name option for logging
|
||||
#727 - Fixed Travis CI error about cppcheck - #713
|
||||
#729 - FreeBSD build fixes
|
||||
#733 - More useful error message for dupe entries in passwd file
|
||||
#739 - cleanup curl handle state on retries
|
||||
#745 - don't fail mkdir when directory exists
|
||||
#753 - fix xpath selector in bucket listing
|
||||
#754 - Validate the URL format for http/https
|
||||
#755 - Added reset curl handle when returning to handle pool
|
||||
#756 - Optimize defaults
|
||||
#761 - Simplify installation for Ubuntu 16.04
|
||||
#762 - Upgrade to S3Proxy 1.6.0
|
||||
#763 - cleanup curl handles before curl share
|
||||
#764 - Remove false multihead warnings
|
||||
#765 - Add Debian installation instructions
|
||||
#766 - Remove s3fs-python
|
||||
#768 - Fixed memory leak
|
||||
#769 - Revert "enable FUSE read_sync by default"
|
||||
#774 - Option for IAM authentication endpoint
|
||||
#780 - gnutls_auth: initialize libgcrypt
|
||||
#781 - Fixed an error by cppcheck on OSX
|
||||
#786 - Log messages for 5xx and 4xx HTTP response code
|
||||
#789 - Instructions for SUSE and openSUSE prebuilt packages
|
||||
#793 - Added list_object_max_keys option based on #783 PR
|
||||
|
||||
Version 1.83 -- Dec 17, 2017
|
||||
#606 - Add Homebrew instructions
|
||||
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
|
||||
#609 - Group permission checks sometimes fail with large number of groups
|
||||
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
|
||||
#621 - Upgrade to S3Proxy 1.5.3
|
||||
#627 - Update README.md
|
||||
#630 - Added travis test on osx for #601
|
||||
#631 - Merged macosx branch into master branch #601
|
||||
#636 - Fix intermittent upload failures on macOS
|
||||
#637 - Add blurb about non-Amazon S3 implementations
|
||||
#638 - Minor fixes to README
|
||||
#639 - Update Homebrew instructions
|
||||
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
|
||||
#644 - Fixed with unnecessary equal in POST uploads url argument - #643
|
||||
#645 - Configure S3Proxy for SSL
|
||||
#646 - Simplify S3Proxy PID handling
|
||||
#652 - Fix s3fs_init message
|
||||
#659 - Do not fail updating directory when removing old-style object(ref #658)
|
||||
#660 - Refixed s3fs_init message(ref #652)
|
||||
#663 - Lock FdEntity when mutating orgmeta
|
||||
#664 - auth headers insertion refactoring
|
||||
#668 - Changed .travis.yml for fixing not found gpg2 on osx
|
||||
#669 - add IBM IAM authentication support
|
||||
#670 - Fixed a bug in S3fsCurl::LocateBundle
|
||||
#671 - Add support for ECS metadata endpoint
|
||||
#675 - Reduce use of preprocessor
|
||||
#676 - Move str definition from header to implementation
|
||||
#677 - Add s3proxy to .gitignore
|
||||
#679 - README.md Addition
|
||||
#681 - Changed functions about reading passwd file
|
||||
#684 - Correct signedness warning
|
||||
#686 - remove use of jsoncpp
|
||||
#688 - Improved use of temporary files - #678
|
||||
#690 - Added option ecs description to man page
|
||||
#692 - Updated template md files for issue and pr
|
||||
#695 - fix condition for parallel download
|
||||
#697 - Fixing race condition in FdEntity::GetStats
|
||||
#699 - Fix dbglevel usage
|
||||
|
||||
Version 1.82 -- May 13, 2017
|
||||
#597 - Not fallback to HTTP - #596
|
||||
#598 - Updated ChangeLog and configure.ac for release 1.82
|
||||
|
||||
Version 1.81 -- May 13, 2017
|
||||
#426 - Updated to correct ChangeLog
|
||||
#431 - fix typo s/controll/control/
|
||||
#432 - Include location constraint when creating bucket
|
||||
#433 - Correct search and replace typo
|
||||
#440 - Handled all curl error without exiting process - #437
|
||||
#443 - Fix for leaks during stat cache entry expiry / truncation (#340)
|
||||
#444 - Add mirror file logic for removing cache file
|
||||
#447 - added fuse package for mounting via /etc/fstab, fixes #417
|
||||
#449 - Accept mount options compatible with mtab
|
||||
#451 - Correct path in README
|
||||
#454 - Changed for accepting mount options compatible with mtab - #449
|
||||
#466 - Fixed a bug about could not copy file mode from org file
|
||||
#471 - Added use_xattr option for #467 and #460
|
||||
#477 - OS-specific correspondence of the extended attribute header
|
||||
#483 - Trim symbolic link original path in file
|
||||
#487 - Split header debugging onto multiple lines for easier reading
|
||||
#488 - Fixed searching Content-Length without case sensitive - #480
|
||||
#489 - Changed headers_t map using nocase compare function - #488
|
||||
#494 - Fix typo s/destroied/destroyed/
|
||||
#495 - Fix invalid V4 signature on multipart copy requests
|
||||
#498 - Upgrade to S3Proxy 1.5.1
|
||||
#502 - Fixed issue#435 branch codes for remaining bugs(2)
|
||||
#503 - Add missing call to mtime test
|
||||
#504 - Use describe helper function
|
||||
#505 - Correct typos
|
||||
#509 - Use server-provided ETag during complete upload
|
||||
#511 - Fixed a bug about uploading NULL to some part of the file contents
|
||||
#512 - Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468
|
||||
#513 - Added issue and PR templates.
|
||||
#517 - Update s3fs.1 - removed duplicated word
|
||||
#520 - Added links for eventual consistency in README.md - #515
|
||||
#539 - Upgrade to S3Proxy 1.5.2
|
||||
#540 - Address cppcheck 1.77 warnings
|
||||
#545 - Changed base cached time of stat_cache_expire option - #523
|
||||
#546 - Fixed double initialization of SSL library at foreground
|
||||
#550 - Add umount instruction for unprivileged user
|
||||
#551 - Updated stat_cache_expire option description - #545
|
||||
#552 - switch S3fsMultiCurl to use foreground threads
|
||||
#553 - add TLS cipher suites customization
|
||||
#554 - cleanup cache directory when running out of disk space
|
||||
#555 - don't sign empty headers (as they are discarded
|
||||
#556 - fix multipart upload handling without cache
|
||||
#557 - Added check_cache_dir_exist option(refixed #347) - #538
|
||||
#558 - Fixed a bug in logic about truncating stat cache
|
||||
#560 - Fixed about multipart uploading at no free space related to #509
|
||||
#567 - Do not send ACL unless overridden
|
||||
#576 - Added option for complementing lack of stat mode
|
||||
#578 - Refactored the get_object_attribute function
|
||||
#579 - Added notsup_compat_dir option
|
||||
#580 - Enhanced bucket/path parameter check
|
||||
#582 - Check errors returned in 200 OK responses for put header request
|
||||
#583 - Updated limit object size in s3fs man page
|
||||
#585 - Fixed failure to upload/copy with SSE_C and SSE_KMS
|
||||
#587 - Changed copyright year format for debian pkg
|
||||
#588 - Default transport to HTTPS
|
||||
#590 - Updated man page for default_acl option - #567
|
||||
#593 - Backward compatible for changing default transport to HTTPS
|
||||
#594 - Check bucket at public bucket and add nocopyapi option automatically
|
||||
#595 - Updated ChangeLog and configure.ac for release 1.81
|
||||
|
||||
Version 1.80 -- May 29, 2016
|
||||
#213 - Parse ETag from copy multipart correctly
|
||||
#215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
|
||||
#217 - Override install, so that the make install does not install rename_before_close under /test
|
||||
#219 - Address Coverity errors
|
||||
#220 - Test removing a non-empty directory
|
||||
#221 - Compare idiomatically
|
||||
#222 - Annotate constructors as explicit
|
||||
#224 - Configure cppcheck
|
||||
#229 - Convert rename_before_close to a shell script
|
||||
#231 - Rewrite AutoLock
|
||||
#232 - Always hold stat_cache_lock when using stat_cache
|
||||
#233 - Remove IntToStr
|
||||
#234 - Update README
|
||||
#235 - Plug leak during complete multipart upload
|
||||
#237 - Refactor tests into individual functions
|
||||
#238 - Enable all cppcheck rules
|
||||
#239 - Update stale Google Code reference in --help
|
||||
#240 - Enable Content-MD5 during multipart upload part
|
||||
#243 - Run cppcheck during Travis builds
|
||||
#245 - Elide duplicate lookups of std::map via iterators
|
||||
#246 - Unlock during early return in TruncateCache
|
||||
#247 - Base64 cleanup
|
||||
#248 - Enable integration tests for Travis
|
||||
#249 - Silence wget
|
||||
#250 - s3fs can print version with short commit hash - #228
|
||||
#251 - Skip xattr tests if utilities are missing
|
||||
#252 - This fixes an issue with caching when the creation of a subdirectory …
|
||||
#253 - Added checking cache dir perms at starting.
|
||||
#256 - Add no atomic rename to limitations
|
||||
#257 - Update README.md: Bugfix password file permissions errors
|
||||
#258 - Update README.md to better explain mount upon boot
|
||||
#260 - Wrap help text at 80 characters
|
||||
#261 - Correct help timeouts
|
||||
#263 - Allow integration testing against Amazon S3
|
||||
#265 - Fix integration tests
|
||||
#266 - Cleanup from PR #265
|
||||
#267 - Added the _netdev option to the fstab example.
|
||||
#268 - Use 127.0.0.1 not localhost in s3proxy wait loop
|
||||
#271 - Add support for standard_ia storage class
|
||||
#274 - Modified man page for storage_class option(#271)
|
||||
#275 - Changed and cleaned the logic for debug message.
|
||||
#278 - Supported for SSE KMS(#270)
|
||||
#280 - Supported a object which is larger than free disk space
|
||||
#285 - Add test for symlink
|
||||
#288 - Fixed a bug about head request(copy) for SSE - issue#286
|
||||
#289 - Print source file in log messages
|
||||
#291 - File opened with O_TRUNC is not flushed - Issue #290
|
||||
#293 - Fix a small spelling issue.
|
||||
#295 - File opened with O_TRUNC is not flushed - changed #291
|
||||
#300 - Update integration-test-main.sh
|
||||
#302 - Fix syslog level used by S3FS_PRN_EXIT()
|
||||
#304 - Fixed a bug about mtime - #299
|
||||
#306 - Fix read concurrency to work in parallel count
|
||||
#307 - Fix pthread portability problem
|
||||
#308 - Changed ensure free disk space as additional change for #306
|
||||
#309 - Check pthread portability in configure as additional change for #307
|
||||
#310 - Update integration-test-main.sh as additional change for #300
|
||||
#311 - Change error log to debug log in s3fs_read()
|
||||
#313 - fix gitignore
|
||||
#319 - Clean up mount point on errors in s3fs_init()
|
||||
#321 - delete stat cache entry in s3fs_fsync so st_size is refreshed - #320
|
||||
#323 - Add goofys to references
|
||||
#328 - Fix v4 signature with use_path_request_style
|
||||
#329 - Correct multiple issues with GET and v4 signing
|
||||
#330 - Pass by const reference where possible
|
||||
#331 - Address various clang warnings
|
||||
#334 - Bucket host should include port and not path
|
||||
#336 - update README.md for fstab
|
||||
#338 - Fixed a bug about IAMCRED type could not be retried.
|
||||
#339 - Updated README.md for fstab example.
|
||||
#341 - Fix the memory leak issue in fdcache.
|
||||
#346 - Fix empty directory check against AWS S3
|
||||
#348 - Integration test summary, continue on error
|
||||
#350 - Changed cache out logic for stat - #340
|
||||
#351 - Check cache directory path and attributes - #347
|
||||
#352 - Remove stat file cache dir if specified del_cache - #337
|
||||
#354 - Supported regex type for additional header format - #343
|
||||
#355 - Fixed codes about clock_gettime for osx
|
||||
#356 - Fixed codes about clock_gettime for osx(2)
|
||||
#357 - Fixed codes about clock_gettime for osx(3)
|
||||
#359 - Remove optional parameter from Content-Type header - #358
|
||||
#360 - Fix clock_gettime autotools detection on Linux
|
||||
#364 - Checked content-type by no case-sensitivity - #363
|
||||
#371 - Always set stats cache for opened file
|
||||
#372 - Fixed a bug about etag comparison in stats cache, etc.
|
||||
#376 - Test for writing after an lseek past end of file
|
||||
#379 - Fixed a bug about writing sparsed file - #375
|
||||
#385 - fix typo in curl.cpp: s/returing/returning/
|
||||
#391 - Update s3fs.1
|
||||
#394 - Revert "Fixed a bug about writing sparsed file - #375"
|
||||
#395 - Fixed writing sparsed file - #375,#379,#394
|
||||
#397 - Supported User-Agent header - #383
|
||||
#403 - Fix a bug of truncating empty file
|
||||
#404 - Add curl handler pool to reuse connections
|
||||
#409 - Fixed 'load_sse_c' option not working - #388
|
||||
#410 - Allow duplicate key in ahbe_conf - #386
|
||||
#411 - loading IAM role name automatically(iam_role option) - #387
|
||||
#415 - Fixed a bug about stat_cache_expire - #382
|
||||
#420 - Skip early credential checks when iam_role=auto
|
||||
#422 - Fixes for iam_role=auto
|
||||
#424 - Added travis CI badge in README.md
|
||||
#425 - Updated ChangeLog and configure.ac for release 1.80
|
||||
|
||||
Version 1.79 -- Jul 19, 2015
|
||||
issue #60 - Emit user-friendly log messages on failed CheckBucket requests
|
||||
issue #62 - Remove stray chars from source files
|
||||
issue #63 - Fix spelling errors
|
||||
issue #68 - FreeBSD issue
|
||||
issue #69 - Address clang always true warnings
|
||||
issue #73 - Small gitignore fixes
|
||||
issue #74 - url: handle scheme omission
|
||||
issue #83 - Changed option processing to use strtol() to get a umask
|
||||
issue #93 - Add simple unit tests for trim functions
|
||||
issue #100 - CURL handles not properly initialized to use DNS or SSL session caching
|
||||
issue #101 - Optimized function "bool directory_empty()"
|
||||
issue #103 - Remove prefix option in s3fs man page - issue#87
|
||||
issue #104 - fix rename before close
|
||||
issue #116 - Supported signature version 4
|
||||
issue #119 - Added new mp_umask option about issue#107, pr#110
|
||||
issue #124 - Fallback to v2 signatures correctly.
|
||||
issue #130 - refactor integration tests create/cleanup file
|
||||
issue #131 - Test ls
|
||||
issue #132 - Use S3Proxy to run integration tests
|
||||
issue #134 - Include Content-Type in complete MPU V2 signature
|
||||
issue #135 - Correct V4 signature for initiate multipart upload
|
||||
issue #136 - Small fixes to integration tests
|
||||
issue #137 - Add test for multi-part upload
|
||||
issue #138 - Fixed bugs, not turn use_cache off and ty to load to end - issue#97
|
||||
issue #143 - Fixed a bug no use_cache case about fixed #138 - issue#141
|
||||
issue #144 - Add Travis configuration
|
||||
issue #146 - add exit handler to cleanup on failures
|
||||
issue #147 - Use S3Proxy 1.4.0-SNAPSHOT
|
||||
issue #150 - Fixed a bug not handling fsync - #145
|
||||
issue #154 - Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149
|
||||
issue #155 - Fixed a bug: unable to mount bucket subdirectory
|
||||
issue #156 - Fixed a bug about ssl session sharing with libcurl older 7.23.0 - issue#126
|
||||
issue #159 - Upgrade to S3Proxy 1.4.0
|
||||
issue #164 - send the correct Host header when using -o url
|
||||
issue #165 - Auth v4 refactor
|
||||
issue #167 - Increased default connecting/reading/writing timeout value
|
||||
issue #168 - switch to use region specific endpoints to compute correct v4 signature
|
||||
issue #170 - Reviewed and fixed response codes print in curl.cpp - #157
|
||||
issue #171 - Support buckets with mixed-case names
|
||||
issue #173 - Run integration tests via Travis
|
||||
issue #176 - configure.ac: detect target, if target is darwin (OSX), then #176
|
||||
issue #177 - Add .mailmap
|
||||
issue #178 - Update .gitignore
|
||||
issue #184 - Add usage information for multipart_size
|
||||
issue #185 - Correct obvious typos in usage and README
|
||||
issue #190 - Add a no_check_certificate option.
|
||||
issue #194 - Tilda in a file-name breaks things (EPERM)
|
||||
issue #198 - Disable integration tests for Travis
|
||||
issue #199 - Supported extended attributes(retry)
|
||||
issue #200 - fixed fallback to sigv2 for bucket create and GCS
|
||||
issue #202 - Specialize {set,get}xattr for OS X
|
||||
issue #204 - Add integration test for xattr
|
||||
issue #207 - Fixed a few small spelling issues.
|
||||
|
||||
Version 1.78 -- Sep 15, 2014
|
||||
issue #29 - Possible to create Debian/Ubuntu packages?(googlecode issue 109)
|
||||
issue 417(googlecode) - Password file with DOS format is not handled properly
|
||||
issue #41 - Failed making signature
|
||||
issue #40 - Moving a directory containing more than 1000 files truncates the directory
|
||||
issue #49 - use_sse is ignored when creating new files
|
||||
issue #39 - Support for SSE-C
|
||||
issue #50 - Cannot find pkg-config when configured with any SSL backend except openssl
|
||||
|
||||
Version 1.77 -- Apr 19, 2014
|
||||
issue 405(googlecode) - enable_content_md5 Input/output error
|
||||
issue #14 - s3fs -u should return 0 if there are no lost multiparts
|
||||
issue #16 - empty file is written to s3
|
||||
issue #18 - s3fs crashes with segfault
|
||||
issue #22 - Fix typos in docs for max_stat_cache_size
|
||||
issue #23 - curl ssl problems
|
||||
issue #28 - Address signedness warning in FdCache::Init
|
||||
|
||||
Version 1.76 -- Jan 21, 2014
|
||||
issue #5 - du shows incorrect usage stats
|
||||
issue #8 - version in configure.ac is 1.74 for release 1.75
|
||||
|
||||
Version 1.75 -- Jan 6, 2014
|
||||
issue #1 - Using %20 instead of the plus (+) sign for encoding spaces
|
||||
issue #3 - Fixed local timezone was incorrectly being applied to IAM and Last-Modified dates.
|
||||
issue #4 - Fix compilation error on MacOSX with missing const
|
||||
|
||||
Version 1.74 -- Nov 24, 2013
|
||||
This version is initial version on Github, same as on GoogleCodes(s3fs).
|
||||
https://github.com/s3fs-fuse/s3fs-fuse/releases/tag/v1.74
|
||||
see more detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.74.tar.gz
|
||||
|
||||
Version 1.73 -- Aug 23, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.73.tar.gz
|
||||
|
||||
Version 1.72 -- Aug 10, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.72.tar.gz
|
||||
|
||||
Version 1.71 -- Jun 15, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.71.tar.gz
|
||||
|
||||
Version 1.70 -- Jun 01, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.70.tar.gz
|
||||
|
||||
Version 1.69 -- May 15, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.69.tar.gz
|
||||
|
||||
Version 1.68 -- Apr 30, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.68.tar.gz
|
||||
|
||||
Version 1.67 -- Apr 13, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.67.tar.gz
|
||||
|
||||
Version 1.66 -- Apr 06, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.66.tar.gz
|
||||
|
||||
Version 1.65 -- Mar 30, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.65.tar.gz
|
||||
|
||||
Version 1.64 -- Mar 23, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.64.tar.gz
|
||||
|
||||
Version 1.63 -- Feb 24, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.63.tar.gz
|
||||
|
||||
Version 1.62 -- Jan 27, 2013
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.62.tar.gz
|
||||
|
||||
Version 1.61 -- Aug 30, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.61.tar.gz
|
||||
|
||||
Version 1.60 -- Aug 29, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.60.tar.gz
|
||||
|
||||
Version 1.59 -- Jul 28, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.59.tar.gz
|
||||
|
||||
Version 1.58 -- Jul 19, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.58.tar.gz
|
||||
|
||||
Version 1.57 -- Jul 07, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.57.tar.gz
|
||||
|
||||
Version 1.56 -- Jul 07, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.56.tar.gz
|
||||
|
||||
Version 1.55 -- Jul 02, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.55.tar.gz
|
||||
|
||||
Version 1.54 -- Jun 25, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.54.tar.gz
|
||||
|
||||
Version 1.53 -- Jun 22, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.53.tar.gz
|
||||
|
||||
Version 1.40 -- Feb 11, 2011
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.40.tar.gz
|
||||
|
||||
Version 1.33 -- Dec 30, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.33.tar.gz
|
||||
|
||||
Version 1.25 -- Dec 16, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.25.tar.gz
|
||||
|
||||
Version 1.19 -- Dec 2, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.19.tar.gz
|
||||
|
||||
Version 1.16 -- Nov 22, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.16.tar.gz
|
||||
|
||||
Version 1.10 -- Nov 6, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.10.tar.gz
|
||||
|
||||
Version 1.02 -- Oct 29, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.02.tar.gz
|
||||
|
||||
Version 1.01 -- Oct 28, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.01.tar.gz
|
||||
|
||||
Version 1.0 -- Oct 24, 2010
|
||||
see detail on googlecodes: https://code.google.com/p/s3fs/downloads/detail?name=s3fs-1.0.tar.gz
|
||||
|
||||
------
|
||||
Version 1.1 -- Mon Oct 18 2010
|
||||
Dan Moore reopens the project and fixes various issues that had accumulated in the tracker. Adrian Petrescu converts the project to autotools and posts it to GitHub.
|
||||
|
||||
Version 1.0 -- 2008
|
||||
|
||||
Randy Rizun releases a basic version of S3FS on Google Code.
|
||||
|
||||
|
||||
2
INSTALL
2
INSTALL
@ -124,7 +124,7 @@ architecture at a time in the source code directory. After you have
|
||||
installed the package for one architecture, use `make distclean' before
|
||||
reconfiguring for another architecture.
|
||||
|
||||
On MacOS X 10.5 and later systems, you can create libraries and
|
||||
On macOS 10.5 and later systems, you can create libraries and
|
||||
executables that work on multiple system types--known as "fat" or
|
||||
"universal" binaries--by specifying multiple `-arch' options to the
|
||||
compiler but only a single `-arch' option to the preprocessor. Like
|
||||
|
||||
46
Makefile.am
46
Makefile.am
@ -1,6 +1,25 @@
|
||||
######################################################################
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
SUBDIRS=src test doc
|
||||
|
||||
EXTRA_DIST=doc
|
||||
EXTRA_DIST=doc default_commit_hash
|
||||
|
||||
dist-hook:
|
||||
rm -rf `find $(distdir)/doc -type d -name .svn`
|
||||
@ -8,3 +27,28 @@ dist-hook:
|
||||
|
||||
release : dist ../utils/release.sh
|
||||
../utils/release.sh $(DIST_ARCHIVES)
|
||||
|
||||
cppcheck:
|
||||
cppcheck --quiet --error-exitcode=1 \
|
||||
--inline-suppr \
|
||||
--std=c++03 \
|
||||
--xml \
|
||||
-D HAVE_ATTR_XATTR_H \
|
||||
-D HAVE_SYS_EXTATTR_H \
|
||||
-D HAVE_MALLOC_TRIM \
|
||||
-U CURLE_PEER_FAILED_VERIFICATION \
|
||||
-U P_tmpdir \
|
||||
-U ENOATTR \
|
||||
--enable=warning,style,information,missingInclude \
|
||||
--suppress=missingIncludeSystem \
|
||||
--suppress=unmatchedSuppression \
|
||||
src/ test/
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts= fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
63
README
63
README
@ -1,63 +0,0 @@
|
||||
THIS README CONTAINS OUTDATED INFORMATION - please refer to the wiki or --help
|
||||
|
||||
S3FS-Fuse
|
||||
|
||||
S3FS is a FUSE (File System in User Space) based solution to mount/unmount Amazon S3 storage buckets and use system commands with S3 just as if it were another hard disk.
|
||||
|
||||
In order to compile s3fs, you'll need the following requirements:
|
||||
|
||||
* Kernel-devel packages (or kernel source) installed that is the SAME version of your running kernel
|
||||
* LibXML2-devel packages
|
||||
* CURL-devel packages (or compile curl from sources at: curl.haxx.se/ use 7.15.X)
|
||||
* GCC, GCC-C++
|
||||
* pkgconfig
|
||||
* FUSE (>= 2.8.4)
|
||||
* FUSE Kernel module installed and running (RHEL 4.x/CentOS 4.x users - read below)
|
||||
* OpenSSL-devel (0.9.8)
|
||||
* Subversion
|
||||
|
||||
If you're using YUM or APT to install those packages, additional dependency packages may be required; allow them to be installed.
|
||||
|
||||
Downloading & Compiling:
|
||||
------------------------
|
||||
In order to download s3fs, use the following command:
|
||||
svn checkout http://s3fs.googlecode.com/svn/trunk/ s3fs-read-only
|
||||
|
||||
Go inside the directory that has been created (s3fs-read-only/s3fs) and run: ./autogen.sh
|
||||
This will generate a number of scripts in the project directory, including a configure script which you should run with: ./configure
|
||||
If configure succeeded, you can now run: make. If it didn't, make sure you meet the dependencies above.
|
||||
This should compile the code. If everything goes OK, you'll be greeted with "ok!" at the end and you'll have a binary file called "s3fs"
|
||||
in the src/ directory.
|
||||
|
||||
As root (you can use su, su -, or sudo) do: "make install" - this will copy the "s3fs" binary to /usr/local/bin.
|
||||
|
||||
Congratulations. S3fs is now compiled and installed.
|
||||
|
||||
Usage:
|
||||
------
|
||||
In order to use s3fs, make sure you have the Access Key and the Secret Key handy. (refer to the wiki)
|
||||
First, create a directory where to mount the S3 bucket you want to use.
|
||||
Example (as root): mkdir -p /mnt/s3
|
||||
Then run: s3fs mybucket[:path] /mnt/s3
|
||||
|
||||
This will mount your bucket to /mnt/s3. You can do a simple "ls -l /mnt/s3" to see the content of your bucket.
|
||||
|
||||
If you want to allow other people to access the same bucket on the same machine, you can add "-o allow_other" to read/write/delete content of the bucket.
|
||||
|
||||
You can add a fixed mount point in /etc/fstab, here's an example:
|
||||
|
||||
s3fs#mybucket /mnt/s3 fuse allow_other 0 0
|
||||
|
||||
This will mount upon reboot (or by launching: mount -a) your bucket on your machine.
|
||||
|
||||
All other options can be read at: http://code.google.com/p/s3fs/wiki/FuseOverAmazon
|
||||
|
||||
Known Issues:
|
||||
-------------
|
||||
s3fs should be working fine with S3 storage. However, there are a couple of limitations:
|
||||
|
||||
* There is no full UID/GID support yet, everything looks as "root" and if you allow others to access the bucket, others can erase files. There is, however, permissions support built in.
|
||||
* Currently s3fs could hang the CPU if you have lots of time-outs. This is *NOT* a fault of s3fs but rather libcurl. This happens when you try to copy thousands of files in 1 session; it doesn't happen when you upload hundreds of files or less.
|
||||
* CentOS 4.x/RHEL 4.x users - if you use the kernel that shipped with your distribution and didn't upgrade to the latest kernel RedHat/CentOS gives, you might have a problem loading the "fuse" kernel. Please upgrade to the latest kernel (2.6.16 or above) and make sure "fuse" kernel module is compiled and loadable since FUSE requires this kernel module and s3fs requires it as well.
|
||||
* Moving/renaming/erasing files takes time since the whole file needs to be accessed first. A workaround could be to use s3fs's cache support with the use_cache option.
|
||||
|
||||
175
README.md
Normal file
175
README.md
Normal file
@ -0,0 +1,175 @@
|
||||
# s3fs
|
||||
|
||||
s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other
|
||||
tools like [AWS CLI](https://github.com/aws/aws-cli).
|
||||
[](https://github.com/s3fs-fuse/s3fs-fuse/actions)
|
||||
[](https://twitter.com/s3fsfuse)
|
||||
|
||||
## Features
|
||||
|
||||
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
|
||||
* compatible with Amazon S3, and other [S3-based object stores](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3)
|
||||
* allows random writes and appends
|
||||
* large files via multi-part upload
|
||||
* renames via server-side copy
|
||||
* optional server-side encryption
|
||||
* data integrity via MD5 hashes
|
||||
* in-memory metadata caching
|
||||
* local disk data caching
|
||||
* user-specified regions, including Amazon GovCloud
|
||||
* authenticate via v2 or v4 signatures
|
||||
|
||||
## Installation
|
||||
|
||||
Many systems provide pre-built packages:
|
||||
|
||||
* Amazon Linux via EPEL:
|
||||
|
||||
```
|
||||
sudo amazon-linux-extras install epel
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* Arch Linux:
|
||||
|
||||
```
|
||||
sudo pacman -S s3fs-fuse
|
||||
```
|
||||
|
||||
* Debian 9 and Ubuntu 16.04 or newer:
|
||||
|
||||
```
|
||||
sudo apt install s3fs
|
||||
```
|
||||
|
||||
* Fedora 27 or newer:
|
||||
|
||||
```
|
||||
sudo dnf install s3fs-fuse
|
||||
```
|
||||
|
||||
* Gentoo:
|
||||
|
||||
```
|
||||
sudo emerge net-fs/s3fs
|
||||
```
|
||||
|
||||
* RHEL and CentOS 7 or newer through via EPEL:
|
||||
|
||||
```
|
||||
sudo yum install epel-release
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* SUSE 12 and openSUSE 42.1 or newer:
|
||||
|
||||
```
|
||||
sudo zypper install s3fs
|
||||
```
|
||||
|
||||
* macOS via [Homebrew](https://brew.sh/):
|
||||
|
||||
```
|
||||
brew install --cask osxfuse
|
||||
brew install s3fs
|
||||
```
|
||||
|
||||
Otherwise consult the [compilation instructions](COMPILATION.md).
|
||||
|
||||
## Examples
|
||||
|
||||
s3fs supports the standard
|
||||
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
|
||||
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
|
||||
|
||||
The default location for the s3fs password file can be created:
|
||||
|
||||
* using a `.passwd-s3fs` file in the users home directory (i.e. `${HOME}/.passwd-s3fs`)
|
||||
* using the system-wide `/etc/passwd-s3fs` file
|
||||
|
||||
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
|
||||
owner-only permissions:
|
||||
|
||||
```
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
|
||||
chmod 600 ${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
If you encounter any errors, enable debug output:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
|
||||
```
|
||||
|
||||
You can also mount on boot by entering the following line to `/etc/fstab`:
|
||||
|
||||
```
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
|
||||
```
|
||||
|
||||
If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
|
||||
```
|
||||
|
||||
or (fstab)
|
||||
|
||||
```
|
||||
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
|
||||
```
|
||||
|
||||
Note: You may also want to create the global credential file first
|
||||
|
||||
```
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
|
||||
chmod 600 /etc/passwd-s3fs
|
||||
```
|
||||
|
||||
Note2: You may also need to make sure the `netfs` service is started on boot
|
||||
|
||||
## Limitations
|
||||
|
||||
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
|
||||
|
||||
* random writes or appends to files require rewriting the entire object, optimized with multi-part upload copy
|
||||
* metadata operations such as listing directories have poor performance due to network latency
|
||||
* non-AWS providers may have [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) so reads can temporarily yield stale data (AWS offers read-after-write consistency [since Dec 2020](https://aws.amazon.com/about-aws/whats-new/2020/12/amazon-s3-now-delivers-strong-read-after-write-consistency-automatically-for-all-applications/))
|
||||
* no atomic renames of files or directories
|
||||
* no coordination between multiple clients mounting the same bucket
|
||||
* no hard links
|
||||
* inotify detects only local modifications, not external ones by other clients or tools
|
||||
|
||||
## References
|
||||
|
||||
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
|
||||
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
|
||||
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
|
||||
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)
|
||||
|
||||
## License
|
||||
|
||||
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
|
||||
|
||||
Licensed under the GNU GPL version 2
|
||||
|
||||
30
autogen.sh
30
autogen.sh
@ -19,6 +19,36 @@
|
||||
#
|
||||
# See the file ChangeLog for a revision history.
|
||||
|
||||
echo "--- Make commit hash file -------"
|
||||
|
||||
SHORTHASH="unknown"
|
||||
type git > /dev/null 2>&1
|
||||
if [ $? -eq 0 -a -d .git ]; then
|
||||
RESULT=`git rev-parse --short HEAD`
|
||||
if [ $? -eq 0 ]; then
|
||||
SHORTHASH=${RESULT}
|
||||
fi
|
||||
fi
|
||||
echo ${SHORTHASH} > default_commit_hash
|
||||
|
||||
echo "--- Finished commit hash file ---"
|
||||
|
||||
echo "--- Start autotools -------------"
|
||||
|
||||
aclocal \
|
||||
&& autoheader \
|
||||
&& automake --add-missing \
|
||||
&& autoconf
|
||||
|
||||
echo "--- Finished autotools ----------"
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
361
configure.ac
361
configure.ac
@ -20,88 +20,333 @@
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
AC_PREREQ(2.59)
|
||||
AC_INIT(s3fs, 1.74)
|
||||
AC_INIT(s3fs, 1.89)
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
AM_INIT_AUTOMAKE()
|
||||
AM_INIT_AUTOMAKE([foreign])
|
||||
|
||||
AC_PROG_CXX
|
||||
AC_PROG_CC
|
||||
|
||||
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
|
||||
AC_CHECK_HEADERS([sys/xattr.h])
|
||||
AC_CHECK_HEADERS([attr/xattr.h])
|
||||
AC_CHECK_HEADERS([sys/extattr.h])
|
||||
AC_CHECK_FUNCS([fallocate])
|
||||
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
|
||||
CXXFLAGS="$CXXFLAGS -Wall -fno-exceptions -D_FILE_OFFSET_BITS=64 -D_FORTIFY_SOURCE=2"
|
||||
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS(malloc_trim, , )
|
||||
dnl ----------------------------------------------
|
||||
dnl For macOS
|
||||
dnl ----------------------------------------------
|
||||
case "$target" in
|
||||
*-cygwin* )
|
||||
# Do something specific for windows using winfsp
|
||||
CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
|
||||
min_fuse_version=2.8
|
||||
;;
|
||||
*-darwin* )
|
||||
# Do something specific for mac
|
||||
min_fuse_version=2.7.3
|
||||
;;
|
||||
*)
|
||||
# Default Case
|
||||
# assume other supported linux system
|
||||
min_fuse_version=2.8.4
|
||||
;;
|
||||
esac
|
||||
|
||||
dnl Initializing NSS (temporarily)
|
||||
AC_MSG_CHECKING([Initializing libcurl build with NSS])
|
||||
AC_ARG_ENABLE(
|
||||
nss-init,
|
||||
dnl ----------------------------------------------
|
||||
dnl Choice SSL library
|
||||
dnl ----------------------------------------------
|
||||
auth_lib=na
|
||||
nettle_lib=no
|
||||
|
||||
dnl
|
||||
dnl nettle library
|
||||
dnl
|
||||
AC_MSG_CHECKING([s3fs build with nettle(GnuTLS)])
|
||||
AC_ARG_WITH(
|
||||
nettle,
|
||||
[AS_HELP_STRING([--with-nettle], [s3fs build with nettle in GnuTLS(default no)])],
|
||||
[
|
||||
AS_HELP_STRING(
|
||||
[--enable-nss-init],
|
||||
[Inilializing libcurl with NSS (default is no)]
|
||||
)
|
||||
],
|
||||
[
|
||||
case "${enableval}" in
|
||||
case "${withval}" in
|
||||
yes)
|
||||
AC_MSG_RESULT(yes)
|
||||
nss_init_enabled=yes
|
||||
nettle_lib=yes
|
||||
;;
|
||||
*)
|
||||
AC_MSG_RESULT(no)
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[
|
||||
AC_MSG_RESULT(no)
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl use openssl library for ssl
|
||||
dnl
|
||||
AC_MSG_CHECKING([s3fs build with OpenSSL])
|
||||
AC_ARG_WITH(
|
||||
openssl,
|
||||
[AS_HELP_STRING([--with-openssl], [s3fs build with OpenSSL(default is no)])],
|
||||
[
|
||||
case "${withval}" in
|
||||
yes)
|
||||
AC_MSG_RESULT(yes)
|
||||
AS_IF(
|
||||
[test $nettle_lib = no],
|
||||
[auth_lib=openssl],
|
||||
[AC_MSG_ERROR([could not set openssl with nettle, nettle is only for gnutls library])])
|
||||
;;
|
||||
*)
|
||||
AC_MSG_RESULT(no)
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[
|
||||
AC_MSG_RESULT(no)
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl use GnuTLS library for ssl
|
||||
dnl
|
||||
AC_MSG_CHECKING([s3fs build with GnuTLS])
|
||||
AC_ARG_WITH(
|
||||
gnutls,
|
||||
[AS_HELP_STRING([--with-gnutls], [s3fs build with GnuTLS(default is no)])],
|
||||
[
|
||||
case "${withval}" in
|
||||
yes)
|
||||
AC_MSG_RESULT(yes)
|
||||
AS_IF(
|
||||
[test $auth_lib = na],
|
||||
[
|
||||
AS_IF(
|
||||
[test $nettle_lib = no],
|
||||
[auth_lib=gnutls],
|
||||
[auth_lib=nettle])
|
||||
],
|
||||
[AC_MSG_ERROR([could not set gnutls because already set another ssl library])])
|
||||
;;
|
||||
*)
|
||||
AC_MSG_RESULT(no)
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[
|
||||
AC_MSG_RESULT(no)
|
||||
])
|
||||
|
||||
dnl
|
||||
dnl use nss library for ssl
|
||||
dnl
|
||||
AC_MSG_CHECKING([s3fs build with NSS])
|
||||
AC_ARG_WITH(
|
||||
nss,
|
||||
[AS_HELP_STRING([--with-nss], [s3fs build with NSS(default is no)])],
|
||||
[
|
||||
case "${withval}" in
|
||||
yes)
|
||||
AC_MSG_RESULT(yes)
|
||||
AS_IF(
|
||||
[test $auth_lib = na],
|
||||
[
|
||||
AS_IF(
|
||||
[test $nettle_lib = no],
|
||||
[auth_lib=nss],
|
||||
[AC_MSG_ERROR([could not set openssl with nettle, nettle is only for gnutls library])])
|
||||
],
|
||||
[AC_MSG_ERROR([could not set nss because already set another ssl library])])
|
||||
;;
|
||||
*)
|
||||
AC_MSG_RESULT(no)
|
||||
nss_init_enabled=no
|
||||
;;
|
||||
esac
|
||||
],
|
||||
[
|
||||
AC_MSG_RESULT(no)
|
||||
nss_init_enabled=no
|
||||
])
|
||||
|
||||
AS_IF(
|
||||
[test $nss_init_enabled = yes],
|
||||
[
|
||||
AC_DEFINE(NSS_INIT_ENABLED, 1)
|
||||
AC_CHECK_LIB(nss3, NSS_NoDB_Init, , [AC_MSG_ERROR(not found NSS libraries)])
|
||||
AC_CHECK_LIB(plds4, PL_ArenaFinish, , [AC_MSG_ERROR(not found PL_ArenaFinish)])
|
||||
AC_CHECK_LIB(nspr4, PR_Cleanup, , [AC_MSG_ERROR(not found PR_Cleanup)])
|
||||
AC_CHECK_HEADER(nss.h, , [AC_MSG_ERROR(not found nss.h)])
|
||||
AC_CHECK_HEADER(nspr4/prinit.h, , [AC_MSG_ERROR(not found prinit.h)])
|
||||
AC_PATH_PROG(NSSCONFIG, [nss-config], no)
|
||||
AS_IF(
|
||||
[test $NSSCONFIG = no],
|
||||
[
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS -I/usr/include/nss3"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnss3"
|
||||
],
|
||||
[
|
||||
addcflags=`nss-config --cflags`
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS $addcflags"
|
||||
dnl addlib=`nss-config --libs`
|
||||
dnl DEPS_LIBS="$DEPS_LIBS $addlib"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnss3"
|
||||
])
|
||||
AC_PATH_PROG(NSPRCONFIG, [nspr-config], no)
|
||||
AS_IF(
|
||||
[test $NSPRCONFIG = no],
|
||||
[
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS -I/usr/include/nspr4"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnspr4 -lplds4"
|
||||
],
|
||||
[
|
||||
addcflags=`nspr-config --cflags`
|
||||
DEPS_CFLAGS="$DEPS_CFLAGS $addcflags"
|
||||
dnl addlib=`nspr-config --libs`
|
||||
dnl DEPS_LIBS="$DEPS_LIBS $addlib"
|
||||
DEPS_LIBS="$DEPS_LIBS -lnspr4 -lplds4"
|
||||
])
|
||||
])
|
||||
[test $auth_lib = na],
|
||||
AS_IF(
|
||||
[test $nettle_lib = no],
|
||||
[auth_lib=openssl],
|
||||
[AC_MSG_ERROR([could not set nettle without GnuTLS library])]
|
||||
)
|
||||
)
|
||||
|
||||
AS_UNSET(nss_enabled)
|
||||
dnl
|
||||
dnl For PKG_CONFIG before checking nss/gnutls.
|
||||
dnl this is redundant checking, but we need checking before following.
|
||||
dnl
|
||||
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])
|
||||
|
||||
AC_MSG_CHECKING([compile s3fs with])
|
||||
case "${auth_lib}" in
|
||||
openssl)
|
||||
AC_MSG_RESULT(OpenSSL)
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
|
||||
;;
|
||||
gnutls)
|
||||
AC_MSG_RESULT(GnuTLS-gcrypt)
|
||||
gnutls_nettle=""
|
||||
AC_CHECK_LIB(gnutls, gcry_control, [gnutls_nettle=0])
|
||||
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
|
||||
AS_IF([test $gnutls_nettle = 0],
|
||||
[
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
|
||||
LIBS="-lgnutls -lgcrypt $LIBS"
|
||||
AC_MSG_CHECKING([gnutls is build with])
|
||||
AC_MSG_RESULT(gcrypt)
|
||||
],
|
||||
[AC_MSG_ERROR([GnuTLS found, but gcrypt not found])])
|
||||
;;
|
||||
nettle)
|
||||
AC_MSG_RESULT(GnuTLS-nettle)
|
||||
gnutls_nettle=""
|
||||
AC_CHECK_LIB(gnutls, nettle_MD5Init, [gnutls_nettle=1])
|
||||
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
|
||||
AS_IF([test $gnutls_nettle = 1],
|
||||
[
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
|
||||
LIBS="-lgnutls -lnettle $LIBS"
|
||||
AC_MSG_CHECKING([gnutls is build with])
|
||||
AC_MSG_RESULT(nettle)
|
||||
],
|
||||
[AC_MSG_ERROR([GnuTLS found, but nettle not found])])
|
||||
;;
|
||||
nss)
|
||||
AC_MSG_RESULT(NSS)
|
||||
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
|
||||
;;
|
||||
*)
|
||||
AC_MSG_ERROR([unknown ssl library type.])
|
||||
;;
|
||||
esac
|
||||
|
||||
AM_CONDITIONAL([USE_SSL_OPENSSL], [test "$auth_lib" = openssl])
|
||||
AM_CONDITIONAL([USE_SSL_GNUTLS], [test "$auth_lib" = gnutls -o "$auth_lib" = nettle])
|
||||
AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
|
||||
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl check functions
|
||||
dnl ----------------------------------------------
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS([malloc_trim])
|
||||
|
||||
dnl clock_gettime function(macos)
|
||||
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
|
||||
AC_CHECK_FUNCS([clock_gettime])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl check symbols/macros/enums
|
||||
dnl ----------------------------------------------
|
||||
dnl PTHREAD_MUTEX_RECURSIVE
|
||||
AC_MSG_CHECKING([pthread mutex recursive])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <pthread.h>]],
|
||||
[[int i = PTHREAD_MUTEX_RECURSIVE;]])
|
||||
],
|
||||
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE, [Define if you have PTHREAD_MUTEX_RECURSIVE])
|
||||
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE)
|
||||
],
|
||||
[AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <pthread.h>]],
|
||||
[[int i = PTHREAD_MUTEX_RECURSIVE_NP;]])
|
||||
],
|
||||
[AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE_NP, [Define if you have PTHREAD_MUTEX_RECURSIVE_NP])
|
||||
AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE_NP)
|
||||
],
|
||||
[AC_MSG_ERROR([do not have PTHREAD_MUTEX_RECURSIVE symbol])])
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl check CURLoption
|
||||
dnl ----------------------------------------------
|
||||
dnl CURLOPT_TCP_KEEPALIVE (is supported by 7.25.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_TCP_KEEPALIVE])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_TCP_KEEPALIVE;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 1, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_TCP_KEEPALIVE, 0, [Define to 1 if libcurl has CURLOPT_TCP_KEEPALIVE CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_SSL_ENABLE_ALPN (is supported by 7.36.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_SSL_ENABLE_ALPN])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_SSL_ENABLE_ALPN;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 1, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_SSL_ENABLE_ALPN, 0, [Define to 1 if libcurl has CURLOPT_SSL_ENABLE_ALPN CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl CURLOPT_KEEP_SENDING_ON_ERROR (is supported by 7.51.0 and later)
|
||||
AC_MSG_CHECKING([checking CURLOPT_KEEP_SENDING_ON_ERROR])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([[#include <curl/curl.h>]],
|
||||
[[CURLoption opt = CURLOPT_KEEP_SENDING_ON_ERROR;]])
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 1, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(yes)
|
||||
],
|
||||
[AC_DEFINE(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR, 0, [Define to 1 if libcurl has CURLOPT_KEEP_SENDING_ON_ERROR CURLoption])
|
||||
AC_MSG_RESULT(no)
|
||||
]
|
||||
)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl output files
|
||||
dnl ----------------------------------------------
|
||||
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl short commit hash
|
||||
dnl ----------------------------------------------
|
||||
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
|
||||
AS_IF([test -d .git], [DOTGITDIR=yes], [DOTGITDIR=no])
|
||||
|
||||
AC_MSG_CHECKING([github short commit hash])
|
||||
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
|
||||
GITCOMMITHASH=`git rev-parse --short HEAD`
|
||||
elif test -f default_commit_hash; then
|
||||
GITCOMMITHASH=`cat default_commit_hash`
|
||||
else
|
||||
GITCOMMITHASH="unknown"
|
||||
fi
|
||||
AC_MSG_RESULT([${GITCOMMITHASH}])
|
||||
|
||||
AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl put
|
||||
dnl ----------------------------------------------
|
||||
AC_OUTPUT
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl end configuration
|
||||
dnl ----------------------------------------------
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
@ -1 +1,21 @@
|
||||
######################################################################
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
dist_man1_MANS = man/s3fs.1
|
||||
|
||||
|
||||
346
doc/man/s3fs.1
346
doc/man/s3fs.1
@ -5,12 +5,25 @@ S3FS \- FUSE-based file system backed by Amazon S3
|
||||
.SS mounting
|
||||
.TP
|
||||
\fBs3fs bucket[:/path] mountpoint \fP [options]
|
||||
.TP
|
||||
\fBs3fs mountpoint \fP [options (must specify bucket= option)]
|
||||
.SS unmounting
|
||||
.TP
|
||||
\fBumount mountpoint
|
||||
For root.
|
||||
.TP
|
||||
\fBfusermount -u mountpoint
|
||||
For unprivileged user.
|
||||
.SS utility mode (remove interrupted multipart uploading objects)
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-list (-u) bucket
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-abort[=all | =<expire date format>] bucket
|
||||
.SH DESCRIPTION
|
||||
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
|
||||
.SH AUTHENTICATION
|
||||
s3fs supports the standard AWS credentials file (https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
|
||||
Alternatively, s3fs supports a custom passwd file. Only AWS credentials file format can be used when AWS session token is required.
|
||||
The s3fs password file has this format (use this format if you have only one set of credentials):
|
||||
.RS 4
|
||||
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
@ -26,6 +39,8 @@ Password files can be stored in two locations:
|
||||
\fB/etc/passwd-s3fs\fP [0640]
|
||||
\fB$HOME/.passwd-s3fs\fP [0600]
|
||||
.RE
|
||||
.PP
|
||||
s3fs also recognizes the \fBAWSACCESSKEYID\fP and \fBAWSSECRETACCESSKEY\fP environment variables.
|
||||
.SH OPTIONS
|
||||
.SS "general options"
|
||||
.TP
|
||||
@ -39,143 +54,366 @@ print version
|
||||
FUSE foreground option - do not run as daemon.
|
||||
.TP
|
||||
\fB\-s\fR
|
||||
FUSE singlethreaded option (disables multi-threaded operation)
|
||||
FUSE single-threaded option (disables multi-threaded operation)
|
||||
.SS "mount options"
|
||||
.TP
|
||||
All s3fs options must given in the form where "opt" is:
|
||||
<option_name>=<option_value>
|
||||
.TP
|
||||
\fB\-o\fR bucket
|
||||
if it is not specified bucket name (and path) in command line, must specify this option after \-o option for bucket name.
|
||||
.TP
|
||||
\fB\-o\fR default_acl (default="private")
|
||||
the default canned acl to apply to all written S3 objects, e.g., "public-read".
|
||||
Any created files will have this canned acl.
|
||||
Any updated files will also have this canned acl applied!
|
||||
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
|
||||
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for the full list of canned acls.
|
||||
.TP
|
||||
\fB\-o\fR prefix (default="") (coming soon!)
|
||||
a prefix to append to all S3 objects.
|
||||
.TP
|
||||
\fB\-o\fR retries (default="2")
|
||||
\fB\-o\fR retries (default="5")
|
||||
number of times to retry a failed S3 transaction.
|
||||
.TP
|
||||
\fB\-o\fR use_cache (default="" which means disabled)
|
||||
local folder to use for local file cache.
|
||||
.TP
|
||||
\fB\-o\fR check_cache_dir_exist (default is disable)
|
||||
If use_cache is set, check if the cache directory exists.
|
||||
If this option is not specified, it will be created at runtime when the cache directory does not exist.
|
||||
.TP
|
||||
\fB\-o\fR del_cache - delete local file cache
|
||||
delete local file cache when s3fs starts and exits.
|
||||
.TP
|
||||
\fB\-o\fR storage_class (default="standard")
|
||||
store object with specified storage class.
|
||||
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, glacier, and deep_archive.
|
||||
.TP
|
||||
\fB\-o\fR use_rrs (default is disable)
|
||||
use Amazon's Reduced Redundancy Storage.
|
||||
this option can not be specified with use_sse.
|
||||
(can specify use_rrs=1 for old version)
|
||||
this option has been replaced by new storage_class option.
|
||||
.TP
|
||||
\fB\-o\fR use_sse (default is disable)
|
||||
use Amazon's Server-Side Encryption.
|
||||
this option can not be specified with use_rrs.
|
||||
(can specify use_sse=1 for old version)
|
||||
Specify three types of Amazon's Server-Side Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
|
||||
You can specify "use_sse" or "use_sse=1" enables SSE-S3 type (use_sse=1 is old type parameter).
|
||||
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>" (only <custom key file path> specified is old type parameter).
|
||||
You can use "c" for short "custom".
|
||||
The custom key file must be 600 permission. The file can have some lines, each line is one SSE-C key.
|
||||
The first line in file is used as Customer-Provided Encryption Keys for uploading and changing headers etc.
|
||||
If there are some keys after first line, those are used downloading object which are encrypted by not first key.
|
||||
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
If you specify "custom" ("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment. (AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
|
||||
This option is used to decide the SSE type.
|
||||
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
|
||||
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
|
||||
You can use "k" for short "kmsid".
|
||||
If you can specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:" (or "k:").
|
||||
If you specify only "kmsid" ("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
|
||||
You must be careful about that you can not use the KMS id which is not same EC2 region.
|
||||
.TP
|
||||
\fB\-o\fR load_sse_c - specify SSE-C keys
|
||||
Specify the custom-provided encryption keys file path for decrypting at downloading.
|
||||
If you use the custom-provided encryption key at uploading, you specify with "use_sse=custom".
|
||||
The file has many lines, one line means one custom key.
|
||||
So that you can keep all SSE-C keys in file, that is SSE-C key history.
|
||||
AWSSSECKEYS environment is as same as this file contents.
|
||||
.TP
|
||||
\fB\-o\fR passwd_file (default="")
|
||||
specify the path to the password file, which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
|
||||
.TP
|
||||
\fB\-o\fR ahbe_conf (default="" which means disabled)
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file(object) extension.
|
||||
This option specifies the configuration file path which file is the additional HTTP header by file (object) extension.
|
||||
The configuration file format is below:
|
||||
-----------
|
||||
line = [file suffix] HTTP-header [HTTP-values]
|
||||
file suffix = file(object) suffix, if this field is empty, it means "*"(all object).
|
||||
line = [file suffix or regex] HTTP-header [HTTP-values]
|
||||
file suffix = file (object) suffix, if this field is empty, it means "reg:(.*)".(=all object).
|
||||
regex = regular expression to match the file (object) path. this type starts with "reg:" prefix.
|
||||
HTTP-header = additional HTTP header name
|
||||
HTTP-values = additional HTTP header value
|
||||
-----------
|
||||
Sample:
|
||||
-----------
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
X-S3FS-MYHTTPHEAD myvalue
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
|
||||
-----------
|
||||
A sample configuration file is uploaded in "test" directory.
|
||||
If you specify this option for set "Content-Encoding" HTTP header, please take care for RFC 2616.
|
||||
.TP
|
||||
\fB\-o\fR profile (default="default")
|
||||
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
|
||||
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
|
||||
.TP
|
||||
\fB\-o\fR public_bucket (default="" which means disabled)
|
||||
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
|
||||
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
|
||||
.TP
|
||||
\fB\-o\fR connect_timeout (default="10" seconds)
|
||||
\fB\-o\fR connect_timeout (default="300" seconds)
|
||||
time to wait for connection before giving up.
|
||||
.TP
|
||||
\fB\-o\fR readwrite_timeout (default="30" seconds)
|
||||
\fB\-o\fR readwrite_timeout (default="120" seconds)
|
||||
time to wait between read/write activity before giving up.
|
||||
.TP
|
||||
\fB\-o\fR max_stat_cache_size (default="10000" entries (about 4MB))
|
||||
maximum number of entries in the stat cache
|
||||
\fB\-o\fR list_object_max_keys (default="1000")
|
||||
specify the maximum number of keys returned by S3 list object API. The default is 1000. you can set this value to 1000 or more.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_expire (default is no expire)
|
||||
specify expire time(seconds) for entries in the stat cache
|
||||
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
|
||||
maximum number of entries in the stat cache and symbolic link cache.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_expire (default is 900)
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time indicates the time since cached.
|
||||
.TP
|
||||
\fB\-o\fR stat_cache_interval_expire (default is 900)
|
||||
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is based on the time from the last access time of those cache.
|
||||
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
|
||||
.TP
|
||||
\fB\-o\fR enable_noobj_cache (default is disable)
|
||||
enable cache entries for the object which does not exist.
|
||||
s3fs always has to check whether file(or sub directory) exists under object(path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
|
||||
s3fs always has to check whether file (or sub directory) exists under object (path) when s3fs does some command, since s3fs has recognized a directory which does not exist and has files or sub directories under itself.
|
||||
It increases ListBucket request and makes performance bad.
|
||||
You can specify this option for performance, s3fs memorizes in stat cache that the object(file or directory) does not exist.
|
||||
You can specify this option for performance, s3fs memorizes in stat cache that the object (file or directory) does not exist.
|
||||
.TP
|
||||
\fB\-o\fR nodnscache - disable dns cache.
|
||||
s3fs is always using dns cache, this option make dns cache disable.
|
||||
\fB\-o\fR no_check_certificate (by default this option is disabled)
|
||||
server certificate won't be checked against the available certificate authorities.
|
||||
.TP
|
||||
\fB\-o\fR nosscache - disable ssl session cache.
|
||||
s3fs is always using ssl session cache, this option make ssl session cache disable.
|
||||
\fB\-o\fR ssl_verify_hostname (default="2")
|
||||
When 0, do not verify the SSL certificate against the hostname.
|
||||
.TP
|
||||
\fB\-o\fR nodnscache - disable DNS cache.
|
||||
s3fs is always using DNS cache, this option make DNS cache disable.
|
||||
.TP
|
||||
\fB\-o\fR nosscache - disable SSL session cache.
|
||||
s3fs is always using SSL session cache, this option make SSL session cache disable.
|
||||
.TP
|
||||
\fB\-o\fR multireq_max (default="20")
|
||||
maximum number of parallel request for listing objects.
|
||||
.TP
|
||||
\fB\-o\fR parallel_count (default="5")
|
||||
number of parallel request for uploading big objects.
|
||||
s3fs uploads large object(over 20MB) by multipart post request, and sends parallel requests.
|
||||
s3fs uploads large object (over 20MB) by multipart post request, and sends parallel requests.
|
||||
This option limits parallel request count which s3fs requests at once.
|
||||
It is necessary to set this value depending on a CPU and a network band.
|
||||
.TP
|
||||
\fB\-o\fR fd_page_size(default="52428800"(50MB))
|
||||
number of internal management page size for each file descriptor.
|
||||
For delayed reading and writing by s3fs, s3fs manages pages which is separated from object. Each pages has a status that data is already loaded(or not loaded yet).
|
||||
This option should not be changed when you don't have a trouble with performance.
|
||||
\fB\-o\fR multipart_size (default="10")
|
||||
part size, in MB, for each multipart request.
|
||||
The minimum value is 5 MB and the maximum value is 5 GB.
|
||||
.TP
|
||||
\fB\-o\fR url (default="http://s3.amazonaws.com")
|
||||
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
|
||||
\fB\-o\fR multipart_copy_size (default="512")
|
||||
part size, in MB, for each multipart copy request, used for
|
||||
renames and mixupload.
|
||||
The minimum value is 5 MB and the maximum value is 5 GB.
|
||||
Must be at least 512 MB to copy the maximum 5 TB object size
|
||||
but lower values may improve performance.
|
||||
.TP
|
||||
\fB\-o\fR max_dirty_data (default="5120")
|
||||
Flush dirty data to S3 after a certain number of MB written.
|
||||
The minimum value is 50 MB. -1 value means disable.
|
||||
Cannot be used with nomixupload.
|
||||
.TP
|
||||
\fB\-o\fR ensure_diskfree (default 0)
|
||||
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
|
||||
s3fs makes file for downloading, uploading and caching files.
|
||||
If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
|
||||
.TP
|
||||
\fB\-o\fR multipart_threshold (default="25")
|
||||
threshold, in MB, to use multipart upload instead of
|
||||
single-part. Must be at least 5 MB.
|
||||
.TP
|
||||
\fB\-o\fR singlepart_copy_limit (default="512")
|
||||
maximum size, in MB, of a single-part copy before trying
|
||||
multipart copy.
|
||||
.TP
|
||||
\fB\-o\fR host (default="https://s3.amazonaws.com")
|
||||
Set a non-Amazon host, e.g., https://example.com.
|
||||
.TP
|
||||
\fB\-o\fR servicepath (default="/")
|
||||
Set a service path when the non-Amazon host requires a prefix.
|
||||
.TP
|
||||
\fB\-o\fR url (default="https://s3.amazonaws.com")
|
||||
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
|
||||
If you do not use https, please specify the URL with the url option.
|
||||
.TP
|
||||
\fB\-o\fR endpoint (default="us-east-1")
|
||||
sets the endpoint to use on signature version 4.
|
||||
If this option is not specified, s3fs uses "us-east-1" region as the default.
|
||||
If the s3fs could not connect to the region specified by this option, s3fs could not run.
|
||||
But if you do not specify this option, and if you can not connect with the default region, s3fs will retry to automatically connect to the other region.
|
||||
So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
|
||||
.TP
|
||||
\fB\-o\fR sigv2 (default is signature version 4 falling back to version 2)
|
||||
sets signing AWS requests by using only signature version 2.
|
||||
.TP
|
||||
\fB\-o\fR sigv4 (default is signature version 4 falling back to version 2)
|
||||
sets signing AWS requests by using only signature version 4.
|
||||
.TP
|
||||
\fB\-o\fR mp_umask (default is "0000")
|
||||
sets umask for the mount point directory.
|
||||
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
|
||||
In the opposite case s3fs allows access to all users as the default.
|
||||
But if you set the allow_other with this option, you can control the permissions of the mount point by this option like umask.
|
||||
.TP
|
||||
\fB\-o\fR umask (default is "0000")
|
||||
sets umask for files under the mountpoint. This can allow
|
||||
users other than the mounting user to read and write to files
|
||||
that they did not create.
|
||||
.TP
|
||||
\fB\-o\fR nomultipart - disable multipart uploads
|
||||
.TP
|
||||
\fB\-o\fR enable_content_md5 ( default is disable )
|
||||
verifying uploaded data without multipart by content-md5 header.
|
||||
Enable to send "Content-MD5" header when uploading a object without multipart posting.
|
||||
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
|
||||
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
|
||||
\fB\-o\fR enable_content_md5 (default is disable)
|
||||
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
|
||||
This can add CPU overhead to transfers.
|
||||
.TP
|
||||
\fB\-o\fR iam_role ( default is no role )
|
||||
set the IAM Role that will supply the credentials from the instance meta-data.
|
||||
\fB\-o\fR ecs (default is disable)
|
||||
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
|
||||
.TP
|
||||
\fB\-o\fR noxmlns - disable registing xml name space.
|
||||
disable registing xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
|
||||
\fB\-o\fR iam_role (default is no IAM role)
|
||||
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
|
||||
.TP
|
||||
\fB\-o\fR imdsv1only (default is to use IMDSv2 with fallback to v1)
|
||||
AWS instance metadata service, used with IAM role authentication,
|
||||
supports the use of an API token. If you're using an IAM role in an
|
||||
environment that does not support IMDSv2, setting this flag will skip
|
||||
retrieval and usage of the API token when retrieving IAM credentials.
|
||||
|
||||
\fB\-o\fR ibm_iam_auth (default is not using IBM IAM authentication)
|
||||
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
|
||||
.TP
|
||||
\fB\-o\fR ibm_iam_endpoint (default is https://iam.bluemix.net)
|
||||
Sets the URL to use for IBM IAM authentication.
|
||||
.TP
|
||||
\fB\-o\fR use_xattr (default is not handling the extended attribute)
|
||||
Enable to handle the extended attribute (xattrs).
|
||||
If you set this option, you can use the extended attribute.
|
||||
For example, encfs and ecryptfs need to support the extended attribute.
|
||||
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
|
||||
.TP
|
||||
\fB\-o\fR noxmlns - disable registering xml name space.
|
||||
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
|
||||
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
|
||||
.TP
|
||||
\fB\-o\fR nomixupload - disable copy in multipart uploads.
|
||||
Disable to use PUT (copy api) when multipart uploading large size objects.
|
||||
By default, when doing multipart upload, the range of unchanged data will use PUT (copy api) whenever possible.
|
||||
When nocopyapi or norenameapi is specified, use of PUT (copy api) is invalidated even if this option is not specified.
|
||||
.TP
|
||||
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
If you set this option, s3fs do not use PUT with "x-amz-copy-source" (copy api). Because traffic is increased 2-3 times by this option, we do not recommend this.
|
||||
.TP
|
||||
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
|
||||
For a distributed object storage which is compatibility S3 API without PUT(copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
|
||||
If this option is specified with nocopapi, the s3fs ignores it.
|
||||
For a distributed object storage which is compatibility S3 API without PUT (copy api).
|
||||
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command (ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command (ex. mv).
|
||||
If this option is specified with nocopyapi, then s3fs ignores it.
|
||||
.TP
|
||||
\fB\-o\fR use_path_request_style (use legacy API calling style)
|
||||
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
|
||||
.TP
|
||||
\fB\-o\fR listobjectsv2 (use ListObjectsV2)
|
||||
Issue ListObjectsV2 instead of ListObjects, useful on object
|
||||
stores without ListObjects support.
|
||||
.TP
|
||||
\fB\-o\fR noua (suppress User-Agent header)
|
||||
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
|
||||
If this option is specified, s3fs suppresses the output of the User-Agent.
|
||||
.TP
|
||||
\fB\-o\fR cipher_suites
|
||||
Customize the list of TLS cipher suites. Expects a colon separated list of cipher suite names.
|
||||
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
|
||||
https://curl.haxx.se/docs/ssl-ciphers.html
|
||||
.TP
|
||||
\fB\-o\fR instance_name
|
||||
The instance name of the current s3fs mountpoint.
|
||||
This name will be added to logging messages and user agent headers sent by s3fs.
|
||||
.TP
|
||||
\fB\-o\fR complement_stat (complement lack of file/directory mode)
|
||||
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
|
||||
As default, s3fs does not complements stat information for a object, then the object will not be able to be allowed to list/modify.
|
||||
.TP
|
||||
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
|
||||
As a default, s3fs supports objects of the directory type as much as possible and recognizes them as directories.
|
||||
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and there is a file object that does not have a directory object but contains that directory path.
|
||||
s3fs needs redundant communication to support all these directory types.
|
||||
The object as the directory created by s3fs is "dir/".
|
||||
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
|
||||
This option is used to give this restriction to s3fs.
|
||||
However, if there is a directory object other than "dir/" in the bucket, specifying this option is not recommended.
|
||||
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
|
||||
Please use this option when the directory in the bucket is only "dir/" object.
|
||||
.TP
|
||||
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
|
||||
S3 requires all object names to be valid UTF-8. But some
|
||||
clients, notably Windows NFS clients, use their own encoding.
|
||||
This option re-encodes invalid UTF-8 object names into valid
|
||||
UTF-8 by mapping offending codes into a 'private' codepage of the
|
||||
Unicode set.
|
||||
Useful on clients not using UTF-8 as their file system encoding.
|
||||
.TP
|
||||
\fB\-o\fR use_session_token - indicate that session token should be provided.
|
||||
If credentials are provided by environment variables this switch
|
||||
forces presence check of AWSSESSIONTOKEN variable.
|
||||
Otherwise an error is returned.
|
||||
.TP
|
||||
\fB\-o\fR requester_pays (default is disable)
|
||||
This option instructs s3fs to enable requests involving Requester Pays buckets (It includes the 'x-amz-request-payer=requester' entry in the request header).
|
||||
.TP
|
||||
\fB\-o\fR mime (default is "/etc/mime.types")
|
||||
Specify the path of the mime.types file.
|
||||
If this option is not specified, the existence of "/etc/mime.types" is checked, and that file is loaded as mime information.
|
||||
If this file does not exist on macOS, then "/etc/apache2/mime.types" is checked as well.
|
||||
.TP
|
||||
\fB\-o\fR logfile - specify the log output file.
|
||||
s3fs outputs the log file to syslog. Alternatively, if s3fs is started with the "-f" option specified, the log will be output to the stdout/stderr.
|
||||
You can use this option to specify the log file that s3fs outputs.
|
||||
If you specify a log file with this option, it will reopen the log file when s3fs receives a SIGHUP signal. You can use the SIGHUP signal for log rotation.
|
||||
.TP
|
||||
\fB\-o\fR dbglevel (default="crit")
|
||||
Set the debug message level. set value as crit (critical), err (error), warn (warning), info (information) to debug level. default debug level is critical.
|
||||
If s3fs run with "-d" option, the debug level is set information.
|
||||
When s3fs catch the signal SIGUSR2, the debug level is bumpup.
|
||||
.TP
|
||||
\fB\-o\fR curldbg - put curl debug message
|
||||
Put the debug message from libcurl when this option is specified.
|
||||
Specify "normal" or "body" for the parameter.
|
||||
If the parameter is omitted, it is the same as "normal".
|
||||
If "body" is specified, some API communication body data will be output in addition to the debug message output as "normal".
|
||||
.TP
|
||||
\fB\-o\fR no_time_stamp_msg - no time stamp in debug message
|
||||
The time stamp is output to the debug message by default.
|
||||
If this option is specified, the time stamp will not be output in the debug message.
|
||||
It is the same even if the environment variable "S3FS_MSGTIMESTAMP" is set to "no".
|
||||
.TP
|
||||
\fB\-o\fR set_check_cache_sigusr1 (default is stdout)
|
||||
If the cache is enabled, you can check the integrity of the cache file and the cache file's stats info file.
|
||||
This option is specified and when sending the SIGUSR1 signal to the s3fs process checks the cache status at that time.
|
||||
This option can take a file path as parameter to output the check result to that file.
|
||||
The file path parameter can be omitted. If omitted, the result will be output to stdout or syslog.
|
||||
.SS "utility mode options"
|
||||
.TP
|
||||
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
|
||||
Lists multipart incomplete objects uploaded to the specified bucket.
|
||||
.TP
|
||||
\fB\-\-incomplete\-mpu\-abort\fR all or date format (default="24H")
|
||||
Delete the multipart incomplete object uploaded to the specified bucket.
|
||||
If "all" is specified for this option, all multipart incomplete objects will be deleted.
|
||||
If you specify no argument as an option, objects older than 24 hours (24H) will be deleted (This is the default value).
|
||||
You can specify an optional date format.
|
||||
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
|
||||
For example, "1Y6M10D12h30m30s".
|
||||
.SH FUSE/MOUNT OPTIONS
|
||||
.TP
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
.TP
|
||||
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
|
||||
.SH NOTES
|
||||
.TP
|
||||
Maximum file size=64GB (limited by s3fs, not Amazon).
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
.TP
|
||||
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
|
||||
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses MD5 checksums to minimize downloads from S3.
|
||||
.TP
|
||||
The folder specified by use_cache is just a local cache. It can be deleted at any time. s3fs rebuilds it on demand.
|
||||
.TP
|
||||
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
|
||||
Local file caching works by calculating and comparing MD5 checksums (ETag HTTP header).
|
||||
.TP
|
||||
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
|
||||
.SH SEE ALSO
|
||||
fuse(8), mount(8), fusermount(1), fstab(5)
|
||||
.SH BUGS
|
||||
Due to S3's "eventual consistency" limitations, file creation can and will occasionally fail. Even after a successful create, subsequent reads can fail for an indeterminate time, even after one or more successful reads. Create and read enough files and you will eventually encounter this failure. This is not a flaw in s3fs and it is not something a FUSE wrapper like s3fs can work around. The retries option does not address this issue. Your application must either tolerate or compensate for these failures, for example by retrying creates or reads.
|
||||
.SH AUTHOR
|
||||
|
||||
BIN
doc/s3fs.png
Normal file
BIN
doc/s3fs.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 5.3 KiB |
@ -1,7 +1,99 @@
|
||||
######################################################################
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
bin_PROGRAMS=s3fs
|
||||
|
||||
AM_CPPFLAGS = $(DEPS_CFLAGS)
|
||||
if USE_GNUTLS_NETTLE
|
||||
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
|
||||
endif
|
||||
|
||||
s3fs_SOURCES = \
|
||||
s3fs.cpp \
|
||||
s3fs_global.cpp \
|
||||
s3fs_help.cpp \
|
||||
s3fs_logger.cpp \
|
||||
s3fs_xml.cpp \
|
||||
metaheader.cpp \
|
||||
mpu_util.cpp \
|
||||
mvnode.cpp \
|
||||
curl.cpp \
|
||||
curl_handlerpool.cpp \
|
||||
curl_multi.cpp \
|
||||
curl_util.cpp \
|
||||
bodydata.cpp \
|
||||
s3objlist.cpp \
|
||||
cache.cpp \
|
||||
string_util.cpp \
|
||||
s3fs_util.cpp \
|
||||
fdcache.cpp \
|
||||
fdcache_entity.cpp \
|
||||
fdcache_page.cpp \
|
||||
fdcache_stat.cpp \
|
||||
fdcache_auto.cpp \
|
||||
addhead.cpp \
|
||||
sighandlers.cpp \
|
||||
autolock.cpp \
|
||||
common_auth.cpp
|
||||
if USE_SSL_OPENSSL
|
||||
s3fs_SOURCES += openssl_auth.cpp
|
||||
endif
|
||||
if USE_SSL_GNUTLS
|
||||
s3fs_SOURCES += gnutls_auth.cpp
|
||||
endif
|
||||
if USE_SSL_NSS
|
||||
s3fs_SOURCES += nss_auth.cpp
|
||||
endif
|
||||
|
||||
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common.h
|
||||
s3fs_LDADD = $(DEPS_LIBS)
|
||||
|
||||
noinst_PROGRAMS = \
|
||||
test_curl_util \
|
||||
test_string_util
|
||||
|
||||
test_curl_util_SOURCES = common_auth.cpp curl_util.cpp string_util.cpp test_curl_util.cpp s3fs_global.cpp s3fs_logger.cpp
|
||||
if USE_SSL_OPENSSL
|
||||
test_curl_util_SOURCES += openssl_auth.cpp
|
||||
endif
|
||||
if USE_SSL_GNUTLS
|
||||
test_curl_util_SOURCES += gnutls_auth.cpp
|
||||
endif
|
||||
if USE_SSL_NSS
|
||||
test_curl_util_SOURCES += nss_auth.cpp
|
||||
endif
|
||||
|
||||
test_curl_util_LDADD = $(DEPS_LIBS)
|
||||
|
||||
test_string_util_SOURCES = string_util.cpp test_string_util.cpp s3fs_logger.cpp
|
||||
|
||||
TESTS = \
|
||||
test_curl_util \
|
||||
test_string_util
|
||||
|
||||
clang-tidy:
|
||||
clang-tidy $(s3fs_SOURCES) -- $(DEPS_CFLAGS) $(CPPFLAGS)
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts= fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
278
src/addhead.cpp
Normal file
278
src/addhead.cpp
Normal file
@ -0,0 +1,278 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "addhead.h"
|
||||
#include "curl_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Symbols
|
||||
//-------------------------------------------------------------------
|
||||
#define ADD_HEAD_REGEX "reg:"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
//-------------------------------------------------------------------
|
||||
AdditionalHeader AdditionalHeader::singleton;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AdditionalHeader method
|
||||
//-------------------------------------------------------------------
|
||||
AdditionalHeader::AdditionalHeader()
|
||||
{
|
||||
if(this == AdditionalHeader::get()){
|
||||
is_enable = false;
|
||||
}else{
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
AdditionalHeader::~AdditionalHeader()
|
||||
{
|
||||
if(this == AdditionalHeader::get()){
|
||||
Unload();
|
||||
}else{
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Load(const char* file)
|
||||
{
|
||||
if(!file){
|
||||
S3FS_PRN_WARN("file is NULL.");
|
||||
return false;
|
||||
}
|
||||
Unload();
|
||||
|
||||
std::ifstream AH(file);
|
||||
if(!AH.good()){
|
||||
S3FS_PRN_WARN("Could not open file(%s).", file);
|
||||
return false;
|
||||
}
|
||||
|
||||
// read file
|
||||
std::string line;
|
||||
ADDHEAD *paddhead;
|
||||
while(getline(AH, line)){
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
if(line.empty()){
|
||||
continue;
|
||||
}
|
||||
// load a line
|
||||
std::istringstream ss(line);
|
||||
std::string key; // suffix(key)
|
||||
std::string head; // additional HTTP header
|
||||
std::string value; // header value
|
||||
if(0 == isblank(line[0])){
|
||||
ss >> key;
|
||||
}
|
||||
if(ss){
|
||||
ss >> head;
|
||||
if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
|
||||
value = line.substr(static_cast<int>(ss.tellg()) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
// check it
|
||||
if(head.empty()){
|
||||
if(key.empty()){
|
||||
continue;
|
||||
}
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
|
||||
Unload();
|
||||
return false;
|
||||
}
|
||||
|
||||
paddhead = new ADDHEAD;
|
||||
if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key std::string.", key.c_str());
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
key.erase(0, strlen(ADD_HEAD_REGEX));
|
||||
|
||||
// compile
|
||||
regex_t* preg = new regex_t;
|
||||
int result;
|
||||
if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
|
||||
char errbuf[256];
|
||||
regerror(result, preg, errbuf, sizeof(errbuf));
|
||||
S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
|
||||
delete preg;
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
|
||||
// set
|
||||
paddhead->pregex = preg;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
|
||||
}else{
|
||||
// not regex, directly comparing
|
||||
paddhead->pregex = NULL;
|
||||
paddhead->basestring = key;
|
||||
paddhead->headkey = head;
|
||||
paddhead->headvalue = value;
|
||||
}
|
||||
|
||||
// add list
|
||||
addheadlist.push_back(paddhead);
|
||||
|
||||
// set flag
|
||||
if(!is_enable){
|
||||
is_enable = true;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void AdditionalHeader::Unload()
|
||||
{
|
||||
is_enable = false;
|
||||
|
||||
for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
ADDHEAD *paddhead = *iter;
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
regfree(paddhead->pregex);
|
||||
delete paddhead->pregex;
|
||||
}
|
||||
delete paddhead;
|
||||
}
|
||||
}
|
||||
addheadlist.clear();
|
||||
}
|
||||
|
||||
bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
|
||||
{
|
||||
if(!is_enable){
|
||||
return true;
|
||||
}
|
||||
if(!path){
|
||||
S3FS_PRN_WARN("path is NULL.");
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t pathlength = strlen(path);
|
||||
|
||||
// loop
|
||||
//
|
||||
// [NOTE]
|
||||
// Because to allow duplicate key, and then scanning the entire table.
|
||||
//
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
if(!paddhead){
|
||||
continue;
|
||||
}
|
||||
|
||||
if(paddhead->pregex){
|
||||
// regex
|
||||
regmatch_t match; // not use
|
||||
if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}else{
|
||||
// directly comparing
|
||||
if(paddhead->basestring.length() < pathlength){
|
||||
if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
|
||||
// match -> adding header
|
||||
meta[paddhead->headkey] = paddhead->headvalue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
|
||||
{
|
||||
headers_t meta;
|
||||
|
||||
if(!AddHeader(meta, path)){
|
||||
return list;
|
||||
}
|
||||
for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
|
||||
// Adding header
|
||||
list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
|
||||
}
|
||||
meta.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
return list;
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Dump() const
|
||||
{
|
||||
if(!S3fsLog::IsS3fsLogDbg()){
|
||||
return true;
|
||||
}
|
||||
|
||||
std::ostringstream ssdbg;
|
||||
int cnt = 1;
|
||||
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << std::endl;
|
||||
|
||||
for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
|
||||
const ADDHEAD *paddhead = *iter;
|
||||
|
||||
ssdbg << " [" << cnt << "] = {" << std::endl;
|
||||
|
||||
if(paddhead){
|
||||
if(paddhead->pregex){
|
||||
ssdbg << " type\t\t--->\tregex" << std::endl;
|
||||
}else{
|
||||
ssdbg << " type\t\t--->\tsuffix matching" << std::endl;
|
||||
}
|
||||
ssdbg << " base std::string\t--->\t" << paddhead->basestring << std::endl;
|
||||
ssdbg << " add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << std::endl;
|
||||
}
|
||||
ssdbg << " }" << std::endl;
|
||||
}
|
||||
|
||||
|
||||
ssdbg << "}" << std::endl;
|
||||
|
||||
// print all
|
||||
S3FS_PRN_DBG("%s", ssdbg.str().c_str());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
75
src/addhead.h
Normal file
75
src/addhead.h
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_ADDHEAD_H_
|
||||
#define S3FS_ADDHEAD_H_
|
||||
|
||||
#include <regex.h>
|
||||
|
||||
#include "metaheader.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Structure / Typedef
|
||||
//----------------------------------------------
|
||||
typedef struct add_header{
|
||||
regex_t* pregex; // not NULL means using regex, NULL means comparing suffix directly.
|
||||
std::string basestring;
|
||||
std::string headkey;
|
||||
std::string headvalue;
|
||||
}ADDHEAD;
|
||||
|
||||
typedef std::vector<ADDHEAD *> addheadlist_t;
|
||||
|
||||
//----------------------------------------------
|
||||
// Class AdditionalHeader
|
||||
//----------------------------------------------
|
||||
class AdditionalHeader
|
||||
{
|
||||
private:
|
||||
static AdditionalHeader singleton;
|
||||
bool is_enable;
|
||||
addheadlist_t addheadlist;
|
||||
|
||||
protected:
|
||||
AdditionalHeader();
|
||||
~AdditionalHeader();
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static AdditionalHeader* get() { return &singleton; }
|
||||
|
||||
bool Load(const char* file);
|
||||
void Unload();
|
||||
|
||||
bool AddHeader(headers_t& meta, const char* path) const;
|
||||
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
|
||||
bool Dump() const;
|
||||
};
|
||||
|
||||
#endif // S3FS_ADDHEAD_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
80
src/autolock.cpp
Normal file
80
src/autolock.cpp
Normal file
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "autolock.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class AutoLock
|
||||
//-------------------------------------------------------------------
|
||||
AutoLock::AutoLock(pthread_mutex_t* pmutex, Type type) : auto_mutex(pmutex)
|
||||
{
|
||||
if (type == ALREADY_LOCKED) {
|
||||
is_lock_acquired = false;
|
||||
} else if (type == NO_WAIT) {
|
||||
int result = pthread_mutex_trylock(auto_mutex);
|
||||
if(result == 0){
|
||||
is_lock_acquired = true;
|
||||
}else if(result == EBUSY){
|
||||
is_lock_acquired = false;
|
||||
}else{
|
||||
S3FS_PRN_CRIT("pthread_mutex_trylock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
} else {
|
||||
int result = pthread_mutex_lock(auto_mutex);
|
||||
if(result == 0){
|
||||
is_lock_acquired = true;
|
||||
}else{
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool AutoLock::isLockAcquired() const
|
||||
{
|
||||
return is_lock_acquired;
|
||||
}
|
||||
|
||||
AutoLock::~AutoLock()
|
||||
{
|
||||
if (is_lock_acquired) {
|
||||
int result = pthread_mutex_unlock(auto_mutex);
|
||||
if(result != 0){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
60
src/autolock.h
Normal file
60
src/autolock.h
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_AUTOLOCK_H_
|
||||
#define S3FS_AUTOLOCK_H_
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// AutoLock Class
|
||||
//-------------------------------------------------------------------
|
||||
class AutoLock
|
||||
{
|
||||
public:
|
||||
enum Type {
|
||||
NO_WAIT = 1,
|
||||
ALREADY_LOCKED = 2,
|
||||
NONE = 0
|
||||
};
|
||||
|
||||
private:
|
||||
pthread_mutex_t* const auto_mutex;
|
||||
bool is_lock_acquired;
|
||||
|
||||
private:
|
||||
AutoLock(const AutoLock&);
|
||||
|
||||
public:
|
||||
explicit AutoLock(pthread_mutex_t* pmutex, Type type = NONE);
|
||||
~AutoLock();
|
||||
bool isLockAcquired() const;
|
||||
};
|
||||
|
||||
#endif // S3FS_AUTOLOCK_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
122
src/bodydata.cpp
Normal file
122
src/bodydata.cpp
Normal file
@ -0,0 +1,122 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "bodydata.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
static const int BODYDATA_RESIZE_APPEND_MIN = 1024;
|
||||
static const int BODYDATA_RESIZE_APPEND_MID = 1024 * 1024;
|
||||
static const int BODYDATA_RESIZE_APPEND_MAX = 10 * 1024 * 1024;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions
|
||||
//-------------------------------------------------------------------
|
||||
static size_t adjust_block(size_t bytes, size_t block)
|
||||
{
|
||||
return ((bytes / block) + ((bytes % block) ? 1 : 0)) * block;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class BodyData
|
||||
//-------------------------------------------------------------------
|
||||
bool BodyData::Resize(size_t addbytes)
|
||||
{
|
||||
if(IsSafeSize(addbytes)){
|
||||
return true;
|
||||
}
|
||||
|
||||
// New size
|
||||
size_t need_size = adjust_block((lastpos + addbytes + 1) - bufsize, sizeof(off_t));
|
||||
|
||||
if(BODYDATA_RESIZE_APPEND_MAX < bufsize){
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MAX < need_size ? need_size : BODYDATA_RESIZE_APPEND_MAX);
|
||||
}else if(BODYDATA_RESIZE_APPEND_MID < bufsize){
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MID < need_size ? need_size : BODYDATA_RESIZE_APPEND_MID);
|
||||
}else if(BODYDATA_RESIZE_APPEND_MIN < bufsize){
|
||||
need_size = ((bufsize * 2) < need_size ? need_size : (bufsize * 2));
|
||||
}else{
|
||||
need_size = (BODYDATA_RESIZE_APPEND_MIN < need_size ? need_size : BODYDATA_RESIZE_APPEND_MIN);
|
||||
}
|
||||
// realloc
|
||||
char* newtext;
|
||||
if(NULL == (newtext = (char*)realloc(text, (bufsize + need_size)))){
|
||||
S3FS_PRN_CRIT("not enough memory (realloc returned NULL)");
|
||||
free(text);
|
||||
text = NULL;
|
||||
return false;
|
||||
}
|
||||
text = newtext;
|
||||
bufsize += need_size;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void BodyData::Clear()
|
||||
{
|
||||
if(text){
|
||||
free(text);
|
||||
text = NULL;
|
||||
}
|
||||
lastpos = 0;
|
||||
bufsize = 0;
|
||||
}
|
||||
|
||||
bool BodyData::Append(void* ptr, size_t bytes)
|
||||
{
|
||||
if(!ptr){
|
||||
return false;
|
||||
}
|
||||
if(0 == bytes){
|
||||
return true;
|
||||
}
|
||||
if(!Resize(bytes)){
|
||||
return false;
|
||||
}
|
||||
memcpy(&text[lastpos], ptr, bytes);
|
||||
lastpos += bytes;
|
||||
text[lastpos] = '\0';
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const char* BodyData::str() const
|
||||
{
|
||||
if(!text){
|
||||
static const char* strnull = "";
|
||||
return strnull;
|
||||
}
|
||||
return text;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
72
src/bodydata.h
Normal file
72
src/bodydata.h
Normal file
@ -0,0 +1,72 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_BODYDATA_H_
|
||||
#define S3FS_BODYDATA_H_
|
||||
|
||||
//----------------------------------------------
|
||||
// Class BodyData
|
||||
//----------------------------------------------
|
||||
// memory class for curl write memory callback
|
||||
//
|
||||
class BodyData
|
||||
{
|
||||
private:
|
||||
char* text;
|
||||
size_t lastpos;
|
||||
size_t bufsize;
|
||||
|
||||
private:
|
||||
bool IsSafeSize(size_t addbytes) const
|
||||
{
|
||||
return ((lastpos + addbytes + 1) > bufsize ? false : true);
|
||||
}
|
||||
bool Resize(size_t addbytes);
|
||||
|
||||
public:
|
||||
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
|
||||
~BodyData()
|
||||
{
|
||||
Clear();
|
||||
}
|
||||
|
||||
void Clear();
|
||||
bool Append(void* ptr, size_t bytes);
|
||||
bool Append(void* ptr, size_t blockSize, size_t numBlocks)
|
||||
{
|
||||
return Append(ptr, (blockSize * numBlocks));
|
||||
}
|
||||
const char* str() const;
|
||||
size_t size() const
|
||||
{
|
||||
return lastpos;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // S3FS_BODYDATA_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1080
src/cache.cpp
1080
src/cache.cpp
File diff suppressed because it is too large
Load Diff
254
src/cache.h
254
src/cache.h
@ -1,107 +1,191 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CACHE_H_
|
||||
#define S3FS_CACHE_H_
|
||||
|
||||
#include "common.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Struct
|
||||
// Struct for stats cache
|
||||
//
|
||||
struct stat_cache_entry {
|
||||
struct stat stbuf;
|
||||
unsigned long hit_count;
|
||||
time_t cache_date;
|
||||
headers_t meta;
|
||||
bool isforce;
|
||||
bool noobjcache; // Flag: cache is no object for no listing.
|
||||
struct stat stbuf;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date;
|
||||
headers_t meta;
|
||||
bool isforce;
|
||||
bool noobjcache; // Flag: cache is no object for no listing.
|
||||
unsigned long notruncate; // 0<: not remove automatically at checking truncate
|
||||
|
||||
stat_cache_entry() : hit_count(0), cache_date(0), isforce(false), noobjcache(false) {
|
||||
memset(&stbuf, 0, sizeof(struct stat));
|
||||
meta.clear();
|
||||
}
|
||||
stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L)
|
||||
{
|
||||
memset(&stbuf, 0, sizeof(struct stat));
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
meta.clear();
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, stat_cache_entry*> stat_cache_t; // key=path
|
||||
|
||||
//
|
||||
// Class
|
||||
// Struct for symbolic link cache
|
||||
//
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
struct symlink_cache_entry {
|
||||
std::string link;
|
||||
unsigned long hit_count;
|
||||
struct timespec cache_date; // The function that operates timespec uses the same as Stats
|
||||
|
||||
private:
|
||||
void Clear(void);
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache(void);
|
||||
|
||||
public:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData(void) {
|
||||
return &singleton;
|
||||
}
|
||||
|
||||
// Attribute
|
||||
unsigned long GetCacheSize(void) const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
time_t GetExpireTime(void) const;
|
||||
time_t SetExpireTime(time_t expire);
|
||||
time_t UnsetExpireTime(void);
|
||||
bool SetCacheNoObject(bool flag);
|
||||
bool EnableCacheNoObject(void) {
|
||||
return SetCacheNoObject(true);
|
||||
}
|
||||
bool DisableCacheNoObject(void) {
|
||||
return SetCacheNoObject(false);
|
||||
}
|
||||
bool GetCacheNoObject(void) const {
|
||||
return IsCacheNoObject;
|
||||
}
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL) {
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(std::string& key, struct stat* pst, bool overcheck = true) {
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(std::string& key, headers_t* meta, bool overcheck = true) {
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(std::string& key, const char* etag, bool overcheck = true) {
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(std::string& key);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key);
|
||||
bool DelStat(std::string& key) {
|
||||
return DelStat(key.c_str());
|
||||
symlink_cache_entry() : link(""), hit_count(0)
|
||||
{
|
||||
cache_date.tv_sec = 0;
|
||||
cache_date.tv_nsec = 0;
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<std::string, symlink_cache_entry*> symlink_cache_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class StatCache
|
||||
//-------------------------------------------------------------------
|
||||
// [NOTE] About Symbolic link cache
|
||||
// The Stats cache class now also has a symbolic link cache.
|
||||
// It is possible to take out the Symbolic link cache in another class,
|
||||
// but the cache out etc. should be synchronized with the Stats cache
|
||||
// and implemented in this class.
|
||||
// Symbolic link cache size and timeout use the same settings as Stats
|
||||
// cache. This simplifies user configuration, and from a user perspective,
|
||||
// the symbolic link cache appears to be included in the Stats cache.
|
||||
//
|
||||
class StatCache
|
||||
{
|
||||
private:
|
||||
static StatCache singleton;
|
||||
static pthread_mutex_t stat_cache_lock;
|
||||
stat_cache_t stat_cache;
|
||||
bool IsExpireTime;
|
||||
bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
|
||||
time_t ExpireTime;
|
||||
unsigned long CacheSize;
|
||||
bool IsCacheNoObject;
|
||||
symlink_cache_t symlink_cache;
|
||||
|
||||
private:
|
||||
StatCache();
|
||||
~StatCache();
|
||||
|
||||
void Clear();
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
|
||||
// Truncate stat cache
|
||||
bool TruncateCache();
|
||||
// Truncate symbolic link cache
|
||||
bool TruncateSymlink();
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static StatCache* getStatCacheData()
|
||||
{
|
||||
return &singleton;
|
||||
}
|
||||
|
||||
// Attribute
|
||||
unsigned long GetCacheSize() const;
|
||||
unsigned long SetCacheSize(unsigned long size);
|
||||
time_t GetExpireTime() const;
|
||||
time_t SetExpireTime(time_t expire, bool is_interval = false);
|
||||
time_t UnsetExpireTime();
|
||||
bool SetCacheNoObject(bool flag);
|
||||
bool EnableCacheNoObject()
|
||||
{
|
||||
return SetCacheNoObject(true);
|
||||
}
|
||||
bool DisableCacheNoObject()
|
||||
{
|
||||
return SetCacheNoObject(false);
|
||||
}
|
||||
bool GetCacheNoObject() const
|
||||
{
|
||||
return IsCacheNoObject;
|
||||
}
|
||||
|
||||
// Get stat cache
|
||||
bool GetStat(const std::string& key, struct stat* pst, headers_t* meta, bool overcheck = true, bool* pisforce = NULL)
|
||||
{
|
||||
return GetStat(key, pst, meta, overcheck, NULL, pisforce);
|
||||
}
|
||||
bool GetStat(const std::string& key, struct stat* pst, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, pst, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool GetStat(const std::string& key, headers_t* meta, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, meta, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, NULL, NULL);
|
||||
}
|
||||
bool HasStat(const std::string& key, const char* etag, bool overcheck = true)
|
||||
{
|
||||
return GetStat(key, NULL, NULL, overcheck, etag, NULL);
|
||||
}
|
||||
|
||||
// Cache For no object
|
||||
bool IsNoObjectCache(const std::string& key, bool overcheck = true);
|
||||
bool AddNoObjectCache(const std::string& key);
|
||||
|
||||
// Add stat cache
|
||||
bool AddStat(const std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key, bool lock_already_held = false);
|
||||
bool DelStat(const std::string& key, bool lock_already_held = false)
|
||||
{
|
||||
return DelStat(key.c_str(), lock_already_held);
|
||||
}
|
||||
|
||||
// Cache for symbolic link
|
||||
bool GetSymlink(const std::string& key, std::string& value);
|
||||
bool AddSymlink(const std::string& key, const std::string& value);
|
||||
bool DelSymlink(const char* key, bool lock_already_held = false);
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//
|
||||
bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst, bool forcedir = false);
|
||||
//-------------------------------------------------------------------
|
||||
bool convert_header_to_stat(const char* path, const headers_t& meta, struct stat* pst, bool forcedir = false);
|
||||
|
||||
#endif // S3FS_CACHE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
124
src/common.h
124
src/common.h
@ -1,78 +1,58 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_COMMON_H_
|
||||
#define S3FS_COMMON_H_
|
||||
|
||||
//
|
||||
// Macro
|
||||
//
|
||||
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
|
||||
#include "../config.h"
|
||||
#include "types.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
// for debug
|
||||
#define FPRINT_NEST_SPACE_0 ""
|
||||
#define FPRINT_NEST_SPACE_1 " "
|
||||
#define FPRINT_NEST_SPACE_2 " "
|
||||
#define FPRINT_NEST_CHECK(NEST) \
|
||||
(0 == NEST ? FPRINT_NEST_SPACE_0 : 1 == NEST ? FPRINT_NEST_SPACE_1 : FPRINT_NEST_SPACE_2)
|
||||
|
||||
#define LOWFPRINT(NEST, ...) \
|
||||
printf("%s%s(%d): ", FPRINT_NEST_CHECK(NEST), __func__, __LINE__); \
|
||||
printf(__VA_ARGS__); \
|
||||
printf("\n"); \
|
||||
|
||||
#define FPRINT(NEST, ...) \
|
||||
if(foreground){ \
|
||||
LOWFPRINT(NEST, __VA_ARGS__); \
|
||||
}
|
||||
|
||||
#define FPRINT2(NEST, ...) \
|
||||
if(foreground2){ \
|
||||
LOWFPRINT(NEST, __VA_ARGS__); \
|
||||
}
|
||||
|
||||
#define LOWSYSLOGPRINT(LEVEL, ...) \
|
||||
syslog(LEVEL, __VA_ARGS__);
|
||||
|
||||
#define SYSLOGPRINT(LEVEL, ...) \
|
||||
if(LEVEL <= LOG_CRIT || debug){ \
|
||||
LOWSYSLOGPRINT(LEVEL, __VA_ARGS__); \
|
||||
}
|
||||
|
||||
#define DPRINT(LEVEL, NEST, ...) \
|
||||
FPRINT(NEST, __VA_ARGS__); \
|
||||
SYSLOGPRINT(LEVEL, __VA_ARGS__);
|
||||
|
||||
#define DPRINT2(LEVEL, ...) \
|
||||
FPRINT2(2, __VA_ARGS__); \
|
||||
SYSLOGPRINT(LEVEL, __VA_ARGS__);
|
||||
|
||||
// print debug message
|
||||
#define FPRN(...) FPRINT(0, __VA_ARGS__)
|
||||
#define FPRNN(...) FPRINT(1, __VA_ARGS__)
|
||||
#define FPRNNN(...) FPRINT(2, __VA_ARGS__)
|
||||
#define FPRNINFO(...) FPRINT2(2, __VA_ARGS__)
|
||||
|
||||
// print debug message with putting syslog
|
||||
#define DPRNCRIT(...) DPRINT(LOG_CRIT, 0, __VA_ARGS__)
|
||||
#define DPRN(...) DPRINT(LOG_ERR, 0, __VA_ARGS__)
|
||||
#define DPRNN(...) DPRINT(LOG_DEBUG, 1, __VA_ARGS__)
|
||||
#define DPRNNN(...) DPRINT(LOG_DEBUG, 2, __VA_ARGS__)
|
||||
#define DPRNINFO(...) DPRINT2(LOG_INFO, __VA_ARGS__)
|
||||
|
||||
//
|
||||
// Typedef
|
||||
//
|
||||
typedef std::map<std::string, std::string> headers_t;
|
||||
|
||||
//
|
||||
// Global valiables
|
||||
//
|
||||
extern bool debug;
|
||||
extern bool foreground;
|
||||
extern bool foreground2;
|
||||
extern bool nomultipart;
|
||||
extern std::string program_name;
|
||||
extern std::string service_path;
|
||||
extern std::string host;
|
||||
extern std::string bucket;
|
||||
extern std::string mount_prefix;
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
// TODO: namespace these
|
||||
extern int64_t FIVE_GB;
|
||||
extern off_t MIN_MULTIPART_SIZE;
|
||||
extern bool foreground;
|
||||
extern bool nomultipart;
|
||||
extern bool pathrequeststyle;
|
||||
extern bool complement_stat;
|
||||
extern bool noxmlns;
|
||||
extern std::string program_name;
|
||||
extern std::string service_path;
|
||||
extern std::string s3host;
|
||||
extern std::string bucket;
|
||||
extern std::string mount_prefix;
|
||||
extern std::string endpoint;
|
||||
extern std::string cipher_suites;
|
||||
extern std::string instance_name;
|
||||
extern std::string aws_profile;
|
||||
|
||||
#endif // S3FS_COMMON_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
78
src/common_auth.cpp
Normal file
78
src/common_auth.cpp
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <climits>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function
|
||||
//-------------------------------------------------------------------
|
||||
std::string s3fs_get_content_md5(int fd)
|
||||
{
|
||||
unsigned char* md5;
|
||||
char* base64;
|
||||
std::string Signature;
|
||||
|
||||
if(NULL == (md5 = s3fs_md5_fd(fd, 0, -1))){
|
||||
return std::string("");
|
||||
}
|
||||
if(NULL == (base64 = s3fs_base64(md5, get_md5_digest_length()))){
|
||||
delete[] md5;
|
||||
return std::string(""); // ENOMEM
|
||||
}
|
||||
delete[] md5;
|
||||
|
||||
Signature = base64;
|
||||
delete[] base64;
|
||||
|
||||
return Signature;
|
||||
}
|
||||
|
||||
std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
size_t digestlen = get_sha256_digest_length();
|
||||
unsigned char* sha256;
|
||||
|
||||
if(NULL == (sha256 = s3fs_sha256_fd(fd, start, size))){
|
||||
return std::string("");
|
||||
}
|
||||
|
||||
std::string sha256hex = s3fs_hex(sha256, digestlen);
|
||||
delete[] sha256;
|
||||
|
||||
return sha256hex;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
6523
src/curl.cpp
6523
src/curl.cpp
File diff suppressed because it is too large
Load Diff
748
src/curl.h
748
src/curl.h
@ -1,402 +1,430 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_H_
|
||||
#define S3FS_CURL_H_
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "curl_handlerpool.h"
|
||||
#include "bodydata.h"
|
||||
#include "psemaphore.h"
|
||||
#include "metaheader.h"
|
||||
#include "fdcache_page.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// class BodyData
|
||||
// Avoid dependency on libcurl version
|
||||
//----------------------------------------------
|
||||
// memory class for curl write memory callback
|
||||
// [NOTE]
|
||||
// The following symbols (enum) depend on the version of libcurl.
|
||||
// CURLOPT_TCP_KEEPALIVE 7.25.0 and later
|
||||
// CURLOPT_SSL_ENABLE_ALPN 7.36.0 and later
|
||||
// CURLOPT_KEEP_SENDING_ON_ERROR 7.51.0 and later
|
||||
//
|
||||
class BodyData
|
||||
{
|
||||
private:
|
||||
char* text;
|
||||
size_t lastpos;
|
||||
size_t bufsize;
|
||||
// s3fs uses these, if you build s3fs with the old libcurl,
|
||||
// substitute the following symbols to avoid errors.
|
||||
// If the version of libcurl linked at runtime is old,
|
||||
// curl_easy_setopt results in an error(CURLE_UNKNOWN_OPTION) and
|
||||
// a message is output.
|
||||
//
|
||||
#if defined(HAVE_CURLOPT_TCP_KEEPALIVE) && (HAVE_CURLOPT_TCP_KEEPALIVE == 1)
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE CURLOPT_TCP_KEEPALIVE
|
||||
#else
|
||||
#define S3FS_CURLOPT_TCP_KEEPALIVE static_cast<CURLoption>(213)
|
||||
#endif
|
||||
|
||||
private:
|
||||
bool IsSafeSize(size_t addbytes) const {
|
||||
return ((lastpos + addbytes + 1) > bufsize ? false : true);
|
||||
}
|
||||
bool Resize(size_t addbytes);
|
||||
#if defined(HAVE_CURLOPT_SSL_ENABLE_ALPN) && (HAVE_CURLOPT_SSL_ENABLE_ALPN == 1)
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN CURLOPT_SSL_ENABLE_ALPN
|
||||
#else
|
||||
#define S3FS_CURLOPT_SSL_ENABLE_ALPN static_cast<CURLoption>(226)
|
||||
#endif
|
||||
|
||||
public:
|
||||
BodyData() : text(NULL), lastpos(0), bufsize(0) {}
|
||||
~BodyData() {
|
||||
Clear();
|
||||
}
|
||||
|
||||
void Clear(void);
|
||||
bool Append(void* ptr, size_t bytes);
|
||||
bool Append(void* ptr, size_t blockSize, size_t numBlocks) {
|
||||
return Append(ptr, (blockSize * numBlocks));
|
||||
}
|
||||
const char* str() const;
|
||||
size_t size() const {
|
||||
return lastpos;
|
||||
}
|
||||
};
|
||||
#if defined(HAVE_CURLOPT_KEEP_SENDING_ON_ERROR) && (HAVE_CURLOPT_KEEP_SENDING_ON_ERROR == 1)
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR CURLOPT_KEEP_SENDING_ON_ERROR
|
||||
#else
|
||||
#define S3FS_CURLOPT_KEEP_SENDING_ON_ERROR static_cast<CURLoption>(245)
|
||||
#endif
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility structs & typedefs
|
||||
// Structure / Typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::vector<std::string> etaglist_t;
|
||||
|
||||
// Each part information for Multipart upload
|
||||
struct filepart
|
||||
{
|
||||
bool uploaded; // does finish uploading
|
||||
std::string etag; // expected etag value
|
||||
int fd; // base file(temporary full file) discriptor
|
||||
off_t startpos; // seek fd point for uploading
|
||||
ssize_t size; // uploading size
|
||||
etaglist_t* etaglist; // use only parallel upload
|
||||
int etagpos; // use only parallel upload
|
||||
|
||||
filepart() : uploaded(false), fd(-1), startpos(0), size(-1), etaglist(NULL), etagpos(-1) {}
|
||||
~filepart()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void clear(void)
|
||||
{
|
||||
uploaded = false;
|
||||
etag = "";
|
||||
fd = -1;
|
||||
startpos = 0;
|
||||
size = -1;
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
|
||||
void add_etag_list(etaglist_t* list)
|
||||
{
|
||||
if(list){
|
||||
list->push_back(std::string(""));
|
||||
etaglist = list;
|
||||
etagpos = list->size() - 1;
|
||||
}else{
|
||||
etaglist = NULL;
|
||||
etagpos = - 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// for progress
|
||||
struct case_insensitive_compare_func
|
||||
{
|
||||
bool operator()(const std::string& a, const std::string& b){
|
||||
return strcasecmp(a.c_str(), b.c_str()) < 0;
|
||||
}
|
||||
};
|
||||
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
typedef std::pair<double, double> progress_t;
|
||||
typedef std::map<CURL*, time_t> curltime_t;
|
||||
typedef std::map<CURL*, progress_t> curlprogress_t;
|
||||
|
||||
class S3fsMultiCurl;
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
class S3fsCurl;
|
||||
|
||||
// Prototype function for lazy setup options for curl handle
|
||||
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
|
||||
|
||||
typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
|
||||
// share
|
||||
#define SHARE_MUTEX_DNS 0
|
||||
#define SHARE_MUTEX_SSL_SESSION 1
|
||||
#define SHARE_MUTEX_MAX 2
|
||||
|
||||
// internal use struct for openssl
|
||||
struct CRYPTO_dynlock_value
|
||||
{
|
||||
pthread_mutex_t dyn_mutex;
|
||||
};
|
||||
typedef std::map<std::string, std::string> sseckeymap_t;
|
||||
typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
|
||||
// Class for lapping curl
|
||||
//
|
||||
class S3fsCurl
|
||||
{
|
||||
friend class S3fsMultiCurl;
|
||||
friend class S3fsMultiCurl;
|
||||
|
||||
private:
|
||||
enum REQTYPE {
|
||||
REQTYPE_UNSET = -1,
|
||||
REQTYPE_DELETE = 0,
|
||||
REQTYPE_HEAD,
|
||||
REQTYPE_PUTHEAD,
|
||||
REQTYPE_PUT,
|
||||
REQTYPE_GET,
|
||||
REQTYPE_CHKBUCKET,
|
||||
REQTYPE_LISTBUCKET,
|
||||
REQTYPE_PREMULTIPOST,
|
||||
REQTYPE_COMPLETEMULTIPOST,
|
||||
REQTYPE_UPLOADMULTIPOST,
|
||||
REQTYPE_COPYMULTIPOST,
|
||||
REQTYPE_MULTILIST,
|
||||
REQTYPE_IAMCRED,
|
||||
REQTYPE_ABORTMULTIUPLOAD
|
||||
};
|
||||
private:
|
||||
enum REQTYPE {
|
||||
REQTYPE_UNSET = -1,
|
||||
REQTYPE_DELETE = 0,
|
||||
REQTYPE_HEAD,
|
||||
REQTYPE_PUTHEAD,
|
||||
REQTYPE_PUT,
|
||||
REQTYPE_GET,
|
||||
REQTYPE_CHKBUCKET,
|
||||
REQTYPE_LISTBUCKET,
|
||||
REQTYPE_PREMULTIPOST,
|
||||
REQTYPE_COMPLETEMULTIPOST,
|
||||
REQTYPE_UPLOADMULTIPOST,
|
||||
REQTYPE_COPYMULTIPOST,
|
||||
REQTYPE_MULTILIST,
|
||||
REQTYPE_IAMCRED,
|
||||
REQTYPE_ABORTMULTIUPLOAD,
|
||||
REQTYPE_IAMROLE
|
||||
};
|
||||
|
||||
// class variables
|
||||
static pthread_mutex_t curl_handles_lock;
|
||||
static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
|
||||
static pthread_mutex_t* crypt_mutex;
|
||||
static bool is_initglobal_done;
|
||||
static CURLSH* hCurlShare;
|
||||
static bool is_dns_cache;
|
||||
static bool is_ssl_session_cache;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static std::string default_acl; // TODO: to enum
|
||||
static bool is_use_rrs;
|
||||
static bool is_use_sse;
|
||||
static bool is_content_md5;
|
||||
static bool is_verbose;
|
||||
static std::string AWSAccessKeyId;
|
||||
static std::string AWSSecretAccessKey;
|
||||
static std::string AWSAccessToken;
|
||||
static time_t AWSAccessTokenExpire;
|
||||
static std::string IAM_role;
|
||||
static long ssl_verify_hostname;
|
||||
static const EVP_MD* evp_md;
|
||||
static curltime_t curl_times;
|
||||
static curlprogress_t curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static int max_parallel_cnt;
|
||||
// class variables
|
||||
static pthread_mutex_t curl_warnings_lock;
|
||||
static bool curl_warnings_once; // emit older curl warnings only once
|
||||
static pthread_mutex_t curl_handles_lock;
|
||||
static struct callback_locks_t {
|
||||
pthread_mutex_t dns;
|
||||
pthread_mutex_t ssl_session;
|
||||
} callback_locks;
|
||||
static bool is_initglobal_done;
|
||||
static CurlHandlerPool* sCurlPool;
|
||||
static int sCurlPoolSize;
|
||||
static CURLSH* hCurlShare;
|
||||
static bool is_cert_check;
|
||||
static bool is_dns_cache;
|
||||
static bool is_ssl_session_cache;
|
||||
static long connect_timeout;
|
||||
static time_t readwrite_timeout;
|
||||
static int retries;
|
||||
static bool is_public_bucket;
|
||||
static acl_t default_acl;
|
||||
static storage_class_t storage_class;
|
||||
static sseckeylist_t sseckeys;
|
||||
static std::string ssekmsid;
|
||||
static sse_type_t ssetype;
|
||||
static bool is_content_md5;
|
||||
static bool is_verbose;
|
||||
static bool is_dump_body;
|
||||
static std::string AWSAccessKeyId;
|
||||
static std::string AWSSecretAccessKey;
|
||||
static std::string AWSAccessToken;
|
||||
static time_t AWSAccessTokenExpire;
|
||||
static bool is_ecs;
|
||||
static bool is_use_session_token;
|
||||
static bool is_ibm_iam_auth;
|
||||
static std::string IAM_cred_url;
|
||||
static int IAM_api_version;
|
||||
static std::string IAMv2_token_url;
|
||||
static int IAMv2_token_ttl;
|
||||
static std::string IAMv2_token_ttl_hdr;
|
||||
static std::string IAMv2_token_hdr;
|
||||
static std::string IAMv2_api_token;
|
||||
static size_t IAM_field_count;
|
||||
static std::string IAM_token_field;
|
||||
static std::string IAM_expiry_field;
|
||||
static std::string IAM_role;
|
||||
static long ssl_verify_hostname;
|
||||
static curltime_t curl_times;
|
||||
static curlprogress_t curl_progress;
|
||||
static std::string curl_ca_bundle;
|
||||
static mimes_t mimeTypes;
|
||||
static std::string userAgent;
|
||||
static int max_parallel_cnt;
|
||||
static int max_multireq;
|
||||
static off_t multipart_size;
|
||||
static off_t multipart_copy_size;
|
||||
static signature_type_t signature_type;
|
||||
static bool is_ua; // User-Agent
|
||||
static bool listobjectsv2;
|
||||
static bool requester_pays;
|
||||
|
||||
// variables
|
||||
CURL* hCurl;
|
||||
REQTYPE type; // type of request
|
||||
std::string path; // target object path
|
||||
std::string base_path; // base path (for multi curl head request)
|
||||
std::string saved_path; // saved path = cache key (for multi curl head request)
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
BodyData* bodydata; // body data by WriteMemoryCallback
|
||||
BodyData* headdata; // header data by WriteMemoryCallback
|
||||
long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
int postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
bool is_use_ahbe; // additional header by extension
|
||||
int retry_count; // retry count for multipart
|
||||
FILE* b_infile; // backup for retrying
|
||||
const unsigned char* b_postdata; // backup for retrying
|
||||
int b_postdata_remaining; // backup for retrying
|
||||
off_t b_partdata_startpos; // backup for retrying
|
||||
ssize_t b_partdata_size; // backup for retrying
|
||||
// variables
|
||||
CURL* hCurl;
|
||||
REQTYPE type; // type of request
|
||||
std::string path; // target object path
|
||||
std::string base_path; // base path (for multi curl head request)
|
||||
std::string saved_path; // saved path = cache key (for multi curl head request)
|
||||
std::string url; // target object path(url)
|
||||
struct curl_slist* requestHeaders;
|
||||
headers_t responseHeaders; // header data by HeaderCallback
|
||||
BodyData bodydata; // body data by WriteMemoryCallback
|
||||
BodyData headdata; // header data by WriteMemoryCallback
|
||||
volatile long LastResponseCode;
|
||||
const unsigned char* postdata; // use by post method and read callback function.
|
||||
int postdata_remaining; // use by post method and read callback function.
|
||||
filepart partdata; // use by multipart upload/get object callback
|
||||
bool is_use_ahbe; // additional header by extension
|
||||
int retry_count; // retry count for multipart
|
||||
FILE* b_infile; // backup for retrying
|
||||
const unsigned char* b_postdata; // backup for retrying
|
||||
int b_postdata_remaining; // backup for retrying
|
||||
off_t b_partdata_startpos; // backup for retrying
|
||||
off_t b_partdata_size; // backup for retrying
|
||||
int b_ssekey_pos; // backup for retrying
|
||||
std::string b_ssevalue; // backup for retrying
|
||||
sse_type_t b_ssetype; // backup for retrying
|
||||
std::string b_from; // backup for retrying(for copy request)
|
||||
headers_t b_meta; // backup for retrying(for copy request)
|
||||
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
|
||||
std::string query_string; // request query string
|
||||
Semaphore *sem;
|
||||
pthread_mutex_t *completed_tids_lock;
|
||||
std::vector<pthread_t> *completed_tids;
|
||||
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
|
||||
CURLcode curlCode; // handle curl return
|
||||
|
||||
public:
|
||||
static const long S3FSCURL_RESPONSECODE_NOTSET = -1;
|
||||
static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
|
||||
static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
|
||||
|
||||
public:
|
||||
// constructor/destructor
|
||||
S3fsCurl(bool ahbe = false);
|
||||
~S3fsCurl();
|
||||
public:
|
||||
// constructor/destructor
|
||||
explicit S3fsCurl(bool ahbe = false);
|
||||
~S3fsCurl();
|
||||
|
||||
private:
|
||||
// class methods
|
||||
static bool InitGlobalCurl(void);
|
||||
static bool DestroyGlobalCurl(void);
|
||||
static bool InitShareCurl(void);
|
||||
static bool DestroyShareCurl(void);
|
||||
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr);
|
||||
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr);
|
||||
static bool InitCryptMutex(void);
|
||||
static bool DestroyCryptMutex(void);
|
||||
static void CryptMutexLock(int mode, int pos, const char* file, int line);
|
||||
static unsigned long CryptGetThreadid(void);
|
||||
static struct CRYPTO_dynlock_value* CreateDynCryptMutex(const char* file, int line);
|
||||
static void DynCryptMutexLock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line);
|
||||
static void DestoryDynCryptMutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line);
|
||||
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
|
||||
private:
|
||||
// class methods
|
||||
static bool InitGlobalCurl();
|
||||
static bool DestroyGlobalCurl();
|
||||
static bool InitShareCurl();
|
||||
static bool DestroyShareCurl();
|
||||
static void LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_access laccess, void* useptr);
|
||||
static void UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* useptr);
|
||||
static bool InitCryptMutex();
|
||||
static bool DestroyCryptMutex();
|
||||
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
|
||||
|
||||
static bool InitMimeType(const char* MimeFile = NULL);
|
||||
static bool LocateBundle(void);
|
||||
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
|
||||
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
|
||||
static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
static bool LocateBundle();
|
||||
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
|
||||
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
|
||||
static size_t ReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t UploadReadCallback(void *ptr, size_t size, size_t nmemb, void *userp);
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool MixMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* MixMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
// lazy functions for set curl options
|
||||
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
|
||||
// methods
|
||||
bool ResetHandle(void);
|
||||
bool RemakeHandle(void);
|
||||
bool ClearInternalData(void);
|
||||
std::string CalcSignature(std::string method, std::string strMD5, std::string content_type, std::string date, std::string resource);
|
||||
bool GetUploadId(std::string& upload_id);
|
||||
int GetIAMCredentials(void);
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
static bool SetIAMv2APIToken(const char* response);
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
static bool SetIAMRoleFromMetaData(const char* response);
|
||||
static bool LoadEnvSseCKeys();
|
||||
static bool LoadEnvSseKmsid();
|
||||
static bool PushbackSseKeys(const std::string& onekey);
|
||||
static bool AddUserAgent(CURL* hCurl);
|
||||
|
||||
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool ow_sse_flg);
|
||||
int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
|
||||
int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
|
||||
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
|
||||
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
|
||||
static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
|
||||
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl(const char* MimeFile = NULL);
|
||||
static bool DestroyS3fsCurl(void);
|
||||
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg);
|
||||
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
|
||||
static bool CheckIAMCredentialUpdate(void);
|
||||
// methods
|
||||
bool ResetHandle(bool lock_already_held = false);
|
||||
bool RemakeHandle();
|
||||
bool ClearInternalData();
|
||||
void insertV4Headers();
|
||||
void insertV2Headers();
|
||||
void insertIBMIAMHeaders();
|
||||
void insertAuthHeaders();
|
||||
std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
|
||||
std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
|
||||
int GetIAMv2ApiToken();
|
||||
int GetIAMCredentials();
|
||||
|
||||
// class methods(valiables)
|
||||
static std::string LookupMimeType(std::string name);
|
||||
static bool SetDnsCache(bool isCache);
|
||||
static bool SetSslSessionCache(bool isCache);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout(void) { return S3fsCurl::readwrite_timeout; }
|
||||
static int SetRetries(int count);
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
|
||||
static std::string SetDefaultAcl(const char* acl);
|
||||
static bool SetUseRrs(bool flag);
|
||||
static bool GetUseRrs(void) { return S3fsCurl::is_use_rrs; }
|
||||
static bool SetUseSse(bool flag);
|
||||
static bool GetUseSse(void) { return S3fsCurl::is_use_sse; }
|
||||
static bool SetContentMd5(bool flag);
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
|
||||
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
|
||||
static bool IsSetAccessKeyId(void){
|
||||
return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
|
||||
static int SetMaxParallelCount(int value);
|
||||
static std::string SetIAMRole(const char* role);
|
||||
static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, const std::string& upload_id, headers_t& meta);
|
||||
bool UploadMultipartPostComplete();
|
||||
bool CopyMultipartPostComplete();
|
||||
bool MixMultipartPostComplete();
|
||||
|
||||
// methods
|
||||
bool CreateCurlHandle(bool force = false);
|
||||
bool DestroyCurlHandle(void);
|
||||
public:
|
||||
// class methods
|
||||
static bool InitS3fsCurl();
|
||||
static bool InitMimeType(const std::string& strFile);
|
||||
static bool DestroyS3fsCurl();
|
||||
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
|
||||
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
|
||||
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, off_t size);
|
||||
static bool CheckIAMCredentialUpdate();
|
||||
|
||||
bool GetResponseCode(long& responseCode);
|
||||
int RequestPerform(void);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL);
|
||||
bool PreHeadRequest(std::string& tpath, std::string& bpath, std::string& savedpath) {
|
||||
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str());
|
||||
}
|
||||
int HeadRequest(const char* tpath, headers_t& meta);
|
||||
int PutHeadRequest(const char* tpath, headers_t& meta, bool ow_sse_flg);
|
||||
int PutRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg);
|
||||
int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
|
||||
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
|
||||
int CheckBucket(void);
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, std::string& upload_id);
|
||||
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta);
|
||||
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool ow_sse_flg);
|
||||
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
|
||||
// class methods(variables)
|
||||
static std::string LookupMimeType(const std::string& name);
|
||||
static bool SetCheckCertificate(bool isCertCheck);
|
||||
static bool SetDnsCache(bool isCache);
|
||||
static bool SetSslSessionCache(bool isCache);
|
||||
static long SetConnectTimeout(long timeout);
|
||||
static time_t SetReadwriteTimeout(time_t timeout);
|
||||
static time_t GetReadwriteTimeout() { return S3fsCurl::readwrite_timeout; }
|
||||
static int SetRetries(int count);
|
||||
static bool SetPublicBucket(bool flag);
|
||||
static bool IsPublicBucket() { return S3fsCurl::is_public_bucket; }
|
||||
static acl_t SetDefaultAcl(acl_t acl);
|
||||
static acl_t GetDefaultAcl();
|
||||
static storage_class_t SetStorageClass(storage_class_t storage_class);
|
||||
static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
|
||||
static bool LoadEnvSse() { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
|
||||
static sse_type_t SetSseType(sse_type_t type);
|
||||
static sse_type_t GetSseType() { return S3fsCurl::ssetype; }
|
||||
static bool IsSseDisable() { return (sse_type_t::SSE_DISABLE == S3fsCurl::ssetype); }
|
||||
static bool IsSseS3Type() { return (sse_type_t::SSE_S3 == S3fsCurl::ssetype); }
|
||||
static bool IsSseCType() { return (sse_type_t::SSE_C == S3fsCurl::ssetype); }
|
||||
static bool IsSseKmsType() { return (sse_type_t::SSE_KMS == S3fsCurl::ssetype); }
|
||||
static bool FinalCheckSse();
|
||||
static bool SetSseCKeys(const char* filepath);
|
||||
static bool SetSseKmsid(const char* kmsid);
|
||||
static bool IsSetSseKmsId() { return !S3fsCurl::ssekmsid.empty(); }
|
||||
static const char* GetSseKmsId() { return S3fsCurl::ssekmsid.c_str(); }
|
||||
static bool GetSseKey(std::string& md5, std::string& ssekey);
|
||||
static bool GetSseKeyMd5(int pos, std::string& md5);
|
||||
static int GetSseKeyCount();
|
||||
static bool SetContentMd5(bool flag);
|
||||
static bool SetVerbose(bool flag);
|
||||
static bool GetVerbose() { return S3fsCurl::is_verbose; }
|
||||
static bool SetDumpBody(bool flag);
|
||||
static bool IsDumpBody() { return S3fsCurl::is_dump_body; }
|
||||
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
|
||||
static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken);
|
||||
static bool IsSetAccessKeyID()
|
||||
{
|
||||
return (0 < S3fsCurl::AWSAccessKeyId.size());
|
||||
}
|
||||
static bool IsSetAccessKeys()
|
||||
{
|
||||
return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname() { return S3fsCurl::ssl_verify_hostname; }
|
||||
static void ResetOffset(S3fsCurl* pCurl);
|
||||
// maximum parallel GET and PUT requests
|
||||
static int SetMaxParallelCount(int value);
|
||||
static int GetMaxParallelCount() { return S3fsCurl::max_parallel_cnt; }
|
||||
// maximum parallel HEAD requests
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest() { return S3fsCurl::max_multireq; }
|
||||
static bool SetIsECS(bool flag);
|
||||
static bool SetIsIBMIAMAuth(bool flag);
|
||||
static size_t SetIAMFieldCount(size_t field_count);
|
||||
static std::string SetIAMCredentialsURL(const char* url);
|
||||
static std::string SetIAMTokenField(const char* token_field);
|
||||
static std::string SetIAMExpiryField(const char* expiry_field);
|
||||
static std::string SetIAMRole(const char* role);
|
||||
static const char* GetIAMRole() { return S3fsCurl::IAM_role.c_str(); }
|
||||
static bool SetMultipartSize(off_t size);
|
||||
static off_t GetMultipartSize() { return S3fsCurl::multipart_size; }
|
||||
static bool SetMultipartCopySize(off_t size);
|
||||
static off_t GetMultipartCopySize() { return S3fsCurl::multipart_copy_size; }
|
||||
static signature_type_t SetSignatureType(signature_type_t signature_type) { signature_type_t bresult = S3fsCurl::signature_type; S3fsCurl::signature_type = signature_type; return bresult; }
|
||||
static signature_type_t GetSignatureType() { return S3fsCurl::signature_type; }
|
||||
static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
|
||||
static bool IsUserAgentFlag() { return S3fsCurl::is_ua; }
|
||||
static void InitUserAgent();
|
||||
static bool SetListObjectsV2(bool isset) { bool bresult = S3fsCurl::listobjectsv2; S3fsCurl::listobjectsv2 = isset; return bresult; }
|
||||
static bool IsListObjectsV2() { return S3fsCurl::listobjectsv2; }
|
||||
static bool SetRequesterPays(bool flag) { bool old_flag = S3fsCurl::requester_pays; S3fsCurl::requester_pays = flag; return old_flag; }
|
||||
static bool IsRequesterPays() { return S3fsCurl::requester_pays; }
|
||||
static bool SetIMDSVersion(int version);
|
||||
|
||||
// methods(valiables)
|
||||
CURL* GetCurlHandle(void) const { return hCurl; }
|
||||
std::string GetPath(void) const { return path; }
|
||||
std::string GetBasePath(void) const { return base_path; }
|
||||
std::string GetSpacialSavedPath(void) const { return saved_path; }
|
||||
std::string GetUrl(void) const { return url; }
|
||||
headers_t* GetResponseHeaders(void) { return &responseHeaders; }
|
||||
BodyData* GetBodyData(void) const { return bodydata; }
|
||||
BodyData* GetHeadData(void) const { return headdata; }
|
||||
long GetLastResponseCode(void) const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe(void) { return SetUseAhbe(true); }
|
||||
bool DisableUseAhbe(void) { return SetUseAhbe(false); }
|
||||
bool IsUseAhbe(void) const { return is_use_ahbe; }
|
||||
int GetMultipartRetryCount(void) const { return retry_count; }
|
||||
void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
|
||||
bool IsOverMultipartRetryCount(void) const { return (retry_count >= S3fsCurl::retries); }
|
||||
// methods
|
||||
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
|
||||
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
|
||||
|
||||
bool LoadIAMRoleFromMetaData();
|
||||
bool AddSseRequestHead(sse_type_t ssetype, const std::string& ssevalue, bool is_only_c, bool is_copy);
|
||||
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
|
||||
int RequestPerform(bool dontAddAuthHeaders=false);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
|
||||
bool PreHeadRequest(const std::string& tpath, const std::string& bpath, const std::string& savedpath, int ssekey_pos = -1) {
|
||||
return PreHeadRequest(tpath.c_str(), bpath.c_str(), savedpath.c_str(), ssekey_pos);
|
||||
}
|
||||
int HeadRequest(const char* tpath, headers_t& meta);
|
||||
int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
|
||||
int PutRequest(const char* tpath, headers_t& meta, int fd);
|
||||
int PreGetObjectRequest(const char* tpath, int fd, off_t start, off_t size, sse_type_t ssetype, const std::string& ssevalue);
|
||||
int GetObjectRequest(const char* tpath, int fd, off_t start = -1, off_t size = -1);
|
||||
int CheckBucket();
|
||||
int ListBucketRequest(const char* tpath, const char* query);
|
||||
int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
|
||||
int CompleteMultipartPostRequest(const char* tpath, const std::string& upload_id, etaglist_t& parts);
|
||||
int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int MultipartListRequest(std::string& body);
|
||||
int AbortMultipartUpload(const char* tpath, const std::string& upload_id);
|
||||
int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
|
||||
int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
|
||||
int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, off_t size, etaglist_t& list);
|
||||
int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);
|
||||
|
||||
// methods(variables)
|
||||
CURL* GetCurlHandle() const { return hCurl; }
|
||||
std::string GetPath() const { return path; }
|
||||
std::string GetBasePath() const { return base_path; }
|
||||
std::string GetSpacialSavedPath() const { return saved_path; }
|
||||
std::string GetUrl() const { return url; }
|
||||
std::string GetOp() const { return op; }
|
||||
headers_t* GetResponseHeaders() { return &responseHeaders; }
|
||||
BodyData* GetBodyData() { return &bodydata; }
|
||||
BodyData* GetHeadData() { return &headdata; }
|
||||
CURLcode GetCurlCode() const { return curlCode; }
|
||||
long GetLastResponseCode() const { return LastResponseCode; }
|
||||
bool SetUseAhbe(bool ahbe);
|
||||
bool EnableUseAhbe() { return SetUseAhbe(true); }
|
||||
bool DisableUseAhbe() { return SetUseAhbe(false); }
|
||||
bool IsUseAhbe() const { return is_use_ahbe; }
|
||||
int GetMultipartRetryCount() const { return retry_count; }
|
||||
void SetMultipartRetryCount(int retrycnt) { retry_count = retrycnt; }
|
||||
bool IsOverMultipartRetryCount() const { return (retry_count >= S3fsCurl::retries); }
|
||||
int GetLastPreHeadSeecKeyPos() const { return b_ssekey_pos; }
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsMultiCurl
|
||||
//----------------------------------------------
|
||||
// Class for lapping multi curl
|
||||
//
|
||||
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failuer and retrying
|
||||
|
||||
class S3fsMultiCurl
|
||||
{
|
||||
private:
|
||||
static int max_multireq;
|
||||
|
||||
CURLM* hMulti;
|
||||
s3fscurlmap_t cMap_all; // all of curl requests
|
||||
s3fscurlmap_t cMap_req; // curl requests are sent
|
||||
|
||||
S3fsMultiSuccessCallback SuccessCallback;
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
int MultiRead(void);
|
||||
|
||||
public:
|
||||
S3fsMultiCurl();
|
||||
~S3fsMultiCurl();
|
||||
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsMultiCurl::max_multireq; }
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
bool Clear(void) { return ClearEx(true); }
|
||||
bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
|
||||
int Request(void);
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class AdditionalHeader
|
||||
//----------------------------------------------
|
||||
typedef std::list<int> charcnt_list_t;
|
||||
typedef std::map<std::string, std::string> headerpair_t;
|
||||
typedef std::map<std::string, headerpair_t> addheader_t;
|
||||
|
||||
class AdditionalHeader
|
||||
{
|
||||
private:
|
||||
static AdditionalHeader singleton;
|
||||
bool is_enable;
|
||||
charcnt_list_t charcntlist;
|
||||
addheader_t addheader;
|
||||
|
||||
public:
|
||||
// Reference singleton
|
||||
static AdditionalHeader* get(void) { return &singleton; }
|
||||
|
||||
AdditionalHeader();
|
||||
~AdditionalHeader();
|
||||
|
||||
bool Load(const char* file);
|
||||
void Unload(void);
|
||||
|
||||
bool AddHeader(headers_t& meta, const char* path) const;
|
||||
struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
|
||||
bool Dump(void) const;
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// Utility Functions
|
||||
//----------------------------------------------
|
||||
std::string GetContentMD5(int fd);
|
||||
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
|
||||
std::string md5sum(int fd, off_t start, ssize_t size);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
|
||||
#endif // S3FS_CURL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
127
src/curl_handlerpool.cpp
Normal file
127
src/curl_handlerpool.cpp
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_handlerpool.h"
|
||||
#include "autolock.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class CurlHandlerPool
|
||||
//-------------------------------------------------------------------
|
||||
bool CurlHandlerPool::Init()
|
||||
{
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
if (0 != pthread_mutex_init(&mLock, &attr)) {
|
||||
S3FS_PRN_ERR("Init curl handlers lock failed");
|
||||
return false;
|
||||
}
|
||||
|
||||
for(int cnt = 0; cnt < mMaxHandlers; ++cnt){
|
||||
CURL* hCurl = curl_easy_init();
|
||||
if(!hCurl){
|
||||
S3FS_PRN_ERR("Init curl handlers pool failed");
|
||||
Destroy();
|
||||
return false;
|
||||
}
|
||||
mPool.push_back(hCurl);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CurlHandlerPool::Destroy()
|
||||
{
|
||||
while(!mPool.empty()){
|
||||
CURL* hCurl = mPool.back();
|
||||
mPool.pop_back();
|
||||
if(hCurl){
|
||||
curl_easy_cleanup(hCurl);
|
||||
}
|
||||
}
|
||||
if (0 != pthread_mutex_destroy(&mLock)) {
|
||||
S3FS_PRN_ERR("Destroy curl handlers lock failed");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
// Hand out a curl easy handle.  A pooled handle is preferred; when the pool
// is empty and only_pool is false a brand-new handle is created instead.
// May return NULL when only_pool is true and the pool is empty (or when
// curl_easy_init fails).
//
CURL* CurlHandlerPool::GetHandler(bool only_pool)
{
    CURL* handle = NULL;
    {
        AutoLock lock(&mLock);
        if(!mPool.empty()){
            handle = mPool.back();
            mPool.pop_back();
            S3FS_PRN_DBG("Get handler from pool: rest = %d", static_cast<int>(mPool.size()));
        }
    }
    if(NULL == handle && !only_pool){
        S3FS_PRN_INFO("Pool empty: force to create new handler");
        handle = curl_easy_init();
    }
    return handle;
}
|
||||
|
||||
//
// Give a handle back.  When restore_pool is true the handle is put into the
// pool and the oldest entries are evicted so the pool stays within
// mMaxHandlers; otherwise the handle is destroyed outright.
//
void CurlHandlerPool::ReturnHandler(CURL* hCurl, bool restore_pool)
{
    if(!hCurl){
        return;
    }

    if(restore_pool){
        AutoLock lock(&mLock);

        S3FS_PRN_DBG("Return handler to pool");
        mPool.push_back(hCurl);

        // Evict from the front (oldest) until we are back under the cap.
        while(mMaxHandlers <= static_cast<int>(mPool.size())){
            CURL* hOldCurl = mPool.front();
            mPool.pop_front();
            if(hOldCurl){
                S3FS_PRN_INFO("Pool full: destroy the oldest handler");
                curl_easy_cleanup(hOldCurl);
            }
        }
    }else{
        // [fix] the original logged "Pool full: destroy the handler" here,
        // but this branch runs because the caller asked not to restore the
        // handle — the pool state is irrelevant.
        S3FS_PRN_INFO("Destroy the handler without restoring to pool");
        curl_easy_cleanup(hCurl);
    }
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
64
src/curl_handlerpool.h
Normal file
64
src/curl_handlerpool.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_HANDLERPOOL_H_
|
||||
#define S3FS_CURL_HANDLERPOOL_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <curl/curl.h>
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedefs
|
||||
//----------------------------------------------
|
||||
typedef std::list<CURL*> hcurllist_t;
|
||||
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
// Bounded cache of reusable CURL easy handles, guarded by a pthread mutex.
// Init() pre-creates maxHandlers handles; GetHandler()/ReturnHandler() check
// handles out and back in.  Call Destroy() before the object goes away.
class CurlHandlerPool
{
  public:
    // maxHandlers: upper bound on pooled handles; must be positive.
    explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
    {
        assert(maxHandlers > 0);
    }

    // Create the lock and pre-allocate the handles; false on failure.
    bool Init();
    // Release all pooled handles and the lock; false on failure.
    bool Destroy();

    // Take a handle; when only_pool is true, never create a new one
    // (may return NULL).
    CURL* GetHandler(bool only_pool);
    // Give a handle back; when restore_pool is false it is destroyed
    // instead of being pooled.
    void ReturnHandler(CURL* hCurl, bool restore_pool);

  private:
    int mMaxHandlers;        // capacity limit for mPool
    pthread_mutex_t mLock;   // guards mPool
    hcurllist_t mPool;       // available handles (reused LIFO, evicted FIFO)
};
|
||||
|
||||
#endif // S3FS_CURL_HANDLERPOOL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
367
src/curl_multi.cpp
Normal file
367
src/curl_multi.cpp
Normal file
@ -0,0 +1,367 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_multi.h"
|
||||
#include "curl.h"
|
||||
#include "autolock.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsMultiCurl
|
||||
//-------------------------------------------------------------------
|
||||
// Construct a multi-request driver that runs at most maxParallelism
// requests concurrently.  Callbacks start out unset.
S3fsMultiCurl::S3fsMultiCurl(int maxParallelism) : maxParallelism(maxParallelism), SuccessCallback(NULL), RetryCallback(NULL)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
#if S3FS_PTHREAD_ERRORCHECK
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
    int rc = pthread_mutex_init(&completed_tids_lock, &attr);
    if(0 != rc){
        S3FS_PRN_ERR("could not initialize completed_tids_lock: %i", rc);
    }
}
|
||||
|
||||
// Release all outstanding request objects, then the lock that
// coordinated the worker threads.
S3fsMultiCurl::~S3fsMultiCurl()
{
    Clear();

    int rc = pthread_mutex_destroy(&completed_tids_lock);
    if(rc != 0){
        S3FS_PRN_ERR("could not destroy completed_tids_lock: %i", rc);
    }
}
|
||||
|
||||
bool S3fsMultiCurl::ClearEx(bool is_all)
|
||||
{
|
||||
s3fscurllist_t::iterator iter;
|
||||
for(iter = clist_req.begin(); iter != clist_req.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
if(s3fscurl){
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl; // with destroy curl handle.
|
||||
}
|
||||
}
|
||||
clist_req.clear();
|
||||
|
||||
if(is_all){
|
||||
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
s3fscurl->DestroyCurlHandle();
|
||||
delete s3fscurl;
|
||||
}
|
||||
clist_all.clear();
|
||||
}
|
||||
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Install a new success callback; the previous one is returned so the
// caller can restore it later.
S3fsMultiSuccessCallback S3fsMultiCurl::SetSuccessCallback(S3fsMultiSuccessCallback function)
{
    S3fsMultiSuccessCallback previous = SuccessCallback;
    SuccessCallback = function;
    return previous;
}
|
||||
|
||||
// Install a new retry callback; the previous one is returned so the
// caller can restore it later.
S3fsMultiRetryCallback S3fsMultiCurl::SetRetryCallback(S3fsMultiRetryCallback function)
{
    S3fsMultiRetryCallback previous = RetryCallback;
    RetryCallback = function;
    return previous;
}
|
||||
|
||||
// Queue a request object for the next Request() run.  NULL is rejected.
// Ownership passes to this object (queued requests are deleted by ClearEx).
bool S3fsMultiCurl::SetS3fsCurlObject(S3fsCurl* s3fscurl)
{
    if(NULL == s3fscurl){
        return false;
    }
    clist_all.push_back(s3fscurl);
    return true;
}
|
||||
|
||||
//
// Run every request in clist_req on its own thread, throttled to
// maxParallelism concurrent workers by a semaphore.
// Returns 0 when all threads were created and joined cleanly, -EIO otherwise.
//
int S3fsMultiCurl::MultiPerform()
{
    std::vector<pthread_t> threads;
    bool success = true;
    bool isMultiHead = false;
    Semaphore sem(GetMaxParallelism());
    int rc;

    for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
        pthread_t thread;
        S3fsCurl* s3fscurl = *iter;
        if(!s3fscurl){
            continue;
        }

        // Blocks until a worker slot is free.
        sem.wait();

        // Reap workers that have already finished so their pthread resources
        // are released while we are still spawning new ones.
        {
            AutoLock lock(&completed_tids_lock);
            for(std::vector<pthread_t>::iterator it = completed_tids.begin(); it != completed_tids.end(); ++it){
                void* retval;

                rc = pthread_join(*it, &retval);
                if (rc) {
                    success = false;
                    S3FS_PRN_ERR("failed pthread_join - rc(%d) %s", rc, strerror(rc));
                } else {
                    int int_retval = (int)(intptr_t)(retval);
                    if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
                        // -ENOENT from a HEAD batch is expected (object may not exist).
                        S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
                    }
                }
            }
            completed_tids.clear();
        }
        // Give the worker the objects it needs to signal completion
        // (see RequestPerformWrapper).
        s3fscurl->sem = &sem;
        s3fscurl->completed_tids_lock = &completed_tids_lock;
        s3fscurl->completed_tids = &completed_tids;

        isMultiHead |= s3fscurl->GetOp() == "HEAD";

        rc = pthread_create(&thread, NULL, S3fsMultiCurl::RequestPerformWrapper, static_cast<void*>(s3fscurl));
        if (rc != 0) {
            success = false;
            S3FS_PRN_ERR("failed pthread_create - rc(%d)", rc);
            break;
        }
        threads.push_back(thread);
    }

    // Drain the semaphore to wait for outstanding workers.
    // NOTE(review): assumes sem.get_value() reflects the remaining capacity
    // such that this loop blocks until all workers posted — confirm the
    // Semaphore implementation.
    for(int i = 0; i < sem.get_value(); ++i){
        sem.wait();
    }

    // Final reap of every remaining completed worker thread.
    AutoLock lock(&completed_tids_lock);
    for (std::vector<pthread_t>::iterator titer = completed_tids.begin(); titer != completed_tids.end(); ++titer) {
        void* retval;

        rc = pthread_join(*titer, &retval);
        if (rc) {
            success = false;
            S3FS_PRN_ERR("failed pthread_join - rc(%d)", rc);
        } else {
            int int_retval = (int)(intptr_t)(retval);
            if (int_retval && !(int_retval == -ENOENT && isMultiHead)) {
                S3FS_PRN_WARN("thread failed - rc(%d)", int_retval);
            }
        }
    }
    completed_tids.clear();

    return success ? 0 : -EIO;
}
|
||||
|
||||
//
// Evaluate the results of the requests that MultiPerform() just ran:
// successes go to SuccessCallback, retryable failures are re-queued into
// clist_all via RetryCallback, and requests whose result is not yet
// recorded are postponed to the end of the list.
// Returns 0 on success, or -EIO when a failed request could not be requeued.
//
int S3fsMultiCurl::MultiRead()
{
    int result = 0;

    for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ){
        S3fsCurl* s3fscurl = *iter;

        bool isRetry = false;
        bool isPostpone = false;
        bool isNeedResetOffset = true;   // retry from the start unless partial data can be reused
        long responseCode = S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET;
        CURLcode curlCode = s3fscurl->GetCurlCode();

        if(s3fscurl->GetResponseCode(responseCode, false) && curlCode == CURLE_OK){
            if(S3fsCurl::S3FSCURL_RESPONSECODE_NOTSET == responseCode){
                // This is a case where the processing result has not yet been updated (should be very rare).
                isPostpone = true;
            }else if(400 > responseCode){
                // add into stat cache
                if(SuccessCallback && !SuccessCallback(s3fscurl)){
                    S3FS_PRN_WARN("error from callback function(%s).", s3fscurl->url.c_str());
                }
            }else if(400 == responseCode){
                // as possibly in multipart
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }else if(404 == responseCode){
                // not found
                // HEAD requests on readdir_multi_head can return 404
                if(s3fscurl->GetOp() != "HEAD"){
                    S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                }
            }else if(500 == responseCode){
                // case of all other result, do retry.(11/13/2013)
                // because it was found that s3fs got 500 error from S3, but could success
                // to retry it.
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }else{
                // Retry in other case.
                S3FS_PRN_WARN("failed a request(%ld: %s)", responseCode, s3fscurl->url.c_str());
                isRetry = true;
            }
        }else{
            // No HTTP response code recorded: the transfer itself failed.
            S3FS_PRN_ERR("failed a request(Unknown response code: %s)", s3fscurl->url.c_str());
            // Reuse partical file
            switch(curlCode){
                case CURLE_OPERATION_TIMEDOUT:
                    isRetry = true;
                    isNeedResetOffset = false;   // keep the offset: partial data is usable
                    break;

                case CURLE_PARTIAL_FILE:
                    isRetry = true;
                    isNeedResetOffset = false;   // keep the offset: partial data is usable
                    break;

                default:
                    S3FS_PRN_ERR("###curlCode: %d msg: %s", curlCode, curl_easy_strerror(curlCode));
                    isRetry = true;
                    break;
            }
        }

        if(isPostpone){
            // Move this request to the tail and restart the scan; its result
            // should be recorded by the time it is reached again.
            clist_req.erase(iter);
            clist_req.push_back(s3fscurl); // Re-evaluate at the end
            iter = clist_req.begin();
        }else{
            if(!isRetry || 0 != result){
                // If an EIO error has already occurred, it will be terminated
                // immediately even if retry processing is required.
                s3fscurl->DestroyCurlHandle();
                delete s3fscurl;
            }else{
                S3fsCurl* retrycurl = NULL;

                // Reset offset
                if(isNeedResetOffset){
                    S3fsCurl::ResetOffset(s3fscurl);
                }

                // For retry
                if(RetryCallback){
                    retrycurl = RetryCallback(s3fscurl);
                    if(NULL != retrycurl){
                        clist_all.push_back(retrycurl);
                    }else{
                        // set EIO and wait for other parts.
                        result = -EIO;
                    }
                }
                // RetryCallback may return s3fscurl itself; only free it
                // when a different object was queued for retry.
                if(s3fscurl != retrycurl){
                    s3fscurl->DestroyCurlHandle();
                    delete s3fscurl;
                }
            }
            iter = clist_req.erase(iter);
        }
    }
    clist_req.clear();

    if(0 != result){
        // If an EIO error has already occurred, clear all retry objects.
        for(s3fscurllist_t::iterator iter = clist_all.begin(); iter != clist_all.end(); ++iter){
            S3fsCurl* s3fscurl = *iter;
            s3fscurl->DestroyCurlHandle();
            delete s3fscurl;
        }
        clist_all.clear();
    }
    return result;
}
|
||||
|
||||
int S3fsMultiCurl::Request()
|
||||
{
|
||||
S3FS_PRN_INFO3("[count=%zu]", clist_all.size());
|
||||
|
||||
// Make request list.
|
||||
//
|
||||
// Send multi request loop( with retry )
|
||||
// (When many request is sends, sometimes gets "Couldn't connect to server")
|
||||
//
|
||||
while(!clist_all.empty()){
|
||||
// set curl handle to multi handle
|
||||
int result;
|
||||
s3fscurllist_t::iterator iter;
|
||||
for(iter = clist_all.begin(); iter != clist_all.end(); ++iter){
|
||||
S3fsCurl* s3fscurl = *iter;
|
||||
clist_req.push_back(s3fscurl);
|
||||
}
|
||||
clist_all.clear();
|
||||
|
||||
// Send multi request.
|
||||
if(0 != (result = MultiPerform())){
|
||||
Clear();
|
||||
return result;
|
||||
}
|
||||
|
||||
// Read the result
|
||||
if(0 != (result = MultiRead())){
|
||||
Clear();
|
||||
return result;
|
||||
}
|
||||
|
||||
// Cleanup curl handle in multi handle
|
||||
ClearEx(false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// thread function for performing an S3fsCurl request
|
||||
//
|
||||
//
// thread function for performing an S3fsCurl request
//
// Runs the request's lazy setup (if registered), performs the request, then
// records its own thread id for the parent to join and posts the semaphore
// to release a worker slot.  Returns the result as a pointer-encoded int.
//
void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
    S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
    void* result = NULL;
    if(!s3fscurl){
        return (void*)(intptr_t)(-EIO);
    }
    if(s3fscurl->fpLazySetup){
        if(!s3fscurl->fpLazySetup(s3fscurl)){
            S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
            result = (void*)(intptr_t)(-EIO);
        }
    }

    if(!result){
        result = (void*)(intptr_t)(s3fscurl->RequestPerform());
        s3fscurl->DestroyCurlHandle(true, false);
    }

    // Record this thread id before posting the semaphore: the parent reaps
    // completed_tids after acquiring a slot, so the tid must already be
    // visible when the slot frees up.
    AutoLock lock(s3fscurl->completed_tids_lock);
    s3fscurl->completed_tids->push_back(pthread_self());
    s3fscurl->sem->post();

    return result;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
79
src/curl_multi.h
Normal file
79
src/curl_multi.h
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_MULTI_H_
|
||||
#define S3FS_CURL_MULTI_H_
|
||||
|
||||
//----------------------------------------------
|
||||
// Typedef
|
||||
//----------------------------------------------
|
||||
class S3fsCurl;
|
||||
|
||||
typedef std::vector<S3fsCurl*> s3fscurllist_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsMultiCurl
|
||||
//----------------------------------------------
|
||||
// Runs a batch of S3fsCurl requests in parallel (one thread per request,
// throttled to maxParallelism) with automatic retry via callbacks.
class S3fsMultiCurl
{
  private:
    const int maxParallelism;                  // max concurrent worker threads

    s3fscurllist_t clist_all;  // all of curl requests (queued, not yet sent)
    s3fscurllist_t clist_req;  // curl requests are sent (in flight / being evaluated)

    S3fsMultiSuccessCallback SuccessCallback;  // invoked per request on HTTP success
    S3fsMultiRetryCallback RetryCallback;      // builds a replacement request on failure

    pthread_mutex_t completed_tids_lock;       // guards completed_tids
    std::vector<pthread_t> completed_tids;     // finished worker threads awaiting join

  private:
    bool ClearEx(bool is_all);
    int MultiPerform();
    int MultiRead();

    // thread entry point; arg is the S3fsCurl* to run
    static void* RequestPerformWrapper(void* arg);

  public:
    explicit S3fsMultiCurl(int maxParallelism);
    ~S3fsMultiCurl();

    int GetMaxParallelism() { return maxParallelism; }

    S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
    S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
    bool Clear() { return ClearEx(true); }
    bool SetS3fsCurlObject(S3fsCurl* s3fscurl);
    int Request();
};
|
||||
|
||||
#endif // S3FS_CURL_MULTI_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
404
src/curl_util.cpp
Normal file
404
src/curl_util.cpp
Normal file
@ -0,0 +1,404 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "curl_util.h"
|
||||
#include "string_util.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Functions
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// curl_slist_sort_insert
|
||||
// This function is like curl_slist_append function, but this adds data by a-sorting.
|
||||
// Because AWS signature needs sorted header.
|
||||
//
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data)
|
||||
{
|
||||
if(!data){
|
||||
return list;
|
||||
}
|
||||
std::string strkey = data;
|
||||
std::string strval;
|
||||
|
||||
std::string::size_type pos = strkey.find(':', 0);
|
||||
if(std::string::npos != pos){
|
||||
strval = strkey.substr(pos + 1);
|
||||
strkey.erase(pos);
|
||||
}
|
||||
|
||||
return curl_slist_sort_insert(list, strkey.c_str(), strval.c_str());
|
||||
}
|
||||
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value)
|
||||
{
|
||||
if(!key){
|
||||
return list;
|
||||
}
|
||||
|
||||
// key & value are trimmed and lower (only key)
|
||||
std::string strkey = trim(std::string(key));
|
||||
std::string strval = value ? trim(std::string(value)) : "";
|
||||
std::string strnew = key + std::string(": ") + strval;
|
||||
char* data;
|
||||
if(NULL == (data = strdup(strnew.c_str()))){
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist **p = &list;
|
||||
for(;*p; p = &(*p)->next){
|
||||
std::string strcur = (*p)->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strcur.find(':', 0))){
|
||||
strcur.erase(pos);
|
||||
}
|
||||
|
||||
int result = strcasecmp(strkey.c_str(), strcur.c_str());
|
||||
if(0 == result){
|
||||
free((*p)->data);
|
||||
(*p)->data = data;
|
||||
return list;
|
||||
}else if(result < 0){
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct curl_slist* new_item;
|
||||
if(NULL == (new_item = static_cast<struct curl_slist*>(malloc(sizeof(*new_item))))){
|
||||
free(data);
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist* before = *p;
|
||||
*p = new_item;
|
||||
new_item->data = data;
|
||||
new_item->next = before;
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key)
|
||||
{
|
||||
if(!key){
|
||||
return list;
|
||||
}
|
||||
|
||||
std::string strkey = trim(std::string(key));
|
||||
struct curl_slist **p = &list;
|
||||
for(;*p; p = &(*p)->next){
|
||||
std::string strcur = (*p)->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strcur.find(':', 0))){
|
||||
strcur.erase(pos);
|
||||
}
|
||||
|
||||
int result = strcasecmp(strkey.c_str(), strcur.c_str());
|
||||
if(0 == result){
|
||||
free((*p)->data);
|
||||
struct curl_slist *tmp = *p;
|
||||
*p = (*p)->next;
|
||||
free(tmp);
|
||||
}
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list)
|
||||
{
|
||||
std::string sorted_headers;
|
||||
|
||||
if(!list){
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strkey = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strkey.find(':', 0))){
|
||||
if (trim(strkey.substr(pos + 1)).empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strkey.erase(pos);
|
||||
}
|
||||
if(0 < sorted_headers.length()){
|
||||
sorted_headers += ";";
|
||||
}
|
||||
sorted_headers += lower(strkey);
|
||||
}
|
||||
|
||||
return sorted_headers;
|
||||
}
|
||||
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key)
|
||||
{
|
||||
if(!list){
|
||||
return "";
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strkey = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strkey.find(':', 0))){
|
||||
if(0 == strcasecmp(trim(strkey.substr(0, pos)).c_str(), key.c_str())){
|
||||
return trim(strkey.substr(pos+1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
std::string get_canonical_headers(const struct curl_slist* list)
|
||||
{
|
||||
std::string canonical_headers;
|
||||
|
||||
if(!list){
|
||||
canonical_headers = "\n";
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strhead = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strhead.find(':', 0))){
|
||||
std::string strkey = trim(lower(strhead.substr(0, pos)));
|
||||
std::string strval = trim(strhead.substr(pos + 1));
|
||||
if (strval.empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strhead = strkey + ":" + strval;
|
||||
}else{
|
||||
strhead = trim(lower(strhead));
|
||||
}
|
||||
canonical_headers += strhead;
|
||||
canonical_headers += "\n";
|
||||
}
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz)
|
||||
{
|
||||
std::string canonical_headers;
|
||||
|
||||
if(!list){
|
||||
canonical_headers = "\n";
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
for( ; list; list = list->next){
|
||||
std::string strhead = list->data;
|
||||
size_t pos;
|
||||
if(std::string::npos != (pos = strhead.find(':', 0))){
|
||||
std::string strkey = trim(lower(strhead.substr(0, pos)));
|
||||
std::string strval = trim(strhead.substr(pos + 1));
|
||||
if (strval.empty()) {
|
||||
// skip empty-value headers (as they are discarded by libcurl)
|
||||
continue;
|
||||
}
|
||||
strhead = strkey + ":" + strval;
|
||||
}else{
|
||||
strhead = trim(lower(strhead));
|
||||
}
|
||||
if(only_amz && strhead.substr(0, 5) != "x-amz"){
|
||||
continue;
|
||||
}
|
||||
canonical_headers += strhead;
|
||||
canonical_headers += "\n";
|
||||
}
|
||||
return canonical_headers;
|
||||
}
|
||||
|
||||
// function for using global values
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url)
|
||||
{
|
||||
if(!realpath){
|
||||
return false;
|
||||
}
|
||||
resourcepath = urlEncode(service_path + bucket + realpath);
|
||||
url = s3host + resourcepath;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
// Rewrite url for the configured addressing style: virtual-hosted style
// moves the bucket name into the host ("scheme://bucket.host/path");
// path-request style keeps "/bucket" in the path with the host unchanged.
//
std::string prepare_url(const char* url)
{
    S3FS_PRN_INFO3("URL is %s", url);

    std::string uri;
    std::string hostname;
    std::string path;
    std::string url_str = std::string(url);
    std::string token = std::string("/") + bucket;
    int bucket_pos;
    int bucket_length = token.size();
    int uri_length = 0;

    // Split off the scheme prefix, if any.
    if(!strncasecmp(url_str.c_str(), "https://", 8)){
        uri_length = 8;
    } else if(!strncasecmp(url_str.c_str(), "http://", 7)) {
        uri_length = 7;
    }
    uri = url_str.substr(0, uri_length);
    // NOTE(review): find() returns std::string::npos when "/bucket" is not
    // present, and storing that in an int truncates it; callers appear to
    // always pass URLs containing the bucket — confirm before relying on it.
    bucket_pos = url_str.find(token, uri_length);

    if(!pathrequeststyle){
        // virtual-hosted style: host becomes "bucket.<endpoint host>"
        hostname = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length);
        path = url_str.substr((bucket_pos + bucket_length));
    }else{
        // path-request style: host unchanged, path is "/bucket/..."
        hostname = url_str.substr(uri_length, bucket_pos - uri_length);
        std::string part = url_str.substr((bucket_pos + bucket_length));
        if('/' != part[0]){
            part = "/" + part;
        }
        path = "/" + bucket + part;
    }

    url_str = uri + hostname + path;

    S3FS_PRN_INFO3("URL changed is %s", url_str.c_str());

    return url_str;
}
|
||||
|
||||
// [TODO]
|
||||
// This function uses temporary file, but should not use it.
|
||||
// For not using it, we implement function in each auth file(openssl, nss. gnutls).
|
||||
//
|
||||
// [TODO]
// This function uses temporary file, but should not use it.
// For not using it, we implement function in each auth file(openssl, nss. gnutls).
//
// Compute the base64 MD5 of the length bytes at pstr by spooling them
// through a temporary file (s3fs_get_content_md5 operates on an fd).
// Returns false on empty/NULL input or on any file or MD5 failure.
//
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5)
{
    if(!pstr || '\0' == pstr[0]){
        S3FS_PRN_ERR("Parameter is wrong.");
        return false;
    }
    FILE* fp;
    if(NULL == (fp = tmpfile())){
        S3FS_PRN_ERR("Could not make tmpfile.");
        return false;
    }
    if(length != fwrite(pstr, sizeof(char), length, fp)){
        S3FS_PRN_ERR("Failed to write tmpfile.");
        fclose(fp);
        return false;
    }
    int fd;
    // Flush and rewind before handing the underlying fd to the MD5 routine,
    // so it sees all written bytes from the start of the file.
    if(0 != fflush(fp) || 0 != fseek(fp, 0L, SEEK_SET) || -1 == (fd = fileno(fp))){
        S3FS_PRN_ERR("Failed to make MD5.");
        fclose(fp);
        return false;
    }
    // base64 md5
    md5 = s3fs_get_content_md5(fd);
    if(0 == md5.length()){
        S3FS_PRN_ERR("Failed to make MD5.");
        fclose(fp);
        return false;
    }
    fclose(fp);
    return true;
}
|
||||
|
||||
std::string url_to_host(const std::string &url)
|
||||
{
|
||||
S3FS_PRN_INFO3("url is %s", url.c_str());
|
||||
|
||||
static const std::string http = "http://";
|
||||
static const std::string https = "https://";
|
||||
std::string hostname;
|
||||
|
||||
if (is_prefix(url.c_str(), http.c_str())) {
|
||||
hostname = url.substr(http.size());
|
||||
} else if (is_prefix(url.c_str(), https.c_str())) {
|
||||
hostname = url.substr(https.size());
|
||||
} else {
|
||||
S3FS_PRN_EXIT("url does not begin with http:// or https://");
|
||||
abort();
|
||||
}
|
||||
|
||||
size_t idx;
|
||||
if ((idx = hostname.find('/')) != std::string::npos) {
|
||||
return hostname.substr(0, idx);
|
||||
} else {
|
||||
return hostname;
|
||||
}
|
||||
}
|
||||
|
||||
std::string get_bucket_host()
|
||||
{
|
||||
if(!pathrequeststyle){
|
||||
return bucket + "." + url_to_host(s3host);
|
||||
}
|
||||
return url_to_host(s3host);
|
||||
}
|
||||
|
||||
//
// Map a curl debug-callback info type to the log-line prefix used for it;
// unknown types get an empty prefix.
//
const char* getCurlDebugHead(curl_infotype type)
{
    switch(type){
        case CURLINFO_DATA_IN:
            return "BODY <";
        case CURLINFO_DATA_OUT:
            return "BODY >";
        case CURLINFO_HEADER_IN:
            return "<";
        case CURLINFO_HEADER_OUT:
            return ">";
        default:
            return "";
    }
}
|
||||
|
||||
//
// Compare two ETag strings, ignoring surrounding double quotes and case.
//
bool etag_equals(std::string s1, std::string s2)
{
    // Strip a matched pair of surrounding quotes from each copy.
    if(2 <= s1.length() && '\"' == s1[0] && '\"' == s1[s1.length() - 1]){
        s1 = s1.substr(1, s1.length() - 2);
    }
    if(2 <= s2.length() && '\"' == s2[0] && '\"' == s2[s2.length() - 1]){
        s2 = s2.substr(1, s2.length() - 2);
    }
    return 0 == strcasecmp(s1.c_str(), s2.c_str());
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
58
src/curl_util.h
Normal file
58
src/curl_util.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_CURL_UTIL_H_
|
||||
#define S3FS_CURL_UTIL_H_
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
class sse_type_t;
|
||||
|
||||
//----------------------------------------------
|
||||
// Functions
|
||||
//----------------------------------------------
|
||||
std::string GetContentMD5(int fd);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
struct curl_slist* curl_slist_remove(struct curl_slist* list, const char* key);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
|
||||
bool make_md5_from_binary(const char* pstr, size_t length, std::string& md5);
|
||||
std::string url_to_host(const std::string &url);
|
||||
std::string get_bucket_host();
|
||||
const char* getCurlDebugHead(curl_infotype type);
|
||||
|
||||
bool etag_equals(std::string s1, std::string s2);
|
||||
|
||||
#endif // S3FS_CURL_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1964
src/fdcache.cpp
1964
src/fdcache.cpp
File diff suppressed because it is too large
Load Diff
221
src/fdcache.h
221
src/fdcache.h
@ -1,121 +1,27 @@
|
||||
#ifndef FD_CACHE_H_
|
||||
#define FD_CACHE_H_
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
class CacheFileStat
|
||||
{
|
||||
private:
|
||||
std::string path;
|
||||
int fd;
|
||||
#ifndef S3FS_FDCACHE_H_
|
||||
#define S3FS_FDCACHE_H_
|
||||
|
||||
private:
|
||||
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
|
||||
|
||||
public:
|
||||
static bool DeleteCacheFileStat(const char* path);
|
||||
|
||||
CacheFileStat(const char* tpath = NULL);
|
||||
~CacheFileStat();
|
||||
|
||||
bool Open(void);
|
||||
bool Release(void);
|
||||
bool SetPath(const char* tpath, bool is_open = true);
|
||||
int GetFd(void) const { return fd; }
|
||||
};
|
||||
|
||||
//------------------------------------------------
|
||||
// fdpage & PageList
|
||||
//------------------------------------------------
|
||||
// page block information
|
||||
struct fdpage
|
||||
{
|
||||
off_t offset;
|
||||
size_t bytes;
|
||||
bool init;
|
||||
|
||||
fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
|
||||
: offset(start), bytes(size), init(is_init) {}
|
||||
|
||||
off_t next(void) const { return (offset + bytes); }
|
||||
off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
|
||||
};
|
||||
typedef std::list<struct fdpage*> fdpage_list_t;
|
||||
|
||||
//
|
||||
// Management of loading area/modifying
|
||||
//
|
||||
class PageList
|
||||
{
|
||||
private:
|
||||
fdpage_list_t pages;
|
||||
|
||||
private:
|
||||
void Clear(void);
|
||||
|
||||
public:
|
||||
static void FreeList(fdpage_list_t& list);
|
||||
|
||||
PageList(off_t size = 0, bool is_init = false);
|
||||
~PageList();
|
||||
|
||||
off_t Size(void) const;
|
||||
int Resize(off_t size, bool is_init);
|
||||
int Init(off_t size, bool is_init);
|
||||
bool IsInit(off_t start, off_t size);
|
||||
bool SetInit(off_t start, off_t size, bool is_init = true);
|
||||
bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
|
||||
int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0);
|
||||
bool Serialize(CacheFileStat& file, bool is_output);
|
||||
void Dump(void);
|
||||
};
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdEntity
|
||||
//------------------------------------------------
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
PageList pagelist;
|
||||
int refcnt; // reference count
|
||||
std::string path; // object path
|
||||
std::string cachepath; // local cache file path
|
||||
int fd; // file discriptor(tmp file or cache file)
|
||||
FILE* file; // file pointer(tmp file or cache file)
|
||||
bool is_modify; // if file is changed, this flag is true
|
||||
|
||||
private:
|
||||
void Clear(void);
|
||||
int Dup(void);
|
||||
bool SetAllStatus(bool is_enable);
|
||||
|
||||
public:
|
||||
FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
~FdEntity();
|
||||
|
||||
void Close(void);
|
||||
bool IsOpen(void) const { return (-1 != fd); }
|
||||
int Open(off_t size = -1, time_t time = -1);
|
||||
const char* GetPath(void) const { return path.c_str(); }
|
||||
int GetFd(void) const { return fd; }
|
||||
int SetMtime(time_t time);
|
||||
bool GetSize(off_t& size);
|
||||
bool GetMtime(time_t& time);
|
||||
bool GetStats(struct stat& st);
|
||||
|
||||
bool SetAllEnable(void) { return SetAllStatus(true); }
|
||||
bool SetAllDisable(void) { return SetAllStatus(false); }
|
||||
bool LoadFull(off_t* size = NULL, bool force_load = false);
|
||||
int Load(off_t start, off_t size);
|
||||
int RowFlush(const char* tpath, headers_t& meta, bool ow_sse_flg, bool force_sync = false);
|
||||
int Flush(headers_t& meta, bool ow_sse_flg, bool force_sync = false) { return RowFlush(NULL, meta, ow_sse_flg, force_sync); }
|
||||
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(const char* bytes, off_t start, size_t size);
|
||||
};
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
#include "fdcache_entity.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdManager
|
||||
@ -123,34 +29,71 @@ typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value
|
||||
class FdManager
|
||||
{
|
||||
private:
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static size_t page_size;
|
||||
static FdManager singleton;
|
||||
static pthread_mutex_t fd_manager_lock;
|
||||
static pthread_mutex_t cache_cleanup_lock;
|
||||
static pthread_mutex_t reserved_diskspace_lock;
|
||||
static bool is_lock_init;
|
||||
static std::string cache_dir;
|
||||
static bool check_cache_dir_exist;
|
||||
static off_t free_disk_space; // limit free disk space
|
||||
static std::string check_cache_output;
|
||||
static bool checked_lseek;
|
||||
static bool have_lseek_hole;
|
||||
|
||||
fdent_map_t fent;
|
||||
fdent_map_t fent;
|
||||
|
||||
private:
|
||||
static off_t GetFreeDiskSpace(const char* path);
|
||||
void CleanupCacheDirInternal(const std::string &path = "");
|
||||
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
|
||||
|
||||
public:
|
||||
FdManager();
|
||||
~FdManager();
|
||||
FdManager();
|
||||
~FdManager();
|
||||
|
||||
// Reference singleton
|
||||
static FdManager* get(void) { return &singleton; }
|
||||
// Reference singleton
|
||||
static FdManager* get() { return &singleton; }
|
||||
|
||||
static bool DeleteCacheDirectory(void);
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
|
||||
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
|
||||
static size_t SetPageSize(size_t size);
|
||||
static size_t GetPageSize(void) { return FdManager::page_size; }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
|
||||
static bool DeleteCacheDirectory();
|
||||
static int DeleteCacheFile(const char* path);
|
||||
static bool SetCacheDir(const char* dir);
|
||||
static bool IsCacheDir() { return !FdManager::cache_dir.empty(); }
|
||||
static const char* GetCacheDir() { return FdManager::cache_dir.c_str(); }
|
||||
static bool SetCacheCheckOutput(const char* path);
|
||||
static const char* GetCacheCheckOutput() { return FdManager::check_cache_output.c_str(); }
|
||||
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
|
||||
static bool CheckCacheTopDir();
|
||||
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
|
||||
static bool SetCheckCacheDirExist(bool is_check);
|
||||
static bool CheckCacheDirExist();
|
||||
|
||||
FdEntity* GetFdEntity(const char* path);
|
||||
FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
|
||||
FdEntity* ExistOpen(const char* path) { return Open(path, -1, -1, false, false); }
|
||||
bool Close(FdEntity* ent);
|
||||
static off_t GetEnsureFreeDiskSpace();
|
||||
static off_t SetEnsureFreeDiskSpace(off_t size);
|
||||
static bool IsSafeDiskSpace(const char* path, off_t size);
|
||||
static void FreeReservedDiskSpace(off_t size);
|
||||
static bool ReserveDiskSpace(off_t size);
|
||||
static bool HaveLseekHole();
|
||||
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1, bool increase_ref = true);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
void Rename(const std::string &from, const std::string &to);
|
||||
bool Close(FdEntity* ent);
|
||||
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
|
||||
void CleanupCacheDir();
|
||||
|
||||
bool CheckAllCache();
|
||||
};
|
||||
|
||||
#endif // FD_CACHE_H_
|
||||
#endif // S3FS_FDCACHE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
144
src/fdcache_auto.cpp
Normal file
144
src/fdcache_auto.cpp
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "fdcache_auto.h"
|
||||
#include "fdcache.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// AutoFdEntity methods
|
||||
//------------------------------------------------
|
||||
// Construct without an associated FdEntity.
AutoFdEntity::AutoFdEntity() : pFdEntity(NULL)
{
}
|
||||
|
||||
// [NOTE]
|
||||
// The copy constructor should not be called, then this is private method.
|
||||
// Even if it is called, the consistency of the number of
|
||||
// references can be maintained, but this case is not assumed.
|
||||
//
|
||||
AutoFdEntity::AutoFdEntity(AutoFdEntity& other) : pFdEntity(NULL)
|
||||
{
|
||||
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
|
||||
|
||||
if(other.pFdEntity){
|
||||
other.pFdEntity->Dup();
|
||||
pFdEntity = other.pFdEntity;
|
||||
}
|
||||
}
|
||||
|
||||
// Automatically releases the held entity on destruction.
AutoFdEntity::~AutoFdEntity()
{
    Close();
}
|
||||
|
||||
bool AutoFdEntity::Close()
|
||||
{
|
||||
if(pFdEntity){
|
||||
if(!FdManager::get()->Close(pFdEntity)){
|
||||
S3FS_PRN_ERR("Failed to close fdentity.");
|
||||
return false;
|
||||
}
|
||||
pFdEntity = NULL;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// This method touches the internal fdentity with.
|
||||
// This is used to keep the file open.
|
||||
//
|
||||
bool AutoFdEntity::Detach()
|
||||
{
|
||||
if(!pFdEntity){
|
||||
S3FS_PRN_ERR("Does not have a associated FdEntity.");
|
||||
return false;
|
||||
}
|
||||
pFdEntity = NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// This method calls the FdManager method without incrementing the
|
||||
// reference count.
|
||||
// This means that it will only be used to map to a file descriptor
|
||||
// that was already open.
|
||||
//
|
||||
FdEntity* AutoFdEntity::GetFdEntity(const char* path, int existfd, bool increase_ref)
|
||||
{
|
||||
Close();
|
||||
|
||||
if(NULL == (pFdEntity = FdManager::get()->GetFdEntity(path, existfd, increase_ref))){
|
||||
S3FS_PRN_DBG("Could not find fd(file=%s, existfd=%d)", path, existfd);
|
||||
return NULL;
|
||||
}
|
||||
return pFdEntity;
|
||||
}
|
||||
|
||||
// Open path through FdManager and keep the resulting entity.
// Returns NULL when the open fails.
FdEntity* AutoFdEntity::Open(const char* path, headers_t* pmeta, off_t size, time_t time, bool force_tmpfile, bool is_create, bool no_fd_lock_wait)
{
    // drop any entity currently held
    Close();

    pFdEntity = FdManager::get()->Open(path, pmeta, size, time, force_tmpfile, is_create, no_fd_lock_wait);
    return pFdEntity;
}
|
||||
|
||||
// Open path through FdManager only if it already exists; keep the result.
// Returns NULL when no matching entity is found.
FdEntity* AutoFdEntity::ExistOpen(const char* path, int existfd, bool ignore_existfd)
{
    // drop any entity currently held
    Close();

    pFdEntity = FdManager::get()->ExistOpen(path, existfd, ignore_existfd);
    return pFdEntity;
}
|
||||
|
||||
// [NOTE]
|
||||
// This operator should not be called, then this is private method.
|
||||
// Even if it is called, the consistency of the number of
|
||||
// references can be maintained, but this case is not assumed.
|
||||
//
|
||||
bool AutoFdEntity::operator=(AutoFdEntity& other)
|
||||
{
|
||||
S3FS_PRN_WARN("This method should not be called. Please check the caller.");
|
||||
|
||||
Close();
|
||||
|
||||
if(other.pFdEntity){
|
||||
other.pFdEntity->Dup();
|
||||
pFdEntity = other.pFdEntity;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
63
src/fdcache_auto.h
Normal file
63
src/fdcache_auto.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_AUTO_H_
|
||||
#define S3FS_FDCACHE_AUTO_H_
|
||||
|
||||
#include "fdcache_entity.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class AutoFdEntity
|
||||
//------------------------------------------------
|
||||
// A class that opens fdentiry and closes it automatically.
|
||||
// This class object is used to prevent inconsistencies in
|
||||
// the number of references in fdentiry.
|
||||
// The methods are wrappers to the method of the FdManager class.
|
||||
//
|
||||
class AutoFdEntity
|
||||
{
|
||||
private:
|
||||
FdEntity* pFdEntity;
|
||||
|
||||
private:
|
||||
AutoFdEntity(AutoFdEntity& other);
|
||||
bool operator=(AutoFdEntity& other);
|
||||
|
||||
public:
|
||||
AutoFdEntity();
|
||||
~AutoFdEntity();
|
||||
|
||||
bool Close();
|
||||
bool Detach();
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1, bool increase_ref = true);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
};
|
||||
|
||||
#endif // S3FS_FDCACHE_AUTO_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1676
src/fdcache_entity.cpp
Normal file
1676
src/fdcache_entity.cpp
Normal file
File diff suppressed because it is too large
Load Diff
134
src/fdcache_entity.h
Normal file
134
src/fdcache_entity.h
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_ENTITY_H_
|
||||
#define S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
#include "fdcache_page.h"
|
||||
#include "metaheader.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// class FdEntity
|
||||
//------------------------------------------------
|
||||
class FdEntity
|
||||
{
|
||||
private:
|
||||
static bool mixmultipart; // whether multipart uploading can use copy api.
|
||||
|
||||
pthread_mutex_t fdent_lock;
|
||||
bool is_lock_init;
|
||||
int refcnt; // reference count
|
||||
std::string path; // object path
|
||||
int fd; // file descriptor(tmp file or cache file)
|
||||
FILE* pfile; // file pointer(tmp file or cache file)
|
||||
ino_t inode; // inode number for cache file
|
||||
headers_t orgmeta; // original headers at opening
|
||||
off_t size_orgmeta; // original file size in original headers
|
||||
|
||||
pthread_mutex_t fdent_data_lock;// protects the following members
|
||||
PageList pagelist;
|
||||
std::string upload_id; // for no cached multipart uploading when no disk space
|
||||
etaglist_t etaglist; // for no cached multipart uploading when no disk space
|
||||
off_t mp_start; // start position for no cached multipart(write method only)
|
||||
off_t mp_size; // size for no cached multipart(write method only)
|
||||
std::string cachepath; // local cache file path
|
||||
// (if this is empty, does not load/save pagelist.)
|
||||
std::string mirrorpath; // mirror file path to local cache file path
|
||||
volatile bool is_meta_pending;
|
||||
volatile time_t holding_mtime; // if mtime is updated while the file is open, it is set time_t value
|
||||
|
||||
private:
|
||||
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
|
||||
static ino_t GetInode(int fd);
|
||||
|
||||
void Clear();
|
||||
ino_t GetInode();
|
||||
int OpenMirrorFile();
|
||||
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
|
||||
bool SetAllStatusUnloaded() { return SetAllStatus(false); }
|
||||
int UploadPendingMeta();
|
||||
|
||||
public:
|
||||
static bool GetNoMixMultipart() { return mixmultipart; }
|
||||
static bool SetNoMixMultipart();
|
||||
|
||||
explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
|
||||
~FdEntity();
|
||||
|
||||
void Close();
|
||||
bool IsOpen() const { return (-1 != fd); }
|
||||
int Open(headers_t* pmeta = NULL, off_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
|
||||
bool OpenAndLoadAll(headers_t* pmeta = NULL, off_t* size = NULL, bool force_load = false);
|
||||
int Dup(bool lock_already_held = false);
|
||||
int GetRefCnt() const { return refcnt; } // [NOTE] Use only debugging
|
||||
|
||||
const char* GetPath() const { return path.c_str(); }
|
||||
bool RenamePath(const std::string& newpath, std::string& fentmapkey);
|
||||
int GetFd() const { return fd; }
|
||||
bool IsModified() const;
|
||||
bool MergeOrgMeta(headers_t& updatemeta);
|
||||
|
||||
bool GetStats(struct stat& st, bool lock_already_held = false);
|
||||
int SetCtime(time_t time, bool lock_already_held = false);
|
||||
int SetAtime(time_t time, bool lock_already_held = false);
|
||||
int SetMCtime(time_t mtime, time_t ctime, bool lock_already_held = false);
|
||||
bool UpdateCtime();
|
||||
bool UpdateAtime();
|
||||
bool UpdateMtime(bool clear_holding_mtime = false);
|
||||
bool UpdateMCtime();
|
||||
bool SetHoldingMtime(time_t mtime, bool lock_already_held = false);
|
||||
bool ClearHoldingMtime(bool lock_already_held = false);
|
||||
bool GetSize(off_t& size);
|
||||
bool GetXattr(std::string& xattr);
|
||||
bool SetXattr(const std::string& xattr);
|
||||
bool SetMode(mode_t mode);
|
||||
bool SetUId(uid_t uid);
|
||||
bool SetGId(gid_t gid);
|
||||
bool SetContentType(const char* path);
|
||||
|
||||
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end
|
||||
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
|
||||
int NoCachePreMultipartPost();
|
||||
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
|
||||
int NoCacheCompleteMultipartPost();
|
||||
|
||||
off_t BytesModified() const;
|
||||
int RowFlush(const char* tpath, bool force_sync = false);
|
||||
int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }
|
||||
|
||||
ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
|
||||
ssize_t Write(const char* bytes, off_t start, size_t size);
|
||||
|
||||
bool ReserveDiskSpace(off_t size);
|
||||
bool PunchHole(off_t start = 0, size_t size = 0);
|
||||
};
|
||||
|
||||
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*
|
||||
|
||||
#endif // S3FS_FDCACHE_ENTITY_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
974
src/fdcache_page.cpp
Normal file
974
src/fdcache_page.cpp
Normal file
@ -0,0 +1,974 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <sstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "fdcache_page.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
static const int CHECK_CACHEFILE_PART_SIZE = 1024 * 16; // Buffer size in PageList::CheckZeroAreaInFile()
|
||||
|
||||
//------------------------------------------------
|
||||
// fdpage_list_t utility
|
||||
//------------------------------------------------
|
||||
// Inline function for repeated processing
|
||||
inline void raw_add_compress_fdpage_list(fdpage_list_t& pagelist, fdpage& page, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
|
||||
{
|
||||
if(0 < page.bytes){
|
||||
// [NOTE]
|
||||
// The page variable is subject to change here.
|
||||
//
|
||||
if(ignore_load){
|
||||
page.loaded = default_load;
|
||||
}
|
||||
if(ignore_modify){
|
||||
page.modified = default_modify;
|
||||
}
|
||||
pagelist.push_back(page);
|
||||
}
|
||||
}
|
||||
|
||||
// Compress the page list
|
||||
//
|
||||
// ignore_load: Ignore the flag of loaded member and compress
|
||||
// ignore_modify: Ignore the flag of modified member and compress
|
||||
// default_load: loaded flag value in the list after compression when ignore_load=true
|
||||
// default_modify: modified flag value in the list after compression when default_modify=true
|
||||
//
|
||||
// NOTE: ignore_modify and ignore_load cannot both be true.
|
||||
//
|
||||
static fdpage_list_t raw_compress_fdpage_list(const fdpage_list_t& pages, bool ignore_load, bool ignore_modify, bool default_load, bool default_modify)
|
||||
{
|
||||
fdpage_list_t compressed_pages;
|
||||
fdpage tmppage;
|
||||
bool is_first = true;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(!is_first){
|
||||
if( (!ignore_load && (tmppage.loaded != iter->loaded )) ||
|
||||
(!ignore_modify && (tmppage.modified != iter->modified)) )
|
||||
{
|
||||
// Different from the previous area, add it to list
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}else{
|
||||
// Same as the previous area
|
||||
if(tmppage.next() != iter->offset){
|
||||
// These are not contiguous areas, add it to list
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}else{
|
||||
// These are contiguous areas
|
||||
|
||||
// add current area
|
||||
tmppage.bytes += iter->bytes;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
// first erea
|
||||
is_first = false;
|
||||
|
||||
// keep current area
|
||||
tmppage = fdpage(iter->offset, iter->bytes, (ignore_load ? default_load : iter->loaded), (ignore_modify ? default_modify : iter->modified));
|
||||
}
|
||||
}
|
||||
// add lastest area
|
||||
if(!is_first){
|
||||
raw_add_compress_fdpage_list(compressed_pages, tmppage, ignore_load, ignore_modify, default_load, default_modify);
|
||||
}
|
||||
return compressed_pages;
|
||||
}
|
||||
|
||||
// Compress while treating every page's modified flag as default_modify.
static fdpage_list_t compress_fdpage_list_ignore_modify(const fdpage_list_t& pages, bool default_modify)
{
    return raw_compress_fdpage_list(pages, false, true, false, default_modify);
}
|
||||
|
||||
// Compress while treating every page's loaded flag as default_load.
static fdpage_list_t compress_fdpage_list_ignore_load(const fdpage_list_t& pages, bool default_load)
{
    return raw_compress_fdpage_list(pages, true, false, default_load, false);
}
|
||||
|
||||
static fdpage_list_t compress_fdpage_list(const fdpage_list_t& pages)
{
    // Plain compression: merge adjacent pages whose loaded and modified
    // flags both match, honoring each page's own flags.
    const bool ignore_load   = false;
    const bool ignore_modify = false;
    return raw_compress_fdpage_list(pages, ignore_load, ignore_modify, /* default_load= */ false, /* default_modify= */ false);
}
|
||||
|
||||
static fdpage_list_t parse_partsize_fdpage_list(const fdpage_list_t& pages, off_t max_partsize)
{
    // Split every modified page into chunks no larger than is needed for one
    // multipart upload part. Unmodified pages are passed through untouched.
    fdpage_list_t result;

    for(fdpage_list_t::const_iterator page = pages.begin(); page != pages.end(); ++page){
        if(!page->modified){
            // not modified page is not parsed
            result.push_back(*page);
            continue;
        }

        // Walk the modified page, cutting off max_partsize chunks. Once the
        // remainder drops below (max_partsize * 2), emit it as a single final
        // chunk: splitting it further would create a piece smaller than
        // max_partsize, which must be avoided.
        fdpage chunk     = *page;
        off_t  position  = page->offset;
        off_t  remaining = page->bytes;

        while(0 < remaining){
            off_t cut;
            if((max_partsize * 2) < remaining){
                cut = max_partsize;
            }else{
                cut = remaining;
            }
            chunk.offset = position;
            chunk.bytes  = cut;
            result.push_back(chunk);

            position  += cut;
            remaining -= cut;
        }
    }
    return result;
}
|
||||
|
||||
//------------------------------------------------
|
||||
// PageList class methods
|
||||
//------------------------------------------------
|
||||
//
|
||||
// Examine and return the status of each block in the file.
|
||||
//
|
||||
// Assuming the file is a sparse file, check the HOLE and DATA areas
|
||||
// and return it in fdpage_list_t. The loaded flag of each fdpage is
|
||||
// set to false for HOLE blocks and true for DATA blocks.
|
||||
//
|
||||
bool PageList::GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list)
|
||||
{
|
||||
// [NOTE]
|
||||
// Express the status of the cache file using fdpage_list_t.
|
||||
// There is a hole in the cache file(sparse file), and the
|
||||
// state of this hole is expressed by the "loaded" member of
|
||||
// struct fdpage. (the "modified" member is not used)
|
||||
//
|
||||
if(0 == file_size){
|
||||
// file is empty
|
||||
return true;
|
||||
}
|
||||
|
||||
bool is_hole = false;
|
||||
int hole_pos = lseek(fd, 0, SEEK_HOLE);
|
||||
int data_pos = lseek(fd, 0, SEEK_DATA);
|
||||
if(-1 == hole_pos && -1 == data_pos){
|
||||
S3FS_PRN_ERR("Could not find the first position both HOLE and DATA in the file(fd=%d).", fd);
|
||||
return false;
|
||||
}else if(-1 == hole_pos){
|
||||
is_hole = false;
|
||||
}else if(-1 == data_pos){
|
||||
is_hole = true;
|
||||
}else if(hole_pos < data_pos){
|
||||
is_hole = true;
|
||||
}else{
|
||||
is_hole = false;
|
||||
}
|
||||
|
||||
for(int cur_pos = 0, next_pos = 0; 0 <= cur_pos; cur_pos = next_pos, is_hole = !is_hole){
|
||||
fdpage page;
|
||||
page.offset = cur_pos;
|
||||
page.loaded = !is_hole;
|
||||
page.modified = false;
|
||||
|
||||
next_pos = lseek(fd, cur_pos, (is_hole ? SEEK_DATA : SEEK_HOLE));
|
||||
if(-1 == next_pos){
|
||||
page.bytes = static_cast<off_t>(file_size - cur_pos);
|
||||
}else{
|
||||
page.bytes = next_pos - cur_pos;
|
||||
}
|
||||
sparse_list.push_back(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Confirm that the specified area is ZERO
|
||||
//
|
||||
bool PageList::CheckZeroAreaInFile(int fd, off_t start, size_t bytes)
|
||||
{
|
||||
char* readbuff = new char[CHECK_CACHEFILE_PART_SIZE];
|
||||
|
||||
for(size_t comp_bytes = 0, check_bytes = 0; comp_bytes < bytes; comp_bytes += check_bytes){
|
||||
if(CHECK_CACHEFILE_PART_SIZE < (bytes - comp_bytes)){
|
||||
check_bytes = CHECK_CACHEFILE_PART_SIZE;
|
||||
}else{
|
||||
check_bytes = bytes - comp_bytes;
|
||||
}
|
||||
bool found_bad_data = false;
|
||||
ssize_t read_bytes;
|
||||
if(-1 == (read_bytes = pread(fd, readbuff, check_bytes, (start + comp_bytes)))){
|
||||
S3FS_PRN_ERR("Something error is occurred in reading %zu bytes at %lld from file(%d).", check_bytes, static_cast<long long int>(start + comp_bytes), fd);
|
||||
found_bad_data = true;
|
||||
}else{
|
||||
check_bytes = static_cast<size_t>(read_bytes);
|
||||
for(size_t tmppos = 0; tmppos < check_bytes; ++tmppos){
|
||||
if('\0' != readbuff[tmppos]){
|
||||
// found not ZERO data.
|
||||
found_bad_data = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(found_bad_data){
|
||||
delete[] readbuff;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
delete[] readbuff;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Checks that the specified area matches the state of the sparse file.
|
||||
//
|
||||
// [Parameters]
|
||||
// checkpage: This is one state of the cache file, it is loaded from the stats file.
|
||||
// sparse_list: This is a list of the results of directly checking the cache file status(HOLE/DATA).
|
||||
// In the HOLE area, the "loaded" flag of fdpage is false. The DATA area has it set to true.
|
||||
// fd:          opened file descriptor to target cache file.
|
||||
//
|
||||
bool PageList::CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
{
    // Check the block status of a part(Check Area: checkpage) of the target file.
    // The elements of sparse_list have 5 patterns that overlap this block area.
    //
    // File          |<---...--------------------------------------...--->|
    // Check Area        (offset)<-------------------->(offset + bytes - 1)
    // Area case(0)   <------->
    // Area case(1)                                          <------->
    // Area case(2)          <-------->
    // Area case(3)                               <---------->
    // Area case(4)                  <----------->
    // Area case(5)          <----------------------------->
    //
    // The sparse_list entries are assumed to be in ascending offset order,
    // which is what makes the early "break" in case 1 valid.
    //
    bool result = true;

    for(fdpage_list_t::const_iterator iter = sparse_list.begin(); iter != sparse_list.end(); ++iter){
        // [check_start, check_start + check_bytes) is the overlap of this
        // sparse_list entry with checkpage.
        off_t check_start = 0;
        off_t check_bytes = 0;
        if((iter->offset + iter->bytes) <= checkpage.offset){
            // case 0: entirely before the check area
            continue;    // next

        }else if((checkpage.offset + checkpage.bytes) <= iter->offset){
            // case 1: entirely after the check area
            break;       // finish

        }else if(iter->offset < checkpage.offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){
            // case 2: overlaps the head of the check area
            check_start = checkpage.offset;
            check_bytes = iter->bytes - (checkpage.offset - iter->offset);

        }else if(iter->offset < (checkpage.offset + checkpage.bytes) && (checkpage.offset + checkpage.bytes) < (iter->offset + iter->bytes)){
            // case 3: overlaps the tail of the check area
            check_start = iter->offset;
            check_bytes = checkpage.bytes - (iter->offset - checkpage.offset);

        }else if(checkpage.offset < iter->offset && (iter->offset + iter->bytes) < (checkpage.offset + checkpage.bytes)){
            // case 4: strictly inside the check area
            check_start = iter->offset;
            check_bytes = iter->bytes;

        }else{  // (iter->offset <= checkpage.offset && (checkpage.offset + checkpage.bytes) <= (iter->offset + iter->bytes))
            // case 5: fully covers the check area
            check_start = checkpage.offset;
            check_bytes = checkpage.bytes;
        }

        // check target area type
        if(checkpage.loaded || checkpage.modified){
            // target area must be not HOLE(DATA) area.
            if(!iter->loaded){
                // Found bad area, it is HOLE area.
                fdpage page(check_start, check_bytes, false, false);
                err_area_list.push_back(page);
                result = false;
            }
        }else{
            // target area should be HOLE area.(If it is not a block boundary, it may be a DATA area.)
            if(iter->loaded){
                // need to check this area's each data, it should be ZERO.
                if(!PageList::CheckZeroAreaInFile(fd, check_start, static_cast<size_t>(check_bytes))){
                    // Discovered an area with uninitialized(non-zero) data,
                    // but it probably has no harmful effect: report it as a
                    // warning rather than an error.
                    fdpage page(check_start, check_bytes, true, false);
                    warn_area_list.push_back(page);
                    result = false;
                }
            }
        }
    }
    return result;
}
|
||||
|
||||
//------------------------------------------------
|
||||
// PageList methods
|
||||
//------------------------------------------------
|
||||
void PageList::FreeList(fdpage_list_t& list)
{
    // Release all pages in the list. fdpage holds no owned resources,
    // so clearing the container is sufficient.
    list.clear();
}
|
||||
|
||||
PageList::PageList(off_t size, bool is_loaded, bool is_modified)
{
    // Build a page list describing one contiguous area of "size" bytes
    // with the given initial loaded/modified state.
    Init(size, is_loaded, is_modified);
}
|
||||
|
||||
PageList::PageList(const PageList& other)
{
    // Deep-copy the page list of "other". std::list::assign copies every
    // element, matching the original element-by-element push_back loop.
    pages.assign(other.pages.begin(), other.pages.end());
}
|
||||
|
||||
PageList::~PageList()
{
    // Drop all page entries.
    Clear();
}
|
||||
|
||||
void PageList::Clear()
{
    // Remove every page; afterwards Size() reports 0.
    PageList::FreeList(pages);
}
|
||||
|
||||
bool PageList::Init(off_t size, bool is_loaded, bool is_modified)
|
||||
{
|
||||
Clear();
|
||||
if(0 < size){
|
||||
fdpage page(0, size, is_loaded, is_modified);
|
||||
pages.push_back(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
off_t PageList::Size() const
|
||||
{
|
||||
if(pages.empty()){
|
||||
return 0;
|
||||
}
|
||||
fdpage_list_t::const_reverse_iterator riter = pages.rbegin();
|
||||
return riter->next();
|
||||
}
|
||||
|
||||
bool PageList::Compress()
{
    // Merge adjacent pages that share the same loaded/modified state.
    // Always succeeds.
    pages = compress_fdpage_list(pages);
    return true;
}
|
||||
|
||||
bool PageList::Parse(off_t new_pos)
{
    // Ensure a page boundary exists exactly at new_pos by splitting the
    // page that contains it. Returns true when a boundary already exists
    // or a split was performed; false when new_pos lies outside all pages.
    for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
        if(new_pos == iter->offset){
            // nothing to do: a boundary already exists here
            return true;
        }else if(iter->offset < new_pos && new_pos < iter->next()){
            // new_pos falls strictly inside this page: split it into
            // [offset, new_pos) and [new_pos, next()), both keeping the
            // original loaded/modified flags.
            fdpage page(iter->offset, new_pos - iter->offset, iter->loaded, iter->modified);
            iter->bytes -= (new_pos - iter->offset);
            iter->offset = new_pos;
            // std::list::insert places the head fragment before the
            // (now shrunk) tail fragment; list iterators stay valid.
            pages.insert(iter, page);
            return true;
        }
    }
    return false;
}
|
||||
|
||||
bool PageList::Resize(off_t size, bool is_loaded, bool is_modified)
{
    // Grow or shrink the tracked file size to "size". Growth appends a new
    // area carrying the given flags; shrinking truncates or erases pages
    // past the new end. Existing areas keep their flags.
    off_t total = Size();

    if(0 == total){
        Init(size, is_loaded, is_modified);

    }else if(total < size){
        // add new area
        fdpage page(total, (size - total), is_loaded, is_modified);
        pages.push_back(page);

    }else if(size < total){
        // cut area
        for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ){
            if(iter->next() <= size){
                // entirely inside the new size: keep
                ++iter;
            }else{
                if(size <= iter->offset){
                    // entirely past the new size: erase (erase advances iter)
                    iter = pages.erase(iter);
                }else{
                    // straddles the new end: shrink in place; the next loop
                    // pass sees next() == size and advances past it.
                    iter->bytes = size - iter->offset;
                }
            }
        }
    }else{    // total == size
        // nothing to do
    }
    // compress area
    return Compress();
}
|
||||
|
||||
bool PageList::IsPageLoaded(off_t start, off_t size) const
{
    // Return true when every page overlapping [start, start + size) has its
    // loaded flag set. size == 0 means "check through the end of the list".
    for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
        if(iter->end() < start){
            // entirely before the checked range
            continue;
        }
        if(!iter->loaded){
            return false;
        }
        if(0 != size && start + size <= iter->next()){
            // this page reaches the end of the checked range
            break;
        }
    }
    return true;
}
|
||||
|
||||
bool PageList::SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus, bool is_compress)
{
    // Mark the area [start, start + size) with the loaded/modified flags
    // encoded in pstatus, growing the list when the area extends past the
    // current end. When is_compress is true the list is re-merged afterwards.
    off_t now_size    = Size();
    bool  is_loaded   = (PAGE_LOAD_MODIFIED == pstatus || PAGE_LOADED == pstatus);
    bool  is_modified = (PAGE_LOAD_MODIFIED == pstatus || PAGE_MODIFIED == pstatus);

    if(now_size <= start){
        // area begins at or past the current end: pure growth
        if(now_size < start){
            // add
            Resize(start, false, is_modified);    // set modified flag from now end pos to specified start pos.
        }
        Resize(start + size, is_loaded, is_modified);

    }else if(now_size <= start + size){
        // area overlaps the current end: truncate then re-extend
        // cut
        Resize(start, false, false);              // not changed loaded/modified flags in existing area.
        // add
        Resize(start + size, is_loaded, is_modified);

    }else{
        // start-size are inner pages area
        // parse "start", and "start + size" position so page boundaries
        // line up exactly with the target range
        Parse(start);
        Parse(start + size);

        // set loaded flag on every page inside the range
        for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
            if(iter->end() < start){
                continue;
            }else if(start + size <= iter->offset){
                break;
            }else{
                iter->loaded   = is_loaded;
                iter->modified = is_modified;
            }
        }
    }
    // compress area
    return (is_compress ? Compress() : true);
}
|
||||
|
||||
bool PageList::FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(start <= iter->end()){
|
||||
if(!iter->loaded && !iter->modified){ // Do not load unloaded and modified areas
|
||||
resstart = iter->offset;
|
||||
ressize = iter->bytes;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
off_t PageList::GetTotalUnloadedPageSize(off_t start, off_t size) const
|
||||
{
|
||||
off_t restsize = 0;
|
||||
off_t next = start + size;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->next() <= start){
|
||||
continue;
|
||||
}
|
||||
if(next <= iter->offset){
|
||||
break;
|
||||
}
|
||||
if(iter->loaded || iter->modified){
|
||||
continue;
|
||||
}
|
||||
off_t tmpsize;
|
||||
if(iter->offset <= start){
|
||||
if(iter->next() <= next){
|
||||
tmpsize = (iter->next() - start);
|
||||
}else{
|
||||
tmpsize = next - start; // = size
|
||||
}
|
||||
}else{
|
||||
if(iter->next() <= next){
|
||||
tmpsize = iter->next() - iter->offset; // = iter->bytes
|
||||
}else{
|
||||
tmpsize = next - iter->offset;
|
||||
}
|
||||
}
|
||||
restsize += tmpsize;
|
||||
}
|
||||
return restsize;
|
||||
}
|
||||
|
||||
int PageList::GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start, off_t size) const
{
    // Append to unloaded_list the areas inside [start, start + size) that
    // still need downloading (neither loaded nor modified), merging entries
    // that turn out contiguous. size == 0 means "through the end of file".
    // Returns the resulting list length.
    if(0 == size){
        if(start < Size()){
            size = Size() - start;
        }
    }
    const off_t window_end = start + size;

    for(fdpage_list_t::const_iterator page = pages.begin(); page != pages.end(); ++page){
        if(page->next() <= start){
            continue;                   // entirely before the window
        }
        if(window_end <= page->offset){
            break;                      // past the window; pages are ordered
        }
        if(page->loaded || page->modified){
            continue;                   // already loaded or modified
        }

        // clip the page to the requested window
        const off_t area_start = std::max(page->offset, start);
        const off_t area_bytes = std::min(page->next(), window_end) - area_start;

        // extend the last result entry when contiguous, otherwise append
        fdpage_list_t::reverse_iterator last = unloaded_list.rbegin();
        if(last != unloaded_list.rend() && last->next() == area_start){
            last->bytes += area_bytes;
        }else{
            unloaded_list.push_back(fdpage(area_start, area_bytes, false, false));
        }
    }
    return unloaded_list.size();
}
|
||||
|
||||
// [NOTE]
|
||||
// This method is called in advance when mixing POST and COPY in multi-part upload.
|
||||
// The minimum size of each part must be 5 MB, and the data area below this must be
|
||||
// downloaded from S3.
|
||||
// This method checks the current PageList status and returns the area that needs
|
||||
// to be downloaded so that each part is at least 5 MB.
|
||||
//
|
||||
bool PageList::GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize)
{
    // Build two page lists for a mixed(POST + COPY) multipart upload:
    //   dlpages    - areas that must first be downloaded so that every
    //                uploaded part reaches MIN_MULTIPART_SIZE
    //   mixuppages - the full sequence of parts, modified=true meaning
    //                "upload the data", modified=false meaning "server-side copy"
    //
    // compress before this processing
    if(!Compress()){
        return false;
    }

    // make a list by modified flag
    fdpage_list_t modified_pages = compress_fdpage_list_ignore_load(pages, false);
    fdpage_list_t download_pages;     // A non-contiguous page list showing the areas that need to be downloaded
    fdpage_list_t mixupload_pages;    // A continuous page list showing only modified flags for mixupload
    // prev_page accumulates the run currently being decided; it starts as a
    // zero-byte, not-modified page so the very first iteration falls into
    // the "previous differs" branches.
    fdpage        prev_page;
    for(fdpage_list_t::const_iterator iter = modified_pages.begin(); iter != modified_pages.end(); ++iter){
        if(iter->modified){
            // current is modified area
            if(!prev_page.modified){
                // previous is not modified area
                if(prev_page.bytes < MIN_MULTIPART_SIZE){
                    // previous(not modified) area is too small for one multipart size,
                    // then all of previous area is needed to download.
                    download_pages.push_back(prev_page);

                    // previous(not modified) area is set upload area.
                    prev_page.modified = true;
                    mixupload_pages.push_back(prev_page);
                }else{
                    // previous(not modified) area is set copy area.
                    prev_page.modified = false;
                    mixupload_pages.push_back(prev_page);
                }
                // set current to previous
                prev_page = *iter;
            }else{
                // previous is modified area, too: extend the run
                prev_page.bytes += iter->bytes;
            }

        }else{
            // current is not modified area
            if(!prev_page.modified){
                // previous is not modified area, too: extend the run
                prev_page.bytes += iter->bytes;

            }else{
                // previous is modified area
                if(prev_page.bytes < MIN_MULTIPART_SIZE){
                    // previous(modified) area is too small for one multipart size,
                    // then part or all of current area is needed to download.
                    off_t missing_bytes = MIN_MULTIPART_SIZE - prev_page.bytes;

                    if((missing_bytes + MIN_MULTIPART_SIZE) < iter->bytes){
                        // The current size is larger than the missing size, and the remainder
                        // after deducting the missing size is larger than the minimum size.

                        fdpage missing_page(iter->offset, missing_bytes, false, false);
                        download_pages.push_back(missing_page);

                        // previous(not modified) area is set upload area.
                        prev_page.bytes = MIN_MULTIPART_SIZE;
                        mixupload_pages.push_back(prev_page);

                        // set current to previous
                        prev_page = *iter;
                        prev_page.offset += missing_bytes;
                        prev_page.bytes  -= missing_bytes;

                    }else{
                        // The current size is less than the missing size, or the remaining
                        // size less the missing size is less than the minimum size.
                        download_pages.push_back(*iter);

                        // add current to previous
                        prev_page.bytes += iter->bytes;
                    }

                }else{
                    // previous(modified) area is enough size for one multipart size.
                    mixupload_pages.push_back(prev_page);

                    // set current to previous
                    prev_page = *iter;
                }
            }
        }
    }
    // latest(trailing) area still held in prev_page
    if(0 < prev_page.bytes){
        mixupload_pages.push_back(prev_page);
    }

    // compress
    dlpages    = compress_fdpage_list_ignore_modify(download_pages, false);
    mixuppages = compress_fdpage_list_ignore_load(mixupload_pages, false);

    // parse by max pagesize
    dlpages    = parse_partsize_fdpage_list(dlpages, max_partsize);
    mixuppages = parse_partsize_fdpage_list(mixuppages, max_partsize);

    return true;
}
|
||||
|
||||
bool PageList::GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start, size_t size)
{
    // Build a compressed list of the pages in [start, start + size) that
    // hold no local data (not modified). size == 0 means "to end of file".
    //
    // compress before this processing
    if(!Compress()){
        return false;
    }

    // extract areas without data
    fdpage_list_t tmp_pagelist;
    off_t         stop_pos = (0L == size ? -1 : (start + size));
    for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
        if((iter->offset + iter->bytes) <= start){
            // [FIX] was "<": a page ending exactly at start has no overlap
            // with the requested window and must be skipped.
            continue;
        }
        if(-1 != stop_pos && stop_pos <= iter->offset){
            break;
        }
        if(iter->modified){
            continue;
        }

        fdpage  tmppage;
        tmppage.offset   = std::max(iter->offset, start);
        // [FIX] the ternary condition was inverted ("-1 != stop_pos"
        // returned the unclipped size), so a bounded request was never
        // clipped at stop_pos, and an unbounded request (stop_pos == -1)
        // was clipped against -1 instead.
        tmppage.bytes    = (-1 == stop_pos ? iter->bytes : std::min(iter->bytes, (stop_pos - tmppage.offset)));
        tmppage.loaded   = iter->loaded;
        tmppage.modified = iter->modified;

        tmp_pagelist.push_back(tmppage);
    }

    if(tmp_pagelist.empty()){
        nodata_pages.clear();
    }else{
        // compress
        nodata_pages = compress_fdpage_list(tmp_pagelist);
    }
    return true;
}
|
||||
|
||||
off_t PageList::BytesModified() const
|
||||
{
|
||||
off_t total = 0;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
total += iter->bytes;
|
||||
}
|
||||
}
|
||||
return total;
|
||||
}
|
||||
|
||||
bool PageList::IsModified() const
|
||||
{
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PageList::ClearAllModified()
|
||||
{
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(iter->modified){
|
||||
iter->modified = false;
|
||||
}
|
||||
}
|
||||
return Compress();
|
||||
}
|
||||
|
||||
bool PageList::Serialize(CacheFileStat& file, bool is_output, ino_t inode)
{
    // Save(is_output == true) or load(is_output == false) the page list
    // to/from the cache stats file. The on-disk format is one head line
    // "<inode>:<size>" (old format: "<size>" only) followed by one line per
    // page: "<offset>:<bytes>:<loaded 0|1>:<modified 0|1>" (the modified
    // field is absent in old-format files).
    if(!file.Open()){
        return false;
    }
    if(is_output){
        //
        // put to file
        //
        std::ostringstream ssall;
        ssall << inode << ":" << Size();

        for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
            ssall << "\n" << iter->offset << ":" << iter->bytes << ":" << (iter->loaded ? "1" : "0") << ":" << (iter->modified ? "1" : "0");
        }

        // truncate first so stale longer content cannot survive the rewrite
        if(-1 == ftruncate(file.GetFd(), 0)){
            S3FS_PRN_ERR("failed to truncate file(to 0) for stats(%d)", errno);
            return false;
        }
        std::string strall = ssall.str();
        if(0 >= pwrite(file.GetFd(), strall.c_str(), strall.length(), 0)){
            S3FS_PRN_ERR("failed to write stats(%d)", errno);
            return false;
        }

    }else{
        //
        // loading from file
        //
        struct stat st;
        memset(&st, 0, sizeof(struct stat));
        if(-1 == fstat(file.GetFd(), &st)){
            S3FS_PRN_ERR("fstat is failed. errno(%d)", errno);
            return false;
        }
        if(0 >= st.st_size){
            // nothing
            Init(0, false, false);
            return true;
        }
        char* ptmp = new char[st.st_size + 1];
        int result;
        // read from file
        if(0 >= (result = pread(file.GetFd(), ptmp, st.st_size, 0))){
            S3FS_PRN_ERR("failed to read stats(%d)", errno);
            delete[] ptmp;
            return false;
        }
        ptmp[result] = '\0';
        std::string oneline;
        std::istringstream ssall(ptmp);

        // loaded: reset the current list before repopulating it
        Clear();

        // load head line(for size and inode)
        off_t total;
        ino_t cache_inode;                  // if this value is 0, it means old format.
        if(!getline(ssall, oneline, '\n')){
            S3FS_PRN_ERR("failed to parse stats.");
            delete[] ptmp;
            return false;
        }else{
            std::istringstream sshead(oneline);
            std::string strhead1;
            std::string strhead2;

            // get first part in head line.
            if(!getline(sshead, strhead1, ':')){
                S3FS_PRN_ERR("failed to parse stats.");
                delete[] ptmp;
                return false;
            }
            // get second part in head line.
            if(!getline(sshead, strhead2, ':')){
                // old head format is "<size>\n"
                total       = cvt_strtoofft(strhead1.c_str(), /* base= */10);
                cache_inode = 0;
            }else{
                // current head format is "<inode>:<size>\n"
                total       = cvt_strtoofft(strhead2.c_str(), /* base= */10);
                cache_inode = static_cast<ino_t>(cvt_strtoofft(strhead1.c_str(), /* base= */10));
                if(0 == cache_inode){
                    S3FS_PRN_ERR("wrong inode number in parsed cache stats.");
                    delete[] ptmp;
                    return false;
                }
            }
        }
        // check inode number (skipped for old-format files whose inode is 0)
        if(0 != cache_inode && cache_inode != inode){
            S3FS_PRN_ERR("differ inode and inode number in parsed cache stats.");
            delete[] ptmp;
            return false;
        }

        // load each part
        bool is_err = false;
        while(getline(ssall, oneline, '\n')){
            std::string part;
            std::istringstream ssparts(oneline);
            // offset
            if(!getline(ssparts, part, ':')){
                is_err = true;
                break;
            }
            off_t offset = cvt_strtoofft(part.c_str(), /* base= */10);
            // size
            if(!getline(ssparts, part, ':')){
                is_err = true;
                break;
            }
            off_t size = cvt_strtoofft(part.c_str(), /* base= */10);
            // loaded
            if(!getline(ssparts, part, ':')){
                is_err = true;
                break;
            }
            bool is_loaded = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
            bool is_modified;
            if(!getline(ssparts, part, ':')){
                is_modified = false;        // old version does not have this part.
            }else{
                is_modified = (1 == cvt_strtoofft(part.c_str(), /* base= */10) ? true : false);
            }
            // add new area
            PageList::page_status pstatus =
                ( is_loaded && is_modified  ? PageList::PAGE_LOAD_MODIFIED :
                 !is_loaded && is_modified  ? PageList::PAGE_MODIFIED      :
                  is_loaded && !is_modified ? PageList::PAGE_LOADED        : PageList::PAGE_NOT_LOAD_MODIFIED );

            SetPageLoadedStatus(offset, size, pstatus);
        }
        delete[] ptmp;
        if(is_err){
            S3FS_PRN_ERR("failed to parse stats.");
            Clear();
            return false;
        }

        // check size: the rebuilt list must match the size recorded in the head line
        if(total != Size()){
            S3FS_PRN_ERR("different size(%lld - %lld).", static_cast<long long int>(total), static_cast<long long int>(Size()));
            Clear();
            return false;
        }
    }
    return true;
}
|
||||
|
||||
void PageList::Dump() const
{
    // Emit the whole page list to the debug log, one line per page.
    int cnt = 0;

    S3FS_PRN_DBG("pages = {");
    for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter, ++cnt){
        S3FS_PRN_DBG("  [%08d] -> {%014lld - %014lld : %s / %s}", cnt, static_cast<long long int>(iter->offset), static_cast<long long int>(iter->bytes), iter->loaded ? "loaded" : "unloaded", iter->modified ? "modified" : "not modified");
    }
    S3FS_PRN_DBG("}");
}
|
||||
|
||||
//
|
||||
// Compare the fdpage_list_t pages of the object with the state of the file.
|
||||
//
|
||||
// The loaded=true or modified=true area of pages must be a DATA block
|
||||
// (not a HOLE block) in the file.
|
||||
// The other area is a HOLE block in the file or is a DATA block(but the
|
||||
// data of the target area in that block should be ZERO).
|
||||
// If it is a bad area in the previous case, it will be reported as an error.
|
||||
// If the latter case does not match, it will be reported as a warning.
|
||||
//
|
||||
bool PageList::CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list)
|
||||
{
|
||||
err_area_list.clear();
|
||||
warn_area_list.clear();
|
||||
|
||||
// First, list the block disk allocation area of the cache file.
|
||||
// The cache file has holes(sparse file) and no disk block areas
|
||||
// are assigned to any holes.
|
||||
fdpage_list_t sparse_list;
|
||||
if(!PageList::GetSparseFilePages(fd, file_size, sparse_list)){
|
||||
S3FS_PRN_ERR("Something error is occurred in parsing hole/data of the cache file(%d).", fd);
|
||||
|
||||
fdpage page(0, static_cast<off_t>(file_size), false, false);
|
||||
err_area_list.push_back(page);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if(sparse_list.empty() && pages.empty()){
|
||||
// both file and stats information are empty, it means cache file size is ZERO.
|
||||
return true;
|
||||
}
|
||||
|
||||
// Compare each pages and sparse_list
|
||||
bool result = true;
|
||||
for(fdpage_list_t::const_iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
if(!PageList::CheckAreaInSparseFile(*iter, sparse_list, fd, err_area_list, warn_area_list)){
|
||||
result = false;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
132
src/fdcache_page.h
Normal file
132
src/fdcache_page.h
Normal file
@ -0,0 +1,132 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_PAGE_H_
|
||||
#define S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
#include "fdcache_stat.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// Symbols
|
||||
//------------------------------------------------
|
||||
// [NOTE]
|
||||
// If the following symbols in lseek whence are undefined, define them.
|
||||
// If it is not supported by lseek, s3fs judges by the processing result of lseek.
|
||||
//
|
||||
#ifndef SEEK_DATA
|
||||
#define SEEK_DATA 3
|
||||
#endif
|
||||
#ifndef SEEK_HOLE
|
||||
#define SEEK_HOLE 4
|
||||
#endif
|
||||
|
||||
//------------------------------------------------
|
||||
// Structure fdpage
|
||||
//------------------------------------------------
|
||||
// page block information
|
||||
// One contiguous area of the cache file together with its cache state.
struct fdpage
{
    off_t offset;       // start position of this area within the file
    off_t bytes;        // length of this area in bytes
    bool loaded;        // true when the area's data exists in the local cache file
    bool modified;      // true when the area was changed locally and not yet uploaded

    fdpage(off_t start = 0, off_t size = 0, bool is_loaded = false, bool is_modified = false) :
        offset(start), bytes(size), loaded(is_loaded), modified(is_modified) {}

    // First position AFTER this area (exclusive end).
    off_t next() const
    {
        return (offset + bytes);
    }
    // Last position INSIDE this area (inclusive end); 0 for an empty area.
    off_t end() const
    {
        return (0 < bytes ? offset + bytes - 1 : 0);
    }
};
|
||||
typedef std::list<struct fdpage> fdpage_list_t;
|
||||
|
||||
//------------------------------------------------
|
||||
// Class PageList
|
||||
//------------------------------------------------
|
||||
class FdEntity;
|
||||
|
||||
// cppcheck-suppress copyCtorAndEqOperator
|
||||
// Tracks the loaded/modified state of a cache file as an ordered,
// contiguous list of fdpage areas. Used to decide what must be
// downloaded from or uploaded to S3 for each open file.
class PageList
{
    friend class FdEntity;    // only one method access directly pages.

    private:
        fdpage_list_t pages;    // ordered, contiguous list of page areas

    public:
        // Combined loaded/modified state of an area.
        enum page_status{
            PAGE_NOT_LOAD_MODIFIED = 0,    // neither loaded nor modified
            PAGE_LOADED,                   // data exists in the cache file
            PAGE_MODIFIED,                 // changed locally, not loaded
            PAGE_LOAD_MODIFIED             // loaded and changed locally
        };

    private:
        // Enumerate the HOLE/DATA layout of a sparse cache file.
        static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
        // True when [start, start + bytes) of the file reads back as zeros.
        static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
        // Validate one page against the file's HOLE/DATA layout.
        static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);

        void Clear();
        // Merge adjacent pages that share the same state.
        bool Compress();
        // Ensure a page boundary exists exactly at new_pos.
        bool Parse(off_t new_pos);

    public:
        static void FreeList(fdpage_list_t& list);

        explicit PageList(off_t size = 0, bool is_loaded = false, bool is_modified = false);
        explicit PageList(const PageList& other);
        ~PageList();

        bool Init(off_t size, bool is_loaded, bool is_modified);
        off_t Size() const;
        bool Resize(off_t size, bool is_loaded, bool is_modified);

        bool IsPageLoaded(off_t start = 0, off_t size = 0) const;                              // size=0 is checking to end of list
        bool SetPageLoadedStatus(off_t start, off_t size, PageList::page_status pstatus = PAGE_LOADED, bool is_compress = true);
        bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
        off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const;                 // size=0 is checking to end of list
        int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const;  // size=0 is checking to end of list
        // Split pages into download/mix-upload lists so every multipart part
        // reaches the minimum part size.
        bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
        bool GetNoDataPageLists(fdpage_list_t& nodata_pages, off_t start = 0, size_t size = 0);

        off_t BytesModified() const;
        bool IsModified() const;
        bool ClearAllModified();

        // Save(is_output) or load the list to/from the cache stats file.
        bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
        void Dump() const;
        // Cross-check pages against the cache file's actual HOLE/DATA layout.
        bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
};
|
||||
|
||||
#endif // S3FS_FDCACHE_PAGE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
280
src/fdcache_stat.cpp
Normal file
280
src/fdcache_stat.cpp
Normal file
@ -0,0 +1,280 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <unistd.h>
|
||||
#include <sys/file.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "fdcache_stat.h"
|
||||
#include "fdcache.h"
|
||||
#include "s3fs_util.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat class methods
|
||||
//------------------------------------------------
|
||||
std::string CacheFileStat::GetCacheFileStatTopDir()
|
||||
{
|
||||
std::string top_path;
|
||||
if(!FdManager::IsCacheDir() || bucket.empty()){
|
||||
return top_path;
|
||||
}
|
||||
|
||||
// stat top dir( "/<cache_dir>/.<bucket_name>.stat" )
|
||||
top_path += FdManager::GetCacheDir();
|
||||
top_path += "/.";
|
||||
top_path += bucket;
|
||||
top_path += ".stat";
|
||||
return top_path;
|
||||
}
|
||||
|
||||
bool CacheFileStat::MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir)
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_ERR("The path to cache top dir is empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(is_create_dir){
|
||||
int result;
|
||||
if(0 != (result = mkdirp(top_path + mydirname(path), 0777))){
|
||||
S3FS_PRN_ERR("failed to create dir(%s) by errno(%d).", path, result);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(!path || '\0' == path[0]){
|
||||
sfile_path = top_path;
|
||||
}else{
|
||||
sfile_path = top_path + SAFESTRPTR(path);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CacheFileStat::CheckCacheFileStatTopDir()
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to check permission.");
|
||||
return true;
|
||||
}
|
||||
|
||||
return check_exist_dir_permission(top_path.c_str());
|
||||
}
|
||||
|
||||
bool CacheFileStat::DeleteCacheFileStat(const char* path)
|
||||
{
|
||||
if(!path || '\0' == path[0]){
|
||||
return false;
|
||||
}
|
||||
// stat path
|
||||
std::string sfile_path;
|
||||
if(!CacheFileStat::MakeCacheFileStatPath(path, sfile_path, false)){
|
||||
S3FS_PRN_ERR("failed to create cache stat file path(%s)", path);
|
||||
return false;
|
||||
}
|
||||
if(0 != unlink(sfile_path.c_str())){
|
||||
if(ENOENT == errno){
|
||||
S3FS_PRN_DBG("failed to delete file(%s): errno=%d", path, errno);
|
||||
}else{
|
||||
S3FS_PRN_ERR("failed to delete file(%s): errno=%d", path, errno);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If remove stat file directory, it should do before removing
|
||||
// file cache directory.
|
||||
//
|
||||
bool CacheFileStat::DeleteCacheFileStatDirectory()
|
||||
{
|
||||
std::string top_path = CacheFileStat::GetCacheFileStatTopDir();
|
||||
if(top_path.empty()){
|
||||
S3FS_PRN_INFO("The path to cache top dir is empty, thus not need to remove it.");
|
||||
return true;
|
||||
}
|
||||
return delete_files_in_dir(top_path.c_str(), true);
|
||||
}
|
||||
|
||||
// Rename the cache stat file from oldpath's to newpath's location.
// A pre-existing destination is unlinked first; a missing source is
// treated as success (nothing to move).
// NOTE(review): the move is done as link()+unlink(), which is not atomic
// and will fail across filesystems — presumably acceptable because both
// files live under the same stat top dir; confirm.
bool CacheFileStat::RenameCacheFileStat(const char* oldpath, const char* newpath)
{
    if(!oldpath || '\0' == oldpath[0] || !newpath || '\0' == newpath[0]){
        return false;
    }

    // stat path
    std::string old_filestat;
    std::string new_filestat;
    if(!CacheFileStat::MakeCacheFileStatPath(oldpath, old_filestat, false) || !CacheFileStat::MakeCacheFileStatPath(newpath, new_filestat, false)){
        return false;
    }

    // check new stat path
    struct stat st;
    if(0 == stat(new_filestat.c_str(), &st)){
        // new stat path is existed, then unlink it.
        if(-1 == unlink(new_filestat.c_str())){
            S3FS_PRN_ERR("failed to unlink new cache file stat path(%s) by errno(%d).", new_filestat.c_str(), errno);
            return false;
        }
    }

    // check old stat path
    if(0 != stat(old_filestat.c_str(), &st)){
        // old stat path is not existed, then nothing to do any more.
        return true;
    }

    // link and unlink
    if(-1 == link(old_filestat.c_str(), new_filestat.c_str())){
        S3FS_PRN_ERR("failed to link old cache file stat path(%s) to new cache file stat path(%s) by errno(%d).", old_filestat.c_str(), new_filestat.c_str(), errno);
        return false;
    }
    if(-1 == unlink(old_filestat.c_str())){
        S3FS_PRN_ERR("failed to unlink old cache file stat path(%s) by errno(%d).", old_filestat.c_str(), errno);
        return false;
    }
    return true;
}
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat methods
|
||||
//------------------------------------------------
|
||||
// Construct with an optional target path; when a non-empty path is
// given, the stat file is opened (and locked) immediately.
CacheFileStat::CacheFileStat(const char* tpath) : fd(-1)
{
    if(!tpath || '\0' == tpath[0]){
        return;
    }
    SetPath(tpath, true);
}
|
||||
|
||||
// Unlock and close the stat file if it is still open.
CacheFileStat::~CacheFileStat()
{
    Release();
}
|
||||
|
||||
bool CacheFileStat::SetPath(const char* tpath, bool is_open)
|
||||
{
|
||||
if(!tpath || '\0' == tpath[0]){
|
||||
return false;
|
||||
}
|
||||
if(!Release()){
|
||||
// could not close old stat file.
|
||||
return false;
|
||||
}
|
||||
path = tpath;
|
||||
if(!is_open){
|
||||
return true;
|
||||
}
|
||||
return Open();
|
||||
}
|
||||
|
||||
// Open the stat file for this->path and take an exclusive flock on it.
// readonly=true opens with O_RDONLY; otherwise O_CREAT|O_RDWR (0600).
// On any failure after open, the fd is closed and reset to -1.
// Returns false when the path is empty or any step fails; calling while
// already open succeeds without reopening.
bool CacheFileStat::RawOpen(bool readonly)
{
    if(path.empty()){
        return false;
    }
    if(-1 != fd){
        // already opened
        return true;
    }
    // stat path
    std::string sfile_path;
    if(!CacheFileStat::MakeCacheFileStatPath(path.c_str(), sfile_path, true)){
        S3FS_PRN_ERR("failed to create cache stat file path(%s)", path.c_str());
        return false;
    }
    // open
    if(readonly){
        if(-1 == (fd = open(sfile_path.c_str(), O_RDONLY))){
            S3FS_PRN_ERR("failed to read only open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
            return false;
        }
    }else{
        if(-1 == (fd = open(sfile_path.c_str(), O_CREAT|O_RDWR, 0600))){
            S3FS_PRN_ERR("failed to open cache stat file path(%s) - errno(%d)", path.c_str(), errno);
            return false;
        }
    }
    // lock (blocks until the exclusive lock is granted)
    if(-1 == flock(fd, LOCK_EX)){
        S3FS_PRN_ERR("failed to lock cache stat file(%s) - errno(%d)", path.c_str(), errno);
        close(fd);
        fd = -1;
        return false;
    }
    // seek top so readers/writers start from the beginning
    if(0 != lseek(fd, 0, SEEK_SET)){
        S3FS_PRN_ERR("failed to lseek cache stat file(%s) - errno(%d)", path.c_str(), errno);
        flock(fd, LOCK_UN);
        close(fd);
        fd = -1;
        return false;
    }
    S3FS_PRN_DBG("file locked(%s - %s)", path.c_str(), sfile_path.c_str());

    return true;
}
|
||||
|
||||
// Open the stat file read-write (creating it if necessary) and lock it.
bool CacheFileStat::Open()
{
    return RawOpen(false);
}
|
||||
|
||||
// Open the stat file read-only (it must already exist) and lock it.
bool CacheFileStat::ReadOnlyOpen()
{
    return RawOpen(true);
}
|
||||
|
||||
// Unlock and close the stat file.  Safe to call when nothing is open.
// Returns false if either flock(LOCK_UN) or close() fails, leaving fd
// unchanged in that case.
bool CacheFileStat::Release()
{
    if(-1 == fd){
        // already release
        return true;
    }
    // unlock
    if(-1 == flock(fd, LOCK_UN)){
        S3FS_PRN_ERR("failed to unlock cache stat file(%s) - errno(%d)", path.c_str(), errno);
        return false;
    }
    S3FS_PRN_DBG("file unlocked(%s)", path.c_str());

    if(-1 == close(fd)){
        S3FS_PRN_ERR("failed to close cache stat file(%s) - errno(%d)", path.c_str(), errno);
        return false;
    }
    fd = -1;

    return true;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
64
src/fdcache_stat.h
Normal file
64
src/fdcache_stat.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_FDCACHE_STAT_H_
|
||||
#define S3FS_FDCACHE_STAT_H_
|
||||
|
||||
//------------------------------------------------
|
||||
// CacheFileStat
|
||||
//------------------------------------------------
|
||||
// Wraps the per-object ".stat" file kept under the cache directory.
// The file records page (load/modify) state for a cached object and is
// held under an exclusive flock while open.
class CacheFileStat
{
    private:
        std::string path;   // object path this stat file belongs to
        int fd;             // open+locked stat file descriptor, -1 when closed

    private:
        static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);

        bool RawOpen(bool readonly);

    public:
        static std::string GetCacheFileStatTopDir();
        static bool DeleteCacheFileStat(const char* path);
        static bool CheckCacheFileStatTopDir();
        static bool DeleteCacheFileStatDirectory();
        static bool RenameCacheFileStat(const char* oldpath, const char* newpath);

        explicit CacheFileStat(const char* tpath = NULL);
        ~CacheFileStat();

        bool Open();          // open read-write, creating if needed
        bool ReadOnlyOpen();  // open read-only
        bool Release();       // unlock and close
        bool SetPath(const char* tpath, bool is_open = true);
        int GetFd() const { return fd; }
};
|
||||
|
||||
#endif // S3FS_FDCACHE_STAT_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
399
src/gnutls_auth.cpp
Normal file
399
src/gnutls_auth.cpp
Normal file
@ -0,0 +1,399 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <gcrypt.h>
|
||||
#include <gnutls/gnutls.h>
|
||||
#include <gnutls/crypto.h>
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
#include <nettle/md5.h>
|
||||
#include <nettle/sha1.h>
|
||||
#include <nettle/hmac.h>
|
||||
#endif
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE

// Return the human-readable name of the crypto backend in use.
// [FIX] unified the signature to "()": the nettle branch declared
// "(void)" while the gcrypt branch declared "()"; in C++ these are
// identical, so use one spelling for consistency.
const char* s3fs_crypt_lib_name()
{
    static const char version[] = "GnuTLS(nettle)";

    return version;
}

#else // USE_GNUTLS_NETTLE

// Return the human-readable name of the crypto backend in use.
const char* s3fs_crypt_lib_name()
{
    static const char version[] = "GnuTLS(gcrypt)";

    return version;
}

#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
// Process-wide crypto initialization: set up gnutls, and when built
// against gcrypt also run its mandatory version check.
bool s3fs_init_global_ssl()
{
    if(GNUTLS_E_SUCCESS != gnutls_global_init()){
        return false;
    }
#ifndef USE_GNUTLS_NETTLE
    // gcry_check_version() must be called before any other gcrypt use.
    if(NULL == gcry_check_version(NULL)){
        return false;
    }
#endif // USE_GNUTLS_NETTLE
    return true;
}
|
||||
|
||||
// Process-wide crypto teardown; mirrors s3fs_init_global_ssl().
bool s3fs_destroy_global_ssl()
{
    gnutls_global_deinit();
    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
// No-op for gnutls builds — presumably kept so all s3fs_auth backends
// expose the same interface; confirm against the other *_auth.cpp files.
bool s3fs_init_crypt_mutex()
{
    return true;
}
|
||||
|
||||
// No-op counterpart of s3fs_init_crypt_mutex() for gnutls builds.
bool s3fs_destroy_crypt_mutex()
{
    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
|
||||
// Compute HMAC-SHA1 of data with key using nettle.
// On success *digest is a new[] buffer of SHA1_DIGEST_SIZE bytes (caller
// frees with delete[]) and *digestlen is set accordingly.
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    if(!key || !data || !digest || !digestlen){
        return false;
    }

    *digest = new unsigned char[SHA1_DIGEST_SIZE];

    struct hmac_sha1_ctx ctx_hmac;
    hmac_sha1_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
    hmac_sha1_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
    hmac_sha1_digest(&ctx_hmac, SHA1_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
    *digestlen = SHA1_DIGEST_SIZE;

    return true;
}
|
||||
|
||||
// Compute HMAC-SHA256 of data with key using nettle.
// On success *digest is a new[] buffer of SHA256_DIGEST_SIZE bytes
// (caller frees with delete[]) and *digestlen is set accordingly.
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    if(!key || !data || !digest || !digestlen){
        return false;
    }

    *digest = new unsigned char[SHA256_DIGEST_SIZE];

    struct hmac_sha256_ctx ctx_hmac;
    hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
    hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
    hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
    *digestlen = SHA256_DIGEST_SIZE;

    return true;
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
// Compute HMAC-SHA1 of data with key via gnutls_hmac_fast().
// On success *digest is a new[] buffer (one spare byte is allocated past
// the digest) and *digestlen is the SHA1 MAC length; on failure *digest
// is reset to NULL.
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    if(!key || !data || !digest || !digestlen){
        return false;
    }

    if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
        return false;
    }
    *digest = new unsigned char[*digestlen + 1];
    if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
        delete[] *digest;
        *digest = NULL;
        return false;
    }
    return true;
}
|
||||
|
||||
// Compute HMAC-SHA256 of data with key via gnutls_hmac_fast().
// Same contract as s3fs_HMAC: *digest is new[]-allocated on success and
// reset to NULL on failure.
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    if(!key || !data || !digest || !digestlen){
        return false;
    }

    if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
        return false;
    }
    *digest = new unsigned char[*digestlen + 1];
    if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
        delete[] *digest;
        *digest = NULL;
        return false;
    }
    return true;
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
// Length of an MD5 digest in bytes.
size_t get_md5_digest_length()
{
    static const size_t md5_digest_bytes = 16;
    return md5_digest_bytes;
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
// Compute the MD5 digest of fd over [start, start+size) using nettle.
// size == -1 means "to the end of the file" (looked up with fstat()).
// Returns a new[]-allocated 16-byte digest, or NULL on read/stat error.
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
{
    struct md5_ctx ctx_md5;
    off_t bytes;
    unsigned char* result;

    if(-1 == size){
        struct stat st;
        if(-1 == fstat(fd, &st)){
            return NULL;
        }
        size = st.st_size;
    }

    md5_init(&ctx_md5);

    // Read in 512-byte chunks; a short read just shrinks the next step,
    // and EOF before `size` bytes simply ends the loop.
    for(off_t total = 0; total < size; total += bytes){
        off_t len = 512;
        unsigned char buf[len];   // NOTE(review): variable-length array — a GCC/Clang extension in C++
        bytes = len < (size - total) ? len : (size - total);
        bytes = pread(fd, buf, bytes, start + total);
        if(0 == bytes){
            // end of file
            break;
        }else if(-1 == bytes){
            // error
            S3FS_PRN_ERR("file read error(%d)", errno);
            return NULL;
        }
        md5_update(&ctx_md5, bytes, buf);
    }
    result = new unsigned char[get_md5_digest_length()];
    md5_digest(&ctx_md5, get_md5_digest_length(), result);

    return result;
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
// Compute the MD5 digest of fd over [start, start+size) using gcrypt.
// size == -1 means "to the end of the file" (looked up with fstat()).
// Returns a new[]-allocated 16-byte digest, or NULL on error; the gcrypt
// handle is closed on every exit path after a successful open.
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
{
    gcry_md_hd_t ctx_md5;
    gcry_error_t err;
    off_t bytes;
    unsigned char* result;

    if(-1 == size){
        struct stat st;
        if(-1 == fstat(fd, &st)){
            return NULL;
        }
        size = st.st_size;
    }

    if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
        S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
        return NULL;
    }

    // Read in 512-byte chunks; EOF before `size` bytes ends the loop.
    for(off_t total = 0; total < size; total += bytes){
        off_t len = 512;
        char buf[len];   // NOTE(review): variable-length array — a GCC/Clang extension in C++
        bytes = len < (size - total) ? len : (size - total);
        bytes = pread(fd, buf, bytes, start + total);
        if(0 == bytes){
            // end of file
            break;
        }else if(-1 == bytes){
            // error
            S3FS_PRN_ERR("file read error(%d)", errno);
            gcry_md_close(ctx_md5);
            return NULL;
        }
        gcry_md_write(ctx_md5, buf, bytes);
    }
    result = new unsigned char[get_md5_digest_length()];
    memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
    gcry_md_close(ctx_md5);

    return result;
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
// Length of a SHA-256 digest in bytes.
size_t get_sha256_digest_length()
{
    static const size_t sha256_digest_bytes = 32;
    return sha256_digest_bytes;
}
|
||||
|
||||
#ifdef USE_GNUTLS_NETTLE
|
||||
// Compute SHA-256 of a memory buffer using nettle.
// *digest is a new[]-allocated 32-byte buffer (caller frees with
// delete[]); *digestlen is set to the digest length.  Always succeeds.
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
    (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
    *digest = new unsigned char[*digestlen];

    struct sha256_ctx ctx_sha256;
    sha256_init(&ctx_sha256);
    sha256_update(&ctx_sha256, datalen, data);
    sha256_digest(&ctx_sha256, *digestlen, *digest);

    return true;
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
struct sha256_ctx ctx_sha256;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
sha256_init(&ctx_sha256);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
off_t len = 512;
|
||||
unsigned char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
sha256_update(&ctx_sha256, bytes, buf);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[len];
|
||||
|
||||
gcry_md_hd_t ctx_sha256;
|
||||
gcry_error_t err;
|
||||
if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
|
||||
S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
|
||||
delete[] *digest;
|
||||
return false;
|
||||
}
|
||||
gcry_md_write(ctx_sha256, data, datalen);
|
||||
memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
|
||||
gcry_md_close(ctx_sha256);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Compute the SHA-256 digest of fd over [start, start+size) using gcrypt.
// size == -1 means "to the end of the file" (looked up with fstat()).
// Returns a new[]-allocated 32-byte digest, or NULL on error; the gcrypt
// handle is closed on every exit path after a successful open.
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
{
    gcry_md_hd_t ctx_sha256;
    gcry_error_t err;
    off_t bytes;
    unsigned char* result;

    if(-1 == size){
        struct stat st;
        if(-1 == fstat(fd, &st)){
            return NULL;
        }
        size = st.st_size;
    }

    if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
        S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
        return NULL;
    }

    // Read in 512-byte chunks; EOF before `size` bytes ends the loop.
    for(off_t total = 0; total < size; total += bytes){
        off_t len = 512;
        char buf[len];   // NOTE(review): variable-length array — a GCC/Clang extension in C++
        bytes = len < (size - total) ? len : (size - total);
        bytes = pread(fd, buf, bytes, start + total);
        if(0 == bytes){
            // end of file
            break;
        }else if(-1 == bytes){
            // error
            S3FS_PRN_ERR("file read error(%d)", errno);
            gcry_md_close(ctx_sha256);
            return NULL;
        }
        gcry_md_write(ctx_sha256, buf, bytes);
    }
    result = new unsigned char[get_sha256_digest_length()];
    memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
    gcry_md_close(ctx_sha256);

    return result;
}
|
||||
|
||||
#endif // USE_GNUTLS_NETTLE
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
334
src/metaheader.cpp
Normal file
334
src/metaheader.cpp
Normal file
@ -0,0 +1,334 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "metaheader.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
static time_t cvt_string_to_time(const char *str)
|
||||
{
|
||||
// [NOTE]
|
||||
// In rclone, there are cases where ns is set to x-amz-meta-mtime
|
||||
// with floating point number. s3fs uses x-amz-meta-mtime by
|
||||
// truncating the floating point or less (in seconds or less) to
|
||||
// correspond to this.
|
||||
//
|
||||
std::string strmtime;
|
||||
if(str && '\0' != *str){
|
||||
strmtime = str;
|
||||
std::string::size_type pos = strmtime.find('.', 0);
|
||||
if(std::string::npos != pos){
|
||||
strmtime.erase(pos);
|
||||
}
|
||||
}
|
||||
return static_cast<time_t>(cvt_strtoofft(strmtime.c_str()));
|
||||
}
|
||||
|
||||
static time_t get_time(const headers_t& meta, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find(header))){
|
||||
return -1;
|
||||
}
|
||||
return cvt_string_to_time((*iter).second.c_str());
|
||||
}
|
||||
|
||||
time_t get_mtime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
time_t t = get_time(meta, "x-amz-meta-mtime");
|
||||
if(0 < t){
|
||||
return t;
|
||||
}
|
||||
t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
|
||||
if(0 < t){
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
time_t get_ctime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
time_t t = get_time(meta, "x-amz-meta-ctime");
|
||||
if(0 < t){
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
time_t get_atime(const headers_t& meta, bool overcheck)
|
||||
{
|
||||
time_t t = get_time(meta, "x-amz-meta-atime");
|
||||
if(0 < t){
|
||||
return t;
|
||||
}
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Parse a decimal size string into off_t.
off_t get_size(const char *s)
{
    return cvt_strtoofft(s);
}
|
||||
|
||||
off_t get_size(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter = meta.find("Content-Length");
|
||||
if(meta.end() == iter){
|
||||
return 0;
|
||||
}
|
||||
return get_size((*iter).second.c_str());
|
||||
}
|
||||
|
||||
// Parse a mode string in the given numeric base into mode_t.
mode_t get_mode(const char *s, int base)
{
    return static_cast<mode_t>(cvt_strtoofft(s, base));
}
|
||||
|
||||
mode_t get_mode(const headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
{
|
||||
mode_t mode = 0;
|
||||
bool isS3sync = false;
|
||||
headers_t::const_iterator iter;
|
||||
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
|
||||
mode = get_mode((*iter).second.c_str(), 8);
|
||||
}else{
|
||||
// If another tool creates an object without permissions, default to owner
|
||||
// read-write and group readable.
|
||||
mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
|
||||
}
|
||||
|
||||
// Checking the bitmask, if the last 3 bits are all zero then process as a regular
|
||||
// file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO,
|
||||
// S_IFSOCK, S_IFCHR, S_IFLNK and S_IFBLK devices can be processed properly by fuse.
|
||||
if(!(mode & S_IFMT)){
|
||||
if(!isS3sync){
|
||||
if(checkdir){
|
||||
if(forcedir){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(meta.end() != (iter = meta.find("Content-Type"))){
|
||||
std::string strConType = (*iter).second;
|
||||
// Leave just the mime type, remove any optional parameters (eg charset)
|
||||
std::string::size_type pos = strConType.find(';');
|
||||
if(std::string::npos != pos){
|
||||
strConType.erase(pos);
|
||||
}
|
||||
if(strConType == "application/x-directory" || strConType == "httpd/unix-directory"){
|
||||
// Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
|
||||
mode |= S_IFDIR;
|
||||
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
|
||||
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(complement_stat){
|
||||
// If complement lack stat mode, when the object has '/' character at end of name
|
||||
// and content type is text/plain and the object's size is 0 or 1, it should be
|
||||
// directory.
|
||||
off_t size = get_size(meta);
|
||||
if(strConType == "text/plain" && (0 == size || 1 == size)){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}else{
|
||||
mode |= S_IFREG;
|
||||
}
|
||||
}
|
||||
}
|
||||
// If complement lack stat mode, when it's mode is not set any permission,
|
||||
// the object is added minimal mode only for read permission.
|
||||
if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
|
||||
mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
|
||||
}
|
||||
}else{
|
||||
if(!checkdir){
|
||||
// cut dir/reg flag.
|
||||
mode &= ~S_IFDIR;
|
||||
mode &= ~S_IFREG;
|
||||
}
|
||||
}
|
||||
}
|
||||
return mode;
|
||||
}
|
||||
|
||||
uid_t get_uid(const char *s)
|
||||
{
|
||||
return static_cast<uid_t>(cvt_strtoofft(s));
|
||||
}
|
||||
|
||||
uid_t get_uid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else{
|
||||
return geteuid();
|
||||
}
|
||||
}
|
||||
|
||||
gid_t get_gid(const char *s)
|
||||
{
|
||||
return static_cast<gid_t>(cvt_strtoofft(s));
|
||||
}
|
||||
|
||||
gid_t get_gid(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else{
|
||||
return getegid();
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a byte size into a st_blocks value (512-byte units).
// Always reports at least one block, matching the original behavior.
blkcnt_t get_blocks(off_t size)
{
    blkcnt_t full_blocks = size / 512;
    return full_blocks + 1;
}
|
||||
|
||||
// Convert an IAM credential expiration string ("%Y-%m-%dT%H:%M:%S", GMT)
// into a unix time. Returns 0 for a NULL pointer or an unparsable string.
time_t cvtIAMExpireStringToTime(const char* s)
{
    struct tm tm;
    if(!s){
        return 0L;
    }
    memset(&tm, 0, sizeof(struct tm));
    // [FIX] the strptime result was previously ignored, so a malformed string
    // silently produced timegm() of a zeroed/partially-filled tm. Treat a
    // parse failure as "no expiration"(0), the same value used for NULL.
    if(NULL == strptime(s, "%Y-%m-%dT%H:%M:%S", &tm)){
        return 0L;
    }
    return timegm(&tm); // GMT
}
|
||||
|
||||
// Convert an HTTP Last-Modified string (IMF-fixdate, e.g.
// "Thu, 01 Jan 2020 00:00:00 GMT") into a unix time.
// Returns -1 for a NULL pointer or an unparsable string.
time_t get_lastmodified(const char* s)
{
    struct tm tm;
    if(!s){
        return -1;
    }
    memset(&tm, 0, sizeof(struct tm));
    // [FIX] the strptime result was previously ignored, so a malformed string
    // silently produced timegm() of a zeroed/partially-filled tm. Report the
    // same error value(-1) used for NULL input instead.
    if(NULL == strptime(s, "%a, %d %b %Y %H:%M:%S %Z", &tm)){
        return -1;
    }
    return timegm(&tm); // GMT
}
|
||||
|
||||
time_t get_lastmodified(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter = meta.find("Last-Modified");
|
||||
if(meta.end() == iter){
|
||||
return -1;
|
||||
}
|
||||
return get_lastmodified((*iter).second.c_str());
|
||||
}
|
||||
|
||||
//
|
||||
// Returns it whether it is an object with need checking in detail.
|
||||
// If this function returns true, the object is possible to be directory
|
||||
// and is needed checking detail(searching sub object).
|
||||
//
|
||||
bool is_need_check_obj_detail(const headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
|
||||
// directory object is Content-Length as 0.
|
||||
if(0 != get_size(meta)){
|
||||
return false;
|
||||
}
|
||||
// if the object has x-amz-meta information, checking is no more.
|
||||
if(meta.end() != meta.find("x-amz-meta-mode") ||
|
||||
meta.end() != meta.find("x-amz-meta-mtime") ||
|
||||
meta.end() != meta.find("x-amz-meta-ctime") ||
|
||||
meta.end() != meta.find("x-amz-meta-atime") ||
|
||||
meta.end() != meta.find("x-amz-meta-uid") ||
|
||||
meta.end() != meta.find("x-amz-meta-gid") ||
|
||||
meta.end() != meta.find("x-amz-meta-owner") ||
|
||||
meta.end() != meta.find("x-amz-meta-group") ||
|
||||
meta.end() != meta.find("x-amz-meta-permissions") )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// if there is not Content-Type, or Content-Type is "x-directory",
|
||||
// checking is no more.
|
||||
if(meta.end() == (iter = meta.find("Content-Type"))){
|
||||
return false;
|
||||
}
|
||||
if("application/x-directory" == (*iter).second){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// If add_noexist is false and the key does not exist, it will not be added.
|
||||
//
|
||||
// Merge headers from additional into base.
// When add_noexist is false, keys that are not already present in base are
// skipped. Returns true when at least one header was written.
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist)
{
    bool changed = false;
    headers_t::const_iterator iter;
    for(iter = additional.begin(); additional.end() != iter; ++iter){
        if(!add_noexist && base.end() == base.find(iter->first)){
            continue;   // key absent and caller asked not to add new keys
        }
        base[iter->first] = iter->second;
        changed = true;
    }
    return changed;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
71
src/metaheader.h
Normal file
71
src/metaheader.h
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_METAHEADER_H_
|
||||
#define S3FS_METAHEADER_H_
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <list>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// headers_t
|
||||
//-------------------------------------------------------------------
|
||||
// Case-insensitive "less than" comparator for HTTP header names, used as the
// ordering predicate of headers_t so that lookups such as "Content-Type"
// also match "content-type".
// NOTE(review): std::binary_function is deprecated since C++11 and removed in
// C++17; the base class is not required by std::map — confirm before a
// standard bump.
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>
{
    bool operator()(const std::string &strleft, const std::string &strright) const
    {
        // strcasecmp provides the locale-independent, case-insensitive ordering.
        return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
    }
};
|
||||
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
time_t get_mtime(const headers_t& meta, bool overcheck = true);
|
||||
time_t get_ctime(const headers_t& meta, bool overcheck = true);
|
||||
time_t get_atime(const headers_t& meta, bool overcheck = true);
|
||||
off_t get_size(const char *s);
|
||||
off_t get_size(const headers_t& meta);
|
||||
mode_t get_mode(const char *s, int base = 0);
|
||||
mode_t get_mode(const headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
|
||||
uid_t get_uid(const char *s);
|
||||
uid_t get_uid(const headers_t& meta);
|
||||
gid_t get_gid(const char *s);
|
||||
gid_t get_gid(const headers_t& meta);
|
||||
blkcnt_t get_blocks(off_t size);
|
||||
time_t cvtIAMExpireStringToTime(const char* s);
|
||||
time_t get_lastmodified(const char* s);
|
||||
time_t get_lastmodified(const headers_t& meta);
|
||||
bool is_need_check_obj_detail(const headers_t& meta);
|
||||
bool merge_headers(headers_t& base, const headers_t& additional, bool add_noexist);
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
#endif // S3FS_METAHEADER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
159
src/mpu_util.cpp
Normal file
159
src/mpu_util.cpp
Normal file
@ -0,0 +1,159 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "mpu_util.h"
|
||||
#include "curl.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_auth.h"
|
||||
#include "string_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
utility_incomp_type utility_mode = NO_UTILITY_MODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
// Print every incomplete multipart upload in the list (path, upload id and
// start date), or a placeholder message when the list is empty.
static void print_incomp_mpu_list(incomp_mpu_list_t& list)
{
    printf("\n");
    printf("Lists the parts that have been uploaded for a specific multipart upload.\n");
    printf("\n");

    if(!list.empty()){
        printf("---------------------------------------------------------------\n");

        // [FIX] removed the counter local that was incremented each iteration
        // but never read.
        for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
            printf(" Path : %s\n", (*iter).key.c_str());
            printf(" UploadId : %s\n", (*iter).id.c_str());
            printf(" Date : %s\n", (*iter).date.c_str());
            printf("\n");
        }
        printf("---------------------------------------------------------------\n");

    }else{
        printf("There is no list.\n");
    }
}
|
||||
|
||||
// Abort the incomplete multipart uploads in the list.
// abort_time == 0 aborts everything; otherwise only uploads whose start date
// is more than abort_time seconds in the past are aborted. Uploads with an
// unparsable date are skipped. Returns false if any abort request failed.
static bool abort_incomp_mpu_list(incomp_mpu_list_t& list, time_t abort_time)
{
    if(list.empty()){
        return true;
    }
    time_t now_time = time(NULL);

    // do removing.
    S3fsCurl s3fscurl;
    bool result = true;
    for(incomp_mpu_list_t::iterator iter = list.begin(); iter != list.end(); ++iter){
        const char* tpath = (*iter).key.c_str();
        std::string upload_id = (*iter).id;

        if(0 != abort_time){ // abort_time is 0, it means all.
            time_t date = 0;
            if(!get_unixtime_from_iso8601((*iter).date.c_str(), date)){
                S3FS_PRN_DBG("date format is not ISO 8601 for %s multipart uploading object, skip this.", tpath);
                continue;
            }
            // Still within the grace period: leave this upload alone.
            if(now_time <= (date + abort_time)){
                continue;
            }
        }

        if(0 != s3fscurl.AbortMultipartUpload(tpath, upload_id)){
            S3FS_PRN_EXIT("Failed to remove %s multipart uploading object.", tpath);
            result = false;
        }else{
            printf("Succeed to remove %s multipart uploading object.\n", tpath);
        }

        // reset(initialize) curl object
        // The handle must be reset before it is reused for the next request.
        s3fscurl.DestroyCurlHandle();
    }
    return result;
}
|
||||
|
||||
// Entry point for utility mode: list or abort incomplete multipart uploads
// in the bucket, according to the global utility_mode flag.
// Returns EXIT_SUCCESS/EXIT_FAILURE. Tears down the global SSL state before
// returning, since utility mode exits the process afterwards.
int s3fs_utility_processing(time_t abort_time)
{
    if(NO_UTILITY_MODE == utility_mode){
        return EXIT_FAILURE;
    }
    printf("\n*** s3fs run as utility mode.\n\n");

    S3fsCurl s3fscurl;
    std::string body;
    int result = EXIT_SUCCESS;
    if(0 != s3fscurl.MultipartListRequest(body)){
        S3FS_PRN_EXIT("Could not get list multipart upload.\nThere is no incomplete multipart uploaded object in bucket.\n");
        result = EXIT_FAILURE;
    }else{
        // parse result(incomplete multipart upload information)
        S3FS_PRN_DBG("response body = {\n%s\n}", body.c_str());

        xmlDocPtr doc;
        if(NULL == (doc = xmlReadMemory(body.c_str(), static_cast<int>(body.size()), "", NULL, 0))){
            S3FS_PRN_DBG("xmlReadMemory exited with error.");
            result = EXIT_FAILURE;

        }else{
            // make incomplete uploads list
            incomp_mpu_list_t list;
            if(!get_incomp_mpu_list(doc, list)){
                S3FS_PRN_DBG("get_incomp_mpu_list exited with error.");
                result = EXIT_FAILURE;

            }else{
                if(INCOMP_TYPE_LIST == utility_mode){
                    // print list
                    print_incomp_mpu_list(list);
                }else if(INCOMP_TYPE_ABORT == utility_mode){
                    // remove
                    if(!abort_incomp_mpu_list(list, abort_time)){
                        S3FS_PRN_DBG("an error occurred during removal process.");
                        result = EXIT_FAILURE;
                    }
                }
            }
            // The xml document owns all nodes referenced by the list parsing
            // above, so it is freed only after the list has been consumed.
            S3FS_XMLFREEDOC(doc);
        }
    }

    // ssl
    s3fs_destroy_global_ssl();

    return result;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
64
src/mpu_util.h
Normal file
64
src/mpu_util.h
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MPU_UTIL_H_
|
||||
#define S3FS_MPU_UTIL_H_
|
||||
|
||||
#include <string>
|
||||
#include <list>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct incomplete_multipart_upload_info
|
||||
{
|
||||
std::string key;
|
||||
std::string id;
|
||||
std::string date;
|
||||
}INCOMP_MPU_INFO;
|
||||
|
||||
typedef std::list<INCOMP_MPU_INFO> incomp_mpu_list_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// enum for utility process mode
|
||||
//-------------------------------------------------------------------
|
||||
enum utility_incomp_type{
|
||||
NO_UTILITY_MODE = 0, // not utility mode
|
||||
INCOMP_TYPE_LIST, // list of incomplete mpu
|
||||
INCOMP_TYPE_ABORT // delete incomplete mpu
|
||||
};
|
||||
|
||||
extern utility_incomp_type utility_mode;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
int s3fs_utility_processing(time_t abort_time);
|
||||
|
||||
#endif // S3FS_MPU_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
142
src/mvnode.cpp
Normal file
142
src/mvnode.cpp
Normal file
@ -0,0 +1,142 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "mvnode.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for moving objects
|
||||
//-------------------------------------------------------------------
|
||||
// Allocate a new MVNODE, taking private strdup copies of both path strings.
// On allocation failure the partially built node is released, fuse is asked
// to exit, and NULL is returned.
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    MVNODE* node = new MVNODE();

    char* dup_old = strdup(old_path);
    if(NULL == dup_old){
        delete node;
        printf("create_mvnode: could not allocation memory for p_old_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }

    char* dup_new = strdup(new_path);
    if(NULL == dup_new){
        delete node;
        free(dup_old);
        printf("create_mvnode: could not allocation memory for p_new_path\n");
        S3FS_FUSE_EXIT();
        return NULL;
    }

    node->old_path   = dup_old;
    node->new_path   = dup_new;
    node->is_dir     = is_dir;
    node->is_normdir = normdir;
    node->prev       = NULL;
    node->next       = NULL;
    return node;
}
|
||||
|
||||
//
|
||||
// Add sorted MVNODE data(Ascending order)
|
||||
//
|
||||
//
// Add sorted MVNODE data(Ascending order)
//
// Inserts a new node into the doubly linked list kept sorted by old_path
// (within the same is_dir kind). If a node with the same old_path and kind
// already exists it is returned unchanged. Returns NULL on invalid
// arguments or allocation failure.
//
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir)
{
    if(!head || !tail){
        return NULL;
    }

    MVNODE* cur;
    MVNODE* mvnew;
    for(cur = *head; cur; cur = cur->next){
        // Only nodes of the same kind (dir vs non-dir) are compared.
        if(cur->is_dir == is_dir){
            int nResult = strcmp(cur->old_path, old_path);
            if(0 == nResult){
                // Found same old_path.
                return cur;

            }else if(0 > nResult){
                // next check.
                // ex: cur("abc"), mvnew("abcd")
                // ex: cur("abc"), mvnew("abd")
                continue;

            }else{
                // Add into before cur-pos.
                // ex: cur("abc"), mvnew("ab")
                // ex: cur("abc"), mvnew("abb")
                if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
                    return NULL;
                }
                if(cur->prev){
                    (cur->prev)->next = mvnew;
                }else{
                    // Inserting before the first node: the new node becomes head.
                    *head = mvnew;
                }
                mvnew->prev = cur->prev;
                mvnew->next = cur;
                cur->prev = mvnew;

                return mvnew;
            }
        }
    }
    // Add into tail.
    if(NULL == (mvnew = create_mvnode(old_path, new_path, is_dir, normdir))){
        return NULL;
    }
    mvnew->prev = (*tail);
    if(*tail){
        (*tail)->next = mvnew;
    }
    (*tail) = mvnew;
    if(!(*head)){
        // List was empty: the new node is both head and tail.
        (*head) = mvnew;
    }
    return mvnew;
}
|
||||
|
||||
// Release an entire MVNODE list: free the strdup'd path strings of each
// node, then delete the node itself.
void free_mvnodes(MVNODE *head)
{
    while(head){
        MVNODE* next = head->next;
        free(head->old_path);
        free(head->new_path);
        delete head;
        head = next;
    }
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
53
src/mvnode.h
Normal file
53
src/mvnode.h
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_MVNODE_H_
|
||||
#define S3FS_MVNODE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure
|
||||
//-------------------------------------------------------------------
|
||||
typedef struct mvnode
|
||||
{
|
||||
char* old_path;
|
||||
char* new_path;
|
||||
bool is_dir;
|
||||
bool is_normdir;
|
||||
struct mvnode* prev;
|
||||
struct mvnode* next;
|
||||
} MVNODE;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for moving objects
|
||||
//-------------------------------------------------------------------
|
||||
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
|
||||
void free_mvnodes(MVNODE *head);
|
||||
|
||||
#endif // S3FS_MVNODE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
265
src/nss_auth.cpp
Normal file
265
src/nss_auth.cpp
Normal file
@ -0,0 +1,265 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
#include <nss.h>
|
||||
#include <pk11pub.h>
|
||||
#include <hasht.h>
|
||||
#include <prinit.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
// Report which crypto backend this build was compiled against.
const char* s3fs_crypt_lib_name()
{
    static const char libname[] = "NSS";
    return libname;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
// Initialize NSPR and NSS for this process (no certificate database).
// Returns false when NSS initialization fails.
bool s3fs_init_global_ssl()
{
    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);

    if(SECSuccess != NSS_NoDB_Init(NULL)){
        S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
        return false;
    }
    return true;
}
|
||||
|
||||
// Tear down NSS and NSPR state created by s3fs_init_global_ssl().
bool s3fs_destroy_global_ssl()
{
    NSS_Shutdown();
    PL_ArenaFinish();
    PR_Cleanup();
    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
// No explicit lock setup is performed for the NSS backend; this hook exists
// to satisfy the common s3fs_auth interface.
bool s3fs_init_crypt_mutex()
{
    return true;
}
|
||||
|
||||
// Counterpart of s3fs_init_crypt_mutex(); nothing to tear down for NSS.
bool s3fs_destroy_crypt_mutex()
{
    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
// Compute an HMAC (SHA-1 or SHA-256, chosen by is_sha256) over data using
// the NSS PK11 API. On success *digest points to a new[]-allocated buffer
// (caller must delete[]) and *digestlen holds its length. Returns false on
// bad arguments or any PK11 failure; slot, key and context handles are
// released on every path.
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
    if(!key || !data || !digest || !digestlen){
        return false;
    }

    PK11SlotInfo* Slot;
    PK11SymKey* pKey;
    PK11Context* Context;
    unsigned char tmpdigest[64];
    // SECItem wrapping the raw key bytes; PK11_ImportSymKey does not take
    // ownership, so the const_cast is safe here.
    SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
    SECItem NullSecItem = {siBuffer, NULL, 0};

    if(NULL == (Slot = PK11_GetInternalKeySlot())){
        return false;
    }
    if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
        PK11_FreeSlot(Slot);
        return false;
    }
    if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
        PK11_FreeSymKey(pKey);
        PK11_FreeSlot(Slot);
        return false;
    }

    *digestlen = 0;
    if(SECSuccess != PK11_DigestBegin(Context) ||
       SECSuccess != PK11_DigestOp(Context, data, datalen) ||
       SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
    {
        PK11_DestroyContext(Context, PR_TRUE);
        PK11_FreeSymKey(pKey);
        PK11_FreeSlot(Slot);
        return false;
    }
    PK11_DestroyContext(Context, PR_TRUE);
    PK11_FreeSymKey(pKey);
    PK11_FreeSlot(Slot);

    // Copy out of the stack buffer into a heap buffer owned by the caller.
    *digest = new unsigned char[*digestlen];
    memcpy(*digest, tmpdigest, *digestlen);

    return true;
}
|
||||
|
||||
// HMAC-SHA1 wrapper; see s3fs_HMAC_RAW for ownership of *digest.
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
}
|
||||
|
||||
// HMAC-SHA256 wrapper; see s3fs_HMAC_RAW for ownership of *digest.
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
    return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
// Length in bytes of an MD5 digest (NSS constant).
size_t get_md5_digest_length()
{
    return MD5_LENGTH;
}
|
||||
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
PK11Context* md5ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int md5outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
md5ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
off_t len = 512;
|
||||
unsigned char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(md5ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
PK11_DigestFinal(md5ctx, result, &md5outlen, get_md5_digest_length());
|
||||
PK11_DestroyContext(md5ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
// Length in bytes of a SHA-256 digest (NSS constant).
size_t get_sha256_digest_length()
{
    return SHA256_LENGTH;
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
PK11Context* sha256ctx;
|
||||
unsigned int sha256outlen;
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
PK11_DigestOp(sha256ctx, data, datalen);
|
||||
PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
*digestlen = sha256outlen;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
PK11Context* sha256ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
unsigned int sha256outlen;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
off_t len = 512;
|
||||
unsigned char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
return NULL;
|
||||
}
|
||||
PK11_DigestOp(sha256ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
|
||||
PK11_DestroyContext(sha256ctx, PR_TRUE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
369
src/openssl_auth.cpp
Normal file
369
src/openssl_auth.cpp
Normal file
@ -0,0 +1,369 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cerrno>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <cstring>
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/buffer.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/hmac.h>
|
||||
#include <openssl/md5.h>
|
||||
#include <openssl/sha.h>
|
||||
#include <openssl/crypto.h>
|
||||
#include <openssl/err.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
// Name of the crypto library this build is linked against; used for the
// version/--help banner.
const char* s3fs_crypt_lib_name()
{
    static const char version[] = "OpenSSL";
    return version;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
// One-time global OpenSSL initialization: load the human-readable error
// strings and register all digest/cipher algorithms (required for the
// EVP_get_digestbyname() lookups used elsewhere in this file).
// Always returns true.
bool s3fs_init_global_ssl()
{
    ERR_load_crypto_strings();
    ERR_load_BIO_strings();
    OpenSSL_add_all_algorithms();
    return true;
}
|
||||
|
||||
// Undo s3fs_init_global_ssl(): release the registered algorithm tables and
// the loaded error strings. Always returns true.
bool s3fs_destroy_global_ssl()
{
    EVP_cleanup();
    ERR_free_strings();
    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
// internal use struct for openssl
// Per-lock state handed back to OpenSSL through the dynlock callbacks below.
struct CRYPTO_dynlock_value
{
    pthread_mutex_t dyn_mutex;
};

// Array of OpenSSL "static lock" mutexes (CRYPTO_num_locks() entries);
// allocated by s3fs_init_crypt_mutex() and freed by s3fs_destroy_crypt_mutex().
static pthread_mutex_t* s3fs_crypt_mutex = NULL;
|
||||
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
int result;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (result = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (result = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OpenSSL thread-id callback: identify the calling thread as an integer.
static unsigned long s3fs_crypt_get_threadid() __attribute__ ((unused));
static unsigned long s3fs_crypt_get_threadid()
{
    // On some systems (e.g. FreeBSD) pthread_t is a pointer to a struct, so
    // a C-style cast is used here instead of static_cast / #ifdef juggling.
    pthread_t self = pthread_self();
    return (unsigned long)(self);
}
|
||||
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line) __attribute__ ((unused));
|
||||
static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
|
||||
{
|
||||
struct CRYPTO_dynlock_value* dyndata = new CRYPTO_dynlock_value();
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
int result;
|
||||
if(0 != (result = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
|
||||
return NULL;
|
||||
}
|
||||
return dyndata;
|
||||
}
|
||||
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
int result;
|
||||
if(mode & CRYPTO_LOCK){
|
||||
if(0 != (result = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}else{
|
||||
if(0 != (result = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
|
||||
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", result);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line) __attribute__ ((unused));
|
||||
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
|
||||
{
|
||||
if(dyndata){
|
||||
int result = pthread_mutex_destroy(&(dyndata->dyn_mutex));
|
||||
if(result != 0){
|
||||
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
|
||||
abort();
|
||||
}
|
||||
delete dyndata;
|
||||
}
|
||||
}
|
||||
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
if(!s3fs_destroy_crypt_mutex()){
|
||||
S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
s3fs_crypt_mutex = new pthread_mutex_t[CRYPTO_num_locks()];
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
#if S3FS_PTHREAD_ERRORCHECK
|
||||
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
#endif
|
||||
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
|
||||
int result = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
|
||||
if(result != 0){
|
||||
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", result);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// static lock
|
||||
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
|
||||
CRYPTO_set_id_callback(s3fs_crypt_get_threadid);
|
||||
// dynamic lock
|
||||
CRYPTO_set_dynlock_create_callback(s3fs_dyn_crypt_mutex);
|
||||
CRYPTO_set_dynlock_lock_callback(s3fs_dyn_crypt_mutex_lock);
|
||||
CRYPTO_set_dynlock_destroy_callback(s3fs_destroy_dyn_crypt_mutex);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Uninstall the OpenSSL thread-safety callbacks and free the static-lock
// mutex array. The callbacks are cleared *before* the mutexes are destroyed
// so no thread can re-enter a lock callback mid-teardown. A mutex destroy
// failure is fatal (abort). Returns true; idempotent when never initialized.
bool s3fs_destroy_crypt_mutex()
{
    if(!s3fs_crypt_mutex){
        // never initialized (or already destroyed)
        return true;
    }

    CRYPTO_set_dynlock_destroy_callback(NULL);
    CRYPTO_set_dynlock_lock_callback(NULL);
    CRYPTO_set_dynlock_create_callback(NULL);
    CRYPTO_set_id_callback(NULL);
    CRYPTO_set_locking_callback(NULL);

    for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
        int result = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
        if(result != 0){
            S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
            abort();
        }
    }
    CRYPTO_cleanup_all_ex_data();
    delete[] s3fs_crypt_mutex;
    s3fs_crypt_mutex = NULL;

    return true;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for HMAC
|
||||
//-------------------------------------------------------------------
|
||||
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
|
||||
{
|
||||
if(!key || !data || !digest || !digestlen){
|
||||
return false;
|
||||
}
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
if(is_sha256){
|
||||
HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}else{
|
||||
HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
|
||||
}
|
||||
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
// Length in bytes of an MD5 digest (MD5_DIGEST_LENGTH = 16).
size_t get_md5_digest_length()
{
    return MD5_DIGEST_LENGTH;
}
|
||||
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
MD5_CTX md5ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
return NULL;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
MD5_Init(&md5ctx);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
const off_t len = 512;
|
||||
char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
MD5_Update(&md5ctx, buf, bytes);
|
||||
}
|
||||
|
||||
result = new unsigned char[get_md5_digest_length()];
|
||||
MD5_Final(result, &md5ctx);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
// Length in bytes of a SHA-256 digest (SHA256_DIGEST_LENGTH = 32).
size_t get_sha256_digest_length()
{
    return SHA256_DIGEST_LENGTH;
}
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
|
||||
*digest = new unsigned char[*digestlen];
|
||||
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(mdctx, md, NULL);
|
||||
EVP_DigestUpdate(mdctx, data, datalen);
|
||||
EVP_DigestFinal_ex(mdctx, *digest, digestlen);
|
||||
EVP_MD_CTX_destroy(mdctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size)
|
||||
{
|
||||
const EVP_MD* md = EVP_get_digestbyname("sha256");
|
||||
EVP_MD_CTX* sha256ctx;
|
||||
off_t bytes;
|
||||
unsigned char* result;
|
||||
|
||||
if(-1 == fd){
|
||||
return NULL;
|
||||
}
|
||||
if(-1 == size){
|
||||
struct stat st;
|
||||
if(-1 == fstat(fd, &st)){
|
||||
S3FS_PRN_ERR("fstat error(%d)", errno);
|
||||
return NULL;
|
||||
}
|
||||
size = st.st_size;
|
||||
}
|
||||
|
||||
sha256ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(sha256ctx, md, NULL);
|
||||
|
||||
for(off_t total = 0; total < size; total += bytes){
|
||||
const off_t len = 512;
|
||||
char buf[len];
|
||||
bytes = len < (size - total) ? len : (size - total);
|
||||
bytes = pread(fd, buf, bytes, start + total);
|
||||
if(0 == bytes){
|
||||
// end of file
|
||||
break;
|
||||
}else if(-1 == bytes){
|
||||
// error
|
||||
S3FS_PRN_ERR("file read error(%d)", errno);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
return NULL;
|
||||
}
|
||||
EVP_DigestUpdate(sha256ctx, buf, bytes);
|
||||
}
|
||||
result = new unsigned char[get_sha256_digest_length()];
|
||||
EVP_DigestFinal_ex(sha256ctx, result, NULL);
|
||||
EVP_MD_CTX_destroy(sha256ctx);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
89
src/psemaphore.h
Normal file
89
src/psemaphore.h
Normal file
@ -0,0 +1,89 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SEMAPHORE_H_
|
||||
#define S3FS_SEMAPHORE_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class Semaphore
|
||||
//-------------------------------------------------------------------
|
||||
// portability wrapper for sem_t since macOS does not implement it
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <dispatch/dispatch.h>
|
||||
|
||||
// Counting semaphore built on Grand Central Dispatch, used on macOS where
// unnamed POSIX semaphores (sem_t) are not available.
class Semaphore
{
    public:
        // value: initial count of the semaphore (also returned by get_value()).
        explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
        ~Semaphore()
        {
            // macOS cannot destroy a semaphore with posts less than the initializer
            for(int i = 0; i < get_value(); ++i){
                post();
            }
            dispatch_release(sem);
        }
        void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
        void post() { dispatch_semaphore_signal(sem); }
        // NOTE: returns the *initial* value, not the current count.
        int get_value() const { return value; }

    private:
        const int value;              // initial count, immutable after construction
        dispatch_semaphore_t sem;     // underlying GCD semaphore
};
||||
|
||||
#else
|
||||
|
||||
#include <errno.h>
|
||||
#include <semaphore.h>
|
||||
|
||||
// Counting semaphore wrapping an unnamed POSIX sem_t (non-macOS build).
class Semaphore
{
    public:
        // value: initial count of the semaphore (also returned by get_value()).
        explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
        ~Semaphore() { sem_destroy(&mutex); }
        // Block until the count can be decremented; retry when a signal
        // interrupts the wait (sem_wait fails with EINTR).
        void wait()
        {
            int rc;
            do{
                rc = sem_wait(&mutex);
            }while(-1 == rc && EINTR == errno);
        }
        void post() { sem_post(&mutex); }
        // NOTE: returns the *initial* value, not the current count.
        int get_value() const { return value; }

    private:
        const int value;  // initial count, immutable after construction
        sem_t mutex;      // underlying POSIX semaphore
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif // S3FS_SEMAPHORE_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
7988
src/s3fs.cpp
7988
src/s3fs.cpp
File diff suppressed because it is too large
Load Diff
142
src/s3fs.h
142
src/s3fs.h
@ -1,100 +1,92 @@
|
||||
#ifndef S3FS_S3_H_
|
||||
#define S3FS_S3_H_
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_H_
|
||||
#define S3FS_S3FS_H_
|
||||
|
||||
#define FUSE_USE_VERSION 26
|
||||
#define FIVE_GB 5368709120LL
|
||||
|
||||
#include <fuse.h>
|
||||
|
||||
#define S3FS_FUSE_EXIT() { \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}
|
||||
#define S3FS_FUSE_EXIT() \
|
||||
do{ \
|
||||
struct fuse_context* pcxt = fuse_get_context(); \
|
||||
if(pcxt){ \
|
||||
fuse_exit(pcxt->fuse); \
|
||||
} \
|
||||
}while(0)
|
||||
|
||||
// [NOTE]
|
||||
// s3fs use many small allocated chunk in heap area for stats
|
||||
// cache and parsing xml, etc. The OS may decide that giving
|
||||
// this little memory back to the kernel will cause too much
|
||||
// overhead and delay the operation.
|
||||
// Address of gratitude, this workaround quotes a document of
|
||||
// libxml2.( http://xmlsoft.org/xmlmem.html )
|
||||
//
|
||||
// s3fs use many small allocated chunk in heap area for
|
||||
// stats cache and parsing xml, etc. The OS may decide
|
||||
// that giving this little memory back to the kernel
|
||||
// will cause too much overhead and delay the operation.
|
||||
// So s3fs calls malloc_trim function to really get the
|
||||
// memory back. Following macros is prepared for that
|
||||
// your system does not have it.
|
||||
//
|
||||
// Address of gratitude, this workaround quotes a document
|
||||
// of libxml2.
|
||||
// http://xmlsoft.org/xmlmem.html
|
||||
// When valgrind is used to test memory leak of s3fs, a large
|
||||
// amount of chunk may be reported. You can check the memory
|
||||
// release accurately by defining the S3FS_MALLOC_TRIM flag
|
||||
// and building it. Also, when executing s3fs, you can define
|
||||
// the MMAP_THRESHOLD environment variable and check more
|
||||
// accurate memory leak.( see, man 3 free )
|
||||
//
|
||||
#ifdef S3FS_MALLOC_TRIM
|
||||
#ifdef HAVE_MALLOC_TRIM
|
||||
|
||||
#include <malloc.h>
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
#else // S3FS_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // S3FS_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str)
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlFreeDoc(doc); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLFREE(ptr) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlFree(ptr); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlXPathFreeContext(ctx); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) \
|
||||
{ \
|
||||
do{ \
|
||||
xmlXPathFreeObject(obj); \
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
}while(0)
|
||||
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
#endif // S3FS_S3FS_H_
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str) \
|
||||
fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
|
||||
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)
|
||||
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
|
||||
//
|
||||
// For initializing libcurl with NSS
|
||||
// Normally libcurl initializes the NSS library, but usually allows
|
||||
// you to initialize s3fs forcibly. Because Memory leak is reported
|
||||
// in valgrind(about curl_global_init() function), and this is for
|
||||
// the cancellation. When "--enable-nss-init" option is specified
|
||||
// at configurarion, it makes NSS_INIT_ENABLED flag into Makefile.
|
||||
// NOTICE
|
||||
// This defines and macros is temporary, and this should be deleted.
|
||||
//
|
||||
#ifdef NSS_INIT_ENABLED
|
||||
#include <nss.h>
|
||||
#include <prinit.h>
|
||||
|
||||
#define S3FS_INIT_NSS() \
|
||||
{ \
|
||||
NSS_NoDB_Init(NULL); \
|
||||
}
|
||||
#define S3FS_CLEANUP_NSS() \
|
||||
{ \
|
||||
NSS_Shutdown(); \
|
||||
PL_ArenaFinish(); \
|
||||
PR_Cleanup(); \
|
||||
}
|
||||
|
||||
#else // NSS_INIT_ENABLED
|
||||
|
||||
#define S3FS_INIT_NSS()
|
||||
#define S3FS_CLEANUP_NSS()
|
||||
|
||||
#endif // NSS_INIT_ENABLED
|
||||
|
||||
#endif // S3FS_S3_H_
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
61
src/s3fs_auth.h
Normal file
61
src/s3fs_auth.h
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_AUTH_H_
|
||||
#define S3FS_AUTH_H_
|
||||
|
||||
#include <string>
|
||||
#include <sys/types.h>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for Authentication
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// in common_auth.cpp
|
||||
//
|
||||
std::string s3fs_get_content_md5(int fd);
|
||||
std::string s3fs_sha256_hex_fd(int fd, off_t start, off_t size);
|
||||
|
||||
//
|
||||
// in xxxxxx_auth.cpp
|
||||
//
|
||||
const char* s3fs_crypt_lib_name();
|
||||
bool s3fs_init_global_ssl();
|
||||
bool s3fs_destroy_global_ssl();
|
||||
bool s3fs_init_crypt_mutex();
|
||||
bool s3fs_destroy_crypt_mutex();
|
||||
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_md5_digest_length();
|
||||
unsigned char* s3fs_md5_fd(int fd, off_t start, off_t size);
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen);
|
||||
size_t get_sha256_digest_length();
|
||||
unsigned char* s3fs_sha256_fd(int fd, off_t start, off_t size);
|
||||
|
||||
#endif // S3FS_AUTH_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
51
src/s3fs_global.cpp
Normal file
51
src/s3fs_global.cpp
Normal file
@ -0,0 +1,51 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <string>
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;    // 5 GiB
off_t MIN_MULTIPART_SIZE = 5 * 1024 * 1024;          // 5 MiB

bool foreground = false;            // presumably: run without daemonizing — set from options elsewhere
bool nomultipart = false;           // presumably: disable multipart upload when true
bool pathrequeststyle = false;      // presumably: path-style vs virtual-hosted request URLs
bool complement_stat = false;
bool noxmlns = false;
std::string program_name;           // set to argv[0] at startup (NOTE(review): confirm against main)
std::string service_path = "/";
std::string s3host = "https://s3.amazonaws.com";
std::string bucket;
std::string endpoint = "us-east-1";
std::string cipher_suites;
std::string instance_name;
std::string aws_profile = "default";
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
568
src/s3fs_help.cpp
Normal file
568
src/s3fs_help.cpp
Normal file
@ -0,0 +1,568 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_help.h"
|
||||
#include "s3fs_auth.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Contents
|
||||
//-------------------------------------------------------------------
|
||||
static const char help_string[] =
|
||||
"\n"
|
||||
"Mount an Amazon S3 bucket as a file system.\n"
|
||||
"\n"
|
||||
"Usage:\n"
|
||||
" mounting\n"
|
||||
" s3fs bucket[:/path] mountpoint [options]\n"
|
||||
" s3fs mountpoint [options (must specify bucket= option)]\n"
|
||||
"\n"
|
||||
" unmounting\n"
|
||||
" umount mountpoint\n"
|
||||
"\n"
|
||||
" General forms for s3fs and FUSE/mount options:\n"
|
||||
" -o opt[,opt...]\n"
|
||||
" -o opt [-o opt] ...\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs --incomplete-mpu-list (-u) bucket\n"
|
||||
" s3fs --incomplete-mpu-abort[=all | =<date format>] bucket\n"
|
||||
"\n"
|
||||
"s3fs Options:\n"
|
||||
"\n"
|
||||
" Most s3fs options are given in the form where \"opt\" is:\n"
|
||||
"\n"
|
||||
" <option_name>=<option_value>\n"
|
||||
"\n"
|
||||
" bucket\n"
|
||||
" - if it is not specified bucket name (and path) in command line,\n"
|
||||
" must specify this option after -o option for bucket name.\n"
|
||||
"\n"
|
||||
" default_acl (default=\"private\")\n"
|
||||
" - the default canned acl to apply to all written s3 objects,\n"
|
||||
" e.g., private, public-read. see\n"
|
||||
" https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n"
|
||||
" for the full list of canned acls\n"
|
||||
"\n"
|
||||
" retries (default=\"5\")\n"
|
||||
" - number of times to retry a failed S3 transaction\n"
|
||||
"\n"
|
||||
" use_cache (default=\"\" which means disabled)\n"
|
||||
" - local folder to use for local file cache\n"
|
||||
"\n"
|
||||
" check_cache_dir_exist (default is disable)\n"
|
||||
" - if use_cache is set, check if the cache directory exists.\n"
|
||||
" If this option is not specified, it will be created at runtime\n"
|
||||
" when the cache directory does not exist.\n"
|
||||
"\n"
|
||||
" del_cache (delete local file cache)\n"
|
||||
" - delete local file cache when s3fs starts and exits.\n"
|
||||
"\n"
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, onezone_ia, reduced_redundancy,\n"
|
||||
" intelligent_tiering, glacier, and deep_archive.\n"
|
||||
"\n"
|
||||
" use_rrs (default is disable)\n"
|
||||
" - use Amazon's Reduced Redundancy Storage.\n"
|
||||
" this option can not be specified with use_sse.\n"
|
||||
" (can specify use_rrs=1 for old version)\n"
|
||||
" this option has been replaced by new storage_class option.\n"
|
||||
"\n"
|
||||
" use_sse (default is disable)\n"
|
||||
" - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n"
|
||||
" SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n"
|
||||
" keys, SSE-C uses customer-provided encryption keys, and\n"
|
||||
" SSE-KMS uses the master key which you manage in AWS KMS.\n"
|
||||
" You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n"
|
||||
" type (use_sse=1 is old type parameter).\n"
|
||||
" Case of setting SSE-C, you can specify \"use_sse=custom\",\n"
|
||||
" \"use_sse=custom:<custom key file path>\" or\n"
|
||||
" \"use_sse=<custom key file path>\" (only <custom key file path>\n"
|
||||
" specified is old type parameter). You can use \"c\" for\n"
|
||||
" short \"custom\".\n"
|
||||
" The custom key file must be 600 permission. The file can\n"
|
||||
" have some lines, each line is one SSE-C key. The first line\n"
|
||||
" in file is used as Customer-Provided Encryption Keys for\n"
|
||||
" uploading and changing headers etc. If there are some keys\n"
|
||||
" after first line, those are used downloading object which\n"
|
||||
" are encrypted by not first key. So that, you can keep all\n"
|
||||
" SSE-C keys in file, that is SSE-C key history.\n"
|
||||
" If you specify \"custom\" (\"c\") without file path, you\n"
|
||||
" need to set custom key by load_sse_c option or AWSSSECKEYS\n"
|
||||
" environment. (AWSSSECKEYS environment has some SSE-C keys\n"
|
||||
" with \":\" separator.) This option is used to decide the\n"
|
||||
" SSE type. So that if you do not want to encrypt a object\n"
|
||||
" object at uploading, but you need to decrypt encrypted\n"
|
||||
" object at downloading, you can use load_sse_c option instead\n"
|
||||
" of this option.\n"
|
||||
" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
|
||||
" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
|
||||
" If you san specify SSE-KMS type with your <kms id> in AWS\n"
|
||||
" KMS, you can set it after \"kmsid:\" (or \"k:\"). If you\n"
|
||||
" specify only \"kmsid\" (\"k\"), you need to set AWSSSEKMSID\n"
|
||||
" environment which value is <kms id>. You must be careful\n"
|
||||
" about that you can not use the KMS id which is not same EC2\n"
|
||||
" region.\n"
|
||||
"\n"
|
||||
" load_sse_c - specify SSE-C keys\n"
|
||||
" Specify the custom-provided encryption keys file path for decrypting\n"
|
||||
" at downloading.\n"
|
||||
" If you use the custom-provided encryption key at uploading, you\n"
|
||||
" specify with \"use_sse=custom\". The file has many lines, one line\n"
|
||||
" means one custom key. So that you can keep all SSE-C keys in file,\n"
|
||||
" that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"
|
||||
" file contents.\n"
|
||||
"\n"
|
||||
" public_bucket (default=\"\" which means disabled)\n"
|
||||
" - anonymously mount a public bucket when set to 1, ignores the \n"
|
||||
" $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
|
||||
" S3 does not allow copy object api for anonymous users, then\n"
|
||||
" s3fs sets nocopyapi option automatically when public_bucket=1\n"
|
||||
" option is specified.\n"
|
||||
"\n"
|
||||
" passwd_file (default=\"\")\n"
|
||||
" - specify which s3fs password file to use\n"
|
||||
"\n"
|
||||
" ahbe_conf (default=\"\" which means disabled)\n"
|
||||
" - This option specifies the configuration file path which\n"
|
||||
" file is the additional HTTP header by file (object) extension.\n"
|
||||
" The configuration file format is below:\n"
|
||||
" -----------\n"
|
||||
" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
|
||||
" file suffix = file (object) suffix, if this field is empty,\n"
|
||||
" it means \"reg:(.*)\".(=all object).\n"
|
||||
" regex = regular expression to match the file (object) path.\n"
|
||||
" this type starts with \"reg:\" prefix.\n"
|
||||
" HTTP-header = additional HTTP header name\n"
|
||||
" HTTP-values = additional HTTP header value\n"
|
||||
" -----------\n"
|
||||
" Sample:\n"
|
||||
" -----------\n"
|
||||
" .gz Content-Encoding gzip\n"
|
||||
" .Z Content-Encoding compress\n"
|
||||
" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
|
||||
" -----------\n"
|
||||
" A sample configuration file is uploaded in \"test\" directory.\n"
|
||||
" If you specify this option for set \"Content-Encoding\" HTTP \n"
|
||||
" header, please take care for RFC 2616.\n"
|
||||
"\n"
|
||||
" profile (default=\"default\")\n"
|
||||
" - Choose a profile from ${HOME}/.aws/credentials to authenticate\n"
|
||||
" against S3. Note that this format matches the AWS CLI format and\n"
|
||||
" differs from the s3fs passwd format.\n"
|
||||
"\n"
|
||||
" connect_timeout (default=\"300\" seconds)\n"
|
||||
" - time to wait for connection before giving up\n"
|
||||
"\n"
|
||||
" readwrite_timeout (default=\"120\" seconds)\n"
|
||||
" - time to wait between read/write activity before giving up\n"
|
||||
"\n"
|
||||
" list_object_max_keys (default=\"1000\")\n"
|
||||
" - specify the maximum number of keys returned by S3 list object\n"
|
||||
" API. The default is 1000. you can set this value to 1000 or more.\n"
|
||||
"\n"
|
||||
" max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
|
||||
" - maximum number of entries in the stat cache, and this maximum is\n"
|
||||
" also treated as the number of symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_expire (default is 900))\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache.\n"
|
||||
" This expire time indicates the time since stat cached. and this\n"
|
||||
" is also set to the expire time of the symbolic link cache.\n"
|
||||
"\n"
|
||||
" stat_cache_interval_expire (default is 900)\n"
|
||||
" - specify expire time (seconds) for entries in the stat cache(and\n"
|
||||
" symbolic link cache).\n"
|
||||
" This expire time is based on the time from the last access time\n"
|
||||
" of the stat cache. This option is exclusive with stat_cache_expire,\n"
|
||||
" and is left for compatibility with older versions.\n"
|
||||
"\n"
|
||||
" enable_noobj_cache (default is disable)\n"
|
||||
" - enable cache entries for the object which does not exist.\n"
|
||||
" s3fs always has to check whether file (or sub directory) exists \n"
|
||||
" under object (path) when s3fs does some command, since s3fs has \n"
|
||||
" recognized a directory which does not exist and has files or \n"
|
||||
" sub directories under itself. It increases ListBucket request \n"
|
||||
" and makes performance bad.\n"
|
||||
" You can specify this option for performance, s3fs memorizes \n"
|
||||
" in stat cache that the object (file or directory) does not exist.\n"
|
||||
"\n"
|
||||
" no_check_certificate\n"
|
||||
" - server certificate won't be checked against the available \n"
|
||||
" certificate authorities.\n"
|
||||
"\n"
|
||||
" ssl_verify_hostname (default=\"2\")\n"
|
||||
" - When 0, do not verify the SSL certificate against the hostname.\n"
|
||||
"\n"
|
||||
" nodnscache (disable DNS cache)\n"
|
||||
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
|
||||
"\n"
|
||||
" nosscache (disable SSL session cache)\n"
|
||||
" - s3fs is always using SSL session cache, this option make SSL \n"
|
||||
" session cache disable.\n"
|
||||
"\n"
|
||||
" multireq_max (default=\"20\")\n"
|
||||
" - maximum number of parallel request for listing objects.\n"
|
||||
"\n"
|
||||
" parallel_count (default=\"5\")\n"
|
||||
" - number of parallel request for uploading big objects.\n"
|
||||
" s3fs uploads large object (over 20MB) by multipart post request, \n"
|
||||
" and sends parallel requests.\n"
|
||||
" This option limits parallel request count which s3fs requests \n"
|
||||
" at once. It is necessary to set this value depending on a CPU \n"
|
||||
" and a network band.\n"
|
||||
"\n"
|
||||
" multipart_size (default=\"10\")\n"
|
||||
" - part size, in MB, for each multipart request.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
"\n"
|
||||
" multipart_copy_size (default=\"512\")\n"
|
||||
" - part size, in MB, for each multipart copy request, used for\n"
|
||||
" renames and mixupload.\n"
|
||||
" The minimum value is 5 MB and the maximum value is 5 GB.\n"
|
||||
" Must be at least 512 MB to copy the maximum 5 TB object size\n"
|
||||
" but lower values may improve performance.\n"
|
||||
"\n"
|
||||
" max_dirty_data (default=\"5120\")\n"
|
||||
" - flush dirty data to S3 after a certain number of MB written.\n"
|
||||
" The minimum value is 50 MB. -1 value means disable.\n"
|
||||
" Cannot be used with nomixupload.\n"
|
||||
"\n"
|
||||
" ensure_diskfree (default 0)\n"
|
||||
" - sets MB to ensure disk free space. This option means the\n"
|
||||
" threshold of free space size on disk which is used for the\n"
|
||||
" cache file by s3fs. s3fs makes file for\n"
|
||||
" downloading, uploading and caching files. If the disk free\n"
|
||||
" space is smaller than this value, s3fs do not use diskspace\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" multipart_threshold (default=\"25\")\n"
|
||||
" - threshold, in MB, to use multipart upload instead of\n"
|
||||
" single-part. Must be at least 5 MB.\n"
|
||||
"\n"
|
||||
" singlepart_copy_limit (default=\"512\")\n"
|
||||
" - maximum size, in MB, of a single-part copy before trying \n"
|
||||
" multipart copy.\n"
|
||||
"\n"
|
||||
" host (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - Set a non-Amazon host, e.g., https://example.com.\n"
|
||||
"\n"
|
||||
" servicepath (default=\"/\")\n"
|
||||
" - Set a service path when the non-Amazon host requires a prefix.\n"
|
||||
"\n"
|
||||
" url (default=\"https://s3.amazonaws.com\")\n"
|
||||
" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
|
||||
" then you can set \"url=http://s3.amazonaws.com\".\n"
|
||||
" If you do not use https, please specify the URL with the url\n"
|
||||
" option.\n"
|
||||
"\n"
|
||||
" endpoint (default=\"us-east-1\")\n"
|
||||
" - sets the endpoint to use on signature version 4\n"
|
||||
" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
|
||||
" the default. If the s3fs could not connect to the region specified\n"
|
||||
" by this option, s3fs could not run. But if you do not specify this\n"
|
||||
" option, and if you can not connect with the default region, s3fs\n"
|
||||
" will retry to automatically connect to the other region. So s3fs\n"
|
||||
" can know the correct region name, because s3fs can find it in an\n"
|
||||
" error from the S3 server.\n"
|
||||
"\n"
|
||||
" sigv2 (default is signature version 4 falling back to version 2)\n"
|
||||
" - sets signing AWS requests by using only signature version 2\n"
|
||||
"\n"
|
||||
" sigv4 (default is signature version 4 falling back to version 2)\n"
|
||||
" - sets signing AWS requests by using only signature version 4\n"
|
||||
"\n"
|
||||
" mp_umask (default is \"0000\")\n"
|
||||
" - sets umask for the mount point directory.\n"
|
||||
" If allow_other option is not set, s3fs allows access to the mount\n"
|
||||
" point only to the owner. In the opposite case s3fs allows access\n"
|
||||
" to all users as the default. But if you set the allow_other with\n"
|
||||
" this option, you can control the permissions of the\n"
|
||||
" mount point by this option like umask.\n"
|
||||
"\n"
|
||||
" umask (default is \"0000\")\n"
|
||||
" - sets umask for files under the mountpoint. This can allow\n"
|
||||
" users other than the mounting user to read and write to files\n"
|
||||
" that they did not create.\n"
|
||||
"\n"
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" ecs (default is disable)\n"
|
||||
" - This option instructs s3fs to query the ECS container credential\n"
|
||||
" metadata address instead of the instance metadata address.\n"
|
||||
"\n"
|
||||
" iam_role (default is no IAM role)\n"
|
||||
" - This option requires the IAM role name or \"auto\". If you specify\n"
|
||||
" \"auto\", s3fs will automatically use the IAM role names that are set\n"
|
||||
" to an instance. If you specify this option without any argument, it\n"
|
||||
" is the same as that you have specified the \"auto\".\n"
|
||||
"\n"
|
||||
" imdsv1only (default is to use IMDSv2 with fallback to v1)\n"
|
||||
" - AWS instance metadata service, used with IAM role authentication,\n"
|
||||
" supports the use of an API token. If you're using an IAM role\n"
|
||||
" in an environment that does not support IMDSv2, setting this flag\n"
|
||||
" will skip retrieval and usage of the API token when retrieving\n"
|
||||
" IAM credentials.\n"
|
||||
"\n"
|
||||
" ibm_iam_auth (default is not using IBM IAM authentication)\n"
|
||||
" - This option instructs s3fs to use IBM IAM authentication.\n"
|
||||
" In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
|
||||
" IBM's Service-Instance-ID and APIKey, respectively.\n"
|
||||
"\n"
|
||||
" ibm_iam_endpoint (default is https://iam.bluemix.net)\n"
|
||||
" - sets the URL to use for IBM IAM authentication.\n"
|
||||
"\n"
|
||||
" use_xattr (default is not handling the extended attribute)\n"
|
||||
" Enable to handle the extended attribute (xattrs).\n"
|
||||
" If you set this option, you can use the extended attribute.\n"
|
||||
" For example, encfs and ecryptfs need to support the extended attribute.\n"
|
||||
" Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
|
||||
" copy command with preserve=mode.\n"
|
||||
"\n"
|
||||
" noxmlns (disable registering xml name space)\n"
|
||||
" disable registering xml name space for response of \n"
|
||||
" ListBucketResult and ListVersionsResult etc. Default name \n"
|
||||
" space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n"
|
||||
" This option should not be specified now, because s3fs looks up\n"
|
||||
" xmlns automatically after v1.66.\n"
|
||||
"\n"
|
||||
" nomixupload (disable copy in multipart uploads)\n"
|
||||
" Disable to use PUT (copy api) when multipart uploading large size objects.\n"
|
||||
" By default, when doing multipart upload, the range of unchanged data\n"
|
||||
" will use PUT (copy api) whenever possible.\n"
|
||||
" When nocopyapi or norenameapi is specified, use of PUT (copy api) is\n"
|
||||
" invalidated even if this option is not specified.\n"
|
||||
"\n"
|
||||
" nocopyapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" If you set this option, s3fs do not use PUT with \n"
|
||||
" \"x-amz-copy-source\" (copy api). Because traffic is increased\n"
|
||||
" 2-3 times by this option, we do not recommend this.\n"
|
||||
"\n"
|
||||
" norenameapi (for other incomplete compatibility object storage)\n"
|
||||
" For a distributed object storage which is compatibility S3\n"
|
||||
" API without PUT (copy api).\n"
|
||||
" This option is a subset of nocopyapi option. The nocopyapi\n"
|
||||
" option does not use copy-api for all command (ex. chmod, chown,\n"
|
||||
" touch, mv, etc), but this option does not use copy-api for\n"
|
||||
" only rename command (ex. mv). If this option is specified with\n"
|
||||
" nocopyapi, then s3fs ignores it.\n"
|
||||
"\n"
|
||||
" use_path_request_style (use legacy API calling style)\n"
|
||||
" Enable compatibility with S3-like APIs which do not support\n"
|
||||
" the virtual-host request style, by using the older path request\n"
|
||||
" style.\n"
|
||||
"\n"
|
||||
" listobjectsv2 (use ListObjectsV2)\n"
|
||||
" Issue ListObjectsV2 instead of ListObjects, useful on object\n"
|
||||
" stores without ListObjects support.\n"
|
||||
"\n"
|
||||
" noua (suppress User-Agent header)\n"
|
||||
" Usually s3fs outputs of the User-Agent in \"s3fs/<version> (commit\n"
|
||||
" hash <hash>; <using ssl library name>)\" format.\n"
|
||||
" If this option is specified, s3fs suppresses the output of the\n"
|
||||
" User-Agent.\n"
|
||||
"\n"
|
||||
" cipher_suites\n"
|
||||
" Customize the list of TLS cipher suites.\n"
|
||||
" Expects a colon separated list of cipher suite names.\n"
|
||||
" A list of available cipher suites, depending on your TLS engine,\n"
|
||||
" can be found on the CURL library documentation:\n"
|
||||
" https://curl.haxx.se/docs/ssl-ciphers.html\n"
|
||||
"\n"
|
||||
" instance_name - The instance name of the current s3fs mountpoint.\n"
|
||||
" This name will be added to logging messages and user agent headers sent by s3fs.\n"
|
||||
"\n"
|
||||
" complement_stat (complement lack of file/directory mode)\n"
|
||||
" s3fs complements lack of information about file/directory mode\n"
|
||||
" if a file or a directory object does not have x-amz-meta-mode\n"
|
||||
" header. As default, s3fs does not complements stat information\n"
|
||||
" for a object, then the object will not be able to be allowed to\n"
|
||||
" list/modify.\n"
|
||||
"\n"
|
||||
" notsup_compat_dir (not support compatibility directory types)\n"
|
||||
" As a default, s3fs supports objects of the directory type as\n"
|
||||
" much as possible and recognizes them as directories.\n"
|
||||
" Objects that can be recognized as directory objects are \"dir/\",\n"
|
||||
" \"dir\", \"dir_$folder$\", and there is a file object that does\n"
|
||||
" not have a directory object but contains that directory path.\n"
|
||||
" s3fs needs redundant communication to support all these\n"
|
||||
" directory types. The object as the directory created by s3fs\n"
|
||||
" is \"dir/\". By restricting s3fs to recognize only \"dir/\" as\n"
|
||||
" a directory, communication traffic can be reduced. This option\n"
|
||||
" is used to give this restriction to s3fs.\n"
|
||||
" However, if there is a directory object other than \"dir/\" in\n"
|
||||
" the bucket, specifying this option is not recommended. s3fs may\n"
|
||||
" not be able to recognize the object correctly if an object\n"
|
||||
" created by s3fs exists in the bucket.\n"
|
||||
" Please use this option when the directory in the bucket is\n"
|
||||
" only \"dir/\" object.\n"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid UTF-8. But some\n"
|
||||
" clients, notably Windows NFS clients, use their own encoding.\n"
|
||||
" This option re-encodes invalid UTF-8 object names into valid\n"
|
||||
" UTF-8 by mapping offending codes into a 'private' codepage of the\n"
|
||||
" Unicode set.\n"
|
||||
" Useful on clients not using UTF-8 as their file system encoding.\n"
|
||||
"\n"
|
||||
" use_session_token - indicate that session token should be provided.\n"
|
||||
" If credentials are provided by environment variables this switch\n"
|
||||
" forces presence check of AWSSESSIONTOKEN variable.\n"
|
||||
" Otherwise an error is returned.\n"
|
||||
"\n"
|
||||
" requester_pays (default is disable)\n"
|
||||
" This option instructs s3fs to enable requests involving\n"
|
||||
" Requester Pays buckets.\n"
|
||||
" It includes the 'x-amz-request-payer=requester' entry in the\n"
|
||||
" request header.\n"
|
||||
"\n"
|
||||
" mime (default is \"/etc/mime.types\")\n"
|
||||
" Specify the path of the mime.types file.\n"
|
||||
" If this option is not specified, the existence of \"/etc/mime.types\"\n"
|
||||
" is checked, and that file is loaded as mime information.\n"
|
||||
" If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n"
|
||||
" is checked as well.\n"
|
||||
"\n"
|
||||
" logfile - specify the log output file.\n"
|
||||
" s3fs outputs the log file to syslog. Alternatively, if s3fs is\n"
|
||||
" started with the \"-f\" option specified, the log will be output\n"
|
||||
" to the stdout/stderr.\n"
|
||||
" You can use this option to specify the log file that s3fs outputs.\n"
|
||||
" If you specify a log file with this option, it will reopen the log\n"
|
||||
" file when s3fs receives a SIGHUP signal. You can use the SIGHUP\n"
|
||||
" signal for log rotation.\n"
|
||||
"\n"
|
||||
" dbglevel (default=\"crit\")\n"
|
||||
" Set the debug message level. set value as crit (critical), err\n"
|
||||
" (error), warn (warning), info (information) to debug level.\n"
|
||||
" default debug level is critical. If s3fs run with \"-d\" option,\n"
|
||||
" the debug level is set information. When s3fs catch the signal\n"
|
||||
" SIGUSR2, the debug level is bumpup.\n"
|
||||
"\n"
|
||||
" curldbg - put curl debug message\n"
|
||||
" Put the debug message from libcurl when this option is specified.\n"
|
||||
" Specify \"normal\" or \"body\" for the parameter.\n"
|
||||
" If the parameter is omitted, it is the same as \"normal\".\n"
|
||||
" If \"body\" is specified, some API communication body data will be\n"
|
||||
" output in addition to the debug message output as \"normal\".\n"
|
||||
"\n"
|
||||
" no_time_stamp_msg - no time stamp in debug message\n"
|
||||
" The time stamp is output to the debug message by default.\n"
|
||||
" If this option is specified, the time stamp will not be output\n"
|
||||
" in the debug message.\n"
|
||||
" It is the same even if the environment variable \"S3FS_MSGTIMESTAMP\"\n"
|
||||
" is set to \"no\".\n"
|
||||
"\n"
|
||||
" set_check_cache_sigusr1 (default is stdout)\n"
|
||||
" If the cache is enabled, you can check the integrity of the\n"
|
||||
" cache file and the cache file's stats info file.\n"
|
||||
" This option is specified and when sending the SIGUSR1 signal\n"
|
||||
" to the s3fs process checks the cache status at that time.\n"
|
||||
" This option can take a file path as parameter to output the\n"
|
||||
" check result to that file. The file path parameter can be omitted.\n"
|
||||
" If omitted, the result will be output to stdout or syslog.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
" supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime,\n"
|
||||
" noatime, sync async, dirsync). Filesystems are mounted with\n"
|
||||
" '-onodev,nosuid' by default, which can only be overridden by a\n"
|
||||
" privileged user.\n"
|
||||
" \n"
|
||||
" There are many FUSE specific mount options that can be specified.\n"
|
||||
" e.g. allow_other See the FUSE's README for the full set.\n"
|
||||
"\n"
|
||||
"Utility mode Options:\n"
|
||||
"\n"
|
||||
" -u, --incomplete-mpu-list\n"
|
||||
" Lists multipart incomplete objects uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" --incomplete-mpu-abort (=all or =<date format>)\n"
|
||||
" Delete the multipart incomplete object uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" If \"all\" is specified for this option, all multipart incomplete\n"
|
||||
" objects will be deleted. If you specify no argument as an option,\n"
|
||||
" objects older than 24 hours (24H) will be deleted (This is the\n"
|
||||
" default value). You can specify an optional date format. It can\n"
|
||||
" be specified as year, month, day, hour, minute, second, and it is\n"
|
||||
" expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n"
|
||||
" For example, \"1Y6M10D12h30m30s\".\n"
|
||||
"\n"
|
||||
"Miscellaneous Options:\n"
|
||||
"\n"
|
||||
" -h, --help Output this help.\n"
|
||||
" --version Output version info.\n"
|
||||
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
|
||||
" twice turns on FUSE debug messages to STDOUT.\n"
|
||||
" -f FUSE foreground option - do not run as daemon.\n"
|
||||
" -s FUSE single-threaded option\n"
|
||||
" disable multi-threaded operation\n"
|
||||
"\n"
|
||||
"\n"
|
||||
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
|
||||
;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
void show_usage()
|
||||
{
|
||||
printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n", program_name.c_str());
|
||||
}
|
||||
|
||||
void show_help()
|
||||
{
|
||||
show_usage();
|
||||
printf(help_string);
|
||||
}
|
||||
|
||||
// Print version, commit hash, the linked crypto library name and the
// license notice to stdout.
void show_version()
{
    printf(
        "Amazon Simple Storage Service File System V%s (commit:%s) with %s\n"
        "Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
        "License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
        "This is free software: you are free to change and redistribute it.\n"
        "There is NO WARRANTY, to the extent permitted by law.\n",
        VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
40
src/s3fs_help.h
Normal file
40
src/s3fs_help.h
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_HELP_H_
#define S3FS_S3FS_HELP_H_

//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
// Print the one-line mount usage summary to stdout.
void show_usage();
// Print the usage summary followed by the full option help text.
void show_help();
// Print version, commit hash, crypto library name and license notice.
void show_version();

#endif // S3FS_S3FS_HELP_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
246
src/s3fs_logger.cpp
Normal file
246
src/s3fs_logger.cpp
Normal file
@ -0,0 +1,246 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs_logger.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : variables
|
||||
//-------------------------------------------------------------------
|
||||
//-------------------------------------------------------------------
// S3fsLog class : variables
//-------------------------------------------------------------------
// Out-of-class definition for the in-class static const (required pre-C++17
// when the constant is odr-used).
const int S3fsLog::NEST_MAX;
// Indentation prefix per message nesting level (indexed 0..NEST_MAX-1).
// NOTE(review): the widths look collapsed by formatting — verify against upstream.
const char* S3fsLog::nest_spaces[S3fsLog::NEST_MAX] = {"", " ", " ", " "};
// Environment variable names — presumably read by LowLoadEnv(); verify there.
const char* S3fsLog::LOGFILEENV = "S3FS_LOGFILE";
const char* S3fsLog::MSGTIMESTAMP = "S3FS_MSGTIMESTAMP";
// Process-wide singleton pointer; set by the first-constructed S3fsLog.
S3fsLog* S3fsLog::pSingleton = NULL;
// Current debug level; default is critical-only.
S3fsLog::s3fs_log_level S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;
// Log file stream; NULL means log output goes to stdout/stderr (or syslog).
FILE* S3fsLog::logfp = NULL;
// Heap-allocated path of the current log file; NULL when none is set.
std::string* S3fsLog::plogfile = NULL;
// Buffer for the formatted message time stamp.
char S3fsLog::current_time[64] = "";
// Whether debug messages are prefixed with a time stamp.
bool S3fsLog::time_stamp = true;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : class methods
|
||||
//-------------------------------------------------------------------
|
||||
// A level is enabled iff every bit of it is set in the current debug_level.
bool S3fsLog::IsS3fsLogLevel(s3fs_log_level level)
{
    return ((S3fsLog::debug_level & level) == level);
}
|
||||
|
||||
bool S3fsLog::SetLogfile(const char* pfile)
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
return false;
|
||||
}
|
||||
return S3fsLog::pSingleton->LowSetLogfile(pfile);
|
||||
}
|
||||
|
||||
bool S3fsLog::ReopenLogfile()
|
||||
{
|
||||
if(!S3fsLog::pSingleton){
|
||||
S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
|
||||
return false;
|
||||
}
|
||||
if(!S3fsLog::logfp){
|
||||
S3FS_PRN_INFO("Currently the log file is output to stdout/stderr.");
|
||||
return true;
|
||||
}
|
||||
if(!S3fsLog::plogfile){
|
||||
S3FS_PRN_ERR("There is a problem with the path to the log file being NULL.");
|
||||
return false;
|
||||
}
|
||||
std::string tmp = *(S3fsLog::plogfile);
|
||||
return S3fsLog::pSingleton->LowSetLogfile(tmp.c_str());
|
||||
}
|
||||
|
||||
// Static entry point: change the log level via the singleton.
// On error the current level is returned unchanged.
S3fsLog::s3fs_log_level S3fsLog::SetLogLevel(s3fs_log_level level)
{
    if(S3fsLog::pSingleton){
        return S3fsLog::pSingleton->LowSetLogLevel(level);
    }
    S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
    return S3fsLog::debug_level;    // error, but report the current value
}
|
||||
|
||||
// Static entry point: step the log level up one notch via the singleton.
// On error the current level is returned unchanged.
S3fsLog::s3fs_log_level S3fsLog::BumpupLogLevel()
{
    if(S3fsLog::pSingleton){
        return S3fsLog::pSingleton->LowBumpupLogLevel();
    }
    S3FS_PRN_CRIT("S3fsLog::pSingleton is NULL.");
    return S3fsLog::debug_level;    // error, but report the current value
}
|
||||
|
||||
// Enable/disable message timestamps; returns the previous setting.
bool S3fsLog::SetTimeStamp(bool value)
{
    const bool previous = S3fsLog::time_stamp;
    S3fsLog::time_stamp = value;
    return previous;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class : methods
|
||||
//-------------------------------------------------------------------
|
||||
// Construct the singleton logger: register this instance, connect to
// syslog, and load settings from the environment.  A second construction
// only logs an error and leaves the existing singleton in place.
S3fsLog::S3fsLog()
{
    if(S3fsLog::pSingleton){
        S3FS_PRN_ERR("Already set singleton object for S3fsLog.");
        return;
    }
    S3fsLog::pSingleton = this;

    // init syslog(default CRIT)
    openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER);
    LowLoadEnv();
}
|
||||
|
||||
// Tear down the singleton logger: close the log file, reset all static
// state to its defaults, and disconnect from syslog.  Destroying a
// non-singleton instance only logs an error.
S3fsLog::~S3fsLog()
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not singleton S3fsLog object.");
        return;
    }
    // Detach the FILE* before closing so any message emitted below falls
    // back to stdout/stderr instead of a dead stream.
    FILE* closing_fp = S3fsLog::logfp;
    S3fsLog::logfp = NULL;
    if(closing_fp && 0 != fclose(closing_fp)){
        S3FS_PRN_ERR("Could not close old log file(%s), but continue...", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
    }
    delete S3fsLog::plogfile;       // delete of NULL is a no-op
    S3fsLog::plogfile = NULL;

    S3fsLog::pSingleton = NULL;
    S3fsLog::debug_level = S3fsLog::LEVEL_CRIT;

    closelog();
}
|
||||
|
||||
bool S3fsLog::LowLoadEnv()
|
||||
{
|
||||
if(S3fsLog::pSingleton != this){
|
||||
S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
|
||||
return false;
|
||||
}
|
||||
char* pEnvVal;
|
||||
if(NULL != (pEnvVal = getenv(S3fsLog::LOGFILEENV))){
|
||||
if(!SetLogfile(pEnvVal)){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(NULL != (pEnvVal = getenv(S3fsLog::MSGTIMESTAMP))){
|
||||
if(0 == strcasecmp(pEnvVal, "true") || 0 == strcasecmp(pEnvVal, "yes") || 0 == strcasecmp(pEnvVal, "1")){
|
||||
S3fsLog::time_stamp = true;
|
||||
}else if(0 == strcasecmp(pEnvVal, "false") || 0 == strcasecmp(pEnvVal, "no") || 0 == strcasecmp(pEnvVal, "0")){
|
||||
S3fsLog::time_stamp = false;
|
||||
}else{
|
||||
S3FS_PRN_WARN("Unknown %s environment value(%s) is specified, skip to set time stamp mode.", S3fsLog::MSGTIMESTAMP, pEnvVal);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Point logging at the file "pfile", or back at stdout/stderr when pfile
// is NULL.  On failure the previous destination is preserved where
// possible (the old file is restored if the swap fails).  Returns true
// on success.
bool S3fsLog::LowSetLogfile(const char* pfile)
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
        return false;
    }

    if(!pfile){
        // close log file if it is opened
        if(S3fsLog::logfp && 0 != fclose(S3fsLog::logfp)){
            S3FS_PRN_ERR("Could not close log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
            return false;
        }
        S3fsLog::logfp = NULL;
        if(S3fsLog::plogfile){
            delete S3fsLog::plogfile;
            S3fsLog::plogfile = NULL;
        }
    }else{
        // open new log file
        //
        // [NOTE]
        // It will reopen even if it is the same file.
        //
        FILE* newfp;
        if(NULL == (newfp = fopen(pfile, "a+"))){
            S3FS_PRN_ERR("Could not open log file(%s).", pfile);
            return false;
        }

        // switch new log file and close old log file if it is opened
        // (publish the new FILE* first so logging keeps working throughout)
        FILE* oldfp = S3fsLog::logfp;
        S3fsLog::logfp = newfp;
        if(oldfp && 0 != fclose(oldfp)){
            // Roll back: keep logging to the old file, discard the new one.
            S3FS_PRN_ERR("Could not close old log file(%s).", (S3fsLog::plogfile ? S3fsLog::plogfile->c_str() : "null"));
            S3fsLog::logfp = oldfp;
            fclose(newfp);
            return false;
        }
        // delete of NULL is a no-op; remember the new path for reopen.
        delete S3fsLog::plogfile;
        S3fsLog::plogfile = new std::string(pfile);
    }
    return true;
}
|
||||
|
||||
// Set the dynamic log level and keep syslog's mask in sync.
// Returns the previous level (or the current one on error / no change).
S3fsLog::s3fs_log_level S3fsLog::LowSetLogLevel(s3fs_log_level level)
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
        return S3fsLog::debug_level;    // error, but report the current value
    }
    if(level == S3fsLog::debug_level){
        // No change requested.
        return S3fsLog::debug_level;
    }
    const s3fs_log_level previous = S3fsLog::debug_level;
    S3fsLog::debug_level = level;
    // Mirror our level into syslog's own filter.
    setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
    S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(previous), GetLevelString(S3fsLog::debug_level));
    return previous;
}
|
||||
|
||||
// Step the log level one notch more verbose, wrapping DBG back to CRIT
// (cycle: CRIT -> ERR -> WARN -> INFO -> DBG -> CRIT).  Returns the
// previous level.
S3fsLog::s3fs_log_level S3fsLog::LowBumpupLogLevel()
{
    if(S3fsLog::pSingleton != this){
        S3FS_PRN_ERR("This object is not as same as S3fsLog::pSingleton.");
        return S3fsLog::debug_level;    // error, but report the current value
    }
    const s3fs_log_level previous = S3fsLog::debug_level;
    switch(previous){
        case LEVEL_CRIT: S3fsLog::debug_level = LEVEL_ERR;  break;
        case LEVEL_ERR:  S3fsLog::debug_level = LEVEL_WARN; break;
        case LEVEL_WARN: S3fsLog::debug_level = LEVEL_INFO; break;
        case LEVEL_INFO: S3fsLog::debug_level = LEVEL_DBG;  break;
        default:         S3fsLog::debug_level = LEVEL_CRIT; break;
    }
    // Mirror our level into syslog's own filter.
    setlogmask(LOG_UPTO(GetSyslogLevel(S3fsLog::debug_level)));
    S3FS_PRN_CRIT("change debug level from %sto %s", GetLevelString(previous), GetLevelString(S3fsLog::debug_level));
    return previous;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
266
src/s3fs_logger.h
Normal file
266
src/s3fs_logger.h
Normal file
@ -0,0 +1,266 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_LOGGER_H_
|
||||
#define S3FS_LOGGER_H_
|
||||
|
||||
#include <cstdio>
|
||||
#include <syslog.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#ifdef CLOCK_MONOTONIC_COARSE
|
||||
#define S3FS_CLOCK_MONOTONIC CLOCK_MONOTONIC_COARSE
|
||||
#else
|
||||
// case of OSX
|
||||
#define S3FS_CLOCK_MONOTONIC CLOCK_MONOTONIC
|
||||
#endif
|
||||
|
||||
#if defined(__APPLE__)
|
||||
#define S3FSLOG_TIME_FMT "%s.%03dZ "
|
||||
#else
|
||||
#define S3FSLOG_TIME_FMT "%s.%03ldZ "
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// S3fsLog class
|
||||
//-------------------------------------------------------------------
|
||||
class S3fsLog
|
||||
{
|
||||
public:
|
||||
enum s3fs_log_level{
|
||||
LEVEL_CRIT = 0, // LEVEL_CRIT
|
||||
LEVEL_ERR = 1, // LEVEL_ERR
|
||||
LEVEL_WARN = 3, // LEVEL_WARNING
|
||||
LEVEL_INFO = 7, // LEVEL_INFO
|
||||
LEVEL_DBG = 15 // LEVEL_DEBUG
|
||||
};
|
||||
|
||||
protected:
|
||||
static const int NEST_MAX = 4;
|
||||
static const char* nest_spaces[NEST_MAX];
|
||||
static const char* LOGFILEENV;
|
||||
static const char* MSGTIMESTAMP;
|
||||
static S3fsLog* pSingleton;
|
||||
static s3fs_log_level debug_level;
|
||||
static FILE* logfp;
|
||||
static std::string* plogfile;
|
||||
static char current_time[64];
|
||||
static bool time_stamp;
|
||||
|
||||
protected:
|
||||
bool LowLoadEnv();
|
||||
bool LowSetLogfile(const char* pfile);
|
||||
s3fs_log_level LowSetLogLevel(s3fs_log_level level);
|
||||
s3fs_log_level LowBumpupLogLevel();
|
||||
|
||||
public:
|
||||
static bool IsS3fsLogLevel(s3fs_log_level level);
|
||||
static bool IsS3fsLogCrit() { return IsS3fsLogLevel(LEVEL_CRIT); }
|
||||
static bool IsS3fsLogErr() { return IsS3fsLogLevel(LEVEL_ERR); }
|
||||
static bool IsS3fsLogWarn() { return IsS3fsLogLevel(LEVEL_WARN); }
|
||||
static bool IsS3fsLogInfo() { return IsS3fsLogLevel(LEVEL_INFO); }
|
||||
static bool IsS3fsLogDbg() { return IsS3fsLogLevel(LEVEL_DBG); }
|
||||
|
||||
static int GetSyslogLevel(s3fs_log_level level)
|
||||
{
|
||||
return ( LEVEL_DBG == (level & LEVEL_DBG) ? LOG_DEBUG :
|
||||
LEVEL_INFO == (level & LEVEL_DBG) ? LOG_INFO :
|
||||
LEVEL_WARN == (level & LEVEL_DBG) ? LOG_WARNING :
|
||||
LEVEL_ERR == (level & LEVEL_DBG) ? LOG_ERR : LOG_CRIT );
|
||||
}
|
||||
|
||||
static const char* GetCurrentTime()
|
||||
{
|
||||
if(time_stamp){
|
||||
struct timeval now;
|
||||
struct timespec tsnow;
|
||||
struct tm res;
|
||||
char tmp[32];
|
||||
if(-1 == clock_gettime(S3FS_CLOCK_MONOTONIC, &tsnow)){
|
||||
now.tv_sec = tsnow.tv_sec;
|
||||
now.tv_usec = (tsnow.tv_nsec / 1000);
|
||||
}else{
|
||||
gettimeofday(&now, NULL);
|
||||
}
|
||||
strftime(tmp, sizeof(tmp), "%Y-%m-%dT%H:%M:%S", gmtime_r(&now.tv_sec, &res));
|
||||
snprintf(current_time, sizeof(current_time), S3FSLOG_TIME_FMT, tmp, (now.tv_usec / 1000));
|
||||
}else{
|
||||
current_time[0] = '\0';
|
||||
}
|
||||
return current_time;
|
||||
}
|
||||
|
||||
static const char* GetLevelString(s3fs_log_level level)
|
||||
{
|
||||
return ( LEVEL_DBG == (level & LEVEL_DBG) ? "[DBG] " :
|
||||
LEVEL_INFO == (level & LEVEL_DBG) ? "[INF] " :
|
||||
LEVEL_WARN == (level & LEVEL_DBG) ? "[WAN] " :
|
||||
LEVEL_ERR == (level & LEVEL_DBG) ? "[ERR] " : "[CRT] " );
|
||||
}
|
||||
|
||||
static const char* GetS3fsLogNest(int nest)
|
||||
{
|
||||
if(nest < NEST_MAX){
|
||||
return nest_spaces[nest];
|
||||
}else{
|
||||
return nest_spaces[NEST_MAX - 1];
|
||||
}
|
||||
}
|
||||
|
||||
static bool IsSetLogFile()
|
||||
{
|
||||
return (NULL != logfp);
|
||||
}
|
||||
|
||||
static FILE* GetOutputLogFile()
|
||||
{
|
||||
return (logfp ? logfp : stdout);
|
||||
}
|
||||
|
||||
static FILE* GetErrorLogFile()
|
||||
{
|
||||
return (logfp ? logfp : stderr);
|
||||
}
|
||||
|
||||
static void SeekEnd()
|
||||
{
|
||||
if(logfp){
|
||||
fseek(logfp, 0, SEEK_END);
|
||||
}
|
||||
}
|
||||
|
||||
static void Flush()
|
||||
{
|
||||
if(logfp){
|
||||
fflush(logfp);
|
||||
}
|
||||
}
|
||||
|
||||
static bool SetLogfile(const char* pfile);
|
||||
static bool ReopenLogfile();
|
||||
static s3fs_log_level SetLogLevel(s3fs_log_level level);
|
||||
static s3fs_log_level BumpupLogLevel();
|
||||
static bool SetTimeStamp(bool value);
|
||||
|
||||
explicit S3fsLog();
|
||||
~S3fsLog();
|
||||
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Debug macros
|
||||
//-------------------------------------------------------------------
|
||||
//
// Low-level emitters.  Each macro filters on the dynamic log level, then
// writes either to the log file / stdout (when running in the foreground
// or a log file is configured) or to syslog.  "foreground" and
// "instance_name" are globals defined elsewhere in the project.
//
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
        do{ \
            if(S3fsLog::IsS3fsLogLevel(level)){ \
                if(foreground || S3fsLog::IsSetLogFile()){ \
                    S3fsLog::SeekEnd(); \
                    fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime(), S3fsLog::GetLevelString(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
                    S3fsLog::Flush(); \
                }else{ \
                    syslog(S3fsLog::GetSyslogLevel(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
                } \
            } \
        }while(0)

// Same as S3FS_LOW_LOGPRN plus a nesting-indent prefix.
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
        do{ \
            if(S3fsLog::IsS3fsLogLevel(level)){ \
                if(foreground || S3fsLog::IsSetLogFile()){ \
                    S3fsLog::SeekEnd(); \
                    fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime(), S3fsLog::GetLevelString(level), S3fsLog::GetS3fsLogNest(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
                    S3fsLog::Flush(); \
                }else{ \
                    syslog(S3fsLog::GetSyslogLevel(level), "%s%s" fmt "%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(nest), __VA_ARGS__); \
                } \
            } \
        }while(0)

// Unconditional emitter for libcurl debug callbacks.
#define S3FS_LOW_CURLDBG(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetOutputLogFile(), "%s[CURL DBG] " fmt "%s\n", S3fsLog::GetCurrentTime(), __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// Fatal-path message: always written to the error stream, and duplicated
// to syslog when running in the background.
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetErrorLogFile(), "s3fs: " fmt "%s\n", __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                fprintf(S3fsLog::GetErrorLogFile(), "s3fs: " fmt "%s\n", __VA_ARGS__); \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// Special macro for init message
#define S3FS_PRN_INIT_INFO(fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(S3fsLog::GetOutputLogFile(), "%s%s%s%s:%s(%d): " fmt "%s\n", S3fsLog::GetCurrentTime(), S3fsLog::GetLevelString(S3fsLog::LEVEL_INFO), S3fsLog::GetS3fsLogNest(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3fsLog::GetS3fsLogNest(0), __VA_ARGS__, ""); \
            } \
        }while(0)

// Special macro for checking cache files
#define S3FS_LOW_CACHE(fp, fmt, ...) \
        do{ \
            if(foreground || S3fsLog::IsSetLogFile()){ \
                S3fsLog::SeekEnd(); \
                fprintf(fp, fmt "%s\n", __VA_ARGS__); \
                S3fsLog::Flush(); \
            }else{ \
                syslog(S3fsLog::GetSyslogLevel(S3fsLog::LEVEL_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
            } \
        }while(0)

// [NOTE]
// The trailing "" argument lets each wrapper below accept zero or more
// variadic arguments (small trick for VA_ARGS).
//
#define S3FS_PRN_EXIT(fmt, ...)   S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...)   S3FS_LOW_LOGPRN(S3fsLog::LEVEL_CRIT, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_ERR(fmt, ...)    S3FS_LOW_LOGPRN(S3fsLog::LEVEL_ERR, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_WARN(fmt, ...)   S3FS_LOW_LOGPRN(S3fsLog::LEVEL_WARN, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_DBG(fmt, ...)    S3FS_LOW_LOGPRN(S3fsLog::LEVEL_DBG, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO(fmt, ...)   S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO1(fmt, ...)  S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...)  S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...)  S3FS_LOW_LOGPRN2(S3fsLog::LEVEL_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...)   S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...)   S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
|
||||
|
||||
#endif // S3FS_LOGGER_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
1195
src/s3fs_util.cpp
1195
src/s3fs_util.cpp
File diff suppressed because it is too large
Load Diff
142
src/s3fs_util.h
142
src/s3fs_util.h
@ -1,118 +1,54 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_UTIL_H_
|
||||
#define S3FS_S3FS_UTIL_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Typedef
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Struct
|
||||
//
|
||||
// One object (file or directory) from an S3 bucket listing.
struct s3obj_entry{
    std::string normalname; // normalized name: if empty, object is normalized name.
    std::string orgname;    // original name: if empty, object is original name.
    std::string etag;       // ETag reported by S3 (may be empty)
    bool        is_dir;     // true when the entry represents a directory

    s3obj_entry() : is_dir(false) {}
};

// Map of object name -> entry, and a plain list of object names.
typedef std::map<std::string, struct s3obj_entry> s3obj_t;
typedef std::list<std::string> s3obj_list_t;
|
||||
|
||||
//
|
||||
// Class
|
||||
//
|
||||
// Collection of objects returned by a bucket listing, keyed by name.
// Entries appear to be tracked under both normalized and original names
// (see s3obj_entry) — confirm against the implementation in s3fs_util.cpp.
class S3ObjList
{
    private:
        s3obj_t objects;

    private:
        // NOTE(review): "nomalized" spelling is part of the existing
        // private interface; kept as-is.
        bool insert_nomalized(const char* name, const char* normalized, bool is_dir);
        const s3obj_entry* GetS3Obj(const char* name) const;

        s3obj_t::const_iterator begin(void) const {
            return objects.begin();
        }
        s3obj_t::const_iterator end(void) const {
            return objects.end();
        }

    public:
        S3ObjList() {}
        ~S3ObjList() {}

        bool IsEmpty(void) const {
            return objects.empty();
        }
        // Add an object; etag/is_dir are optional metadata.
        bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
        std::string GetOrgName(const char* name) const;
        std::string GetNormalizedName(const char* name) const;
        std::string GetETag(const char* name) const;
        bool IsDir(const char* name) const;
        // Collect the object names; filtering/trailing-slash handling is
        // controlled by the flags.
        bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;

        static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
};
|
||||
|
||||
// Node of a doubly linked list describing a pending rename
// (old_path -> new_path); built by create_mvnode/add_mvnode below.
typedef struct mvnode {
    char *old_path;
    char *new_path;
    bool is_dir;
    // assumes: set when the directory exists only in normalized form —
    // TODO confirm against s3fs_util.cpp
    bool is_normdir;
    struct mvnode *prev;
    struct mvnode *next;
} MVNODE;
|
||||
|
||||
// Scoped helper around a pthread mutex.  Declaration only here;
// NOTE(review): lock/unlock timing (constructor vs explicit Lock()) must
// be confirmed in the implementation file.
class AutoLock
{
    private:
        pthread_mutex_t* auto_mutex;  // the guarded mutex (may be NULL)
        bool is_locked;               // whether this instance currently holds it

    public:
        AutoLock(pthread_mutex_t* pmutex = NULL);
        ~AutoLock();

        bool Lock(void);
        bool Unlock(void);
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
std::string get_realpath(const char *path);

// Rename-list helpers (see MVNODE above).
MVNODE *create_mvnode(const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const char *new_path, bool is_dir, bool normdir = false);
void free_mvnodes(MVNODE *head);

void init_sysconf_vars();
std::string get_username(uid_t uid);
// NOTE(review): both the misspelled and the corrected name are declared;
// this looks like a merge/diff artifact — confirm which one the
// implementation actually defines before removing either.
int is_uid_inculde_group(uid_t uid, gid_t gid);
int is_uid_include_group(uid_t uid, gid_t gid);

// NOTE(review): the by-const-reference and by-value overloads below can be
// declared together, but any call with a std::string argument would be
// ambiguous; this also looks like a diff artifact — confirm which set the
// implementation provides.
std::string mydirname(const char* path);
std::string mydirname(const std::string& path);
std::string mybasename(const char* path);
std::string mybasename(const std::string& path);

std::string mydirname(std::string path);
std::string mybasename(std::string path);
int mkdirp(const std::string& path, mode_t mode);
std::string get_exist_directory_path(const std::string& path);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);

// Header/metadata accessors: the const char* overloads parse a raw header
// value, the headers_t overloads look the value up in parsed metadata.
time_t get_mtime(const char *s);
time_t get_mtime(headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(headers_t& meta);
mode_t get_mode(const char *s);
mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(headers_t& meta);
gid_t get_gid(const char *s);
gid_t get_gid(headers_t& meta);
blkcnt_t get_blocks(off_t size);
time_t cvtIAMExpireStringToTime(const char* s);
time_t get_lastmodified(const char* s);
time_t get_lastmodified(headers_t& meta);
bool is_need_check_obj_detail(headers_t& meta);

// Command-line help / version output.
void show_usage(void);
void show_help(void);
void show_version(void);
bool compare_sysname(const char* target);
|
||||
|
||||
#endif // S3FS_S3FS_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
502
src/s3fs_xml.cpp
Normal file
502
src/s3fs_xml.cpp
Normal file
@ -0,0 +1,502 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3fs_xml.h"
|
||||
#include "s3fs_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Variables
|
||||
//-------------------------------------------------------------------
|
||||
// Sentinel returned by get_object_name() for listing entries that should be
// skipped (the directory itself, "." / "..").  NOT heap-allocated — callers
// must compare against it and must not free it.
static const char* c_strErrorObjectName = "FILE or SUBDIR in DIR";
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
// Return (via nsurl) the namespace URL of the document's root element.
// The result is cached in file-scope statics and only refreshed every
// 60 seconds.  NOTE(review): the cache is shared across documents and is
// not guarded by any lock — confirm callers are effectively serialized.
static bool GetXmlNsUrl(xmlDocPtr doc, std::string& nsurl)
{
    static time_t tmLast = 0;  // cache for 60 sec.
    static std::string strNs;
    bool result = false;

    if(!doc){
        return false;
    }
    if((tmLast + 60) < time(NULL)){
        // refresh
        tmLast = time(NULL);
        strNs = "";
        xmlNodePtr pRootNode = xmlDocGetRootElement(doc);
        if(pRootNode){
            xmlNsPtr* nslist = xmlGetNsList(doc, pRootNode);
            if(nslist){
                // Only the first namespace in the list is consulted.
                if(nslist[0] && nslist[0]->href){
                    strNs = (const char*)(nslist[0]->href);
                }
                S3FS_XMLFREE(nslist);
            }
        }
    }
    if(!strNs.empty()){
        nsurl = strNs;
        result = true;
    }
    return result;
}
|
||||
|
||||
// Evaluate the XPath "/ListBucketResult/<exp>" (namespace-qualified when
// the document declares one and noxmlns is not set) and return the text of
// the first matching node.  Returns NULL when nothing matches; the caller
// owns the returned xmlChar* and must xmlFree() it.
static xmlChar* get_base_exp(xmlDocPtr doc, const char* exp)
{
    xmlXPathObjectPtr marker_xp;
    std::string xmlnsurl;
    std::string exp_string;

    if(!doc){
        return NULL;
    }
    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        // Register the document namespace under the "s3" prefix for XPath.
        xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
        exp_string = "/s3:ListBucketResult/s3:";
    } else {
        exp_string = "/ListBucketResult/";
    }

    exp_string += exp;

    if(NULL == (marker_xp = xmlXPathEvalExpression((xmlChar *)exp_string.c_str(), ctx))){
        xmlXPathFreeContext(ctx);
        return NULL;
    }
    if(xmlXPathNodeSetIsEmpty(marker_xp->nodesetval)){
        S3FS_PRN_ERR("marker_xp->nodesetval is empty.");
        xmlXPathFreeObject(marker_xp);
        xmlXPathFreeContext(ctx);
        return NULL;
    }
    // Take the text content of the first matched node.
    xmlNodeSetPtr nodes = marker_xp->nodesetval;
    xmlChar* result = xmlNodeListGetString(doc, nodes->nodeTab[0]->xmlChildrenNode, 1);

    xmlXPathFreeObject(marker_xp);
    xmlXPathFreeContext(ctx);

    return result;
}
|
||||
|
||||
// Convenience wrapper: the <Prefix> element of a listing response.
// Caller must xmlFree() the result.
static xmlChar* get_prefix(xmlDocPtr doc)
{
    return get_base_exp(doc, "Prefix");
}
|
||||
|
||||
// The <NextContinuationToken> element of a ListObjectsV2 response.
// NOTE(review): "contination" misspelling is part of the public interface
// and kept for link compatibility.  Caller must xmlFree() the result.
xmlChar* get_next_contination_token(xmlDocPtr doc)
{
    return get_base_exp(doc, "NextContinuationToken");
}
|
||||
|
||||
// The <NextMarker> element of a (v1) listing response.
// Caller must xmlFree() the result.
xmlChar* get_next_marker(xmlDocPtr doc)
{
    return get_base_exp(doc, "NextMarker");
}
|
||||
|
||||
// return: the pointer to object name on allocated memory.
|
||||
// the pointer to "c_strErrorObjectName".(not allocated)
|
||||
// NULL(a case of something error occurred)
|
||||
// Extract the object name (relative to "path") from one listing node.
// Returns: a strdup()'d name the caller must free;
//          c_strErrorObjectName (NOT allocated) for entries to skip;
//          NULL on error.
static char* get_object_name(xmlDocPtr doc, xmlNodePtr node, const char* path)
{
    // Get full path
    xmlChar* fullpath = xmlNodeListGetString(doc, node, 1);
    if(!fullpath){
        S3FS_PRN_ERR("could not get object full path name..");
        return NULL;
    }
    // basepath(path) is as same as fullpath.
    if(0 == strcmp((char*)fullpath, path)){
        xmlFree(fullpath);
        return (char*)c_strErrorObjectName;
    }

    // Make dir path and filename
    // (keep the std::string temporaries alive while their c_str()s are used)
    std::string strdirpath = mydirname(std::string((char*)fullpath));
    std::string strmybpath = mybasename(std::string((char*)fullpath));
    const char* dirpath = strdirpath.c_str();
    const char* mybname = strmybpath.c_str();
    const char* basepath= (path && '/' == path[0]) ? &path[1] : path;  // strip leading '/'
    xmlFree(fullpath);

    if(!mybname || '\0' == mybname[0]){
        return NULL;
    }

    // check subdir & file in subdir
    if(dirpath && 0 < strlen(dirpath)){
        // case of "/"
        if(0 == strcmp(mybname, "/") && 0 == strcmp(dirpath, "/")){
            return (char*)c_strErrorObjectName;
        }
        // case of "."
        if(0 == strcmp(mybname, ".") && 0 == strcmp(dirpath, ".")){
            return (char*)c_strErrorObjectName;
        }
        // case of ".."
        if(0 == strcmp(mybname, "..") && 0 == strcmp(dirpath, ".")){
            return (char*)c_strErrorObjectName;
        }
        // case of "name"
        if(0 == strcmp(dirpath, ".")){
            // OK: entry directly under the listing root
            return strdup(mybname);
        }else{
            if(basepath && 0 == strcmp(dirpath, basepath)){
                // OK: entry directly under basepath
                return strdup(mybname);
            }else if(basepath && 0 < strlen(basepath) && '/' == basepath[strlen(basepath) - 1] && 0 == strncmp(dirpath, basepath, strlen(basepath) - 1)){
                // Entry in a subdirectory of basepath: return the
                // remaining "subdir/.../name" portion.
                std::string withdirname;
                if(strlen(dirpath) > strlen(basepath)){
                    withdirname = &dirpath[strlen(basepath)];
                }
                if(0 < withdirname.length() && '/' != withdirname[withdirname.length() - 1]){
                    withdirname += "/";
                }
                withdirname += mybname;
                return strdup(withdirname.c_str());
            }
        }
    }
    // case of something wrong
    return (char*)c_strErrorObjectName;
}
|
||||
|
||||
// Evaluate the XPath expression "exp_key" relative to ctx->node and return
// the text of the first match, or NULL when missing/empty.  Caller owns
// the returned xmlChar* and must free it (S3FS_XMLFREE/xmlFree).
static xmlChar* get_exp_value_xml(xmlDocPtr doc, xmlXPathContextPtr ctx, const char* exp_key)
{
    if(!doc || !ctx || !exp_key){
        return NULL;
    }

    xmlXPathObjectPtr exp;
    xmlNodeSetPtr exp_nodes;
    xmlChar* exp_value;

    // search exp_key tag
    if(NULL == (exp = xmlXPathEvalExpression((xmlChar*)exp_key, ctx))){
        S3FS_PRN_ERR("Could not find key(%s).", exp_key);
        return NULL;
    }
    if(xmlXPathNodeSetIsEmpty(exp->nodesetval)){
        S3FS_PRN_ERR("Key(%s) node is empty.", exp_key);
        S3FS_XMLXPATHFREEOBJECT(exp);
        return NULL;
    }
    // get exp_key value & set in struct
    exp_nodes = exp->nodesetval;
    if(NULL == (exp_value = xmlNodeListGetString(doc, exp_nodes->nodeTab[0]->xmlChildrenNode, 1))){
        S3FS_PRN_ERR("Key(%s) value is empty.", exp_key);
        S3FS_XMLXPATHFREEOBJECT(exp);
        return NULL;
    }

    S3FS_XMLXPATHFREEOBJECT(exp);
    return exp_value;
}
|
||||
|
||||
// Parse a ListMultipartUploads response and fill "list" with the
// incomplete multipart uploads found (object key, upload id, initiated
// date).  Entries missing any of those fields are skipped.  Returns true
// on success, including an empty result set.
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list)
{
    if(!doc){
        return false;
    }

    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);   // [FIX] removed stray ";;"

    std::string xmlnsurl;
    std::string ex_upload = "//";
    std::string ex_key;
    std::string ex_id;
    std::string ex_date;

    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        // Qualify every tag with the document namespace.
        xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
        ex_upload += "s3:";
        ex_key    += "s3:";
        ex_id     += "s3:";
        ex_date   += "s3:";
    }
    ex_upload += "Upload";
    ex_key    += "Key";
    ex_id     += "UploadId";
    ex_date   += "Initiated";

    // get "Upload" Tags
    xmlXPathObjectPtr upload_xp;
    if(NULL == (upload_xp = xmlXPathEvalExpression((xmlChar*)ex_upload.c_str(), ctx))){
        S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
        S3FS_XMLXPATHFREECONTEXT(ctx);  // [FIX] ctx was leaked on this error path
        return false;
    }
    if(xmlXPathNodeSetIsEmpty(upload_xp->nodesetval)){
        S3FS_PRN_INFO("upload_xp->nodesetval is empty.");
        S3FS_XMLXPATHFREEOBJECT(upload_xp);
        S3FS_XMLXPATHFREECONTEXT(ctx);
        return true;
    }

    // Make list
    int cnt;
    xmlNodeSetPtr upload_nodes;
    list.clear();
    for(cnt = 0, upload_nodes = upload_xp->nodesetval; cnt < upload_nodes->nodeNr; cnt++){
        // Evaluate the per-entry expressions relative to this Upload node.
        ctx->node = upload_nodes->nodeTab[cnt];

        INCOMP_MPU_INFO part;
        xmlChar* ex_value;

        // search "Key" tag (ensure the stored key is absolute)
        if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_key.c_str()))){
            continue;
        }
        if('/' != *((char*)ex_value)){
            part.key = "/";
        }else{
            part.key = "";
        }
        part.key += (char*)ex_value;
        S3FS_XMLFREE(ex_value);

        // search "UploadId" tag
        if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_id.c_str()))){
            continue;
        }
        part.id = (char*)ex_value;
        S3FS_XMLFREE(ex_value);

        // search "Initiated" tag
        if(NULL == (ex_value = get_exp_value_xml(doc, ctx, ex_date.c_str()))){
            continue;
        }
        part.date = (char*)ex_value;
        S3FS_XMLFREE(ex_value);

        list.push_back(part);
    }

    S3FS_XMLXPATHFREEOBJECT(upload_xp);
    S3FS_XMLXPATHFREECONTEXT(ctx);

    return true;
}
|
||||
|
||||
// Return true when the response document carries <IsTruncated>true</IsTruncated>.
// A missing element is treated as "not truncated".
bool is_truncated(xmlDocPtr doc)
{
    xmlChar* truncated = get_base_exp(doc, "IsTruncated");
    if(!truncated){
        return false;
    }
    // The comparison is case-insensitive ("True"/"TRUE" also count).
    bool is_truncated_flag = (0 == strcasecmp((const char*)truncated, "true"));
    xmlFree(truncated);
    return is_truncated_flag;
}
|
||||
|
||||
// Evaluate "ex_contents" (an XPath like "//Contents" or "//CommonPrefixes")
// against the listing document and insert one entry per matched node into
// "head".  "ex_key" extracts the object name relative to each node, and
// "ex_etag" (optional, ignored when isCPrefix is non-zero) extracts the ETag.
// Entries from a CommonPrefixes evaluation (isCPrefix != 0) are marked as
// directories.  Returns 0 on success, -1 on a fatal error; individual
// malformed nodes are logged and skipped.
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head)
{
    xmlXPathObjectPtr contents_xp;
    xmlNodeSetPtr content_nodes;

    if(NULL == (contents_xp = xmlXPathEvalExpression((xmlChar*)ex_contents, ctx))){
        S3FS_PRN_ERR("xmlXPathEvalExpression returns null.");
        return -1;
    }
    // No matching nodes is a normal, empty result.
    if(xmlXPathNodeSetIsEmpty(contents_xp->nodesetval)){
        S3FS_PRN_DBG("contents_xp->nodesetval is empty.");
        S3FS_XMLXPATHFREEOBJECT(contents_xp);
        return 0;
    }
    content_nodes = contents_xp->nodesetval;

    bool is_dir;
    std::string stretag;
    int i;
    for(i = 0; i < content_nodes->nodeNr; i++){
        // Make the per-entry XPath expressions relative to this node.
        ctx->node = content_nodes->nodeTab[i];

        // object name
        xmlXPathObjectPtr key;
        if(NULL == (key = xmlXPathEvalExpression((xmlChar*)ex_key, ctx))){
            S3FS_PRN_WARN("key is null. but continue.");
            continue;
        }
        if(xmlXPathNodeSetIsEmpty(key->nodesetval)){
            S3FS_PRN_WARN("node is empty. but continue.");
            xmlXPathFreeObject(key);
            continue;
        }
        xmlNodeSetPtr key_nodes = key->nodesetval;
        char* name = get_object_name(doc, key_nodes->nodeTab[0]->xmlChildrenNode, path);

        if(!name){
            S3FS_PRN_WARN("name is something wrong. but continue.");

        // NOTE: c_strErrorObjectName is a sentinel compared by pointer
        // identity, not by string content; it must not be free()d.
        }else if((const char*)name != c_strErrorObjectName){
            is_dir  = isCPrefix ? true : false;
            stretag = "";

            if(!isCPrefix && ex_etag){
                // Get ETag
                xmlXPathObjectPtr ETag;
                if(NULL != (ETag = xmlXPathEvalExpression((xmlChar*)ex_etag, ctx))){
                    if(xmlXPathNodeSetIsEmpty(ETag->nodesetval)){
                        S3FS_PRN_INFO("ETag->nodesetval is empty.");
                    }else{
                        xmlNodeSetPtr etag_nodes = ETag->nodesetval;
                        xmlChar* petag = xmlNodeListGetString(doc, etag_nodes->nodeTab[0]->xmlChildrenNode, 1);
                        if(petag){
                            stretag = (char*)petag;
                            xmlFree(petag);
                        }
                    }
                    xmlXPathFreeObject(ETag);
                }
            }
            // Pass NULL (not "") when no ETag was found.
            if(!head.insert(name, (0 < stretag.length() ? stretag.c_str() : NULL), is_dir)){
                S3FS_PRN_ERR("insert_object returns with error.");
                xmlXPathFreeObject(key);
                xmlXPathFreeObject(contents_xp);
                free(name);
                S3FS_MALLOCTRIM(0);
                return -1;
            }
            free(name);
        }else{
            S3FS_PRN_DBG("name is file or subdir in dir. but continue.");
        }
        xmlXPathFreeObject(key);
    }
    S3FS_XMLXPATHFREEOBJECT(contents_xp);

    return 0;
}
|
||||
|
||||
// Parse a bucket listing response document and append both file entries
// (<Contents>) and directory entries (<CommonPrefixes>) to "head".
// Returns 0 on success, -1 on error.
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head)
{
    std::string xmlnsurl;
    std::string ex_contents = "//";
    std::string ex_key;
    std::string ex_cprefix = "//";
    std::string ex_prefix;
    std::string ex_etag;

    if(!doc){
        return -1;
    }

    // If there is not <Prefix>, use path instead of it.
    xmlChar* pprefix = get_prefix(doc);
    std::string prefix = (pprefix ? (char*)pprefix : path ? path : "");
    if(pprefix){
        xmlFree(pprefix);
    }

    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

    // Qualify the XPath expressions with the "s3" namespace when the
    // document declares one (unless disabled with the noxmlns option).
    if(!noxmlns && GetXmlNsUrl(doc, xmlnsurl)){
        xmlXPathRegisterNs(ctx, (xmlChar*)"s3", (xmlChar*)xmlnsurl.c_str());
        ex_contents+= "s3:";
        ex_key     += "s3:";
        ex_cprefix += "s3:";
        ex_prefix  += "s3:";
        ex_etag    += "s3:";
    }
    ex_contents+= "Contents";
    ex_key     += "Key";
    ex_cprefix += "CommonPrefixes";
    ex_prefix  += "Prefix";
    ex_etag    += "ETag";

    // Files first (with ETags), then common prefixes marked as directories.
    if(-1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_contents.c_str(), ex_key.c_str(), ex_etag.c_str(), 0, head) ||
       -1 == append_objects_from_xml_ex(prefix.c_str(), doc, ctx, ex_cprefix.c_str(), ex_prefix.c_str(), NULL, 1, head) )
    {
        S3FS_PRN_ERR("append_objects_from_xml_ex returns with error.");
        S3FS_XMLXPATHFREECONTEXT(ctx);
        return -1;
    }
    S3FS_XMLXPATHFREECONTEXT(ctx);

    return 0;
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions
|
||||
//-------------------------------------------------------------------
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value)
|
||||
{
|
||||
bool result = false;
|
||||
|
||||
if(!data || !key){
|
||||
return false;
|
||||
}
|
||||
value.clear();
|
||||
|
||||
xmlDocPtr doc;
|
||||
if(NULL == (doc = xmlReadMemory(data, len, "", NULL, 0))){
|
||||
return false;
|
||||
}
|
||||
|
||||
if(NULL == doc->children){
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
return false;
|
||||
}
|
||||
for(xmlNodePtr cur_node = doc->children->children; NULL != cur_node; cur_node = cur_node->next){
|
||||
// For DEBUG
|
||||
// std::string cur_node_name(reinterpret_cast<const char *>(cur_node->name));
|
||||
// printf("cur_node_name: %s\n", cur_node_name.c_str());
|
||||
|
||||
if(XML_ELEMENT_NODE == cur_node->type){
|
||||
std::string elementName = reinterpret_cast<const char*>(cur_node->name);
|
||||
// For DEBUG
|
||||
// printf("elementName: %s\n", elementName.c_str());
|
||||
|
||||
if(cur_node->children){
|
||||
if(XML_TEXT_NODE == cur_node->children->type){
|
||||
if(elementName == key) {
|
||||
value = reinterpret_cast<const char *>(cur_node->children->content);
|
||||
result = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
S3FS_XMLFREEDOC(doc);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
54
src/s3fs_xml.h
Normal file
54
src/s3fs_xml.h
Normal file
@ -0,0 +1,54 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3FS_XML_H_
|
||||
#define S3FS_S3FS_XML_H_
|
||||
|
||||
#include <libxml/xpath.h>
|
||||
#include <libxml/xpathInternals.h>
|
||||
#include <libxml/tree.h>
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "s3objlist.h"
|
||||
#include "mpu_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
bool is_truncated(xmlDocPtr doc);
|
||||
int append_objects_from_xml_ex(const char* path, xmlDocPtr doc, xmlXPathContextPtr ctx, const char* ex_contents, const char* ex_key, const char* ex_etag, int isCPrefix, S3ObjList& head);
|
||||
int append_objects_from_xml(const char* path, xmlDocPtr doc, S3ObjList& head);
|
||||
xmlChar* get_next_contination_token(xmlDocPtr doc);
|
||||
xmlChar* get_next_marker(xmlDocPtr doc);
|
||||
bool get_incomp_mpu_list(xmlDocPtr doc, incomp_mpu_list_t& list);
|
||||
|
||||
bool simple_parse_xml(const char* data, size_t len, const char* key, std::string& value);
|
||||
|
||||
#endif // S3FS_S3FS_XML_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
284
src/s3objlist.cpp
Normal file
284
src/s3objlist.cpp
Normal file
@ -0,0 +1,284 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "s3objlist.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3ObjList
|
||||
//-------------------------------------------------------------------
|
||||
// New class S3ObjList is base on old s3_object struct.
|
||||
// This class is for S3 compatible clients.
|
||||
//
|
||||
// If name is terminated by "/", it is forced dir type.
|
||||
// If name is terminated by "_$folder$", it is forced dir type.
|
||||
// If is_dir is true and name is not terminated by "/", the name is added "/".
|
||||
//
|
||||
// Insert an object into the list, normalizing its name first.
//
// Normalization rules (see the class comment above):
//   - a "_$folder$" suffix marks a directory and is stripped;
//   - a trailing "/" marks a directory;
//   - directory entries are stored with a trailing "/".
// Existing entries for the same normalized name are updated in place, and
// a conflicting plain-file/"dir" entry is reconciled.  Returns false only
// for a NULL/empty name.
bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
{
    if(!name || '\0' == name[0]){
        return false;
    }

    s3obj_t::iterator iter;
    std::string newname;
    std::string orgname = name;

    // Normalization
    std::string::size_type pos = orgname.find("_$folder$");
    if(std::string::npos != pos){
        // "_$folder$" suffix forces directory type; strip the marker.
        newname = orgname.substr(0, pos);
        is_dir  = true;
    }else{
        newname = orgname;
    }
    if(is_dir){
        // Directories are keyed with a trailing "/".
        if('/' != newname[newname.length() - 1]){
            newname += "/";
        }
    }else{
        // A trailing "/" in the raw name also means "directory".
        if('/' == newname[newname.length() - 1]){
            is_dir = true;
        }
    }

    // Check derived name object.
    if(is_dir){
        // A plain "dir" entry (no trailing slash) is superseded by this one.
        std::string chkname = newname.substr(0, newname.length() - 1);
        if(objects.end() != (iter = objects.find(chkname))){
            // found "dir" object --> remove it.
            objects.erase(iter);
        }
    }else{
        std::string chkname = newname + "/";
        if(objects.end() != (iter = objects.find(chkname))){
            // found "dir/" object --> not add new object.
            // and add normalization
            return insert_normalized(orgname.c_str(), chkname.c_str(), true);
        }
    }

    // Add object
    if(objects.end() != (iter = objects.find(newname))){
        // Found same object --> update information.
        (*iter).second.normalname.erase();
        (*iter).second.orgname = orgname;
        (*iter).second.is_dir  = is_dir;
        if(etag){
            (*iter).second.etag = std::string(etag); // over write
        }
    }else{
        // add new object
        s3obj_entry newobject;
        newobject.orgname = orgname;
        newobject.is_dir  = is_dir;
        if(etag){
            newobject.etag = etag;
        }
        objects[newname] = newobject;
    }

    // add normalization
    return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
}
|
||||
|
||||
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
|
||||
{
|
||||
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
|
||||
return false;
|
||||
}
|
||||
if(0 == strcmp(name, normalized)){
|
||||
return true;
|
||||
}
|
||||
|
||||
s3obj_t::iterator iter;
|
||||
if(objects.end() != (iter = objects.find(name))){
|
||||
// found name --> over write
|
||||
iter->second.orgname.erase();
|
||||
iter->second.etag.erase();
|
||||
iter->second.normalname = normalized;
|
||||
iter->second.is_dir = is_dir;
|
||||
}else{
|
||||
// not found --> add new object
|
||||
s3obj_entry newobject;
|
||||
newobject.normalname = normalized;
|
||||
newobject.is_dir = is_dir;
|
||||
objects[name] = newobject;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Look up the raw entry for "name".
// Returns NULL when the name is NULL/empty or not present.
const s3obj_entry* S3ObjList::GetS3Obj(const char* name) const
{
    if(!name || '\0' == name[0]){
        return NULL;
    }
    s3obj_t::const_iterator found = objects.find(name);
    if(objects.end() == found){
        return NULL;
    }
    return &(found->second);
}
|
||||
|
||||
std::string S3ObjList::GetOrgName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
}
|
||||
return ps3obj->orgname;
|
||||
}
|
||||
|
||||
std::string S3ObjList::GetNormalizedName(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
}
|
||||
if(0 == (ps3obj->normalname).length()){
|
||||
return std::string(name);
|
||||
}
|
||||
return ps3obj->normalname;
|
||||
}
|
||||
|
||||
std::string S3ObjList::GetETag(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(!name || '\0' == name[0]){
|
||||
return std::string("");
|
||||
}
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return std::string("");
|
||||
}
|
||||
return ps3obj->etag;
|
||||
}
|
||||
|
||||
bool S3ObjList::IsDir(const char* name) const
|
||||
{
|
||||
const s3obj_entry* ps3obj;
|
||||
|
||||
if(NULL == (ps3obj = GetS3Obj(name))){
|
||||
return false;
|
||||
}
|
||||
return ps3obj->is_dir;
|
||||
}
|
||||
|
||||
bool S3ObjList::GetLastName(std::string& lastname) const
|
||||
{
|
||||
bool result = false;
|
||||
lastname = "";
|
||||
for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
|
||||
if((*iter).second.orgname.length()){
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
|
||||
lastname = (*iter).second.orgname;
|
||||
result = true;
|
||||
}
|
||||
}else{
|
||||
if(0 > strcmp(lastname.c_str(), (*iter).second.normalname.c_str())){
|
||||
lastname = (*iter).second.normalname;
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Append the map keys to "list".  With OnlyNormalized, alias entries
// (those carrying a non-empty normalized name) are skipped.  With
// CutSlash, a trailing '/' is stripped, except for the bare "/" key.
// Always returns true.
bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSlash) const
{
    for(s3obj_t::const_iterator it = objects.begin(); it != objects.end(); ++it){
        if(OnlyNormalized && !it->second.normalname.empty()){
            continue;
        }
        std::string entry_name = it->first;
        std::string::size_type length = entry_name.length();
        if(CutSlash && 1 < length && '/' == entry_name[length - 1]){
            // only the "/" string itself keeps its slash.
            entry_name.erase(length - 1);
        }
        list.push_back(entry_name);
    }
    return true;
}
|
||||
|
||||
// Maps a (slash-stripped) path to "was explicitly present in the input".
typedef std::map<std::string, bool> s3obj_h_t;

// Append to "list" every intermediate directory implied by its entries
// but not already present in it.  For example, given "a/b/c", the paths
// "a" and "a/b" are added (with a trailing "/" when haveSlash is true).
// Always returns true.
bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
{
    s3obj_h_t h_map;
    s3obj_h_t::iterator hiter;
    s3obj_list_t::const_iterator liter;

    for(liter = list.begin(); list.end() != liter; ++liter){
        // Strip a trailing slash so directory/file spellings compare equal.
        std::string strtmp = (*liter);
        if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
            strtmp.erase(strtmp.length() - 1);
        }
        h_map[strtmp] = true;

        // check hierarchized directory
        // Walk up the path one component at a time, recording ancestors
        // that were not explicitly listed (value false = synthesized).
        for(std::string::size_type pos = strtmp.find_last_of('/'); std::string::npos != pos; pos = strtmp.find_last_of('/')){
            strtmp.erase(pos);
            if(0 == strtmp.length() || "/" == strtmp){
                break;
            }
            if(h_map.end() == h_map.find(strtmp)){
                // not found
                h_map[strtmp] = false;
            }
        }
    }

    // check map and add lost hierarchized directory.
    for(hiter = h_map.begin(); hiter != h_map.end(); ++hiter){
        if(false == (*hiter).second){
            // add hierarchized directory.
            std::string strtmp = (*hiter).first;
            if(haveSlash){
                strtmp += "/";
            }
            list.push_back(strtmp);
        }
    }
    return true;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
79
src/s3objlist.h
Normal file
79
src/s3objlist.h
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_S3OBJLIST_H_
|
||||
#define S3FS_S3OBJLIST_H_
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Structure / Typedef
|
||||
//-------------------------------------------------------------------
|
||||
// One entry in S3ObjList.  The map key (in S3ObjList::objects) is the
// normalized name; the fields below record how that key relates to the
// name the server actually reported.
// NOTE(review): this header uses std::string, std::map and std::list but
// includes none of <string>, <map>, <list> — it relies on includers
// providing them; confirm whether the includes should be added here.
struct s3obj_entry{
    std::string normalname; // normalized name: if empty, object is normalized name.
    std::string orgname;    // original name: if empty, object is original name.
    std::string etag;       // ETag reported by the server (may be empty)
    bool        is_dir;     // true when the entry represents a directory

    s3obj_entry() : is_dir(false) {}
};

typedef std::map<std::string, struct s3obj_entry> s3obj_t;
typedef std::list<std::string> s3obj_list_t;

//-------------------------------------------------------------------
// Class S3ObjList
//-------------------------------------------------------------------
// Holds the objects returned by a bucket listing, keyed by normalized
// name, and resolves between original and normalized spellings.
class S3ObjList
{
    private:
        s3obj_t objects;    // normalized name -> entry

    private:
        bool insert_normalized(const char* name, const char* normalized, bool is_dir);
        const s3obj_entry* GetS3Obj(const char* name) const;

        s3obj_t::const_iterator begin() const { return objects.begin(); }
        s3obj_t::const_iterator end() const { return objects.end(); }

    public:
        S3ObjList() {}
        ~S3ObjList() {}

        bool IsEmpty() const { return objects.empty(); }
        bool insert(const char* name, const char* etag = NULL, bool is_dir = false);
        std::string GetOrgName(const char* name) const;
        std::string GetNormalizedName(const char* name) const;
        std::string GetETag(const char* name) const;
        bool IsDir(const char* name) const;
        bool GetNameList(s3obj_list_t& list, bool OnlyNormalized = true, bool CutSlash = true) const;
        bool GetLastName(std::string& lastname) const;

        // Adds missing intermediate directories implied by the list entries.
        static bool MakeHierarchizedList(s3obj_list_t& list, bool haveSlash);
};
|
||||
|
||||
#endif // S3FS_S3OBJLIST_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
271
src/sighandlers.cpp
Normal file
271
src/sighandlers.cpp
Normal file
@ -0,0 +1,271 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdio>
#include <cstdlib>
#include <csignal>
#include <cstring>
#include <pthread.h>

#include "common.h"
#include "s3fs.h"
#include "sighandlers.h"
#include "fdcache.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class S3fsSignals
|
||||
//-------------------------------------------------------------------
|
||||
S3fsSignals* S3fsSignals::pSingleton = NULL;
|
||||
bool S3fsSignals::enableUsr1 = false;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Class methods
|
||||
//-------------------------------------------------------------------
|
||||
bool S3fsSignals::Initialize()
|
||||
{
|
||||
if(!S3fsSignals::pSingleton){
|
||||
S3fsSignals::pSingleton = new S3fsSignals;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Tear down the singleton.  Deleting NULL is safe, so no existence check
// is needed.  Always returns true.
bool S3fsSignals::Destroy()
{
    delete S3fsSignals::pSingleton;
    S3fsSignals::pSingleton = NULL;
    return true;
}
|
||||
|
||||
void S3fsSignals::HandlerUSR1(int sig)
|
||||
{
|
||||
if(SIGUSR1 != sig){
|
||||
S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
|
||||
return;
|
||||
}
|
||||
|
||||
S3fsSignals* pSigobj = S3fsSignals::get();
|
||||
if(!pSigobj){
|
||||
S3FS_PRN_ERR("S3fsSignals object is not initialized.");
|
||||
return;
|
||||
}
|
||||
|
||||
if(!pSigobj->WakeupUsr1Thread()){
|
||||
S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Enable the SIGUSR1 cache-check feature, writing its report to "path"
// (NULL means stdout).  Must be called before the S3fsSignals object is
// constructed; the constructor reads enableUsr1 to decide whether to
// start the worker thread.  Returns false when the platform lacks
// SEEK_DATA/SEEK_HOLE or the output file cannot be set.
bool S3fsSignals::SetUsr1Handler(const char* path)
{
    if(!FdManager::HaveLseekHole()){
        S3FS_PRN_ERR("Could not set SIGUSR1 for checking cache, because this system does not support SEEK_DATA/SEEK_HOLE in lseek function.");
        return false;
    }

    // set output file
    if(!FdManager::SetCacheCheckOutput(path)){
        S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
        return false;
    }

    S3fsSignals::enableUsr1 = true;

    return true;
}
|
||||
|
||||
// Worker thread body for the SIGUSR1 cache check.  "arg" is the Semaphore
// the signal handler posts; the thread sleeps on it, runs a full cache
// check when woken, and exits when enableUsr1 is cleared.  Always returns
// NULL.
void* S3fsSignals::CheckCacheWorker(void* arg)
{
    Semaphore* pSem = static_cast<Semaphore*>(arg);
    if(!pSem){
        pthread_exit(NULL);
    }
    if(!S3fsSignals::enableUsr1){
        pthread_exit(NULL);
    }

    // wait and loop
    while(S3fsSignals::enableUsr1){
        // wait
        pSem->wait();
        if(!S3fsSignals::enableUsr1){
            break; // exit asap: DestroyUsr1Handler posted to wake us for shutdown
        }

        // check all cache
        if(!FdManager::get()->CheckAllCache()){
            S3FS_PRN_ERR("Processing failed due to some problem.");
        }

        // do not allow request queuing
        // Drain any signals that arrived during the check so repeated
        // SIGUSR1s collapse into a single run.
        for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
            pSem->wait();
        }
    }
    return NULL;
}
|
||||
|
||||
void S3fsSignals::HandlerUSR2(int sig)
|
||||
{
|
||||
if(SIGUSR2 == sig){
|
||||
S3fsLog::BumpupLogLevel();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
|
||||
}
|
||||
}
|
||||
|
||||
// Install HandlerUSR2 for SIGUSR2 (log-level bumping).  SA_RESTART keeps
// interrupted syscalls from failing with EINTR.  Returns false when
// sigaction fails.
// [FIX] memset requires <cstring>, which this translation unit did not
// include directly (see include block).
bool S3fsSignals::InitUsr2Handler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(struct sigaction));
    sa.sa_handler = S3fsSignals::HandlerUSR2;
    sa.sa_flags   = SA_RESTART;
    if(0 != sigaction(SIGUSR2, &sa, NULL)){
        return false;
    }
    return true;
}
|
||||
|
||||
void S3fsSignals::HandlerHUP(int sig)
|
||||
{
|
||||
if(SIGHUP == sig){
|
||||
S3fsLog::ReopenLogfile();
|
||||
}else{
|
||||
S3FS_PRN_ERR("The handler for SIGHUP received signal(%d)", sig);
|
||||
}
|
||||
}
|
||||
|
||||
// Install HandlerHUP for SIGHUP (log file reopen).  Returns false when
// sigaction fails.
bool S3fsSignals::InitHupHandler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(struct sigaction));
    sa.sa_handler = S3fsSignals::HandlerHUP;
    sa.sa_flags   = SA_RESTART;
    return (0 == sigaction(SIGHUP, &sa, NULL));
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods
|
||||
//-------------------------------------------------------------------
|
||||
// Install all signal handlers.  The SIGUSR1 (cache check) thread is only
// started when SetUsr1Handler() enabled it beforehand; SIGUSR2 and SIGHUP
// handlers are always installed.  Failures are logged but do not abort
// construction.
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
{
    if(S3fsSignals::enableUsr1){
        if(!InitUsr1Handler()){
            S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
        }
    }
    if(!S3fsSignals::InitUsr2Handler()){
        S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
    }
    if(!S3fsSignals::InitHupHandler()){
        S3FS_PRN_ERR("failed to initialize SIGHUP handler for reopen log file, but continue...");
    }
}
|
||||
|
||||
// Stop the SIGUSR1 worker thread when it was started; a failure to stop
// it is logged but not fatal.
S3fsSignals::~S3fsSignals()
{
    if(S3fsSignals::enableUsr1){
        if(!DestroyUsr1Handler()){
            S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
        }
    }
}
|
||||
|
||||
// Start the SIGUSR1 cache-check machinery: create the wakeup semaphore
// and worker thread, then install HandlerUSR1 for SIGUSR1.  On any
// failure everything created so far is rolled back.  Returns false when
// already initialized or on failure.
bool S3fsSignals::InitUsr1Handler()
{
    if(pThreadUsr1 || pSemUsr1){
        S3FS_PRN_ERR("Already run thread for SIGUSR1");
        return false;
    }

    // create thread
    int result;
    pSemUsr1    = new Semaphore(0);     // worker blocks on this until a signal arrives
    pThreadUsr1 = new pthread_t;
    if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
        S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
        delete pSemUsr1;
        delete pThreadUsr1;
        pSemUsr1    = NULL;
        pThreadUsr1 = NULL;
        return false;
    }

    // set handler
    struct sigaction sa;
    memset(&sa, 0, sizeof(struct sigaction));
    sa.sa_handler = S3fsSignals::HandlerUSR1;
    sa.sa_flags   = SA_RESTART;
    if(0 != sigaction(SIGUSR1, &sa, NULL)){
        S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
        // Roll back the thread/semaphore created above.
        DestroyUsr1Handler();
        return false;
    }

    return true;
}
|
||||
|
||||
// Stop and tear down the SIGUSR1 worker: clear the enable flag, wake the
// thread so it notices, join it, and free the thread/semaphore objects.
// Returns false when not initialized or when the join fails (in which
// case the objects are intentionally left allocated).
bool S3fsSignals::DestroyUsr1Handler()
{
    if(!pThreadUsr1 || !pSemUsr1){
        return false;
    }
    // for thread exit
    S3fsSignals::enableUsr1 = false;

    // wakeup thread
    pSemUsr1->post();

    // wait for thread exiting
    void* retval = NULL;
    int   result;
    if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
        S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
        return false;
    }
    delete pSemUsr1;
    delete pThreadUsr1;
    pSemUsr1    = NULL;
    pThreadUsr1 = NULL;

    return true;
}
|
||||
|
||||
bool S3fsSignals::WakeupUsr1Thread()
|
||||
{
|
||||
if(!pThreadUsr1 || !pSemUsr1){
|
||||
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
|
||||
return false;
|
||||
}
|
||||
pSemUsr1->post();
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
73
src/sighandlers.h
Normal file
73
src/sighandlers.h
Normal file
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SIGHANDLERS_H_
|
||||
#define S3FS_SIGHANDLERS_H_
|
||||
|
||||
#include "psemaphore.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsSignals
|
||||
//----------------------------------------------
|
||||
// Singleton that owns the process's signal handling:
//   SIGUSR1 -> run a cache consistency check on a dedicated worker thread
//              (opt-in via SetUsr1Handler)
//   SIGUSR2 -> bump the log level
//   SIGHUP  -> reopen the log file
// NOTE(review): this header uses pthread_t but only includes
// "psemaphore.h" — it appears to rely on that header (or includers)
// pulling in <pthread.h>; confirm.
class S3fsSignals
{
    private:
        static S3fsSignals* pSingleton;     // created by Initialize(), freed by Destroy()
        static bool enableUsr1;             // set by SetUsr1Handler() before construction

        pthread_t* pThreadUsr1;             // cache-check worker thread (NULL when disabled)
        Semaphore* pSemUsr1;                // wakeup semaphore posted by the SIGUSR1 handler

    protected:
        static S3fsSignals* get() { return pSingleton; }

        static void HandlerUSR1(int sig);
        static void* CheckCacheWorker(void* arg);

        static void HandlerUSR2(int sig);
        static bool InitUsr2Handler();

        static void HandlerHUP(int sig);
        static bool InitHupHandler();

        S3fsSignals();
        ~S3fsSignals();

        bool InitUsr1Handler();
        bool DestroyUsr1Handler();
        bool WakeupUsr1Thread();

    public:
        static bool Initialize();
        static bool Destroy();

        // Must be called before Initialize() for SIGUSR1 support to be enabled.
        static bool SetUsr1Handler(const char* path);
};
|
||||
|
||||
#endif // S3FS_SIGHANDLERS_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
@ -17,192 +17,571 @@
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <cerrno>
|
||||
#include <climits>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "string_util.h"
|
||||
|
||||
using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Gloval variables
|
||||
//-------------------------------------------------------------------
|
||||
const std::string SPACES = " \t\r\n";
|
||||
|
||||
static const char hexAlphabet[] = "0123456789ABCDEF";
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16)
|
||||
//-------------------------------------------------------------------
|
||||
// Templates
|
||||
//-------------------------------------------------------------------
|
||||
template <class T> std::string str(T value)
|
||||
{
|
||||
if(!str || '\0' == *str){
|
||||
return 0;
|
||||
}
|
||||
off_t result;
|
||||
bool chk_space;
|
||||
bool chk_base16_prefix;
|
||||
for(result = 0, chk_space = false, chk_base16_prefix = false; '\0' != *str; str++){
|
||||
// check head space
|
||||
if(!chk_space && isspace(*str)){
|
||||
continue;
|
||||
}else if(!chk_space){
|
||||
chk_space = true;
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
|
||||
template std::string str(short value);
|
||||
template std::string str(unsigned short value);
|
||||
template std::string str(int value);
|
||||
template std::string str(unsigned int value);
|
||||
template std::string str(long value);
|
||||
template std::string str(unsigned long value);
|
||||
template std::string str(long long value);
|
||||
template std::string str(unsigned long long value);
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Functions
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
bool s3fs_strtoofft(off_t* value, const char* str, int base)
|
||||
{
|
||||
if(value == NULL || str == NULL){
|
||||
return false;
|
||||
}
|
||||
// check prefix for base 16
|
||||
if(!chk_base16_prefix){
|
||||
chk_base16_prefix = true;
|
||||
if('0' == *str && ('x' == str[1] || 'X' == str[1])){
|
||||
is_base_16 = true;
|
||||
str++;
|
||||
continue;
|
||||
}
|
||||
errno = 0;
|
||||
char *temp;
|
||||
long long result = strtoll(str, &temp, base);
|
||||
|
||||
if(temp == str || *temp != '\0'){
|
||||
return false;
|
||||
}
|
||||
// check like isalnum and set data
|
||||
result *= (is_base_16 ? 16 : 10);
|
||||
if('0' <= *str || '9' < *str){
|
||||
result += static_cast<off_t>(*str - '0');
|
||||
}else if(is_base_16){
|
||||
if('A' <= *str && *str <= 'F'){
|
||||
result += static_cast<off_t>(*str - 'A' + 0x0a);
|
||||
}else if('a' <= *str && *str <= 'f'){
|
||||
result += static_cast<off_t>(*str - 'a' + 0x0a);
|
||||
}else{
|
||||
if((result == LLONG_MIN || result == LLONG_MAX) && errno == ERANGE){
|
||||
return false;
|
||||
}
|
||||
|
||||
*value = result;
|
||||
return true;
|
||||
}
|
||||
|
||||
off_t cvt_strtoofft(const char* str, int base)
|
||||
{
|
||||
off_t result = 0;
|
||||
if(!s3fs_strtoofft(&result, str, base)){
|
||||
S3FS_PRN_WARN("something error is occurred in convert std::string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string lower(std::string s)
|
||||
{
|
||||
// change each character of the std::string to lower case
|
||||
for(size_t i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
std::string trim_left(const std::string &s, const std::string &t /* = SPACES */)
|
||||
{
|
||||
std::string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
}
|
||||
|
||||
std::string trim_right(const std::string &s, const std::string &t /* = SPACES */)
|
||||
{
|
||||
std::string d(s);
|
||||
std::string::size_type i(d.find_last_not_of(t));
|
||||
if(i == std::string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return 0;
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
string lower(string s)
|
||||
std::string trim(const std::string &s, const std::string &t /* = SPACES */)
|
||||
{
|
||||
// change each character of the string to lower case
|
||||
for(unsigned int i = 0; i < s.length(); i++){
|
||||
s[i] = tolower(s[i]);
|
||||
}
|
||||
return s;
|
||||
return trim_left(trim_right(s, t), t);
|
||||
}
|
||||
|
||||
string IntToStr(int n)
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
//
|
||||
std::string urlEncode(const std::string &s)
|
||||
{
|
||||
stringstream result;
|
||||
result << n;
|
||||
return result.str();
|
||||
}
|
||||
|
||||
string trim_left(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return d.erase(0, s.find_first_not_of(t));
|
||||
}
|
||||
|
||||
string trim_right(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
string::size_type i(d.find_last_not_of(t));
|
||||
if(i == string::npos){
|
||||
return "";
|
||||
}else{
|
||||
return d.erase(d.find_last_not_of(t) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
string trim(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return trim_left(trim_right(d, t), t);
|
||||
}
|
||||
|
||||
/**
|
||||
* urlEncode a fuse path,
|
||||
* taking into special consideration "/",
|
||||
* otherwise regular urlEncode.
|
||||
*/
|
||||
string urlEncode(const string &s)
|
||||
{
|
||||
string result;
|
||||
for (unsigned i = 0; i < s.length(); ++i) {
|
||||
if (s[i] == '/') { // Note- special case for fuse paths...
|
||||
result += s[i];
|
||||
} else if (isalnum(s[i])) {
|
||||
result += s[i];
|
||||
} else if (s[i] == '.' || s[i] == '-' || s[i] == '*' || s[i] == '_') {
|
||||
result += s[i];
|
||||
} else if (s[i] == ' ') {
|
||||
result += '+';
|
||||
} else {
|
||||
result += "%";
|
||||
result += hexAlphabet[static_cast<unsigned char>(s[i]) / 16];
|
||||
result += hexAlphabet[static_cast<unsigned char>(s[i]) % 16];
|
||||
std::string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
unsigned char c = s[i];
|
||||
if (c == '/' // Note- special case for fuse paths...
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += s3fs_hex(&c, 1, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return result;
|
||||
//
|
||||
// urlEncode a fuse path,
|
||||
// taking into special consideration "/",
|
||||
// otherwise regular urlEncode.
|
||||
//
|
||||
std::string urlEncode2(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
for (size_t i = 0; i < s.length(); ++i) {
|
||||
unsigned char c = s[i];
|
||||
if (c == '=' // Note- special case for fuse paths...
|
||||
|| c == '&' // Note- special case for s3...
|
||||
|| c == '%'
|
||||
|| c == '.'
|
||||
|| c == '-'
|
||||
|| c == '_'
|
||||
|| c == '~'
|
||||
|| (c >= 'a' && c <= 'z')
|
||||
|| (c >= 'A' && c <= 'Z')
|
||||
|| (c >= '0' && c <= '9'))
|
||||
{
|
||||
result += c;
|
||||
}else{
|
||||
result += "%";
|
||||
result += s3fs_hex(&c, 1, false);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string urlDecode(const std::string& s)
|
||||
{
|
||||
std::string result;
|
||||
for(size_t i = 0; i < s.length(); ++i){
|
||||
if(s[i] != '%'){
|
||||
result += s[i];
|
||||
}else{
|
||||
int ch = 0;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
if(s.length() <= ++i){
|
||||
break; // wrong format.
|
||||
}
|
||||
ch *= 16;
|
||||
ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
|
||||
result += static_cast<char>(ch);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool takeout_str_dquart(std::string& str)
|
||||
{
|
||||
size_t pos;
|
||||
|
||||
// '"' for start
|
||||
if(std::string::npos != (pos = str.find_first_of('\"'))){
|
||||
str.erase(0, pos + 1);
|
||||
|
||||
// '"' for end
|
||||
if(std::string::npos == (pos = str.find_last_of('\"'))){
|
||||
return false;
|
||||
}
|
||||
str.erase(pos);
|
||||
if(std::string::npos != str.find_first_of('\"')){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// ex. target="http://......?keyword=value&..."
|
||||
//
|
||||
bool get_keyword_value(string& target, const char* keyword, string& value)
|
||||
bool get_keyword_value(const std::string& target, const char* keyword, std::string& value)
|
||||
{
|
||||
if(!keyword){
|
||||
return false;
|
||||
}
|
||||
size_t spos;
|
||||
size_t epos;
|
||||
if(string::npos == (spos = target.find(keyword))){
|
||||
return false;
|
||||
}
|
||||
spos += strlen(keyword);
|
||||
if('=' != target.at(spos)){
|
||||
return false;
|
||||
}
|
||||
spos++;
|
||||
if(string::npos == (epos = target.find('&', spos))){
|
||||
value = target.substr(spos);
|
||||
}else{
|
||||
value = target.substr(spos, (epos - spos));
|
||||
}
|
||||
return true;
|
||||
if(!keyword){
|
||||
return false;
|
||||
}
|
||||
size_t spos;
|
||||
size_t epos;
|
||||
if(std::string::npos == (spos = target.find(keyword))){
|
||||
return false;
|
||||
}
|
||||
spos += strlen(keyword);
|
||||
if('=' != target[spos]){
|
||||
return false;
|
||||
}
|
||||
spos++;
|
||||
if(std::string::npos == (epos = target.find('&', spos))){
|
||||
value = target.substr(spos);
|
||||
}else{
|
||||
value = target.substr(spos, (epos - spos));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
string prepare_url(const char* url)
|
||||
//
|
||||
// Returns the current date
|
||||
// in a format suitable for a HTTP request header.
|
||||
//
|
||||
std::string get_date_rfc850()
|
||||
{
|
||||
FPRNINFO("URL is %s", url);
|
||||
|
||||
string uri;
|
||||
string host;
|
||||
string path;
|
||||
string url_str = str(url);
|
||||
string token = str("/" + bucket);
|
||||
int bucket_pos = url_str.find(token);
|
||||
int bucket_length = token.size();
|
||||
int uri_length = 7;
|
||||
|
||||
if(!strncasecmp(url_str.c_str(), "https://", 8)){
|
||||
uri_length = 8;
|
||||
}
|
||||
uri = url_str.substr(0, uri_length);
|
||||
host = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length).c_str();
|
||||
path = url_str.substr((bucket_pos + bucket_length));
|
||||
|
||||
url_str = uri + host + path;
|
||||
|
||||
FPRNINFO("URL changed is %s", url_str.c_str());
|
||||
|
||||
return str(url_str);
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&t, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the current date
|
||||
* in a format suitable for a HTTP request header.
|
||||
*/
|
||||
string get_date()
|
||||
void get_date_sigv3(std::string& date, std::string& date8601)
|
||||
{
|
||||
char buf[100];
|
||||
time_t t = time(NULL);
|
||||
strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t));
|
||||
return buf;
|
||||
time_t tm = time(NULL);
|
||||
date = get_date_string(tm);
|
||||
date8601 = get_date_iso8601(tm);
|
||||
}
|
||||
|
||||
std::string get_date_string(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%d", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
std::string get_date_iso8601(time_t tm)
|
||||
{
|
||||
char buf[100];
|
||||
struct tm res;
|
||||
strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime_r(&tm, &res));
|
||||
return buf;
|
||||
}
|
||||
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
{
|
||||
if(!pdate){
|
||||
return false;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
}
|
||||
unixtime = mktime(&tm);
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Convert to unixtime from std::string which formatted by following:
|
||||
// "12Y12M12D12h12m12s", "86400s", "9h30m", etc
|
||||
//
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
|
||||
{
|
||||
if(!argv){
|
||||
return false;
|
||||
}
|
||||
unixtime = 0;
|
||||
const char* ptmp;
|
||||
int last_unit_type = 0; // unit flag.
|
||||
bool is_last_number;
|
||||
time_t tmptime;
|
||||
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
|
||||
if('0' <= *ptmp && *ptmp <= '9'){
|
||||
tmptime *= 10;
|
||||
tmptime += static_cast<time_t>(*ptmp - '0');
|
||||
is_last_number = true;
|
||||
}else if(is_last_number){
|
||||
if('Y' == *ptmp && 1 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
|
||||
last_unit_type = 1;
|
||||
}else if('M' == *ptmp && 2 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
|
||||
last_unit_type = 2;
|
||||
}else if('D' == *ptmp && 3 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24));
|
||||
last_unit_type = 3;
|
||||
}else if('h' == *ptmp && 4 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60));
|
||||
last_unit_type = 4;
|
||||
}else if('m' == *ptmp && 5 > last_unit_type){
|
||||
unixtime += (tmptime * 60);
|
||||
last_unit_type = 5;
|
||||
}else if('s' == *ptmp && 6 > last_unit_type){
|
||||
unixtime += tmptime;
|
||||
last_unit_type = 6;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
tmptime = 0;
|
||||
is_last_number = false;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(is_last_number){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length, bool lower)
|
||||
{
|
||||
static const char hexLower[] = "0123456789abcdef";
|
||||
static const char hexUpper[] = "0123456789ABCDEF";
|
||||
|
||||
const char* hexAlphabet = (lower ? hexLower : hexUpper);
|
||||
std::string hex;
|
||||
for(size_t pos = 0; pos < length; ++pos){
|
||||
hex += hexAlphabet[input[pos] / 16];
|
||||
hex += hexAlphabet[input[pos] % 16];
|
||||
}
|
||||
return hex;
|
||||
}
|
||||
|
||||
char* s3fs_base64(const unsigned char* input, size_t length)
|
||||
{
|
||||
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
result = new char[((length / 3) + 1) * 4 + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
|
||||
parts[0] = (input[rpos] & 0xfc) >> 2;
|
||||
parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
|
||||
parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
|
||||
parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
|
||||
|
||||
result[wpos++] = base[parts[0]];
|
||||
result[wpos++] = base[parts[1]];
|
||||
result[wpos++] = base[parts[2]];
|
||||
result[wpos++] = base[parts[3]];
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
inline unsigned char char_decode64(const char ch)
|
||||
{
|
||||
unsigned char by;
|
||||
if('A' <= ch && ch <= 'Z'){ // A - Z
|
||||
by = static_cast<unsigned char>(ch - 'A');
|
||||
}else if('a' <= ch && ch <= 'z'){ // a - z
|
||||
by = static_cast<unsigned char>(ch - 'a' + 26);
|
||||
}else if('0' <= ch && ch <= '9'){ // 0 - 9
|
||||
by = static_cast<unsigned char>(ch - '0' + 52);
|
||||
}else if('+' == ch){ // +
|
||||
by = 62;
|
||||
}else if('/' == ch){ // /
|
||||
by = 63;
|
||||
}else if('=' == ch){ // =
|
||||
by = 64;
|
||||
}else{ // something wrong
|
||||
by = UCHAR_MAX;
|
||||
}
|
||||
return by;
|
||||
}
|
||||
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
{
|
||||
unsigned char* result;
|
||||
if(!input || 0 == strlen(input) || !plength){
|
||||
return NULL;
|
||||
}
|
||||
result = new unsigned char[strlen(input) + 1];
|
||||
|
||||
unsigned char parts[4];
|
||||
size_t input_len = strlen(input);
|
||||
size_t rpos;
|
||||
size_t wpos;
|
||||
for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
|
||||
parts[0] = char_decode64(input[rpos]);
|
||||
parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
|
||||
parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
|
||||
parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
|
||||
|
||||
result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
|
||||
if(64 == parts[2]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
|
||||
if(64 == parts[3]){
|
||||
break;
|
||||
}
|
||||
result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
|
||||
}
|
||||
result[wpos] = '\0';
|
||||
*plength = wpos;
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// detect and rewrite invalid utf8. We take invalid bytes
|
||||
// and encode them into a private region of the unicode
|
||||
// space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
// it is necessary because S3 validates the utf8 used for identifiers for
|
||||
// correctness, while some clients may provide invalid utf, notably
|
||||
// windows using cp1252.
|
||||
//
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8ff
|
||||
// is a private range, se use the start of this range.
|
||||
static unsigned int escape_base = 0xe000;
|
||||
|
||||
// encode bytes into wobbly utf8.
|
||||
// 'result' can be null. returns true if transform was needed.
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result)
|
||||
{
|
||||
bool invalid = false;
|
||||
|
||||
// Pass valid utf8 code through
|
||||
for (; *s; s++) {
|
||||
const unsigned char c = *s;
|
||||
|
||||
// single byte encoding
|
||||
if (c <= 0x7f) {
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// otherwise, it must be one of the valid start bytes
|
||||
if ( c >= 0xc2 && c <= 0xf5 ) {
|
||||
// two byte encoding
|
||||
// don't need bounds check, std::string is zero terminated
|
||||
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
|
||||
// all two byte encodings starting higher than c1 are valid
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// three byte encoding
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
|
||||
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xd8ff)) {
|
||||
// not overlong and not a surrogate pair
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// four byte encoding
|
||||
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
|
||||
if (code >= 0x10000 && code <= 0x10ffff) {
|
||||
// not overlong and in defined unicode space
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// printf("invalid %02x at %d\n", c, i);
|
||||
// Invalid utf8 code. Convert it to a private two byte area of unicode
|
||||
// e.g. the e000 - f8ff area. This will be a three byte encoding
|
||||
invalid = true;
|
||||
if (result) {
|
||||
unsigned escape = escape_base + c;
|
||||
*result += static_cast<char>(0xe0 | ((escape >> 12) & 0x0f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 06) & 0x3f));
|
||||
*result += static_cast<char>(0x80 | ((escape >> 00) & 0x3f));
|
||||
}
|
||||
}
|
||||
return invalid;
|
||||
}
|
||||
|
||||
std::string s3fs_wtf8_encode(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// The reverse operation, turn encoded bytes back into their original values
|
||||
// The code assumes that we map to a three-byte code point.
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result)
|
||||
{
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if(result){
|
||||
*result += static_cast<char>(code - escape_base);
|
||||
}
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result) {
|
||||
*result += c;
|
||||
}
|
||||
}
|
||||
return encoded;
|
||||
}
|
||||
|
||||
std::string s3fs_wtf8_decode(const std::string &s)
|
||||
{
|
||||
std::string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
@ -1,34 +1,120 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_STRING_UTIL_H_
|
||||
#define S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
* A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
*/
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
//
|
||||
// A collection of string utilities for manipulating URLs and HTTP responses.
|
||||
//
|
||||
//-------------------------------------------------------------------
|
||||
// Gloval variables
|
||||
//-------------------------------------------------------------------
|
||||
extern const std::string SPACES;
|
||||
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
//-------------------------------------------------------------------
|
||||
// Inline functions
|
||||
//-------------------------------------------------------------------
|
||||
static inline int is_prefix(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; }
|
||||
static inline const char* SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }
|
||||
|
||||
#define SPACES " \t\r\n"
|
||||
#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
|
||||
//-------------------------------------------------------------------
|
||||
// Templates
|
||||
//-------------------------------------------------------------------
|
||||
template <class T> std::string str(T value);
|
||||
|
||||
template<typename T> std::string str(T value) {
|
||||
std::stringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
//-------------------------------------------------------------------
|
||||
// Macros(WTF8)
|
||||
//-------------------------------------------------------------------
|
||||
#define WTF8_ENCODE(ARG) \
|
||||
std::string ARG##_buf; \
|
||||
const char * ARG = _##ARG; \
|
||||
if (use_wtf8 && s3fs_wtf8_encode( _##ARG, 0 )) { \
|
||||
s3fs_wtf8_encode( _##ARG, &ARG##_buf); \
|
||||
ARG = ARG##_buf.c_str(); \
|
||||
}
|
||||
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
|
||||
//-------------------------------------------------------------------
|
||||
// Utilities
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Convert string to off_t. Returns false on bad input.
|
||||
// Replacement for C++11 std::stoll.
|
||||
//
|
||||
bool s3fs_strtoofft(off_t* value, const char* str, int base = 0);
|
||||
//
|
||||
// This function returns 0 if a value that cannot be converted is specified.
|
||||
// Only call if 0 is considered an error and the operation can continue.
|
||||
//
|
||||
off_t cvt_strtoofft(const char* str, int base = 0);
|
||||
|
||||
//
|
||||
// String Manipulation
|
||||
//
|
||||
std::string trim_left(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim_right(const std::string &s, const std::string &t = SPACES);
|
||||
std::string trim(const std::string &s, const std::string &t = SPACES);
|
||||
std::string lower(std::string s);
|
||||
std::string IntToStr(int);
|
||||
std::string get_date();
|
||||
|
||||
//
|
||||
// Date string
|
||||
//
|
||||
std::string get_date_rfc850();
|
||||
void get_date_sigv3(std::string& date, std::string& date8601);
|
||||
std::string get_date_string(time_t tm);
|
||||
std::string get_date_iso8601(time_t tm);
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
|
||||
//
|
||||
// For encoding
|
||||
//
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_keyword_value(std::string& target, const char* keyword, std::string& value);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
|
||||
bool takeout_str_dquart(std::string& str);
|
||||
bool get_keyword_value(const std::string& target, const char* keyword, std::string& value);
|
||||
|
||||
//
|
||||
// For binary string
|
||||
//
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length, bool lower = true);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
|
||||
//
|
||||
// WTF8
|
||||
//
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
#endif // S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
|
||||
93
src/test_curl_util.cpp
Normal file
93
src/test_curl_util.cpp
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2020 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <string>
|
||||
#include <cstring>
|
||||
|
||||
#include "curl_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
#define ASSERT_IS_SORTED(x) assert_is_sorted((x), __FILE__, __LINE__)
|
||||
|
||||
void assert_is_sorted(struct curl_slist* list, const char *file, int line)
|
||||
{
|
||||
for(; list != NULL; list = list->next){
|
||||
std::string key1 = list->data;
|
||||
key1.erase(key1.find(':'));
|
||||
std::string key2 = list->data;
|
||||
key2.erase(key2.find(':'));
|
||||
std::cerr << "key1: " << key1 << " key2: " << key2 << std::endl;
|
||||
|
||||
if(strcasecmp(key1.c_str(), key2.c_str()) > 0){
|
||||
std::cerr << "not sorted: " << key1 << " " << key2 << " at " << file << ":" << line << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
}
|
||||
|
||||
size_t curl_slist_length(const struct curl_slist* list)
|
||||
{
|
||||
size_t len = 0;
|
||||
for(; list != NULL; list = list->next){
|
||||
++len;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
void test_sort_insert()
|
||||
{
|
||||
struct curl_slist* list = NULL;
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to head
|
||||
list = curl_slist_sort_insert(list, "2", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to tail
|
||||
list = curl_slist_sort_insert(list, "4", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add in between
|
||||
list = curl_slist_sort_insert(list, "3", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
// add to head
|
||||
list = curl_slist_sort_insert(list, "1", "val");
|
||||
ASSERT_IS_SORTED(list);
|
||||
ASSERT_STREQUALS("1: val", list->data);
|
||||
// replace head
|
||||
list = curl_slist_sort_insert(list, "1", "val2");
|
||||
ASSERT_IS_SORTED(list);
|
||||
ASSERT_EQUALS(static_cast<size_t>(4), curl_slist_length(list));
|
||||
ASSERT_STREQUALS("1: val2", list->data);
|
||||
curl_slist_free_all(list);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
test_sort_insert();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
160
src/test_string_util.cpp
Normal file
160
src/test_string_util.cpp
Normal file
@ -0,0 +1,160 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
#include <limits>
|
||||
#include <stdint.h>
|
||||
#include <strings.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "common.h"
|
||||
#include "s3fs.h"
|
||||
#include "string_util.h"
|
||||
#include "test_util.h"
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables for test_string_util
|
||||
//-------------------------------------------------------------------
|
||||
bool foreground = false;
|
||||
std::string instance_name;
|
||||
|
||||
void test_trim()
|
||||
{
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_left("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
|
||||
ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
|
||||
ASSERT_EQUALS(std::string("1234"), trim_right("1234"));
|
||||
|
||||
ASSERT_EQUALS(std::string("0"), str(0));
|
||||
ASSERT_EQUALS(std::string("1"), str(1));
|
||||
ASSERT_EQUALS(std::string("-1"), str(-1));
|
||||
ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
|
||||
ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
|
||||
ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
|
||||
}
|
||||
|
||||
void test_base64()
|
||||
{
|
||||
size_t len;
|
||||
ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);
|
||||
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(1));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(2));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(3));
|
||||
ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
|
||||
ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
|
||||
ASSERT_EQUALS(len, static_cast<size_t>(4));
|
||||
|
||||
// TODO: invalid input
|
||||
}
|
||||
|
||||
void test_strtoofft()
|
||||
{
|
||||
off_t value;
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "0"));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(0L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "9"));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(9L));
|
||||
|
||||
ASSERT_FALSE(s3fs_strtoofft(&value, "A"));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "A", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(10L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "F", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(15L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "a", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(10L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "f", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(15L));
|
||||
|
||||
ASSERT_TRUE(s3fs_strtoofft(&value, "deadbeef", /*base=*/ 16));
|
||||
ASSERT_EQUALS(value, static_cast<off_t>(3735928559L));
|
||||
}
|
||||
|
||||
// Verify the WTF-8 encode/decode pair: well-formed input (ASCII, valid
// UTF-8) passes through both directions unchanged, while byte sequences
// that are not valid UTF-8 are altered by encoding but restored exactly by
// a subsequent decode (lossless round trip).
void test_wtf8_encoding()
{
    std::string ascii("normal std::string");
    // the same sentence once as valid UTF-8 bytes and once as cp1252 bytes
    std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
    std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
    // overwrite one byte so the string is no longer valid UTF-8
    // (the NEQUALS assertion below confirms the encoder treats it as invalid)
    std::string broken = utf8;
    broken[14] = 0x97;
    std::string mixed = ascii + utf8 + cp1252;

    // valid input: both encode and decode are the identity
    ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
    ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
    ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
    ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);

    // invalid input: encoding changes the bytes, decoding restores them
    ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
    ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);

    ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
    ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);

    ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
    ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
}
|
||||
|
||||
// Entry point for the string_util unit tests.  Each test terminates the
// process on its first failing assertion, so reaching the final return
// means every assertion passed.  argc/argv are unused.
int main(int argc, char *argv[])
{
    // S3fsLog instance held for the tests' duration
    // (presumably initializes the logging facility -- confirm in s3fs.h)
    S3fsLog singletonLog;

    test_trim();
    test_base64();
    test_strtoofft();
    test_wtf8_encoding();

    return 0;
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
94
src/test_util.h
Normal file
94
src/test_util.h
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_TEST_UTIL_H_
|
||||
#define S3FS_TEST_UTIL_H_
|
||||
|
||||
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <stdio.h>

#include "string_util.h"
|
||||
|
||||
// Generic equality assertion for the unit tests: on mismatch, print both
// values and the call site, then terminate the whole test binary with exit
// status 1.  T must support operator!= and stream insertion.
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
{
    if (x != y) {
        std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl
                  << std::endl;
        std::exit(1);
    }
}
|
||||
|
||||
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Generic inequality assertion: the inverse of assert_equals.  If the two
// values compare equal, print them with the call site and terminate the
// test binary with exit status 1.
template <typename T> void assert_nequals(const T &x, const T &y, const char *file, int line)
{
    if (!(x == y)) {
        return;
    }
    std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
    std::exit(1);
}
|
||||
|
||||
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << s3fs_hex(reinterpret_cast<const unsigned char *>(x.c_str()), x.size()) << std::endl;
|
||||
std::cerr << s3fs_hex(reinterpret_cast<const unsigned char *>(y.c_str()), y.size()) << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// C-string equality assertion.  NULL == NULL is considered equal; one-sided
// NULL or differing contents prints both values ("null" for NULL) with the
// call site and terminates the test binary with exit status 1.
//
// "inline" added: this is a non-template function defined in a header, so
// without inline every translation unit including test_util.h would emit
// its own definition and break linking when several test binaries' objects
// share it.
inline void assert_strequals(const char *x, const char *y, const char *file, int line)
{
    if(x == NULL && y == NULL){
        return;
    // cppcheck-suppress nullPointerRedundantCheck
    } else if(x == NULL || y == NULL || strcmp(x, y) != 0){
        std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
        std::exit(1);
    }
}
|
||||
|
||||
// Assertion entry points used by the unit tests.  Each macro captures the
// call site via __FILE__/__LINE__; on failure the underlying function
// prints a diagnostic and exits the process with status 1.
#define ASSERT_TRUE(x) assert_equals((x), true, __FILE__, __LINE__)
#define ASSERT_FALSE(x) assert_equals((x), false, __FILE__, __LINE__)
#define ASSERT_EQUALS(x, y) assert_equals((x), (y), __FILE__, __LINE__)
#define ASSERT_NEQUALS(x, y) assert_nequals((x), (y), __FILE__, __LINE__)
#define ASSERT_STREQUALS(x, y) assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#endif // S3FS_TEST_UTIL_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
310
src/types.h
Normal file
310
src/types.h
Normal file
@ -0,0 +1,310 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_TYPES_H_
|
||||
#define S3FS_TYPES_H_
|
||||
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <list>
|
||||
#include <vector>
|
||||
|
||||
//
|
||||
// For extended attribute
|
||||
// (HAVE_XXX symbols are defined in config.h)
|
||||
//
|
||||
#ifdef HAVE_SYS_EXTATTR_H
|
||||
#include <sys/extattr.h>
|
||||
#elif HAVE_ATTR_XATTR_H
|
||||
#include <attr/xattr.h>
|
||||
#elif HAVE_SYS_XATTR_H
|
||||
#include <sys/xattr.h>
|
||||
#endif
|
||||
|
||||
#if __cplusplus < 201103L
|
||||
#define OPERATOR_EXPLICIT
|
||||
#else
|
||||
#define OPERATOR_EXPLICIT explicit
|
||||
#endif
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// xattrs_t
|
||||
//-------------------------------------------------------------------
|
||||
//
|
||||
// Header "x-amz-meta-xattr" is for extended attributes.
|
||||
// This header is url encoded string which is json formatted.
|
||||
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
|
||||
//
|
||||
// Value of one extended attribute: a raw byte buffer plus its length.
// The destructor owns and frees pvalue (allocated with new[]).
// NOTE(review): no copy constructor/assignment is defined, so copying an
// xattr_value by value would double-free pvalue; instances are held by
// pointer (PXATTRVAL) in xattrs_t, which sidesteps this -- confirm callers
// never copy.
typedef struct xattr_value
{
    unsigned char* pvalue;   // heap buffer owned by this object (may be NULL)
    size_t length;           // number of valid bytes in pvalue

    explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
    ~xattr_value()
    {
        delete[] pvalue;
    }
}XATTRVAL, *PXATTRVAL;

// attribute name -> owned value pointer
typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// storage_class_t
|
||||
//-------------------------------------------------------------------
|
||||
// Type-safe wrapper around the S3 storage-class setting.  The enum lives
// inside a class so the private, undefined operator bool below can block
// accidental use in boolean context, while the implicit conversion to
// Value keeps switch() and comparisons working.
class storage_class_t{
    public:
        enum Value{
            STANDARD,
            STANDARD_IA,
            ONEZONE_IA,
            REDUCED_REDUNDANCY,
            INTELLIGENT_TIERING,
            GLACIER,
            DEEP_ARCHIVE,
            UNKNOWN   // any string from_str() does not recognize
        };

        // Implicit by design: lets bare enum constants convert to
        // storage_class_t at call sites.
        // cppcheck-suppress noExplicitConstructor
        storage_class_t(Value value) : value_(value) {}

        operator Value() const { return value_; }

        // Canonical string for this storage class, or NULL for UNKNOWN.
        const char* str() const
        {
            switch(value_){
            case STANDARD:
                return "STANDARD";
            case STANDARD_IA:
                return "STANDARD_IA";
            case ONEZONE_IA:
                return "ONEZONE_IA";
            case REDUCED_REDUNDANCY:
                return "REDUCED_REDUNDANCY";
            case INTELLIGENT_TIERING:
                return "INTELLIGENT_TIERING";
            case GLACIER:
                return "GLACIER";
            case DEEP_ARCHIVE:
                return "DEEP_ARCHIVE";
            case UNKNOWN:
                return NULL;
            }
            // unreachable: every enumerator is handled above
            abort();
        }

        // Parse a lower-case storage-class name; anything unrecognized maps
        // to UNKNOWN rather than failing.
        static storage_class_t from_str(const char* str)
        {
            if(0 == strcmp(str, "standard")){
                return STANDARD;
            }else if(0 == strcmp(str, "standard_ia")){
                return STANDARD_IA;
            }else if(0 == strcmp(str, "onezone_ia")){
                return ONEZONE_IA;
            }else if(0 == strcmp(str, "reduced_redundancy")){
                return REDUCED_REDUNDANCY;
            }else if(0 == strcmp(str, "intelligent_tiering")){
                return INTELLIGENT_TIERING;
            }else if(0 == strcmp(str, "glacier")){
                return GLACIER;
            }else if(0 == strcmp(str, "deep_archive")){
                return DEEP_ARCHIVE;
            }else{
                return UNKNOWN;
            }
        }

    private:
        // Declared but never defined: forbids boolean tests such as
        // "if(storage_class)" (fails at compile or link time).
        OPERATOR_EXPLICIT operator bool();
        Value value_;
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// acl_t
|
||||
//-------------------------------------------------------------------
|
||||
// Type-safe wrapper for an S3 canned-ACL value (the strings below are the
// canned ACL names).  Same pattern as storage_class_t: implicit conversion
// to Value for switch()/comparison, while a private undefined operator
// bool blocks accidental boolean use.
class acl_t{
    public:
        enum Value{
            PRIVATE,
            PUBLIC_READ,
            PUBLIC_READ_WRITE,
            AWS_EXEC_READ,
            AUTHENTICATED_READ,
            BUCKET_OWNER_READ,
            BUCKET_OWNER_FULL_CONTROL,
            LOG_DELIVERY_WRITE,
            UNKNOWN   // any string from_str() does not recognize
        };

        // Implicit by design: lets bare enum constants convert to acl_t.
        // cppcheck-suppress noExplicitConstructor
        acl_t(Value value) : value_(value) {}

        operator Value() const { return value_; }

        // Canonical canned-ACL string, or NULL for UNKNOWN.
        const char* str() const
        {
            switch(value_){
            case PRIVATE:
                return "private";
            case PUBLIC_READ:
                return "public-read";
            case PUBLIC_READ_WRITE:
                return "public-read-write";
            case AWS_EXEC_READ:
                return "aws-exec-read";
            case AUTHENTICATED_READ:
                return "authenticated-read";
            case BUCKET_OWNER_READ:
                return "bucket-owner-read";
            case BUCKET_OWNER_FULL_CONTROL:
                return "bucket-owner-full-control";
            case LOG_DELIVERY_WRITE:
                return "log-delivery-write";
            case UNKNOWN:
                return NULL;
            }
            // unreachable: every enumerator is handled above
            abort();
        }

        // Parse a canned-ACL name; anything unrecognized maps to UNKNOWN.
        static acl_t from_str(const char *acl)
        {
            if(0 == strcmp(acl, "private")){
                return PRIVATE;
            }else if(0 == strcmp(acl, "public-read")){
                return PUBLIC_READ;
            }else if(0 == strcmp(acl, "public-read-write")){
                return PUBLIC_READ_WRITE;
            }else if(0 == strcmp(acl, "aws-exec-read")){
                return AWS_EXEC_READ;
            }else if(0 == strcmp(acl, "authenticated-read")){
                return AUTHENTICATED_READ;
            }else if(0 == strcmp(acl, "bucket-owner-read")){
                return BUCKET_OWNER_READ;
            }else if(0 == strcmp(acl, "bucket-owner-full-control")){
                return BUCKET_OWNER_FULL_CONTROL;
            }else if(0 == strcmp(acl, "log-delivery-write")){
                return LOG_DELIVERY_WRITE;
            }else{
                return UNKNOWN;
            }
        }

    private:
        // Declared but never defined: forbids boolean tests of an acl_t.
        OPERATOR_EXPLICIT operator bool();
        Value value_;
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// sse_type_t
|
||||
//-------------------------------------------------------------------
|
||||
// Which server-side-encryption scheme is in effect.  Same enum-in-class
// pattern as storage_class_t/acl_t, although here the boolean-blocking
// operator was left commented out.
class sse_type_t{
    public:
        enum Value{
            SSE_DISABLE = 0, // not use server side encrypting
            SSE_S3,          // server side encrypting by S3 key
            SSE_C,           // server side encrypting by custom key
            SSE_KMS          // server side encrypting by kms id
        };

        // Implicit by design: lets bare enum constants convert to sse_type_t.
        // cppcheck-suppress noExplicitConstructor
        sse_type_t(Value value) : value_(value) {}

        operator Value() const { return value_; }

    private:
        //OPERATOR_EXPLICIT operator bool();
        Value value_;
};
|
||||
|
||||
// Which AWS request-signature version(s) may be used; V2_OR_V4 leaves the
// choice to the request code (selection logic lives elsewhere).
enum signature_type_t {
    V2_ONLY,
    V4_ONLY,
    V2_OR_V4
};
|
||||
|
||||
//----------------------------------------------
|
||||
// etaglist_t / filepart
|
||||
//----------------------------------------------
|
||||
typedef std::list<std::string> etaglist_t;
|
||||
|
||||
//
|
||||
// Each part information for Multipart upload
|
||||
//
|
||||
//
// Each part information for Multipart upload
//
// Describes one part of a multipart upload: the region of the local file it
// covers, whether it has been uploaded, and where its etag is recorded.
struct filepart
{
    bool uploaded; // does finish uploading
    std::string etag; // expected etag value
    int fd; // base file(temporary full file) descriptor
    off_t startpos; // seek fd point for uploading
    off_t size; // uploading size
    std::string* petag; // use only parallel upload

    filepart() : uploaded(false), fd(-1), startpos(0), size(-1), petag(NULL) {}
    ~filepart()
    {
        clear();
    }

    // Reset every field to its default-constructed state (size/fd use the
    // same sentinel values as the constructor: -1).
    void clear()
    {
        uploaded = false;
        etag = "";
        fd = -1;
        startpos = 0;
        size = -1;
        petag = NULL;
    }

    // Append an empty slot to the caller's etag list and remember its
    // address in petag so the result can be written there later.  Holding
    // the pointer is safe because std::list never relocates its elements
    // when the list grows.
    void add_etag_list(etaglist_t* list)
    {
        list->push_back(std::string());
        petag = &list->back();
    }
};
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// mimes_t
|
||||
//-------------------------------------------------------------------
|
||||
// Strict-weak "less than" ordering over std::string that ignores ASCII
// case, so mimes_t lookups treat e.g. "TXT" and "txt" as the same key.
struct case_insensitive_compare_func
{
    bool operator()(const std::string& a, const std::string& b) const {
        int order = strcasecmp(a.c_str(), b.c_str());
        return order < 0;
    }
};
// file extension -> MIME type, keyed case-insensitively
typedef std::map<std::string, std::string, case_insensitive_compare_func> mimes_t;
|
||||
|
||||
//-------------------------------------------------------------------
// Typedefs specialized for use
//-------------------------------------------------------------------
typedef std::list<std::string> readline_t;            // ordered list of text lines
typedef std::map<std::string, std::string> kvmap_t;   // generic key -> value map
typedef std::map<std::string, kvmap_t> bucketkvmap_t; // kvmap_t grouped by key (presumably bucket name -- confirm at callers)
|
||||
|
||||
#endif // S3FS_TYPES_H_
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
* vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
* vim<600: expandtab sw=4 ts=4
|
||||
*/
|
||||
@ -1,10 +1,40 @@
|
||||
######################################################################
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
######################################################################
|
||||
|
||||
TESTS=small-integration-test.sh
|
||||
|
||||
EXTRA_DIST = \
|
||||
integration-test-common.sh \
|
||||
require-root.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
integration-test-common.sh \
|
||||
require-root.sh \
|
||||
small-integration-test.sh \
|
||||
mergedir.sh \
|
||||
sample_delcache.sh \
|
||||
sample_ahbe.conf
|
||||
|
||||
testdir = test
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
2
test/chaos-http-proxy.conf
Normal file
2
test/chaos-http-proxy.conf
Normal file
@ -0,0 +1,2 @@
|
||||
com.bouncestorage.chaoshttpproxy.http_503=1
|
||||
com.bouncestorage.chaoshttpproxy.success=9
|
||||
141
test/filter-suite-log.sh
Executable file
141
test/filter-suite-log.sh
Executable file
@ -0,0 +1,141 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
# Print usage for this script.
func_usage()
{
    echo ""
    echo "Usage: $1 [-h] <log file path>"
    echo " -h print help"
    echo " log file path path for test-suite.log"
    echo ""
}

PRGNAME=`basename $0`
SCRIPTDIR=`dirname $0`
S3FSDIR=`cd ${SCRIPTDIR}/..; pwd`
TOPDIR=`cd ${S3FSDIR}/test; pwd`
SUITELOG="${TOPDIR}/test-suite.log"
TMP_LINENO_FILE="/tmp/.lineno.tmp"

# Parse arguments: -h/-H/--help/--HELP prints usage; any other argument
# replaces the default test-suite.log path.
while [ $# -ne 0 ]; do
    if [ "X$1" = "X" ]; then
        break
    elif [ "X$1" = "X-h" -o "X$1" = "X-H" -o "X$1" = "X--help" -o "X$1" = "X--HELP" ]; then
        func_usage ${PRGNAME}
        exit 0
    else
        SUITELOG=$1
    fi
    shift
done
if [ ! -f ${SUITELOG} ]; then
    echo "[ERROR] not found ${SUITELOG} log file."
    exit 1
fi

#
# Extract keyword line numbers and types
#
# 0 : normal line
# 1 : start line for one small test(specified in integration-test-main.sh)
# 2 : passed line of end of one small test(specified in test-utils.sh)
# 3 : failed line of end of one small test(specified in test-utils.sh)
#
# Each output line in TMP_LINENO_FILE is "<line number> <line type>".
grep -n -e 'test_.*: ".*"' -o -e 'test_.* passed' -o -e 'test_.* failed' ${SUITELOG} 2>/dev/null | sed 's/:test_.*: ".*"/ 1/g' | sed 's/:test_.* passed/ 2/g' | sed 's/:test_.* failed/ 3/g' > ${TMP_LINENO_FILE}

#
# Loop for printing result
#
# State machine over consecutive keyword lines: for each region of the log
# between the previous keyword and the current one, decide whether to print
# it in full (a test that failed or never reported an end) or filtered
# (progress percentages and s3fs informational messages removed).
# NOTE(review): "grep -v -e '^s3fs: ' -a -e '\[INF\]'" suppresses lines
# matching EITHER pattern (-a is the binary-as-text flag, not an AND);
# confirm that suppressing both kinds of line is the intent.
#
prev_line_type=0
prev_line_number=1
while read line; do
    # line is "<line number> <line type>"
    number_type=($line)

    head_line_cnt=`expr ${number_type[0]} - 1`
    tail_line_cnt=`expr ${number_type[0]} - ${prev_line_number}`

    if [ ${number_type[1]} -eq 2 ]; then
        echo ""
    fi
    if [ ${prev_line_type} -eq 1 ]; then
        if [ ${number_type[1]} -eq 2 ]; then
            # if passed, cut s3fs information messages
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        elif [ ${number_type[1]} -eq 3 ]; then
            # if failed, print all
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
        else
            # there is start keyword but not end keyword, so print all
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
        fi
    elif [ ${prev_line_type} -eq 2 -o ${prev_line_type} -eq 3 ]; then
        if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
            # previous was the end of a test, but this is another end marker
            # without a start keyword; print all
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
        else
            # this area is not from start to end, cut s3fs information messages
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        fi
    else
        if [ ${number_type[1]} -eq 2 -o ${number_type[1]} -eq 3 ]; then
            # previous was normal, but this is an end marker without a start
            # keyword; print all
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%'
        else
            # this area is normal, cut s3fs information messages
            head -${head_line_cnt} ${SUITELOG} | tail -${tail_line_cnt} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
        fi
    fi
    if [ ${number_type[1]} -eq 3 ]; then
        echo ""
    fi
    prev_line_type=${number_type[1]}
    prev_line_number=${number_type[0]}

done < ${TMP_LINENO_FILE}

#
# Print rest lines
#
file_line_cnt=`wc -l ${SUITELOG} | awk '{print $1}'`
tail_line_cnt=`expr ${file_line_cnt} - ${prev_line_number}`

if [ ${prev_line_type} -eq 1 ]; then
    tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%'
else
    tail -${tail_line_cnt} ${SUITELOG} | grep -v -e '[0-9]\+\%' | grep -v -e '^s3fs: ' -a -e '\[INF\]'
fi

#
# Remove temp file
#
rm -f ${TMP_LINENO_FILE}

exit 0
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
@ -1,14 +1,325 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
|
||||
# to run tests against S3Proxy locally.
|
||||
#
|
||||
# To run against an Amazon S3 or other S3 provider, specify the following
|
||||
# environment variables:
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
|
||||
# S3FS_PROFILE=name s3fs profile to use (overrides key file)
|
||||
# TEST_BUCKET_1=bucketname Name of bucket to use
|
||||
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
|
||||
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
# S3_ENDPOINT="us-east-1" Specify region
|
||||
# TMPDIR="/var/tmp" Set to use a temporary directory different
|
||||
# from /var/tmp
|
||||
#
|
||||
# Example of running against Amazon S3 using a bucket named "bucket":
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" ./small-integration-test.sh
|
||||
#
|
||||
# To change the s3fs-fuse debug level:
|
||||
#
|
||||
# DBGLEVEL=debug ./small-integration-test.sh
|
||||
#
|
||||
# To stop and wait after the mount point is up for manual interaction. This allows you to
|
||||
# explore the mounted file system exactly as it would have been started for the test case
|
||||
#
|
||||
# INTERACT=1 DBGLEVEL=debug ./small-integration-test.sh
|
||||
#
|
||||
# Run all of the tests from the makefile
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="https://s3.amazonaws.com" make check
|
||||
#
|
||||
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
|
||||
# useful for poking around with plain old curl
|
||||
#
|
||||
# PUBLIC=1 INTERACT=1 ./small-integration-test.sh
|
||||
#
|
||||
# A valgrind tool can be specified
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
S3FS=../src/s3fs
|
||||
|
||||
S3FS_CREDENTIALS_FILE=$(eval echo ~${SUDO_USER}/.passwd-s3fs)
|
||||
# Allow these defaulted values to be overridden
|
||||
: ${S3_URL:="https://127.0.0.1:8080"}
|
||||
: ${S3_ENDPOINT:="us-east-1"}
|
||||
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
|
||||
: ${TEST_BUCKET_1:="s3fs-integration-test"}
|
||||
|
||||
TEST_BUCKET_1=${SUDO_USER}-s3fs-integration-test
|
||||
TEST_BUCKET_MOUNT_POINT_1=/mnt/${TEST_BUCKET_1}
|
||||
export TEST_BUCKET_1
|
||||
export S3_URL
|
||||
export S3_ENDPOINT
|
||||
export TEST_SCRIPT_DIR=`pwd`
|
||||
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
|
||||
|
||||
S3PROXY_VERSION="1.7.1"
|
||||
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
|
||||
|
||||
CHAOS_HTTP_PROXY_VERSION="1.1.0"
|
||||
CHAOS_HTTP_PROXY_BINARY="chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}"
|
||||
|
||||
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
|
||||
then
|
||||
echo "Missing credentials file: $S3FS_CREDENTIALS_FILE"
|
||||
exit 1
|
||||
fi
|
||||
chmod 600 "$S3FS_CREDENTIALS_FILE"
|
||||
|
||||
if [ -z "${S3FS_PROFILE}" ]; then
|
||||
export AWS_ACCESS_KEY_ID=$(cut -d: -f1 ${S3FS_CREDENTIALS_FILE})
|
||||
export AWS_SECRET_ACCESS_KEY=$(cut -d: -f2 ${S3FS_CREDENTIALS_FILE})
|
||||
fi
|
||||
|
||||
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
|
||||
then
|
||||
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
|
||||
fi
|
||||
|
||||
# Run the given command up to $1 times, sleeping 1 second between attempts,
# stopping at the first success; returns the last exit status.  errexit is
# suspended inside the function so a failing attempt does not abort the
# whole script, and restored before returning.
function retry {
    set +o errexit
    N=$1; shift;
    status=0
    for i in $(seq $N); do
        echo "Trying: $*"
        # NOTE(review): unquoted "eval $@" re-splits the arguments; assumes
        # callers pass commands that are safe to re-split -- confirm.
        eval $@
        status=$?
        if [ $status == 0 ]; then
            break
        fi
        sleep 1
        echo "Retrying: $*"
    done

    if [ $status != 0 ]; then
        echo "timeout waiting for $*"
    fi
    set -o errexit
    return $status
}
|
||||
|
||||
# Proxy is not started if S3PROXY_BINARY is an empty string
# PUBLIC unset: use s3proxy.conf
# PUBLIC=1: use s3proxy-noauth.conf (no request signing)
#
# Downloads the S3Proxy jar on first use, launches it in the background and
# records its PID; if CHAOS_HTTP_PROXY is set, does the same for the Chaos
# HTTP Proxy.  Each launch waits until the proxy's port accepts connections.
function start_s3proxy {
    if [ -n "${PUBLIC}" ]; then
        S3PROXY_CONFIG="s3proxy-noauth.conf"
    else
        S3PROXY_CONFIG="s3proxy.conf"
    fi

    if [ -n "${S3PROXY_BINARY}" ]
    then
        if [ ! -e "${S3PROXY_BINARY}" ]; then
            wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
                --quiet -O "${S3PROXY_BINARY}"
            chmod +x "${S3PROXY_BINARY}"
        fi

        # stdbuf -oL -eL: line-buffer the proxy's output so it interleaves
        # readably with the test output
        ${STDBUF_BIN} -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
        S3PROXY_PID=$!

        # wait for S3Proxy to start
        wait_for_port 8080
    fi

    if [ -n "${CHAOS_HTTP_PROXY}" ]; then
        if [ ! -e "${CHAOS_HTTP_PROXY_BINARY}" ]; then
            wget "https://github.com/bouncestorage/chaos-http-proxy/releases/download/chaos-http-proxy-${CHAOS_HTTP_PROXY_VERSION}/chaos-http-proxy" \
                --quiet -O "${CHAOS_HTTP_PROXY_BINARY}"
            chmod +x "${CHAOS_HTTP_PROXY_BINARY}"
        fi

        ${STDBUF_BIN} -oL -eL java -jar ${CHAOS_HTTP_PROXY_BINARY} --properties chaos-http-proxy.conf &
        CHAOS_HTTP_PROXY_PID=$!

        # wait for Chaos HTTP Proxy to start
        wait_for_port 1080
    fi
}
|
||||
|
||||
# Terminate any background proxies launched by start_s3proxy.
function stop_s3proxy {
    # Only signal processes we actually started; the PID variables are set
    # exclusively by start_s3proxy.
    [ -z "${S3PROXY_PID}" ] || kill ${S3PROXY_PID}
    [ -z "${CHAOS_HTTP_PROXY_PID}" ] || kill ${CHAOS_HTTP_PROXY_PID}
}
|
||||
|
||||
# Mount the bucket, function arguments passed to s3fs in addition to
# a set of common arguments.
#
# Side effects: exports S3FS_PID, may export http_proxy, writes/removes a
# temporary "pid" file, and exits the script on mount failure.
function start_s3fs {
    # Public bucket if PUBLIC is set
    if [ -n "${PUBLIC}" ]; then
        AUTH_OPT="-o public_bucket=1"
    elif [ -n "${S3FS_PROFILE}" ]; then
        AUTH_OPT="-o profile=${S3FS_PROFILE}"
    else
        AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    fi

    # If VALGRIND is set, pass it as options to valgrind.
    # start valgrind-listener in another shell.
    # eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
    # Start valgrind-listener (default port is 1500)
    if [ -n "${VALGRIND}" ]; then
        VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
    fi

    # On OSX only, we need to specify the direct_io and auto_cache flag.
    if [ `uname` = "Darwin" ]; then
        DIRECT_IO_OPT="-o direct_io -o auto_cache"
    else
        DIRECT_IO_OPT=""
    fi

    # Route s3fs traffic through the fault-injection proxy when enabled.
    if [ -n "${CHAOS_HTTP_PROXY}" ]; then
        export http_proxy="127.0.0.1:1080"
    fi

    # [NOTE]
    # On macos, running s3fs via stdbuf will result in no response.
    # Therefore, when it is macos, it is not executed via stdbuf.
    # This patch may be temporary, but no other method has been found at this time.
    #
    if [ `uname` = "Darwin" ]; then
        VIA_STDBUF_CMDLINE=""
    else
        VIA_STDBUF_CMDLINE="${STDBUF_BIN} -oL -eL"
    fi

    # Common s3fs options:
    #
    # TODO: Allow all these options to be overridden with env variables
    #
    # use_path_request_style
    #     The test env doesn't have virtual hosts
    # createbucket
    #     S3Proxy always starts with no buckets, this tests the s3fs-fuse
    #     automatic bucket creation path.
    # $AUTH_OPT
    #     Will be either "-o public_bucket=1"
    #     or
    #     "-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    # dbglevel
    #     error by default.  override with DBGLEVEL env variable
    # -f
    #     Keep s3fs in foreground instead of daemonizing
    #

    # subshell with set -x to log exact invocation of s3fs-fuse
    # (the child PID is smuggled out through fd 3 into the "pid" file,
    # because $! in the subshell is not visible to the parent shell)
    (
        set -x
        ${VIA_STDBUF_CMDLINE} \
            ${VALGRIND_EXEC} ${S3FS} \
            $TEST_BUCKET_1 \
            $TEST_BUCKET_MOUNT_POINT_1 \
            -o use_path_request_style \
            -o url=${S3_URL} \
            -o endpoint=${S3_ENDPOINT} \
            -o no_check_certificate \
            -o ssl_verify_hostname=0 \
            -o use_xattr=1 \
            -o createbucket \
            ${AUTH_OPT} \
            ${DIRECT_IO_OPT} \
            -o stat_cache_expire=1 \
            -o stat_cache_interval_expire=1 \
            -o dbglevel=${DBGLEVEL:=info} \
            -o no_time_stamp_msg \
            -o retries=3 \
            -f \
            "${@}" &
        echo $! >&3
    ) 3>pid | ${STDBUF_BIN} -oL -eL ${SED_BIN} ${SED_BUFFER_FLAG} "s/^/s3fs: /" &
    sleep 1
    export S3FS_PID=$(<pid)
    rm -f pid

    # Wait for the mount point to show up, then bail out if it never does.
    if [ `uname` = "Darwin" ]; then
        set +o errexit
        TRYCOUNT=0
        MOUNTED=0
        while [ $TRYCOUNT -le ${RETRIES:=20} ]; do
            df | grep -q $TEST_BUCKET_MOUNT_POINT_1
            if [ $? -eq 0 ]; then
                MOUNTED=1
                break;
            fi
            sleep 1
            TRYCOUNT=`expr ${TRYCOUNT} + 1`
        done
        # [FIX] The previous code checked "$?" after "done", which is the
        # exit status of the while loop itself (0 on normal exhaustion), so
        # a mount failure was never detected.  Track success explicitly.
        if [ $MOUNTED -ne 1 ]; then
            exit 1
        fi
        set -o errexit
    else
        retry ${RETRIES:=20} grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
    fi

    # Quick way to start system up for manual testing with options under test
    if [[ -n ${INTERACT} ]]; then
        echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready"
        echo "control-C to quit"
        sleep infinity
        exit 0
    fi
}
|
||||
|
||||
# Unmount the test bucket mounted by start_s3fs.
function stop_s3fs {
    # Retry in case file system is in use
    if [ `uname` = "Darwin" ]; then
        if df | grep -q $TEST_BUCKET_MOUNT_POINT_1; then
            # "|" and "&&" are passed as quoted words so that retry's eval
            # re-checks the mount state on every attempt before unmounting.
            retry 10 df "|" grep -q $TEST_BUCKET_MOUNT_POINT_1 "&&" umount $TEST_BUCKET_MOUNT_POINT_1
        fi
    else
        if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
            retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts "&&" fusermount -u $TEST_BUCKET_MOUNT_POINT_1
        fi
    fi
}
|
||||
|
||||
# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
    # Tear down in reverse order of startup: unmount s3fs, then stop proxies.
    stop_s3fs
    stop_s3proxy
}
trap common_exit_handler EXIT
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
1498
test/integration-test-main.sh
Executable file
1498
test/integration-test-main.sh
Executable file
File diff suppressed because it is too large
Load Diff
BIN
test/keystore.jks
Normal file
BIN
test/keystore.jks
Normal file
Binary file not shown.
@ -1,4 +1,24 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Merge old directory object to new.
|
||||
# For s3fs after v1.64
|
||||
@ -7,12 +27,12 @@
|
||||
###
|
||||
### UsageFunction <program name>
|
||||
###
|
||||
UsageFuntion()
|
||||
UsageFunction()
|
||||
{
|
||||
echo "Usage: $1 [-h] [-y] [-all] <base directory>"
|
||||
echo " -h print usage"
|
||||
echo " -y no confirm"
|
||||
echo " -all force all directoris"
|
||||
echo " -all force all directories"
|
||||
echo " There is no -all option is only to merge for other S3 client."
|
||||
echo " If -all is specified, this shell script merge all directory"
|
||||
echo " for s3fs old version."
|
||||
@ -28,7 +48,7 @@ DIRPARAM=""
|
||||
|
||||
while [ "$1" != "" ]; do
|
||||
if [ "X$1" = "X-help" -o "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 0
|
||||
elif [ "X$1" = "X-y" -o "X$1" = "X-Y" ]; then
|
||||
AUTOYES="yes"
|
||||
@ -38,7 +58,7 @@ while [ "$1" != "" ]; do
|
||||
if [ "X$DIRPARAM" != "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
DIRPARAM=$1
|
||||
@ -48,7 +68,7 @@ done
|
||||
if [ "X$DIRPARAM" = "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -62,7 +82,7 @@ fi
|
||||
echo "#############################################################################"
|
||||
echo "[CAUTION]"
|
||||
echo "This program merges a directory made in s3fs which is older than version 1.64."
|
||||
echo "And made in other S3 client appilication."
|
||||
echo "And made in other S3 client application."
|
||||
echo "This program may be have bugs which are not fixed yet."
|
||||
echo "Please execute this program by responsibility of your own."
|
||||
echo "#############################################################################"
|
||||
@ -104,7 +124,7 @@ for DIR in $DIRLIST; do
|
||||
if [ "$ALLYES" = "no" ]; then
|
||||
### Skip "d---------" directories.
|
||||
### Other clients make directory object "dir/" which don't have
|
||||
### "x-amz-meta-mode" attribyte.
|
||||
### "x-amz-meta-mode" attribute.
|
||||
### Then these directories is "d---------", it is target directory.
|
||||
DIRPERMIT=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $1}'`
|
||||
if [ "$DIRPERMIT" != "d---------" ]; then
|
||||
@ -112,7 +132,7 @@ for DIR in $DIRLIST; do
|
||||
fi
|
||||
fi
|
||||
|
||||
### Comfirm
|
||||
### Confirm
|
||||
ANSWER=""
|
||||
if [ "$AUTOYES" = "yes" ]; then
|
||||
ANSWER="y"
|
||||
@ -165,5 +185,10 @@ echo -n "# Finished : " >> $LOGFILE
|
||||
echo `date` >> $LOGFILE
|
||||
|
||||
#
|
||||
# END
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
1
test/passwd-s3fs
Normal file
1
test/passwd-s3fs
Normal file
@ -0,0 +1 @@
|
||||
local-identity:local-credential
|
||||
@ -1,7 +1,35 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
if [[ $EUID -ne 0 ]]
|
||||
then
|
||||
echo "This test script must be run as root" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
66
test/run_tests_using_sanitizers.sh
Executable file
66
test/run_tests_using_sanitizers.sh
Executable file
@ -0,0 +1,66 @@
|
||||
#!/bin/bash
#
# s3fs - FUSE-based file system backed by Amazon S3
#
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#

# Rebuild s3fs under several sanitizers / debug modes and run the test
# suite under each.  Each section does a full clean + configure + make.

set -o errexit
set -o nounset
set -o pipefail

# Disable preprocessor warnings from _FORTIFY_SOURCE and -O0
COMMON_FLAGS="-g -O0 -Wno-cpp"

# run tests with libstc++ debug mode, https://gcc.gnu.org/onlinedocs/libstdc++/manual/debug_mode.html
make clean
./configure CXXFLAGS="$COMMON_FLAGS -D_GLIBCXX_DEBUG"
make
DBGLEVEL=debug make check -C test/

# run tests under AddressSanitizer, https://clang.llvm.org/docs/AddressSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=address -fsanitize-address-use-after-scope"
make
ASAN_OPTIONS='detect_leaks=1,detect_stack_use_after_return=1' make check -C test/

# run tests under MemorySanitizer, https://clang.llvm.org/docs/MemorySanitizer.html
#make clean
#./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=memory"
#make
#make check -C test/

# run tests under ThreadSanitizer, https://clang.llvm.org/docs/ThreadSanitizer.html
make clean
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=thread"
make
TSAN_OPTIONS='halt_on_error=1' make check -C test/

# run tests under UndefinedBehaviorSanitizer, https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
make clean
# [FIX] The hyphen was misplaced ("-$COMMON_FLAGS fsanitize=...") which
# expanded to "--g -O0 ... fsanitize=undefined,..." — a bogus "--g" flag and
# the sanitizer list passed as a stray word, so UBSan was never enabled.
./configure CXX=clang++ CXXFLAGS="$COMMON_FLAGS -fsanitize=undefined,implicit-conversion,local-bounds,unsigned-integer-overflow"
make
make check -C test/

#
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# End:
# vim600: expandtab sw=4 ts=4 fdm=marker
# vim<600: expandtab sw=4 ts=4
#
||||
10
test/s3proxy.conf
Normal file
10
test/s3proxy.conf
Normal file
@ -0,0 +1,10 @@
|
||||
s3proxy.secure-endpoint=https://127.0.0.1:8080
|
||||
s3proxy.authorization=aws-v2-or-v4
|
||||
s3proxy.identity=local-identity
|
||||
s3proxy.credential=local-credential
|
||||
s3proxy.keystore-path=keystore.jks
|
||||
s3proxy.keystore-password=password
|
||||
|
||||
jclouds.provider=transient
|
||||
jclouds.identity=remote-identity
|
||||
jclouds.credential=remote-credential
|
||||
@ -1,24 +1,30 @@
|
||||
# S3FS: Samlpe ahbe_conf parameter file.
|
||||
# S3FS: Sample ahbe_conf parameter file.
|
||||
#
|
||||
# This file is configuration file for additional header by extension(ahbe).
|
||||
# s3fs loads this file at starting.
|
||||
#
|
||||
# Format:
|
||||
# line = [file suffix] HTTP-header [HTTP-header-values]
|
||||
# line = [file suffix or regex] HTTP-header [HTTP-header-values]
|
||||
# file suffix = file(object) suffix, if this field is empty,
|
||||
# it means "*"(all object).
|
||||
# it means "reg:(.*)".(=all object).
|
||||
# regex = regular expression to match the file(object) path.
|
||||
# this type starts with "reg:" prefix.
|
||||
# HTTP-header = additional HTTP header name
|
||||
# HTTP-header-values = additional HTTP header value
|
||||
#
|
||||
# <suffix(extension)> <HTTP header> <HTTP header values>
|
||||
#
|
||||
# Verification is done in the order in which they are described in the file.
|
||||
# That order is very important.
|
||||
#
|
||||
# Example:
|
||||
# " Content-Encoding gzip" --> all object
|
||||
# ".gz Content-Encoding gzip" --> only ".gz" extension file
|
||||
# " Content-Encoding gzip" --> all object
|
||||
# ".gz Content-Encoding gzip" --> only ".gz" extension file
|
||||
# "reg:^/DIR/(.*).t2$ Content-Encoding text2" --> "/DIR/*.t2" extension file
|
||||
#
|
||||
# Notice:
|
||||
# If you need to set all object, you can specify without "suffix".
|
||||
# Then all of object(file) is added additional header.
|
||||
# If you need to set all object, you can specify without "suffix" or regex
|
||||
# type "reg:(.*)". Then all of object(file) is added additional header.
|
||||
# If you have this configuration file for Content-Encoding, you should
|
||||
# know about RFC 2616.
|
||||
#
|
||||
@ -27,15 +33,20 @@
|
||||
# Encoding header, and SHOULD NOT be used in the Content-Encoding
|
||||
# header."
|
||||
#
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
.bz2 Content-Encoding bzip2
|
||||
.svgz Content-Encoding gzip
|
||||
.svg.gz Content-Encoding gzip
|
||||
.tgz Content-Encoding gzip
|
||||
.tar.gz Content-Encoding gzip
|
||||
.taz Content-Encoding gzip
|
||||
.tz Content-Encoding gzip
|
||||
.tbz2 Content-Encoding gzip
|
||||
gz.js Content-Encoding gzip
|
||||
|
||||
# file suffix type
|
||||
.gz Content-Encoding gzip
|
||||
.Z Content-Encoding compress
|
||||
.bz2 Content-Encoding bzip2
|
||||
.svgz Content-Encoding gzip
|
||||
.svg.gz Content-Encoding gzip
|
||||
.tgz Content-Encoding gzip
|
||||
.tar.gz Content-Encoding gzip
|
||||
.taz Content-Encoding gzip
|
||||
.tz Content-Encoding gzip
|
||||
.tbz2 Content-Encoding gzip
|
||||
gz.js Content-Encoding gzip
|
||||
|
||||
# regex type(test)
|
||||
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
|
||||
|
||||
|
||||
@ -1,4 +1,24 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# This is unsupport sample deleting cache files script.
|
||||
# So s3fs's local cache files(stats and objects) grow up,
|
||||
@ -12,96 +32,100 @@
|
||||
|
||||
func_usage()
|
||||
{
|
||||
echo ""
|
||||
echo "Usage: $1 <bucket name> <cache path> <limit size> [-silent]"
|
||||
echo " $1 -h"
|
||||
echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824"
|
||||
echo ""
|
||||
echo " bucket name = bucket name which specified s3fs option"
|
||||
echo " cache path = cache directory path which specified by"
|
||||
echo " use_cache s3fs option."
|
||||
echo " limit size = limit for total cache files size."
|
||||
echo " specify by BYTE"
|
||||
echo " -silent = silent mode"
|
||||
echo ""
|
||||
echo ""
|
||||
echo "Usage: $1 <bucket name> <cache path> <limit size> [-silent]"
|
||||
echo " $1 -h"
|
||||
echo "Sample: $1 mybucket /tmp/s3fs/cache 1073741824"
|
||||
echo ""
|
||||
echo " bucket name = bucket name which specified s3fs option"
|
||||
echo " cache path = cache directory path which specified by"
|
||||
echo " use_cache s3fs option."
|
||||
echo " limit size = limit for total cache files size."
|
||||
echo " specify by BYTE"
|
||||
echo " -silent = silent mode"
|
||||
echo ""
|
||||
}
|
||||
|
||||
PRGNAME=`basename $0`
|
||||
|
||||
if [ "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
func_usage $PRGNAME
|
||||
exit 0
|
||||
func_usage $PRGNAME
|
||||
exit 0
|
||||
fi
|
||||
if [ "X$1" = "X" -o "X$2" = "X" -o "X$3" = "X" ]; then
|
||||
func_usage $PRGNAME
|
||||
exit -1
|
||||
func_usage $PRGNAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUCKET=$1
|
||||
CDIR=$2
|
||||
CDIR="$2"
|
||||
LIMIT=$3
|
||||
SILENT=0
|
||||
if [ "X$4" = "X-silent" ]; then
|
||||
SILENT=1
|
||||
SILENT=1
|
||||
fi
|
||||
FILES_CDIR=$CDIR/$BUCKET
|
||||
STATS_CDIR=$CDIR/\.$BUCKET\.stat
|
||||
|
||||
FILES_CDIR="${CDIR}/${BUCKET}"
|
||||
STATS_CDIR="${CDIR}/.${BUCKET}.stat"
|
||||
CURRENT_CACHE_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
#
|
||||
# Check total size
|
||||
#
|
||||
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "$FILES_CDIR is below allowed $LIMIT"
|
||||
fi
|
||||
exit 0
|
||||
if [ $LIMIT -ge $CURRENT_CACHE_SIZE ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "$FILES_CDIR ($CURRENT_CACHE_SIZE) is below allowed $LIMIT"
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
#
|
||||
# Make file list by sorted access time
|
||||
#
|
||||
ALL_STATS_ATIMELIST=`find $STATS_CDIR -type f -exec echo -n {} \; -exec echo -n " " \; -exec stat -c %X {} \; | awk '{print $2":"$1}' | sort`
|
||||
|
||||
#
|
||||
# Remove loop
|
||||
#
|
||||
TMP_ATIME=0
|
||||
TMP_STATS=""
|
||||
TMP_CFILE=""
|
||||
for part in $ALL_STATS_ATIMELIST; do
|
||||
TMP_ATIME=`echo $part | sed 's/\:/ /' | awk '{print $1}'`
|
||||
TMP_STATS=`echo $part | sed 's/\:/ /' | awk '{print $2}'`
|
||||
TMP_CFILE=`echo $TMP_STATS | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
#
|
||||
# Make file list by sorted access time
|
||||
#
|
||||
find "$STATS_CDIR" -type f -exec stat -c "%X:%n" "{}" \; | sort | while read part
|
||||
do
|
||||
echo Looking at $part
|
||||
TMP_ATIME=`echo "$part" | cut -d: -f1`
|
||||
TMP_STATS="`echo "$part" | cut -d: -f2`"
|
||||
TMP_CFILE=`echo "$TMP_STATS" | sed s/\.$BUCKET\.stat/$BUCKET/`
|
||||
|
||||
if [ `stat -c %X $TMP_STATS` -eq $TMP_ATIME ]; then
|
||||
rm -f $TMP_STATS $TMP_CFILE > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
|
||||
fi
|
||||
exit -1
|
||||
else
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "remove file: $TMP_CFILE $TMP_STATS"
|
||||
fi
|
||||
if [ `stat -c %X "$TMP_STATS"` -eq $TMP_ATIME ]; then
|
||||
rm -f "$TMP_STATS" "$TMP_CFILE" > /dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "ERROR: Could not remove files($TMP_STATS,$TMP_CFILE)"
|
||||
fi
|
||||
exit 1
|
||||
else
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "remove file: $TMP_CFILE $TMP_STATS"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $LIMIT -ge `du -sb $FILES_CDIR | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "finish removing files"
|
||||
if [ $LIMIT -ge `du -sb "$FILES_CDIR" | awk '{print $1}'` ]; then
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
echo "finish removing files"
|
||||
fi
|
||||
break
|
||||
fi
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $SILENT -ne 1 ]; then
|
||||
TOTAL_SIZE=`du -sb $FILES_CDIR | awk '{print $1}'`
|
||||
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
|
||||
TOTAL_SIZE=`du -sb "$FILES_CDIR" | awk '{print $1}'`
|
||||
echo "Finish: $FILES_CDIR total size is $TOTAL_SIZE"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
||||
#
|
||||
# End
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
@ -1,284 +1,94 @@
|
||||
#!/bin/bash -e
|
||||
COMMON=integration-test-common.sh
|
||||
source $COMMON
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Test s3fs-fuse file system operations with
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
# Require root
|
||||
REQUIRE_ROOT=require-root.sh
|
||||
source $REQUIRE_ROOT
|
||||
#source $REQUIRE_ROOT
|
||||
|
||||
# Configuration
|
||||
TEST_TEXT="HELLO WORLD"
|
||||
TEST_TEXT_FILE=test-s3fs.txt
|
||||
TEST_DIR=testdir
|
||||
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
|
||||
TEST_TEXT_FILE_LENGTH=15
|
||||
source integration-test-common.sh
|
||||
|
||||
# Mount the bucket
|
||||
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
|
||||
then
|
||||
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
|
||||
fi
|
||||
$S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 -o passwd_file=$S3FS_CREDENTIALS_FILE
|
||||
CUR_DIR=`pwd`
|
||||
cd $TEST_BUCKET_MOUNT_POINT_1
|
||||
CACHE_DIR="/tmp/s3fs-cache"
|
||||
rm -rf "${CACHE_DIR}"
|
||||
mkdir "${CACHE_DIR}"
|
||||
|
||||
if [ -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
rm -f $TEST_TEXT_FILE
|
||||
#reserve 200MB for data cache
|
||||
source test-utils.sh
|
||||
CACHE_DISK_AVAIL_SIZE=`get_disk_avail_size $CACHE_DIR`
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
# [FIXME]
|
||||
# Only on MacOS, there are cases where process or system
|
||||
# other than the s3fs cache uses disk space.
|
||||
# We can imagine that this is caused by Timemachine, but
|
||||
# there is no workaround, so s3fs cache size is set +1gb
|
||||
# for error bypass.
|
||||
#
|
||||
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 1200))
|
||||
else
|
||||
ENSURE_DISKFREE_SIZE=$((CACHE_DISK_AVAIL_SIZE - 200))
|
||||
fi
|
||||
|
||||
# Write a small test file
|
||||
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
|
||||
do
|
||||
echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
|
||||
echo $TEST_TEXT >> $TEST_TEXT_FILE
|
||||
export CACHE_DIR
|
||||
export ENSURE_DISKFREE_SIZE
|
||||
FLAGS=(
|
||||
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
|
||||
enable_content_md5
|
||||
enable_noobj_cache
|
||||
max_stat_cache_size=100
|
||||
nocopyapi
|
||||
nomultipart
|
||||
notsup_compat_dir
|
||||
sigv2
|
||||
sigv4
|
||||
singlepart_copy_limit=10 # limit size to exercise multipart code paths
|
||||
#use_sse # TODO: S3Proxy does not support SSE
|
||||
)
|
||||
|
||||
start_s3proxy
|
||||
|
||||
for flag in "${FLAGS[@]}"; do
|
||||
echo "testing s3fs flag: $flag"
|
||||
|
||||
start_s3fs -o $flag
|
||||
|
||||
./integration-test-main.sh
|
||||
|
||||
stop_s3fs
|
||||
done
|
||||
|
||||
# Verify contents of file
|
||||
echo "Verifying length of test file"
|
||||
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
|
||||
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
|
||||
then
|
||||
echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
|
||||
exit 1
|
||||
fi
|
||||
stop_s3proxy
|
||||
|
||||
# Delete the test file
|
||||
rm $TEST_TEXT_FILE
|
||||
if [ -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not delete file, it still exists"
|
||||
exit 1
|
||||
fi
|
||||
echo "$0: tests complete."
|
||||
|
||||
##########################################################
|
||||
# Rename test (individual file)
|
||||
##########################################################
|
||||
echo "Testing mv file function ..."
|
||||
|
||||
# if the rename file exists, delete it
|
||||
if [ -e $ALT_TEST_TEXT_FILE ]
|
||||
then
|
||||
rm $ALT_TEST_TEXT_FILE
|
||||
fi
|
||||
|
||||
if [ -e $ALT_TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# create the test file again
|
||||
echo $TEST_TEXT > $TEST_TEXT_FILE
|
||||
if [ ! -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#rename the test file
|
||||
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
|
||||
if [ ! -e $ALT_TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not move file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check the contents of the alt file
|
||||
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
|
||||
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
|
||||
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
|
||||
then
|
||||
echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# clean up
|
||||
rm $ALT_TEST_TEXT_FILE
|
||||
|
||||
if [ -e $ALT_TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not cleanup file ${ALT_TEST_TEXT_FILE}, it still exists"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
##########################################################
|
||||
# Rename test (individual directory)
|
||||
##########################################################
|
||||
echo "Testing mv directory function ..."
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir ${TEST_DIR}
|
||||
|
||||
if [ ! -d ${TEST_DIR} ]; then
|
||||
echo "Directory ${TEST_DIR} was not created"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv ${TEST_DIR} ${TEST_DIR}_rename
|
||||
|
||||
if [ ! -d "${TEST_DIR}_rename" ]; then
|
||||
echo "Directory ${TEST_DIR} was not renamed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rmdir ${TEST_DIR}_rename
|
||||
if [ -e "${TEST_DIR}_rename" ]; then
|
||||
echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
###################################################################
|
||||
# test redirects > and >>
|
||||
###################################################################
|
||||
echo "Testing redirects ..."
|
||||
|
||||
echo ABCDEF > $TEST_TEXT_FILE
|
||||
if [ ! -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CONTENT=`cat $TEST_TEXT_FILE`
|
||||
|
||||
if [ ${CONTENT} != "ABCDEF" ]; then
|
||||
echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo XYZ > $TEST_TEXT_FILE
|
||||
|
||||
CONTENT=`cat $TEST_TEXT_FILE`
|
||||
|
||||
if [ ${CONTENT} != "XYZ" ]; then
|
||||
echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo 123456 >> $TEST_TEXT_FILE
|
||||
|
||||
LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
|
||||
LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`
|
||||
|
||||
if [ ${LINE1} != "XYZ" ]; then
|
||||
echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ${LINE2} != "123456" ]; then
|
||||
echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# clean up
|
||||
rm $TEST_TEXT_FILE
|
||||
|
||||
if [ -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not cleanup file ${TEST_TEXT_FILE}, it still exists"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#####################################################################
|
||||
# Simple directory test mkdir/rmdir
|
||||
#####################################################################
|
||||
echo "Testing creation/removal of a directory"
|
||||
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir ${TEST_DIR}
|
||||
|
||||
if [ ! -d ${TEST_DIR} ]; then
|
||||
echo "Directory ${TEST_DIR} was not created"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rmdir ${TEST_DIR}
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
##########################################################
|
||||
# File permissions test (individual file)
|
||||
##########################################################
|
||||
echo "Testing chmod file function ..."
|
||||
|
||||
# create the test file again
|
||||
echo $TEST_TEXT > $TEST_TEXT_FILE
|
||||
if [ ! -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not create file ${TEST_TEXT_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
|
||||
|
||||
chmod 777 $TEST_TEXT_FILE;
|
||||
|
||||
# if they're the same, we have a problem.
|
||||
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
|
||||
then
|
||||
echo "Could not modify $TEST_TEXT_FILE permissions"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# clean up
|
||||
rm $TEST_TEXT_FILE
|
||||
|
||||
if [ -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not cleanup file ${TEST_TEXT_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
##########################################################
|
||||
# File permissions test (individual file)
|
||||
##########################################################
|
||||
echo "Testing chown file function ..."
|
||||
|
||||
# create the test file again
|
||||
echo $TEST_TEXT > $TEST_TEXT_FILE
|
||||
if [ ! -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not create file ${TEST_TEXT_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
|
||||
|
||||
chown 1000:1000 $TEST_TEXT_FILE;
|
||||
|
||||
# if they're the same, we have a problem.
|
||||
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
|
||||
then
|
||||
echo "Could not modify $TEST_TEXT_FILE ownership"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# clean up
|
||||
rm $TEST_TEXT_FILE
|
||||
|
||||
if [ -e $TEST_TEXT_FILE ]
|
||||
then
|
||||
echo "Could not cleanup file ${TEST_TEXT_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#####################################################################
|
||||
# Tests are finished
|
||||
#####################################################################
|
||||
|
||||
# Unmount the bucket
|
||||
cd $CUR_DIR
|
||||
umount $TEST_BUCKET_MOUNT_POINT_1
|
||||
|
||||
echo "All tests complete."
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
|
||||
357
test/test-utils.sh
Normal file
357
test/test-utils.sh
Normal file
@ -0,0 +1,357 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#### Test utils
|
||||
|
||||
# Abort the whole run on the first failing command, including failures
# anywhere inside a pipeline.
set -o errexit
set -o pipefail

# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
BIG_FILE=big-file-s3fs.txt
TEMP_DIR=${TMPDIR:-"/var/tmp"}
# /dev/urandom can only return 32 MB per block maximum
BIG_FILE_BLOCK_SIZE=$((25 * 1024 * 1024))
BIG_FILE_COUNT=1
# This should be greater than the multipart size
BIG_FILE_LENGTH=$(($BIG_FILE_BLOCK_SIZE * $BIG_FILE_COUNT))
# RUN_DIR is assigned later by cd_run_dir; exported so child processes see it.
export RUN_DIR

# [NOTE]
# stdbuf, truncate and sed installed on macos do not work as
# expected(not compatible with Linux).
# Therefore, macos installs a brew package such as coreutils
# and uses gnu commands(gstdbuf, gtruncate, gsed).
# Set your PATH appropriately so that you can find these commands.
#
if [ `uname` = "Darwin" ]; then
    export STDBUF_BIN="gstdbuf"
    export TRUNCATE_BIN="gtruncate"
    export SED_BIN="gsed"
else
    export STDBUF_BIN="stdbuf"
    export TRUNCATE_BIN="truncate"
    export SED_BIN="sed"
fi
export SED_BUFFER_FLAG="--unbuffered"
|
||||
|
||||
# get_xattr NAME FILE
# Print the value of extended attribute NAME on FILE, dispatching to the
# platform-appropriate tool (xattr on macOS, getfattr elsewhere).
function get_xattr() {
    case "$(uname)" in
        Darwin)
            xattr -p "$1" "$2"
            ;;
        *)
            getfattr -n "$1" --only-values "$2"
            ;;
    esac
}
|
||||
|
||||
# set_xattr NAME VALUE FILE
# Set extended attribute NAME to VALUE on FILE, dispatching to the
# platform-appropriate tool (xattr on macOS, setfattr elsewhere).
function set_xattr() {
    case "$(uname)" in
        Darwin)
            xattr -w "$1" "$2" "$3"
            ;;
        *)
            setfattr -n "$1" -v "$2" "$3"
            ;;
    esac
}
|
||||
|
||||
# del_xattr NAME FILE
# Remove extended attribute NAME from FILE, dispatching to the
# platform-appropriate tool (xattr on macOS, setfattr elsewhere).
function del_xattr() {
    case "$(uname)" in
        Darwin)
            xattr -d "$1" "$2"
            ;;
        *)
            setfattr -x "$1" "$2"
            ;;
    esac
}
|
||||
|
||||
# get_size FILE
# Print FILE's size in bytes (BSD stat on macOS, GNU stat elsewhere).
function get_size() {
    case "$(uname)" in
        Darwin) stat -f "%z" "$1" ;;
        *)      stat -c %s "$1" ;;
    esac
}
|
||||
|
||||
# check_file_size FILE_NAME EXPECTED_SIZE
# Verify that FILE_NAME is exactly EXPECTED_SIZE bytes, both according to
# file metadata (stat) and by actually reading the data back.
# Returns 1 on mismatch.
function check_file_size() {
    FILE_NAME="$1"
    EXPECTED_SIZE="$2"

    # Verify expected file size via metadata
    size=$(get_size ${FILE_NAME})
    if [ $size -ne $EXPECTED_SIZE ]
    then
        # BUGFIX: the old message always claimed the file should be "zero
        # length" regardless of EXPECTED_SIZE; report the real expectation.
        echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
        return 1
    fi

    # Verify expected file size via data actually read
    # (redirect instead of "cat | wc -c" to avoid the useless cat)
    size=$(wc -c < ${FILE_NAME})
    if [ $size -ne $EXPECTED_SIZE ]
    then
        echo "error: expected ${FILE_NAME} to be $EXPECTED_SIZE length, got $size"
        return 1
    fi
}
|
||||
|
||||
# mk_test_file [TEXT]
# Create $TEST_TEXT_FILE containing TEXT (default: $TEST_TEXT), then poll
# until the file's size matches the written text, giving the remote store
# time to become consistent. Exits the script on failure.
function mk_test_file {
    if [ $# == 0 ]; then
        TEXT=$TEST_TEXT
    else
        TEXT=$1
    fi
    echo $TEXT > $TEST_TEXT_FILE
    if [ ! -e $TEST_TEXT_FILE ]
    then
        echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
        exit 1
    fi

    # wait & check
    BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
    TRY_COUNT=10
    while true; do
        MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
        if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
            break
        fi
        # BUGFIX: use shell arithmetic instead of expr; "expr 1 - 1" exits
        # with status 1 when the result is 0, which would trip errexit.
        TRY_COUNT=$((TRY_COUNT - 1))
        if [ $TRY_COUNT -le 0 ]; then
            echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
            # BUGFIX: the old code printed the error but kept looping
            # forever; abort once the retries are exhausted.
            exit 1
        fi
        sleep 1
    done
}
|
||||
|
||||
# rm_test_file [FILE]
# Remove FILE (default: $TEST_TEXT_FILE) and verify it is gone.
# Exits the script if the file still exists afterwards.
function rm_test_file {
    if [ $# == 0 ]; then
        FILE=$TEST_TEXT_FILE
    else
        FILE=$1
    fi
    rm -f $FILE

    if [ -e $FILE ]
    then
        # BUGFIX: report the file that was actually removed; the old message
        # always printed ${TEST_TEXT_FILE} even when a different FILE was given.
        echo "Could not cleanup file ${FILE}"
        exit 1
    fi
}
|
||||
|
||||
# Create the shared test directory $TEST_DIR; exits the script if the
# directory does not exist afterwards.
function mk_test_dir {
    mkdir ${TEST_DIR}

    if [ ! -d ${TEST_DIR} ]; then
        echo "Directory ${TEST_DIR} was not created"
        exit 1
    fi
}
|
||||
|
||||
# Remove the (expected-empty) test directory $TEST_DIR; exits the script
# if it still exists afterwards.
function rm_test_dir {
    rmdir ${TEST_DIR}
    if [ -e $TEST_DIR ]; then
        echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
        exit 1
    fi
}
|
||||
|
||||
# Create and cd to a unique directory for this test run
|
||||
# Sets RUN_DIR to the name of the created directory
|
||||
# cd_run_dir PREFIX
# Create and cd into a unique directory (named PREFIX) for this test run
# under the mounted bucket. Sets the global RUN_DIR to its path.
# Exits the script if TEST_BUCKET_MOUNT_POINT_1 is unset.
function cd_run_dir {
    if [ -z "$TEST_BUCKET_MOUNT_POINT_1" ]; then
        echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
        exit 1
    fi

    RUN_DIR=${TEST_BUCKET_MOUNT_POINT_1}/${1}
    mkdir -p ${RUN_DIR} && cd ${RUN_DIR}
}
|
||||
|
||||
# Best-effort removal of the per-run directory created by cd_run_dir;
# a failure is reported but does not abort the run (|| keeps errexit quiet).
function clean_run_dir {
    if [ -d ${RUN_DIR} ]; then
        rm -rf ${RUN_DIR} || echo "Error removing ${RUN_DIR}"
    fi
}
|
||||
|
||||
# Resets test suite
|
||||
# Reset the suite bookkeeping arrays (tests to run, failures, passes).
function init_suite {
    TEST_LIST=()
    TEST_FAILED_LIST=()
    TEST_PASSED_LIST=()
}
|
||||
|
||||
# Report a passing test case
|
||||
# report_pass TEST_NAME
|
||||
# report_pass TEST_NAME
# Log a passing test and record it for the end-of-run summary.
function report_pass {
    echo "$1 passed"
    TEST_PASSED_LIST+=($1)
}
|
||||
|
||||
# Report a failing test case
|
||||
# report_fail TEST_NAME
|
||||
# report_fail TEST_NAME
# Log a failing test and record it for the end-of-run summary.
function report_fail {
    echo "$1 failed"
    TEST_FAILED_LIST+=($1)
}
|
||||
|
||||
# Add tests to the suite
|
||||
# add_tests TEST_NAME...
|
||||
# add_tests TEST_NAME...
# Append one or more test function names to the suite's run list.
function add_tests {
    TEST_LIST+=("$@")
}
|
||||
|
||||
# Log test name and description
|
||||
# describe [DESCRIPTION]
|
||||
# describe [DESCRIPTION]
# Log the calling test's name plus a free-form description.
function describe {
    # ${FUNCNAME[1]} expands to the name of the function that called us.
    echo "${FUNCNAME[1]}: \"$*\""
}
|
||||
|
||||
# Runs each test in a suite and summarizes results. The list of
|
||||
# tests added by add_tests() is called with CWD set to a tmp
|
||||
# directory in the bucket. An attempt to clean this directory is
|
||||
# made after the test run.
|
||||
# Run every test registered via add_tests() with CWD set to a fresh tmp
# directory in the bucket, record pass/fail, clean up, print a summary,
# and return non-zero if any test failed. Each test receives the unique
# run key prefix as its first argument.
function run_suite {
    orig_dir=$PWD
    key_prefix="testrun-$RANDOM"
    cd_run_dir $key_prefix
    for t in "${TEST_LIST[@]}"; do
        # The following sequence runs tests in a subshell to allow continuation
        # on test failure, but still allowing errexit to be in effect during
        # the test.
        #
        # See:
        # https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ
        # Other ways of trying to capture the return value will also disable
        # errexit in the function due to bash... compliance with POSIX?
        set +o errexit
        (set -o errexit; $t $key_prefix)
        # $? here is the subshell's exit status, i.e. the test's result.
        if [[ $? == 0 ]]; then
            report_pass $t
        else
            report_fail $t
        fi
        set -o errexit
    done
    cd ${orig_dir}
    clean_run_dir

    for t in "${TEST_PASSED_LIST[@]}"; do
        echo "PASS: $t"
    done
    for t in "${TEST_FAILED_LIST[@]}"; do
        echo "FAIL: $t"
    done

    passed=${#TEST_PASSED_LIST[@]}
    failed=${#TEST_FAILED_LIST[@]}

    echo "SUMMARY for $0: $passed tests passed. $failed tests failed."

    if [[ $failed != 0 ]]; then
        return 1
    else
        return 0
    fi
}
|
||||
|
||||
# get_ctime FILE
# Print FILE's inode-change time (ctime) as seconds since the epoch.
function get_ctime() {
    case "$(uname)" in
        Darwin) stat -f "%c" "$1" ;;
        *)      stat -c "%Z" "$1" ;;
    esac
}
|
||||
|
||||
# get_mtime FILE
# Print FILE's modification time (mtime) as seconds since the epoch.
function get_mtime() {
    case "$(uname)" in
        Darwin) stat -f "%m" "$1" ;;
        *)      stat -c "%Y" "$1" ;;
    esac
}
|
||||
|
||||
# get_atime FILE
# Print FILE's access time (atime) as seconds since the epoch.
function get_atime() {
    case "$(uname)" in
        Darwin) stat -f "%a" "$1" ;;
        *)      stat -c "%X" "$1" ;;
    esac
}
|
||||
|
||||
# get_permissions FILE
# Print FILE's permission bits (BSD "%p" on macOS, GNU octal "%a" elsewhere;
# note the two formats are not identical — BSD %p includes the file type).
function get_permissions() {
    case "$(uname)" in
        Darwin) stat -f "%p" "$1" ;;
        *)      stat -c "%a" "$1" ;;
    esac
}
|
||||
|
||||
# check_content_type KEY EXPECTED
# Assert that S3 object KEY (in $TEST_BUCKET_1) reports a head-object
# response containing EXPECTED (typically a Content-Type value);
# exits the script on mismatch.
function check_content_type() {
    INFO_STR=$(aws_cli s3api head-object --bucket ${TEST_BUCKET_1} --key $1)
    case "${INFO_STR}" in
        *"$2"*)
            : # expected substring found
            ;;
        *)
            echo "moved file content-type is not as expected expected:$2 got:${INFO_STR}"
            exit 1
            ;;
    esac
}
|
||||
|
||||
# get_disk_avail_size PATH
# Print the available space of the filesystem containing PATH, in MiB
# (BLOCKSIZE is set only for the df invocation).
function get_disk_avail_size() {
    DISK_AVAIL_SIZE=$(BLOCKSIZE=$((1024 * 1024)) df $1 | awk '{print $4}' | tail -n 1)
    echo ${DISK_AVAIL_SIZE}
}
|
||||
|
||||
# aws_cli ARGS...
# Wrapper around the aws CLI that targets the test endpoint ($S3_URL),
# skips TLS verification, and adds --profile when S3FS_PROFILE is set.
function aws_cli() {
    FLAGS=""
    if [ -n "${S3FS_PROFILE}" ]; then
        FLAGS="--profile ${S3FS_PROFILE}"
    fi
    # BUGFIX: forward arguments with "$@" so values containing spaces are
    # preserved; the old unquoted $* re-split every argument on whitespace.
    # $FLAGS is deliberately left unquoted so "--profile NAME" splits into
    # two words (it is empty or exactly those two tokens).
    aws "$@" --endpoint-url "${S3_URL}" --no-verify-ssl $FLAGS
}
|
||||
|
||||
# wait_for_port PORT
# Poll 127.0.0.1:PORT once per second, up to 30 attempts, until a TCP
# connection succeeds (uses bash's /dev/tcp). Gives up silently after
# the last attempt.
function wait_for_port() {
    PORT=$1
    for _attempt in $(seq 30); do
        if exec 3<>"/dev/tcp/127.0.0.1/${PORT}";
        then
            # Connected: close both directions of fd 3 and stop waiting.
            exec 3<&-
            exec 3>&-
            break
        fi
        sleep 1
    done
}
|
||||
|
||||
# make_random_string [LENGTH]
# Echo a random base64-alphabet string (with '/', '|', '+' stripped) of
# LENGTH characters; defaults to 8 when LENGTH is absent or empty.
function make_random_string() {
    END_POS=${1:-8}
    RANDOM_STR=$(cat /dev/urandom | base64 | sed 's#[/|+]##g' | head -1 | cut -c 1-${END_POS})
    echo "${RANDOM_STR}"
}
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
108
test/ut_test.py
Executable file
108
test/ut_test.py
Executable file
@ -0,0 +1,108 @@
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
import os
|
||||
import unittest
|
||||
import ConfigParser
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
|
||||
class OssfsUnitTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
def random_string(self, len):
|
||||
char_set = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
|
||||
list = []
|
||||
for i in range(0, len):
|
||||
list.append(random.choice(char_set))
|
||||
return "".join(list)
|
||||
|
||||
def test_read_file(self):
|
||||
filename = "%s" % (self.random_string(10))
|
||||
print filename
|
||||
|
||||
f = open(filename, 'w')
|
||||
data = self.random_string(1000)
|
||||
f.write(data)
|
||||
f.close()
|
||||
|
||||
f = open(filename, 'r')
|
||||
data = f.read(100)
|
||||
self.assertEqual(len(data), 100)
|
||||
data = f.read(100)
|
||||
self.assertEqual(len(data), 100)
|
||||
f.close()
|
||||
|
||||
def test_rename_file(self):
|
||||
filename1 = "%s" % (self.random_string(10))
|
||||
filename2 = "%s" % (self.random_string(10))
|
||||
print filename1, filename2
|
||||
|
||||
f = open(filename1, 'w+')
|
||||
data1 = self.random_string(1000)
|
||||
f.write(data1)
|
||||
|
||||
os.rename(filename1, filename2)
|
||||
|
||||
f.seek(0, 0)
|
||||
data2 = f.read()
|
||||
f.close()
|
||||
|
||||
self.assertEqual(len(data1), len(data2))
|
||||
self.assertEqual(data1, data2)
|
||||
|
||||
def test_rename_file2(self):
|
||||
filename1 = "%s" % (self.random_string(10))
|
||||
filename2 = "%s" % (self.random_string(10))
|
||||
print filename1, filename2
|
||||
|
||||
f = open(filename1, 'w')
|
||||
data1 = self.random_string(1000)
|
||||
f.write(data1)
|
||||
f.close()
|
||||
|
||||
os.rename(filename1, filename2)
|
||||
|
||||
f = open(filename2, 'r')
|
||||
f.seek(0, 0)
|
||||
data2 = f.read()
|
||||
f.close()
|
||||
|
||||
self.assertEqual(len(data1), len(data2))
|
||||
self.assertEqual(data1, data2)
|
||||
|
||||
|
||||
# Script entry point: run all OssfsUnitTest cases via the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
46
test/write_multiple_offsets.py
Executable file
46
test/write_multiple_offsets.py
Executable file
@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env python2
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Expect: OUTFILE plus one or more (OFFSET, SIZE) pairs, i.e. an even
# argv length of at least 4.
if len(sys.argv) < 4 or len(sys.argv) % 2 != 0:
    # BUGFIX: the usage string contained a %s placeholder but was never
    # formatted; interpolate the program name.
    sys.exit("Usage: %s OUTFILE OFFSET_1 SIZE_1 [OFFSET_N SIZE_N]..." % sys.argv[0])

filename = sys.argv[1]

# Create/truncate the output file and write a run of "a" bytes of SIZE_N
# at each OFFSET_N, leaving (sparse) holes between the written regions.
fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
try:
    for i in range(2, len(sys.argv), 2):
        # BUGFIX(portability): os.write() requires bytes on Python 3;
        # b"a" is equivalent to "a" on Python 2, so this works on both.
        data = b"a" * int(sys.argv[i + 1])
        os.lseek(fd, int(sys.argv[i]), os.SEEK_SET)
        os.write(fd, data)
finally:
    os.close(fd)
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: expandtab sw=4 ts=4 fdm=marker
|
||||
# vim<600: expandtab sw=4 ts=4
|
||||
#
|
||||
Reference in New Issue
Block a user