Compare commits
597 Commits
| SHA1 | Author | Date |
|---|---|---|
| e1dafe76dd | |||
| 1a2e63ecff | |||
| a60b32cb80 | |||
| 6b58220009 | |||
| a841057679 | |||
| ee6abea956 | |||
| 8b0acd75e0 | |||
| cea7d44717 | |||
| 0da87e75fe | |||
| 566961c7a5 | |||
| ac65258d30 | |||
| 35261e6dba | |||
| 2818f23ba5 | |||
| 88f071ea22 | |||
| bd4bc0e7f1 | |||
| 890c1d53ff | |||
| 026260e7a1 | |||
| 99fe93b7f1 | |||
| b764c53020 | |||
| 11bd7128d2 | |||
| 7cda32664b | |||
| 4c73a0ae56 | |||
| 97fc845a6a | |||
| 7d9ac0163b | |||
| d903e064e0 | |||
| e1928288fe | |||
| 6ab6412dd3 | |||
| 30b7a69d3d | |||
| ccd0a446d8 | |||
| 0418e53b3c | |||
| bad48ab59a | |||
| bbad76bb71 | |||
| 6c1bd98c14 | |||
| b95e4acaeb | |||
| c238701d09 | |||
| 60d2ac3c7a | |||
| 967ef4d56b | |||
| ad57bdda6c | |||
| a0b69d1d3d | |||
| 5df94d7e33 | |||
| 1cbe9fb7a3 | |||
| 395f736753 | |||
| 065516c5f3 | |||
| 8660abaea2 | |||
| 366f0705a0 | |||
| ccea87ca68 | |||
| 5d54883e2f | |||
| 662f65c3c8 | |||
| 259f028490 | |||
| 5db550a298 | |||
| e3c77d2906 | |||
| ba00e79253 | |||
| c1791f920e | |||
| df3803c7b7 | |||
| 384b4cbafa | |||
| 40501a7a73 | |||
| ab89b4cd4a | |||
| 48e0d55c8e | |||
| 1eba27a50a | |||
| 41206fa0e2 | |||
| 21cf1d64e5 | |||
| ae91b6f673 | |||
| f4515b5cfa | |||
| 6c57cde7f9 | |||
| 5014c1827b | |||
| f531e6aff2 | |||
| c5c110137b | |||
| 5957d9ead0 | |||
| 5675df2a44 | |||
| 00bc9142c4 | |||
| 5653ab39fc | |||
| 473dd7c940 | |||
| ee824d52ba | |||
| 7c5fba9890 | |||
| f214cb03b2 | |||
| 416c51799b | |||
| cf6f665f03 | |||
| 20da0e4dd3 | |||
| fa8c417526 | |||
| 2c65aec6c8 | |||
| 96d8e6d823 | |||
| 62b8084300 | |||
| 907aff5de4 | |||
| bc09129ec5 | |||
| cd94f638e2 | |||
| b1fe419870 | |||
| 98b724391f | |||
| 620f6ec616 | |||
| 0c6a3882a2 | |||
| a08880ae15 | |||
| f48826dfe9 | |||
| 9c3551478e | |||
| cc94e1da26 | |||
| 2b7ea5813c | |||
| 185192be67 | |||
| ae4caa96a0 | |||
| af13ae82c1 | |||
| 13503c063b | |||
| 337da59368 | |||
| b0681246b9 | |||
| 52853f6b47 | |||
| f6eb841a24 | |||
| caea087aec | |||
| d2ae14d8b7 | |||
| 7115835834 | |||
| 551c6acf67 | |||
| 24df69f688 | |||
| 23a10dd644 | |||
| 034042f511 | |||
| 465c15ef40 | |||
| a22675bafd | |||
| 0e0ae38f6d | |||
| 7b30d5d15b | |||
| 4a5c9bef89 | |||
| 9d10a5aa70 | |||
| 107757f11d | |||
| a12e0d5ec4 | |||
| 42cdcbc2dc | |||
| eef549dac7 | |||
| c8ee132813 | |||
| d07c3f38b7 | |||
| 73da168b93 | |||
| 1fe0334c08 | |||
| 7d09914f1f | |||
| 3ac39d61f8 | |||
| c5677b4726 | |||
| 67685c3d49 | |||
| 864e20e1f2 | |||
| 51b3183cba | |||
| f02b1bc352 | |||
| 758b92e823 | |||
| df0ff3a2fd | |||
| edcf4c6218 | |||
| 28efff5986 | |||
| efba9bcbc1 | |||
| 6bd179c92b | |||
| 96764b7410 | |||
| ff3eb1971f | |||
| 94ddcb8d4f | |||
| b4c90d6957 | |||
| 75b59a7c16 | |||
| 3bcca75a88 | |||
| 79ea1a1561 | |||
| f0f61b3b55 | |||
| b955391621 | |||
| 8de992d42d | |||
| fef3fbc225 | |||
| acb61880b9 | |||
| 8ee95ff7ab | |||
| 95578cad43 | |||
| 465bbd3729 | |||
| 0fa895594e | |||
| 15573cd21e | |||
| 43df94719b | |||
| 980ba398bc | |||
| 0d59ac51c1 | |||
| 523043a2aa | |||
| 277da2c64a | |||
| 03217baa99 | |||
| 6affefff5b | |||
| 2506fe73fa | |||
| 25a03c370a | |||
| d40da2c68b | |||
| 7d6312ac78 | |||
| e26c69a327 | |||
| ff196e4257 | |||
| 19f0d498aa | |||
| 97a806447e | |||
| a00af2385b | |||
| 6fc972972f | |||
| 989d403b1f | |||
| 7b307601b5 | |||
| d731ab3a8e | |||
| 174d934d52 | |||
| b428f68acf | |||
| 5350e03147 | |||
| 28c7888a50 | |||
| 915a1321c7 | |||
| 8a11d7bc2f | |||
| 7aae4782d9 | |||
| aba9e29471 | |||
| d375bca0d0 | |||
| cd0c8599cc | |||
| 20878a1618 | |||
| edd0a11fb5 | |||
| 5e4bafeab7 | |||
| 67a836223a | |||
| 7e2d6a3eed | |||
| 1ee5a468f4 | |||
| 81e209bdd1 | |||
| 90eda81624 | |||
| cafe6015e3 | |||
| 2492dc60ce | |||
| 6f688770fd | |||
| 8c0b1d9c5b | |||
| efde0ec9de | |||
| 632495374b | |||
| 15b797f3ee | |||
| a7a64d954a | |||
| cca217f613 | |||
| 1a9cf6f66d | |||
| 02d7296210 | |||
| a688df813e | |||
| 164424bc89 | |||
| f38aaa3d0e | |||
| 7fabd18b1f | |||
| 5db369d67e | |||
| dba32fdf78 | |||
| 716baada22 | |||
| 1a93897e85 | |||
| 9fd1368611 | |||
| 9f174d7614 | |||
| 65d52506c4 | |||
| a56fe0ea28 | |||
| ec110bb0f3 | |||
| 232befb52a | |||
| f363c21ff5 | |||
| 1a96f40a10 | |||
| 6be3236b28 | |||
| ccefd835d0 | |||
| 1ddc14d59d | |||
| 87f617374a | |||
| b76fc350b0 | |||
| 4deb6fdd84 | |||
| 2d5be2157a | |||
| a19206cf0f | |||
| 0f9428ad5a | |||
| d748b333ee | |||
| e8a8019a71 | |||
| e8680b485d | |||
| ab4b92074c | |||
| d57c12d3c3 | |||
| 676b2090fb | |||
| 6005929a96 | |||
| 49ffaa1d94 | |||
| 9fb3fd1a4d | |||
| 28b2b5cac3 | |||
| 320b8e1171 | |||
| 95cb5d201f | |||
| 880708ab5f | |||
| 36917f7780 | |||
| fe44f81ef2 | |||
| a81a2091c3 | |||
| 88d6c20cde | |||
| 4ff41f2ebf | |||
| a7d2148c60 | |||
| 980c0f81dd | |||
| 775e493b0a | |||
| 584ea488bf | |||
| 594c9ca7d2 | |||
| c2b7a7e453 | |||
| 34b604cdfe | |||
| d16d616f34 | |||
| 50f1ad51c8 | |||
| fe253c3d22 | |||
| 6cc30eea44 | |||
| 6be264a17f | |||
| 1ddbd4d6bb | |||
| 845fdb43f2 | |||
| 72f6c4d2dc | |||
| cf23dc78ab | |||
| b78adb4bb0 | |||
| 115bd51f3f | |||
| b979d40778 | |||
| 10589a9497 | |||
| 2f5973c02b | |||
| 090c37a1c1 | |||
| d048f380c1 | |||
| fff40bbff3 | |||
| daef00e38b | |||
| 4ca1b90d00 | |||
| c5691b6c7c | |||
| fb2ee7cc02 | |||
| 136ec654c2 | |||
| 4e583583cd | |||
| 91861e7fcd | |||
| ded4faf2e4 | |||
| cf56b35766 | |||
| 98d55582eb | |||
| 84bdd51021 | |||
| fbd8959d69 | |||
| 67efc11d94 | |||
| d6e6eebb95 | |||
| 4c65c09f4d | |||
| b281328ff4 | |||
| e9d2b38726 | |||
| f4aac111a4 | |||
| 230991782b | |||
| ac99df5c09 | |||
| f81e6103cb | |||
| cd04cb0875 | |||
| 0755c6f60c | |||
| 1c9d7a9ea9 | |||
| e01ded9e27 | |||
| bf056b213a | |||
| 1af7aaeccb | |||
| c7cf86c2ef | |||
| 6472eedddc | |||
| 938554e569 | |||
| 150b83f61e | |||
| 87faed0d04 | |||
| c5a94cfc0c | |||
| f548e8ad5e | |||
| 203df6b58a | |||
| 0ac2f7cded | |||
| b90b51f2c5 | |||
| 8b457133da | |||
| 7bfaa24d25 | |||
| 4eff6b4dd1 | |||
| e3765ad497 | |||
| dd9f3aed36 | |||
| ccfa13f295 | |||
| 540c04e6cc | |||
| 4b40727644 | |||
| 83937700dd | |||
| 2c156ceea2 | |||
| 0615338592 | |||
| b847872622 | |||
| e932583309 | |||
| 7410b7525f | |||
| 88a4f04217 | |||
| ff607e1a2d | |||
| 4bfbfa3621 | |||
| 43b91d3235 | |||
| 9fa205f1c3 | |||
| e003732f18 | |||
| b946b59522 | |||
| ea6b287d1a | |||
| a6455ef1bc | |||
| 8e5e44bfce | |||
| ea151a70c4 | |||
| 1e1f2a66de | |||
| 163daa5de1 | |||
| b581290c30 | |||
| 1927ccfe0a | |||
| 8162d4925d | |||
| 2b3ece467b | |||
| c2f9b38a95 | |||
| 8e688816d4 | |||
| 8dbd5a3f65 | |||
| 4bd5ffb0fa | |||
| 7b2e963636 | |||
| 87d04acb2f | |||
| 759b44135a | |||
| 8b53e0d931 | |||
| 7db23f9d03 | |||
| 3e655bad3b | |||
| 5e97cb0f48 | |||
| ef90e0deed | |||
| f44b61c403 | |||
| 6067af6ef1 | |||
| d7a4fc2927 | |||
| 7b62de80f6 | |||
| 8ffff5ba96 | |||
| e804441234 | |||
| 9cc0fd2240 | |||
| fff2952d5f | |||
| b85bd53336 | |||
| e1de134d94 | |||
| 5af6d4bd82 | |||
| c673d9d935 | |||
| 0fdda61fb5 | |||
| 331b8456a0 | |||
| 63b6f3635b | |||
| c04bcce206 | |||
| dd7d9268f2 | |||
| a3ef5c820d | |||
| e4da5c59b6 | |||
| ad2a406205 | |||
| 001206f7c1 | |||
| 2ef7f497f6 | |||
| 497b108109 | |||
| 86f95b05bf | |||
| 70db77af38 | |||
| 8dd234dd8f | |||
| 83d46ef8c6 | |||
| 1b323a6252 | |||
| d102eb752d | |||
| 4252fab685 | |||
| 94e3dbb2dc | |||
| 8f115078cd | |||
| f51ad1f33e | |||
| e29069b8dc | |||
| 92e52dadd4 | |||
| a4b00897c1 | |||
| f1b7f5ea95 | |||
| 6a9082f126 | |||
| 48f0a6f811 | |||
| 1b39b2d450 | |||
| 785ed642ba | |||
| 3d5b8a7672 | |||
| 0aef0cf765 | |||
| 489f9edec7 | |||
| 718db57ade | |||
| 639dcf19b0 | |||
| 53bc960224 | |||
| ead346c6d3 | |||
| 375059d9f8 | |||
| 6b21d9d424 | |||
| dac9844765 | |||
| 849e66f6a1 | |||
| 6a8a2e4800 | |||
| 0358908910 | |||
| 32ce1a7267 | |||
| 9ea8da839c | |||
| 39cec488d2 | |||
| 96436df18d | |||
| 3aabb5616c | |||
| 8e55f45818 | |||
| ec4135c9ed | |||
| cfdfecb4d1 | |||
| 97b8b34aab | |||
| ce66430fac | |||
| 1fc56e6665 | |||
| d7d96907cf | |||
| eb97054f49 | |||
| 7280ca6a69 | |||
| 30b2a833a8 | |||
| 8f8e52b91a | |||
| 751c868769 | |||
| c3a47c26ec | |||
| 632578f328 | |||
| 5a4240b18d | |||
| 236aeb9dfd | |||
| bcfadbe1a8 | |||
| b5c027f15d | |||
| 15db80b459 | |||
| 76c0ef86e4 | |||
| a3e820e733 | |||
| a3568a1419 | |||
| 4ad57bdea5 | |||
| 085733d7c9 | |||
| fcb58aec3c | |||
| 402c609316 | |||
| 026a9f2bdc | |||
| 1918d6fa2d | |||
| fd04b9a437 | |||
| ea99603b58 | |||
| 036612dbb0 | |||
| 67d1576dfb | |||
| 2850fe731b | |||
| a157ac59ca | |||
| 20f425fe15 | |||
| 32520fd1fb | |||
| c0b21d8808 | |||
| 17d223b542 | |||
| 9c5bf0bb66 | |||
| dfa63345ed | |||
| 3f59b8da01 | |||
| 0ea88a73c7 | |||
| 2e344bb48f | |||
| c91a645782 | |||
| 96f63a17c0 | |||
| 756d1e5e81 | |||
| 2482aada43 | |||
| 64146f69a4 | |||
| edb3c78fe9 | |||
| 49e32967ec | |||
| 5655cffd32 | |||
| 09dff484e1 | |||
| deb0e9eec3 | |||
| 5d1c8a7eda | |||
| ff8a0c2eea | |||
| cbf7777f41 | |||
| fcb55c2109 | |||
| b6fa2deb9f | |||
| 801ca0c2d3 | |||
| 5f792a9a2b | |||
| 8ee71caabb | |||
| ed70f7763a | |||
| 730262f000 | |||
| cbc057bca7 | |||
| 6442642656 | |||
| 07a5a36b6a | |||
| 912bc58df0 | |||
| 13a91a52e8 | |||
| 4190130194 | |||
| d9b124f91e | |||
| 9b3c87ec97 | |||
| 8f85e5e543 | |||
| 966d229787 | |||
| 4d49ace06b | |||
| ad8c64104e | |||
| d59eff4288 | |||
| 219b155037 | |||
| fe3abed9f0 | |||
| 0ecf4aa6b4 | |||
| 477573265a | |||
| 4e03acf17a | |||
| 84fb3d83d8 | |||
| 3522e5eda3 | |||
| 3056644969 | |||
| 91587ad2c8 | |||
| 8a73d9fff0 | |||
| 28ee9f27b9 | |||
| 7ac58a1c69 | |||
| 3914281f1b | |||
| 3d734ad3e3 | |||
| bb4075d7b9 | |||
| 5b11ac0f4c | |||
| 7bc5f0ca13 | |||
| 14ce061215 | |||
| adb5a35097 | |||
| b0a12bcac1 | |||
| 39d4715b82 | |||
| aac92bd6c0 | |||
| f258a14070 | |||
| 3701f1c16b | |||
| 92fcee824b | |||
| 00f8e1d0ba | |||
| 43191eea53 | |||
| 490ed8f689 | |||
| 30152284cc | |||
| 70097709b2 | |||
| 07e007052a | |||
| bd27294ab0 | |||
| 5e5c20757b | |||
| 6231ae208a | |||
| 42a4f5fd95 | |||
| 6e0a302f7d | |||
| 98af055d8b | |||
| fa5c7ff4df | |||
| d7327df885 | |||
| 0f13c8fe97 | |||
| 44d740080b | |||
| 2fc3a4e91e | |||
| 66e0233410 | |||
| a04bec85b2 | |||
| f861b11a91 | |||
| 37f9bbd231 | |||
| af004576f1 | |||
| 26453c4874 | |||
| 4e18bf0bc2 | |||
| 7c298e94f5 | |||
| 761d2399f2 | |||
| 1210cf8c6c | |||
| 524e005b5c | |||
| d06b6d7d41 | |||
| e66e5d1dfc | |||
| 114966e7c0 | |||
| d2246297bd | |||
| 8ec5decbce | |||
| 0f7d77d599 | |||
| 699e3b3d79 | |||
| 2f8ad7ace8 | |||
| 6b6567ec9b | |||
| c8c71650eb | |||
| a07e804f57 | |||
| e9656810e3 | |||
| 4ee32d7559 | |||
| 53083202ba | |||
| 574a48f81f | |||
| 1b1cf2d4bd | |||
| e811ae1104 | |||
| d65bf4128d | |||
| be5735edb8 | |||
| 5bf2b46fa3 | |||
| cf2b0cca22 | |||
| 4ae5043534 | |||
| 1424f87754 | |||
| 4f953f9bd7 | |||
| 0d2f3e2dc4 | |||
| bb1f1d3faa | |||
| 98daf16681 | |||
| 939ba2b4b3 | |||
| d0b82428d5 | |||
| 902911765e | |||
| 03d84a07d1 | |||
| 1f686d93ff | |||
| d95b9ef1ac | |||
| 045f1e7906 | |||
| 69ef7fbefb | |||
| a56b8db410 | |||
| 082eb24c12 | |||
| f04b659f5e | |||
| eedc621637 | |||
| b31ec5c4af | |||
| 651e8c3158 | |||
| 77d4d066b5 | |||
| 1e97e99aa0 | |||
| 7212072ff0 | |||
| 8bcab645e1 | |||
| 9013917d58 | |||
| 1eddf92c35 | |||
| 28d82c9ccd | |||
| 2f90a04513 | |||
| 2724728476 | |||
| ed8f424c1a | |||
| 50137fe026 | |||
| 9237d07226 | |||
| 8c2be4aa85 | |||
| ccaed9a91c | |||
| a1ca8b7124 | |||
| 6633366218 | |||
| 22ea65f02c | |||
| 3d69ee0c30 | |||
| c88a5f38be |
.github/ISSUE_TEMPLATE.md (new file, 27 lines, vendored)
@@ -0,0 +1,27 @@
### Additional Information
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._

#### Version of s3fs being used (s3fs --version)
_example: 1.00_

#### Version of fuse being used (pkg-config --modversion fuse)
_example: 2.9.4_

#### System information (uname -r)
_command result: uname -r_

#### Distro (cat /etc/issue)
_command result: cat /etc/issue_

#### s3fs command line used (if applicable)
```
```
#### /etc/fstab entry (if applicable):
```
```
#### s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
```
```
### Details about issue
.github/PULL_REQUEST_TEMPLATE.md (new file, 5 lines, vendored)
@@ -0,0 +1,5 @@
### Relevant Issue (if applicable)
_If there are Issues related to this PullRequest, please list it._

### Details
_Please describe the details of PullRequest._
.gitignore (51 lines changed, vendored)
@@ -1,21 +1,32 @@
*.o
Makefile
Makefile.in
aclocal.m4
autom4te.cache/
config.guess
config.log
config.status
config.sub
configure
depcomp
doc/Makefile
doc/Makefile.in
install-sh
missing
src/.deps/
src/Makefile
src/Makefile.in
src/s3fs
test/Makefile
test/Makefile.in
/Makefile
/Makefile.in
/aclocal.m4
/autom4te.cache/
/config.guess
/config.log
/config.status
/config.sub
/stamp-h1
/config.h
/config.h.in
/config.h.in~
/configure
/depcomp
/test-driver
/compile
/doc/Makefile
/doc/Makefile.in
/install-sh
/missing
/src/.deps/
/src/Makefile
/src/Makefile.in
/src/s3fs
/src/test_*
/test/.deps/
/test/Makefile
/test/Makefile.in
/test/s3proxy-*
/test/*.log
/default_commit_hash
.mailmap (new file, 7 lines)
@@ -0,0 +1,7 @@
Adrian Petrescu <apetresc@df820570-a93a-0410-bd06-b72b767a4274>
Adrian Petrescu <apetresc@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
Ben Lemasurier <ben.lemasurier@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
Dan Moore <mooredan@suncup.net@df820570-a93a-0410-bd06-b72b767a4274>
Randy Rizun <rrizun@df820570-a93a-0410-bd06-b72b767a4274>
Randy Rizun <rrizun@rrizun-ThinkPad-T530.(none)>
Takeshi Nakatani <ggtakec@gmail.com@df820570-a93a-0410-bd06-b72b767a4274>
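These entries fold the old Google Code/SVN author identities into canonical names. As a quick check of the effect, assuming a local clone of the repository:

```
# git applies .mailmap automatically; the duplicate SVN-era addresses
# collapse into one summary line per author.
git shortlog -sne | head
```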
.travis.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
language: cpp

matrix:
  include:
    - os: linux
      sudo: required
      dist: trusty
      cache: apt
      before_install:
        - sudo apt-get update -qq
        - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
        - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
      script:
        - ./autogen.sh
        - ./configure CPPFLAGS='-I/usr/local/opt/openssl/include'
        - make
        - make cppcheck
        - make check -C src
        - modprobe fuse
        - make check -C test
        - cat test/test-suite.log

    - os: osx
      osx_image: xcode8.3
      before_install:
        - brew update
        - brew install truncate
        - brew tap caskroom/cask
        - brew cask install osxfuse
        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
        - brew install gnu-sed
        - sudo ln -s /usr/local/opt/gnu-sed/bin/gsed /usr/local/bin/sed
        - sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf
        - brew install cppcheck
      script:
        - ./autogen.sh
        - PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
        - make
        - make cppcheck
        - make check -C src
        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
        - make check -C test
        - cat test/test-suite.log
ChangeLog (270 lines changed)
@@ -1,16 +1,278 @@
ChangeLog for S3FS
------------------

Version 1.83 -- Dec 17, 2017
#606 - Add Homebrew instructions
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
#609 - Group permission checks sometimes fail with large number of groups
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
#621 - Upgrade to S3Proxy 1.5.3
#627 - Update README.md
#630 - Added travis test on osx for #601
#631 - Merged macosx branch into master branch #601
#636 - Fix intermittent upload failures on macOS
#637 - Add blurb about non-Amazon S3 implementations
#638 - Minor fixes to README
#639 - Update Homebrew instructions
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
#644 - Fixed with unnecessary equal in POST uploads url argment - #643
#645 - Configure S3Proxy for SSL
#646 - Simplify S3Proxy PID handling
#652 - Fix s3fs_init message
#659 - Do not fail updating directory when removing old-style object(ref #658)
#660 - Refixed s3fs_init message(ref #652)
#663 - Lock FdEntity when mutating orgmeta
#664 - auth headers insertion refactoring
#668 - Changed .travis.yml for fixing not found gpg2 on osx
#669 - add IBM IAM authentication support
#670 - Fixed a bug in S3fsCurl::LocateBundle
#671 - Add support for ECS metadata endpoint
#675 - Reduce use of preprocessor
#676 - Move str definition from header to implementation
#677 - Add s3proxy to .gitignore
#679 - README.md Addition
#681 - Changed functions about reading passwd file
#684 - Correct signedness warning
#686 - remove use of jsoncpp
#688 - Improved use of temporary files - #678
#690 - Added option ecs description to man page
#692 - Updated template md files for issue and pr
#695 - fix condition for parallel download
#697 - Fixing race condition in FdEntity::GetStats
#699 - Fix dbglevel usage

Version 1.82 -- May 13, 2017
#597 - Not fallback to HTTP - #596
#598 - Updated ChangeLog and configure.ac for release 1.82

Version 1.81 -- May 13, 2017
#426 - Updated to correct ChangeLog
#431 - fix typo s/controll/control/
#432 - Include location constraint when creating bucket
#433 - Correct search and replace typo
#440 - Handled all curl error without exiting process - #437
#443 - Fix for leaks during stat cache entry expiry / truncation (#340)
#444 - Add mirror file logic for removing cache file
#447 - added fuse package for mounting via /etc/fstab, fixes #417
#449 - Accept mount options compatible with mtab
#451 - Correct path in README
#454 - Changed for accepting mount options compatible with mtab - #449
#466 - Fixed a bug about could not copy file mode from org file
#471 - Added use_xattr option for #467 and #460
#477 - OS-specific correspondence of the extended attribute header
#483 - Trim symbolic link original path in file
#487 - Split header debugging onto multiple lines for easier reading
#488 - Fixed searching Content-Length without case sensitive - #480
#489 - Changed headers_t map using nocase compare function - #488
#494 - Fix typo s/destroied/destroyed/
#495 - Fix invalid V4 signature on multipart copy requests
#498 - Upgrade to S3Proxy 1.5.1
#502 - Fixed issue#435 branch codes for remaining bugs(2)
#503 - Add missing call to mtime test
#504 - Use describe helper function
#505 - Correct typos
#509 - Use server-provided ETag during complete upload
#511 - Fixed a bug about uploading NULL to some part of the file contents
#512 - Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468
#513 - Added issue and PR templates.
#517 - Update s3fs.1 - removed duplicated word
#520 - Added links for eventual consistency in README.md - #515
#539 - Upgrade to S3Proxy 1.5.2
#540 - Address cppcheck 1.77 warnings
#545 - Changed base cached time of stat_cache_expire option - #523
#546 - Fixed double initialization of SSL library at foreground
#550 - Add umount instruction for unplivileged user
#551 - Updated stat_cache_expire option description - #545
#552 - switch S3fsMultiCurl to use foreground threads
#553 - add TLS cipher suites customization
#554 - cleanup cache directory when running out of disk space
#555 - don't sign empty headers (as they are discarded
#556 - fix multipart upload handling without cache
#557 - Added check_cache_dir_exist option(refixed #347) - #538
#558 - Fixed a bug in logic about truncating stat cache
#560 - Fixed about multipart uploading at no free space related to #509
#567 - Do not send ACL unless overridden
#576 - Added option for complementing lack of stat mode
#578 - Refactored the get_object_attribute function
#579 - Added notsup_compat_dir option
#580 - Enhanced bucket/path parameter check
#582 - Check errors returned in 200 OK responses for put header request
#583 - Updated limit object size in s3fs man page
#585 - Fixed failure to upload/copy with SSE_C and SSE_KMS
#587 - Changed copyright year format for debian pkg
#588 - Default transport to HTTPS
#590 - Updated man page for default_acl option - #567
#593 - Backward compatible for changing default transport to HTTPS
#594 - Check bucket at public bucket and add nocopyapi option automatically
#595 - Updated ChangeLog and configure.ac for release 1.81

Version 1.80 -- May 29, 2016
#213 - Parse ETag from copy multipart correctly
#215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
#217 - Override install, so that the make install does not install rename_before_close under /test
#219 - Address Coverity errors
#220 - Test removing a non-empty directory
#221 - Compare idiomatically
#222 - Annotate constructors as explicit
#224 - Configure cppcheck
#229 - Convert rename_before_close to a shell script
#231 - Rewrite AutoLock
#232 - Always hold stat_cache_lock when using stat_cache
#233 - Remove IntToStr
#234 - Update README
#235 - Plug leak during complete multipart upload
#237 - Refactor tests into individual functions
#238 - Enable all cppcheck rules
#239 - Update stale Google Code reference in --help
#240 - Enable Content-MD5 during multipart upload part
#243 - Run cppcheck during Travis builds
#245 - Elide duplicate lookups of std::map via iterators
#246 - Unlock during early return in TruncateCache
#247 - Base64 cleanup
#248 - Enable integration tests for Travis
#249 - Silence wget
#250 - s3fs can print version with short commit hash - #228
#251 - Skip xattr tests if utilities are missing
#252 - This fixes an issue with caching when the creation of a subdirectory …
#253 - Added chacking cache dir perms at starting.
#256 - Add no atomic rename to limitations
#257 - Update README.md: Bugfix password file permissions errors
#258 - Update README.md to better explain mount upon boot
#260 - Wrap help text at 80 characters
#261 - Correct help timeouts
#263 - Allow integration testing against Amazon S3
#265 - Fix integration tests
#266 - Cleanup from PR #265
#267 - Added the _netdev option to the fstab example.
#268 - Use 127.0.0.1 not localhost in s3proxy wait loop
#271 - Add support for standard_ia storage class
#274 - Modified man page for storage_class option(#271)
#275 - Changed and cleaned the logic for debug message.
#278 - Supported for SSE KMS(#270)
#280 - Supported a object which is larger than free disk space
#285 - Add test for symlink
#288 - Fixed a bug about head request(copy) for SSE - issue#286
#289 - Print source file in log messages
#291 - File opened with O_TRUNC is not flushed - Issue #290
#293 - Fix a small spelling issue.
#295 - File opened with O_TRUNC is not flushed - changed #291
#300 - Update integration-test-main.sh
#302 - Fix syslog level used by S3FS_PRN_EXIT()
#304 - Fixed a bug about mtime - #299
#306 - Fix read concurrency to work in parallel count
#307 - Fix pthread portability problem
#308 - Changed ensure free disk space as additional change for #306
#309 - Check pthread prtability in configure as additional change for #307
#310 - Update integration-test-main.sh as additional change for #300
#311 - Change error log to debug log in s3fs_read()
#313 - fix gitignore
#319 - Clean up mount point on errors in s3fs_init()
#321 - delete stat cache entry in s3fs_fsync so st_size is refreshed - #320
#323 - Add goofys to references
#328 - Fix v4 signature with use_path_request_style
#329 - Correct multiple issues with GET and v4 signing
#330 - Pass by const reference where possible
#331 - Address various clang warnings
#334 - Bucket host should include port and not path
#336 - update REAME.md for fstab
#338 - Fixed a bug about IAMCRED type could not be retried.
#339 - Updated README.md for fstab example.
#341 - Fix the memory leak issue in fdcache.
#346 - Fix empty directory check against AWS S3
#348 - Integration test summary, continue on error
#350 - Changed cache out logic for stat - #340
#351 - Check cache dirctory path and attributes - #347
#352 - Remove stat file cache dir if specified del_cache - #337
#354 - Supported regex type for additional header format - #343
#355 - Fixed codes about clock_gettime for osx
#356 - Fixed codes about clock_gettime for osx(2)
#357 - Fixed codes about clock_gettime for osx(3)
#359 - Remove optional parameter from Content-Type header - #358
#360 - Fix clock_gettime autotools detection on Linux
#364 - Checked content-type by no case-sensitivity - #363
#371 - Always set stats cache for opened file
#372 - Fixed a bug about etag comparison in stats cache, etc.
#376 - Test for writing after an lseek past end of file
#379 - Fixed a bug about writing sparsed file - #375
#385 - fix typo in curl.cpp: s/returing/returning/
#391 - Update s3fs.1
#394 - Revert "Fixed a bug about writing sparsed file - #375"
#395 - Fixed writing sparsed file - #375,#379,#394
#397 - Supported User-Agent header - #383
#403 - Fix a bug of truncating empty file
#404 - Add curl handler pool to reuse connections
#409 - Fixed 'load_sse_c' option not working - #388
#410 - Allow duplicate key in ahbe_conf - #386
#411 - loading IAM role name automatically(iam_role option) - #387
#415 - Fixed a bug about stat_cache_expire - #382
#420 - Skip early credential checks when iam_role=auto
#422 - Fixes for iam_role=auto
#424 - Added travis CI badge in README.md
#425 - Updated ChangeLog and configure.ac for release 1.80

Version 1.79 -- Jul 19, 2015
issue #60 - Emit user-friendly log messages on failed CheckBucket requests
issue #62 - Remove stray chars from source files
issue #63 - Fix spelling errors
issue #68 - FreeBSD issue
issue #69 - Address clang always true warnings
issue #73 - Small gitignore fixes
issue #74 - url: handle scheme omission
issue #83 - Changed option processing to use strtol() to get a umask
issue #93 - Add simple unit tests for trim functions
issue #100 - CURL handles not properly initialized to use DNS or SSL session caching
issue #101 - Optimized function "bool directory_empty()"
issue #103 - Remove prefix option in s3fs man page - issue#87
issue #104 - fix rename before close
issue #116 - Supported signature version 4
issue #119 - Added new mp_umask option about issue#107, pr#110
issue #124 - Fallback to v2 signatures correctly.
issue #130 - refactor integration tests create/cleanup file
issue #131 - Test ls
issue #132 - Use S3Proxy to run integration tests
issue #134 - Include Content-Type in complete MPU V2 signature
issue #135 - Correct V4 signature for initiate multipart upload
issue #136 - Small fixes to integration tests
issue #137 - Add test for multi-part upload
issue #138 - Fixed bugs, not turn use_cache off and ty to load to end - issue#97
issue #143 - Fixed a bug no use_cache case about fixed #138 - issue#141
issue #144 - Add Travis configuration
issue #146 - add exit handler to cleanup on failures
issue #147 - Use S3Proxy 1.4.0-SNAPSHOT
issue #150 - Fixed a bug not handling fsync - #145
issue #154 - Fixed url-encoding for ampersand etc on sigv4 - Improvement/#149
issue #155 - Fixed a bug: unable to mount bucket subdirectory
issue #156 - Fixed a bug about ssl session sharing with libcurl older 7.23.0 - issue#126
issue #159 - Upgrade to S3Proxy 1.4.0
issue #164 - send the correct Host header when using -o url
issue #165 - Auth v4 refactor
issue #167 - Increased default connecting/reading/writing timeout value
issue #168 - switch to use region specific endpoints to compute correct v4 signature
issue #170 - Reviewed and fixed response codes print in curl.cpp - #157
issue #171 - Support buckets with mixed-case names
issue #173 - Run integration tests via Travis
issue #176 - configure.ac: detect target, if target is darwin (OSX), then #176
issue #177 - Add .mailmap
issue #178 - Update .gitignore
issue #184 - Add usage information for multipart_size
issue #185 - Correct obvious typos in usage and README
issue #190 - Add a no_check_certificate option.
issue #194 - Tilda in a file-name breaks things (EPERM)
issue #198 - Disasble integration tests for Travis
issue #199 - Supported extended attributes(retry)
issue #200 - fixed fallback to sigv2 for bucket create and GCS
issue #202 - Specialize {set,get}xattr for OS X
issue #204 - Add integration test for xattr
issue #207 - Fixed a few small spelling issues.

Version 1.78 -- Sep 15, 2014
issue #29 - Possible to create Debian/Ubuntu packages?(googlecode issue 109)
issue 417(googlecode) - Password file with DOS format is not handled properly
issue #41 - Failed making signature
issue #40 - Moving a directory containing more than 1000 files truncates the
directory
issue #40 - Moving a directory containing more than 1000 files truncates the directory
issue #49 - use_sse is ignored when creating new files
issue #39 - Support for SSE-C
issue #50 - Cannot find pkg-config when configured with any SSL backend except
openssl
issue #50 - Cannot find pkg-config when configured with any SSL backend except openssl

Version 1.77 -- Apr 19, 2014
issue 405(googlecode) - enable_content_md5 Input/output error
Makefile.am (14 lines changed)
@@ -19,7 +19,7 @@
######################################################################
SUBDIRS=src test doc

EXTRA_DIST=doc
EXTRA_DIST=doc default_commit_hash

dist-hook:
	rm -rf `find $(distdir)/doc -type d -name .svn`
@@ -28,3 +28,15 @@ dist-hook:
release : dist ../utils/release.sh
	../utils/release.sh $(DIST_ARCHIVES)

cppcheck:
	cppcheck --quiet --error-exitcode=1 \
	    --inline-suppr \
	    --std=c++03 \
	    -U CURLE_PEER_FAILED_VERIFICATION \
	    -U P_tmpdir \
	    -U ENOATTR \
	    --enable=all \
	    --suppress=missingIncludeSystem \
	    --suppress=unusedFunction \
	    --suppress=variableScope \
	    src/ test/
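This is the target the Travis jobs invoke as `make cppcheck`. A minimal local run, assuming cppcheck is installed and the tree has been configured:

```
./autogen.sh && ./configure
# --error-exitcode=1 in the target makes any finding fail the build
make cppcheck
```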
README (deleted, 67 lines)
@@ -1,67 +0,0 @@
THIS README CONTAINS OUTDATED INFORMATION - please refer to the wiki or --help

S3FS-Fuse

S3FS is FUSE (File System in User Space) based solution to mount/unmount an Amazon S3 storage buckets and use system commands with S3 just like it was another Hard Disk.

In order to compile s3fs, You'll need the following requirements:

* Kernel-devel packages (or kernel source) installed that is the SAME version of your running kernel
* LibXML2-devel packages
* CURL-devel packages (or compile curl from sources at: curl.haxx.se/ use 7.15.X)
* GCC, GCC-C++
* pkgconfig
* FUSE (>= 2.8.4)
* FUSE Kernel module installed and running (RHEL 4.x/CentOS 4.x users - read below)
* OpenSSL-devel (0.9.8)
GnuTLS(gcrypt and nettle)
NSS
* Git

If you're using YUM or APT to install those packages, then it might require additional packaging, allow it to be installed.

Downloading & Compiling:
------------------------
In order to download s3fs, download from following url:
https://github.com/s3fs-fuse/s3fs-fuse/archive/master.zip
Or clone the following command:
git clone git://github.com/s3fs-fuse/s3fs-fuse.git

Go inside the directory that has been created (s3fs-fuse) and run: ./autogen.sh
This will generate a number of scripts in the project directory, including a configure script which you should run with: ./configure
If configure succeeded, you can now run: make. If it didn't, make sure you meet the dependencies above.
This should compile the code. If everything goes OK, you'll be greated with "ok!" at the end and you'll have a binary file called "s3fs"
in the src/ directory.

As root (you can use su, su -, sudo) do: "make install" -this will copy the "s3fs" binary to /usr/local/bin.

Congratulations. S3fs is now compiled and installed.

Usage:
------
In order to use s3fs, make sure you have the Access Key and the Secret Key handy. (refer to the wiki)
First, create a directory where to mount the S3 bucket you want to use.
Example (as root): mkdir -p /mnt/s3
Then run: s3fs mybucket[:path] /mnt/s3

This will mount your bucket to /mnt/s3. You can do a simple "ls -l /mnt/s3" to see the content of your bucket.

If you want to allow other people access the same bucket in the same machine, you can add "-o allow_other" to read/write/delete content of the bucket.

You can add a fixed mount point in /etc/fstab, here's an example:

s3fs#mybucket /mnt/s3 fuse allow_other 0 0

This will mount upon reboot (or by launching: mount -a) your bucket on your machine.
If that does not work, probably you should specify with "_netdev" option in fstab.

All other options can be read at: https://github.com/s3fs-fuse/s3fs-fuse/wiki/Fuse-Over-Amazon

Known Issues:
-------------
s3fs should be working fine with S3 storage. However, There are couple of limitations:

* Currently s3fs could hang the CPU if you have lots of time-outs. This is *NOT* a fault of s3fs but rather libcurl. This happends when you try to copy thousands of files in 1 session, it doesn't happend when you upload hundreds of files or less.
* CentOS 4.x/RHEL 4.x users - if you use the kernel that shipped with your distribution and didn't upgrade to the latest kernel RedHat/CentOS gives, you might have a problem loading the "fuse" kernel. Please upgrade to the latest kernel (2.6.16 or above) and make sure "fuse" kernel module is compiled and loadable since FUSE requires this kernel module and s3fs requires it as well.
* Moving/renaming/erasing files takes time since the whole file needs to be accessed first. A workaround could be to use s3fs's cache support with the use_cache option.
README.md (new file, 151 lines)
@@ -0,0 +1,151 @@
s3fs
====

s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)

Features
--------

* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
* data integrity via MD5 hashes
* in-memory metadata caching
* local disk data caching
* user-specified regions, including Amazon GovCloud
* authenticate via v2 or v4 signatures

Installation
------------

* On Linux, ensure you have all the dependencies:

On Ubuntu 14.04:

```
sudo apt-get install automake autotools-dev fuse g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
```

On CentOS 7:

```
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
```

Then compile from master via the following commands:

```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
cd s3fs-fuse
./autogen.sh
./configure
make
sudo make install
```

* On Mac OS X, install via [Homebrew](http://brew.sh/):

```ShellSession
$ brew cask install osxfuse
$ brew install s3fs
```

Examples
--------

Enter your S3 identity and credential in a file `/path/to/passwd` and set
owner-only permissions:

```
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
chmod 600 /path/to/passwd
```

Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
```

If you encounter any errors, enable debug output:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -o dbglevel=info -f -o curldbg
```

You can also mount on boot by entering the following line to `/etc/fstab`:

```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
```

or

```
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```

If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -o url=http://url.to.s3/ -o use_path_request_style
```

or(fstab)
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=http://url.to.s3/ 0 0
```

To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
```
echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
```
The Service Instance ID is only required when using the `-o create_bucket` option.
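Putting those pieces together, a mount using IBM IAM authentication might look like the following sketch; the bucket name and paths are placeholders:

```
s3fs mybucket /path/to/mountpoint -o ibm_iam_auth -o passwd_file=/path/to/passwd
```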

Note: You may also want to create the global credential file first

```
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
```

Note2: You may also need to make sure `netfs` service is start on boot


Limitations
-----------

Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:

* random writes or appends to files require rewriting the entire file
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links

References
----------

* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket

Frequently Asked Questions
--------------------------
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)

License
-------

Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>

Licensed under the GNU GPL version 2
autogen.sh (22 lines changed)
@@ -19,6 +19,28 @@
#
# See the file ChangeLog for a revision history.

echo "--- Make commit hash file -------"

SHORTHASH="unknown"
type git > /dev/null 2>&1
if [ $? -eq 0 -a -d .git ]; then
    RESULT=`git rev-parse --short HEAD`
    if [ $? -eq 0 ]; then
        SHORTHASH=${RESULT}
    fi
fi
echo ${SHORTHASH} > default_commit_hash

echo "--- Finished commit hash file ---"

echo "--- Start autotools -------------"

aclocal \
&& autoheader \
&& automake --add-missing \
&& autoconf

echo "--- Finished autotools ----------"

exit 0
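The `default_commit_hash` file written here is the fallback that configure.ac (below) reads when git is unavailable, e.g. inside a release tarball. A quick way to trace the plumbing, assuming a git checkout:

```
./autogen.sh
cat default_commit_hash        # short hash captured for the dist tarball
./configure > /dev/null
grep COMMIT_HASH_VAL config.h  # the same hash, baked into the build
```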
configure.ac (95 lines changed)
@@ -20,16 +20,36 @@
dnl Process this file with autoconf to produce a configure script.

AC_PREREQ(2.59)
AC_INIT(s3fs, 1.78)
AC_INIT(s3fs, 1.83)
AC_CONFIG_HEADER([config.h])

AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE()
AM_INIT_AUTOMAKE([foreign])

AC_PROG_CXX
AC_PROG_CC

AC_CHECK_HEADERS([sys/xattr.h])
AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])

CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"

dnl ----------------------------------------------
dnl For OSX
dnl ----------------------------------------------
case "$target" in
*-darwin* )
    # Do something specific for mac
    min_fuse_version=2.7.3
    ;;
*)
    # Default Case
    # assume other supported linux system
    min_fuse_version=2.8.4
    ;;
esac

dnl ----------------------------------------------
dnl Choice SSL library
dnl ----------------------------------------------
@@ -156,13 +176,13 @@ dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6])
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])

AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
AC_MSG_RESULT(OpenSSL)
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
;;
gnutls)
AC_MSG_RESULT(GnuTLS-gcrypt)
@@ -171,7 +191,7 @@ gnutls)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(gcrypt, gcry_control, [gnutls_nettle=0])])
AS_IF([test $gnutls_nettle = 0],
[
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 gnutls >= 2.12.0 ])
LIBS="-lgnutls -lgcrypt $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(gcrypt)
@@ -185,7 +205,7 @@ nettle)
AS_IF([test "$gnutls_nettle" = ""], [AC_CHECK_LIB(nettle, nettle_MD5Init, [gnutls_nettle=1])])
AS_IF([test $gnutls_nettle = 1],
[
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nettle >= 2.7.1 ])
LIBS="-lgnutls -lnettle $LIBS"
AC_MSG_CHECKING([gnutls is build with])
AC_MSG_RESULT(nettle)
@@ -194,7 +214,7 @@ nettle)
;;
nss)
AC_MSG_RESULT(NSS)
PKG_CHECK_MODULES([DEPS], [fuse >= 2.8.4 libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 nss >= 3.15.0 ])
;;
*)
AC_MSG_ERROR([unknown ssl library type.])
@@ -207,12 +227,67 @@ AM_CONDITIONAL([USE_GNUTLS_NETTLE], [test "$auth_lib" = nettle])
AM_CONDITIONAL([USE_SSL_NSS], [test "$auth_lib" = nss])

dnl ----------------------------------------------
dnl end of ssl library
dnl check functions
dnl ----------------------------------------------

dnl malloc_trim function
AC_CHECK_FUNCS(malloc_trim, , )
AC_CHECK_FUNCS([malloc_trim])

dnl clock_gettime function(osx)
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
AC_CHECK_FUNCS([clock_gettime])

dnl ----------------------------------------------
dnl check symbols/macros/enums
dnl ----------------------------------------------
dnl PTHREAD_MUTEX_RECURSIVE
AC_MSG_CHECKING([pthread mutex recursive])
AC_COMPILE_IFELSE(
    [AC_LANG_PROGRAM([[#include <pthread.h>]],
                     [[int i = PTHREAD_MUTEX_RECURSIVE;]])
    ],
    [AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE, [Define if you have PTHREAD_MUTEX_RECURSIVE])
     AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE)
    ],
    [AC_COMPILE_IFELSE(
        [AC_LANG_PROGRAM([[#include <pthread.h>]],
                         [[int i = PTHREAD_MUTEX_RECURSIVE_NP;]])
        ],
        [AC_DEFINE(S3FS_MUTEX_RECURSIVE, PTHREAD_MUTEX_RECURSIVE_NP, [Define if you have PTHREAD_MUTEX_RECURSIVE_NP])
         AC_MSG_RESULT(PTHREAD_MUTEX_RECURSIVE_NP)
        ],
        [AC_MSG_ERROR([do not have PTHREAD_MUTEX_RECURSIVE symbol])])
    ]
)

dnl ----------------------------------------------
dnl output files
dnl ----------------------------------------------
AC_CONFIG_FILES(Makefile src/Makefile test/Makefile doc/Makefile)

dnl ----------------------------------------------
dnl short commit hash
dnl ----------------------------------------------
AC_CHECK_PROG([GITCMD], [git --version], [yes], [no])
AC_CHECK_FILE([.git], [DOTGITDIR=yes], [DOTGITDIR=no])

AC_MSG_CHECKING([github short commit hash])
if test "x${GITCMD}" = "xyes" -a "x${DOTGITDIR}" = "xyes"; then
    GITCOMMITHASH=`git rev-parse --short HEAD`
elif test -f default_commit_hash; then
    GITCOMMITHASH=`cat default_commit_hash`
else
    GITCOMMITHASH="unknown"
fi
AC_MSG_RESULT([${GITCOMMITHASH}])

AC_DEFINE_UNQUOTED([COMMIT_HASH_VAL], ["${GITCOMMITHASH}"], [short commit hash value on github])

dnl ----------------------------------------------
dnl put
dnl ----------------------------------------------
AC_OUTPUT

dnl ----------------------------------------------
dnl end configuration
dnl ----------------------------------------------
doc/man/s3fs.1 (184 lines changed)
@@ -5,12 +5,18 @@ S3FS \- FUSE-based file system backed by Amazon S3
.SS mounting
.TP
\fBs3fs bucket[:/path] mountpoint \fP [options]
.TP
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
.SS unmounting
.TP
\fBumount mountpoint
For root.
.TP
\fBfusermount -u mountpoint
For unprivileged user.
.SS utility mode ( remove interrupted multipart uploading objects )
.TP
\fBs3fs -u bucket
\fBs3fs \-u bucket
.SH DESCRIPTION
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
.SH AUTHENTICATION
@@ -48,13 +54,13 @@ FUSE singlethreaded option (disables multi-threaded operation)
All s3fs options must given in the form where "opt" is:
<option_name>=<option_value>
.TP
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written S3 objects, e.g., "public-read".
Any created files will have this canned acl.
Any updated files will also have this canned acl applied!
\fB\-o\fR bucket
if it is not specified bucket name(and path) in command line, must specify this option after \-o option for bucket name.
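As a sketch of the new `bucket` option, the same mount can be written either way; the bucket name and paths are placeholders:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
s3fs /path/to/mountpoint -o bucket=mybucket -o passwd_file=/path/to/passwd
```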
.TP
\fB\-o\fR prefix (default="") (coming soon!)
a prefix to append to all S3 objects.
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
empty string means do not send header.
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
.TP
\fB\-o\fR retries (default="2")
number of times to retry a failed S3 transaction.
@@ -62,22 +68,48 @@ number of times to retry a failed S3 transaction.
\fB\-o\fR use_cache (default="" which means disabled)
local folder to use for local file cache.
.TP
\fB\-o\fR check_cache_dir_exist (default is disable)
If use_cache is set, check if the cache directory exists.
If this option is not specified, it will be created at runtime when the cache directory does not exist.
.TP
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
\fB\-o\fR storage_class (default is standard)
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, and reduced_redundancy.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
this option can not be specified with use_sse.
(can specify use_rrs=1 for old version)
this option has been replaced by new storage_class option.
.TP
\fB\-o\fR use_sse (default is disable)
use Amazon's Server-Site Encryption or Server-Side Encryption with Customer-Provided Encryption Keys.
this option can not be specified with use_rrs. specifying only "use_sse" or "use_sse=1" enables Server-Side Encryption.(use_sse=1 for old version)
specifying this option with file path which has some SSE-C secret key enables Server-Side Encryption with Customer-Provided Encryption Keys.(use_sse=file)
the file must be 600 permission. the file can have some lines, each line is one SSE-C key. the first line in file is used as Customer-Provided Encryption Keys for uploading and change headers etc.
if there are some keys after first line, those are used downloading object which are encripted by not first key.
so that, you can keep all SSE-C keys in file, that is SSE-C key history.
if AWSSSECKEYS environment is set, you can set SSE-C key instead of this option.
Specify three type Amazon's Server-Site Encryption: SSE-S3, SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption keys, SSE-C uses customer-provided encryption keys, and SSE-KMS uses the master key which you manage in AWS KMS.
You can specify "use_sse" or "use_sse=1" enables SSE-S3 type (use_sse=1 is old type parameter).
Case of setting SSE-C, you can specify "use_sse=custom", "use_sse=custom:<custom key file path>" or "use_sse=<custom key file path>"(only <custom key file path> specified is old type parameter).
You can use "c" for short "custom".
The custom key file must be 600 permission. The file can have some lines, each line is one SSE-C key.
The first line in file is used as Customer-Provided Encryption Keys for uploading and changing headers etc.
If there are some keys after first line, those are used downloading object which are encrypted by not first key.
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
This option is used to decide the SSE type.
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" for short "kmsid".
If you san specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
If you specify only "kmsid"("k"), you need to set AWSSSEKMSID environment which value is <kms id>.
You must be careful about that you can not use the KMS id which is not same EC2 region.
.TP
\fB\-o\fR load_sse_c - specify SSE-C keys
Specify the custom-provided encryption keys file path for decrypting at downloading.
If you use the custom-provided encryption key at uploading, you specify with "use_sse=custom".
The file has many lines, one line means one custom key.
So that you can keep all SSE-C keys in file, that is SSE-C key history.
AWSSSECKEYS environment is as same as this file contents.
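A sketch of the three `use_sse` forms described above; the bucket, key file path, and KMS key id are placeholders:

```
# SSE-S3 (S3-managed keys)
s3fs mybucket /mnt/s3 -o use_sse
# SSE-C, reading customer-provided keys from a 600-permission file
s3fs mybucket /mnt/s3 -o use_sse=custom:/path/to/ssec.keys
# SSE-KMS with an explicit key id
s3fs mybucket /mnt/s3 -o use_sse=kmsid:<kms id>
```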
.TP
|
||||
\fB\-o\fR passwd_file (default="")
|
||||
specify the path to the password file, which which takes precedence over the password in $HOME/.passwd-s3fs and /etc/passwd-s3fs
|
||||
@ -86,34 +118,40 @@ specify the path to the password file, which which takes precedence over the pas
This option specifies the path to a configuration file that defines additional HTTP headers by file (object) extension.
The configuration file format is below:
-----------
line        = [file suffix] HTTP-header [HTTP-values]
file suffix = file(object) suffix, if this field is empty, it means "*"(all object).
line        = [file suffix or regex] HTTP-header [HTTP-values]
file suffix = file (object) suffix; if this field is empty, it means "reg:(.*)" (= all objects).
regex       = regular expression matched against the file (object) path. This type starts with the "reg:" prefix.
HTTP-header = additional HTTP header name
HTTP-values = additional HTTP header value
-----------
Sample:
-----------
.gz Content-Encoding gzip
.Z  Content-Encoding compress
X-S3FS-MYHTTPHEAD myvalue
.gz Content-Encoding gzip
.Z  Content-Encoding compress
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2
-----------
A sample configuration file is provided in the "test" directory.
If you use this option to set the "Content-Encoding" HTTP header, please take care of RFC 2616.
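For illustration only (not from the original page), how the sample rules above would apply to two hypothetical object paths:
-----------
/MYDIR/file.t2  ->  Content-Encoding: text2   (matched by the "reg:" line)
/logs/dump.gz   ->  Content-Encoding: gzip    (matched by the ".gz" suffix line)
-----------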
.TP
\fB\-o\fR public_bucket (default="" which means disabled)
anonymously mount a public bucket when set to 1; ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
S3 does not allow the copy object API for anonymous users, so s3fs sets the nocopyapi option automatically when public_bucket=1 is specified.
.TP
\fB\-o\fR connect_timeout (default="10" seconds)
\fB\-o\fR connect_timeout (default="300" seconds)
time to wait for connection before giving up.
.TP
\fB\-o\fR readwrite_timeout (default="30" seconds)
\fB\-o\fR readwrite_timeout (default="60" seconds)
time to wait between read/write activity before giving up.
.TP
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
maximum number of entries in the stat cache
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache
specify the expire time (in seconds) for entries in the stat cache. This expire time is measured from the time an entry was cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
specify the expire time (in seconds) for entries in the stat cache. This expire time is measured from the last access time of a cache entry.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for objects that do not exist.
@ -121,6 +159,10 @@ s3fs always has to check whether file(or sub directory) exists under object(path
This increases ListBucket requests and degrades performance.
You can specify this option for performance; s3fs then remembers in the stat cache that an object (file or directory) does not exist.
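A hypothetical invocation combining the cache options above (bucket and mount point are placeholders):
-----------
s3fs mybucket /mnt/s3 -o max_stat_cache_size=100000 -o stat_cache_expire=300 -o enable_noobj_cache
-----------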
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check the SSL certificate.
The server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR nodnscache - disable dns cache.
s3fs always uses the DNS cache; this option disables it.
.TP
@ -135,22 +177,36 @@ number of parallel request for uploading big objects.
s3fs uploads large objects (default: over 20MB) by multipart post requests, and sends the requests in parallel.
This option limits the number of parallel requests which s3fs issues at once.
It is necessary to set this value depending on the CPU and the network bandwidth.
This option is related to the fd_page_size option and affects it.
.TP
\fB\-o\fR fd_page_size (default="52428800" (50MB))
size of the internal management pages for each file descriptor.
For delayed reading and writing, s3fs manages pages which divide the object. Each page has a status indicating whether its data has been loaded yet.
This option should not be changed unless you have a performance problem.
This value is changed automatically from the parallel_count and multipart_size values (fd_page_size value = parallel_count * multipart_size).
.TP
\fB\-o\fR multipart_size (default="10" (10MB))
size of one part in a multipart upload request.
The default size is 10MB(10485760byte), this value is minimum size.
Specify number of MB and over 10(MB).
This option is related to the fd_page_size option and affects it.
The default size is 10MB (10485760 bytes); the minimum value is 5MB (5242880 bytes).
Specify a number of MB, 5(MB) or more.
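As a consistency check (the parallel_count default of 5 is an inference, not stated on this page), the formula above reproduces the default page size:
-----------
fd_page_size = parallel_count * multipart_size = 5 * 10485760 = 52428800 bytes (50MB)
-----------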
.TP
\fB\-o\fR url (default="http://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
\fB\-o\fR ensure_diskfree (default is the same as the multipart_size value)
sets the MB of disk space to keep free. This option is the threshold of free disk space used for cache files by s3fs.
s3fs creates files for downloading, uploading and caching.
If the free disk space is smaller than this value, s3fs avoids using disk space as far as possible, in exchange for performance.
.TP
\fB\-o\fR url (default="https://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
If you do not want to use https, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use.
If this option is not specified, s3fs uses the "us-east-1" region as the default.
If s3fs cannot connect to the region specified by this option, s3fs cannot run.
But if you do not specify this option and cannot connect with the default region, s3fs automatically retries against another region.
s3fs can then learn the correct region name, because it can find it in the error response from the S3 server.
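A hypothetical mount against a non-default region using the two options above (the region, bucket and mount point are placeholders):
-----------
s3fs mybucket /mnt/s3 -o url=https://s3-us-west-2.amazonaws.com -o endpoint=us-west-2
-----------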
.TP
\fB\-o\fR sigv2 (default is signature version 4)
sets signing of AWS requests to use Signature Version 2.
.TP
\fB\-o\fR mp_umask (default is "0000")
sets the umask for the mount point directory.
If the allow_other option is not set, s3fs allows access to the mount point only to the owner.
In the opposite case s3fs allows access to all users as the default.
But if you set allow_other together with this option, you can control the permissions of the mount point with this option, like a umask.
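For example (an illustration; names are placeholders), sharing the mount point with other users while masking group/other write bits:
-----------
s3fs mybucket /mnt/s3 -o allow_other -o mp_umask=022
-----------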
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
@ -160,11 +216,23 @@ Enable to send "Content-MD5" header when uploading a object without multipart po
If this option is enabled, it has some influence on s3fs performance when uploading small objects.
Because s3fs always checks MD5 when uploading large objects, this option does not affect large objects.
.TP
\fB\-o\fR iam_role ( default is no role )
set the IAM Role that will supply the credentials from the instance meta-data.
\fB\-o\fR ecs ( default is disable )
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
.TP
\fB\-o\fR noxmlns - disable registing xml name space.
disable registing xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
\fB\-o\fR iam_role ( default is no IAM role )
This option takes an IAM role name or "auto". If you specify "auto", s3fs automatically uses the IAM role name that is set on the instance. If you specify this option without an argument, it is the same as specifying "auto".
.TP
\fB\-o\fR ibm_iam_auth ( default is not using IBM IAM authentication )
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
.TP
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
Enable handling of extended attributes (xattrs).
If you set this option, you can use extended attributes.
For example, encfs and ecryptfs need extended attribute support.
Notice: if s3fs handles extended attributes, s3fs cannot work with the copy command's preserve=mode.
.TP
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering the xml name space for responses of ListBucketResult and ListVersionsResult etc. The default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up the xmlns automatically after v1.66.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
@ -174,18 +242,50 @@ If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api).
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatible with the S3 API but without PUT (copy api).
This option is a subset of the nocopyapi option. The nocopyapi option avoids the copy api for all commands (e.g. chmod, chown, touch, mv, etc.), while this option avoids the copy api only for the rename command (e.g. mv).
If this option is specified with nocopapi, the s3fs ignores it.
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enble compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs outputs the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
If this option is specified, s3fs suppresses the output of the User-Agent.
.TP
\fB\-o\fR cipher_suites
Customize the TLS cipher suite list. Expects a colon separated list of cipher suite names.
A list of available cipher suites, depending on your TLS engine, can be found in the CURL library documentation:
https://curl.haxx.se/docs/ssl-ciphers.html
.TP
\fB\-o\fR complement_stat (complement lack of file/directory mode)
s3fs complements missing file/directory mode information when a file or a directory object does not have the x-amz-meta-mode header.
By default, s3fs does not complement stat information for an object, so such an object may not be allowed to be listed or modified.
.TP
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
By default, s3fs supports directory-type objects as far as possible and recognizes them as directories.
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and the case where there is no directory object but a file object contains that directory path.
s3fs needs redundant communication to support all these directory types.
The directory object created by s3fs is "dir/".
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
This option applies that restriction to s3fs.
However, if the bucket holds directory objects other than "dir/", specifying this option is not recommended.
s3fs may not be able to recognize objects correctly if the bucket contains objects created by clients other than s3fs.
Please use this option only when the directory objects in the bucket are exclusively "dir/" objects.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. Set the value to crit (critical), err (error), warn (warning) or info (information) as the debug level. The default debug level is critical.
If s3fs runs with the "-d" option, the debug level is set to information.
When s3fs catches the signal SIGUSR2, the debug level is bumped up.
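For instance (an illustration; assumes a single running s3fs process), raising the log level of a running instance without remounting:
-----------
kill -USR2 $(pidof s3fs)
-----------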
.TP
\fB\-o\fR curldbg - put curl debug message
Put the debug messages from libcurl when this option is specified.
.SH FUSE/MOUNT OPTIONS
.TP
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '-onodev,nosuid' by default, which can only be overridden by a privileged user.
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync, async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
.TP
There are many FUSE specific mount options that can be specified, e.g. allow_other. See the FUSE README for the full set.
.SH NOTES
.TP
Maximum file size=64GB (limited by s3fs, not Amazon).
The maximum size of objects that s3fs can handle depends on Amazon S3: up to 5GB when using the single PUT API, and up to 5TB when the Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
.TP

@ -24,7 +24,7 @@ if USE_GNUTLS_NETTLE
AM_CPPFLAGS += -DUSE_GNUTLS_NETTLE
endif

s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h common.h
s3fs_SOURCES = s3fs.cpp s3fs.h curl.cpp curl.h cache.cpp cache.h string_util.cpp string_util.h s3fs_util.cpp s3fs_util.h fdcache.cpp fdcache.h common_auth.cpp s3fs_auth.h addhead.cpp addhead.h common.h
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
endif
@ -37,3 +37,8 @@ endif

s3fs_LDADD = $(DEPS_LIBS)

noinst_PROGRAMS = test_string_util

test_string_util_SOURCES = string_util.cpp test_string_util.cpp test_util.h

TESTS = test_string_util

286
src/addhead.cpp
Normal file
@ -0,0 +1,286 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <assert.h>
#include <curl/curl.h>
#include <sstream>
#include <fstream>
#include <string>
#include <map>
#include <list>
#include <vector>

#include "common.h"
#include "addhead.h"
#include "curl.h"
#include "s3fs.h"

using namespace std;

//-------------------------------------------------------------------
// Symbols
//-------------------------------------------------------------------
#define ADD_HEAD_REGEX              "reg:"

//-------------------------------------------------------------------
// Class AdditionalHeader
//-------------------------------------------------------------------
AdditionalHeader AdditionalHeader::singleton;

//-------------------------------------------------------------------
// Class AdditionalHeader method
//-------------------------------------------------------------------
AdditionalHeader::AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        is_enable = false;
    }else{
        assert(false);
    }
}

AdditionalHeader::~AdditionalHeader()
{
    if(this == AdditionalHeader::get()){
        Unload();
    }else{
        assert(false);
    }
}

bool AdditionalHeader::Load(const char* file)
{
    if(!file){
        S3FS_PRN_WARN("file is NULL.");
        return false;
    }
    Unload();

    ifstream AH(file);
    if(!AH.good()){
        S3FS_PRN_WARN("Could not open file(%s).", file);
        return false;
    }

    // read file
    string   line;
    PADDHEAD paddhead;
    while(getline(AH, line)){
        if(0 == line.size()){           // check empty lines before indexing line[0]
            continue;
        }
        if('#' == line[0]){             // skip comment lines
            continue;
        }
        // load a line
        stringstream ss(line);
        string       key("");           // suffix(key)
        string       head;              // additional HTTP header
        string       value;             // header value
        if(0 == isblank(line[0])){
            ss >> key;
        }
        if(ss){
            ss >> head;
            if(ss && static_cast<size_t>(ss.tellg()) < line.size()){
                value = line.substr(static_cast<int>(ss.tellg()) + 1);
            }
        }

        // check it
        if(0 == head.size()){
            if(0 == key.size()){
                continue;
            }
            S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
            Unload();
            return false;
        }

        paddhead = new ADDHEAD;
        if(0 == strncasecmp(key.c_str(), ADD_HEAD_REGEX, strlen(ADD_HEAD_REGEX))){
            // regex
            if(key.size() <= strlen(ADD_HEAD_REGEX)){
                S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
                delete paddhead;        // release the entry allocated above to avoid a leak
                continue;
            }
            key = key.substr(strlen(ADD_HEAD_REGEX));

            // compile
            regex_t* preg = new regex_t;
            int      result;
            char     errbuf[256];
            if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){   // we do not need matching info
                regerror(result, preg, errbuf, sizeof(errbuf));
                S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
                delete preg;
                delete paddhead;
                continue;
            }

            // set
            paddhead->pregex     = preg;
            paddhead->basestring = key;
            paddhead->headkey    = head;
            paddhead->headvalue  = value;

        }else{
            // not regex, directly comparing
            paddhead->pregex     = NULL;
            paddhead->basestring = key;
            paddhead->headkey    = head;
            paddhead->headvalue  = value;
        }

        // add list
        addheadlist.push_back(paddhead);

        // set flag
        if(!is_enable){
            is_enable = true;
        }
    }
    return true;
}

void AdditionalHeader::Unload(void)
{
    is_enable = false;

    for(addheadlist_t::iterator iter = addheadlist.begin(); iter != addheadlist.end(); iter = addheadlist.erase(iter)){
        PADDHEAD paddhead = *iter;
        if(paddhead){
            if(paddhead->pregex){
                regfree(paddhead->pregex);
                delete paddhead->pregex;
            }
            delete paddhead;
        }
    }
}

bool AdditionalHeader::AddHeader(headers_t& meta, const char* path) const
{
    if(!is_enable){
        return true;
    }
    if(!path){
        S3FS_PRN_WARN("path is NULL.");
        return false;
    }

    size_t pathlength = strlen(path);

    // loop
    //
    // [NOTE]
    // Duplicate keys are allowed, so the entire list is scanned.
    //
    for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter){
        const PADDHEAD paddhead = *iter;
        if(!paddhead){
            continue;
        }

        if(paddhead->pregex){
            // regex
            regmatch_t match;       // not used
            if(0 == regexec(paddhead->pregex, path, 1, &match, 0)){
                // match -> adding header
                meta[paddhead->headkey] = paddhead->headvalue;
            }
        }else{
            // directly comparing
            if(paddhead->basestring.length() < pathlength){
                if(0 == paddhead->basestring.length() || 0 == strcmp(&path[pathlength - paddhead->basestring.length()], paddhead->basestring.c_str())){
                    // match -> adding header
                    meta[paddhead->headkey] = paddhead->headvalue;
                }
            }
        }
    }
    return true;
}

struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const char* path) const
{
    headers_t meta;

    if(!AddHeader(meta, path)){
        return list;
    }
    for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
        // Adding header
        list = curl_slist_sort_insert(list, iter->first.c_str(), iter->second.c_str());
    }
    meta.clear();
    S3FS_MALLOCTRIM(0);
    return list;
}

bool AdditionalHeader::Dump(void) const
{
    if(!IS_S3FS_LOG_DBG()){
        return true;
    }

    stringstream ssdbg;
    int          cnt = 1;

    ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;

    for(addheadlist_t::const_iterator iter = addheadlist.begin(); iter != addheadlist.end(); ++iter, ++cnt){
        const PADDHEAD paddhead = *iter;

        ssdbg << "    [" << cnt << "] = {" << endl;

        if(paddhead){
            if(paddhead->pregex){
                ssdbg << "        type\t\t--->\tregex" << endl;
            }else{
                ssdbg << "        type\t\t--->\tsuffix matching" << endl;
            }
            ssdbg << "        base string\t--->\t" << paddhead->basestring << endl;
            ssdbg << "        add header\t--->\t" << paddhead->headkey << ": " << paddhead->headvalue << endl;
        }
        ssdbg << "    }" << endl;
    }

    ssdbg << "}" << endl;

    // print all
    S3FS_PRN_DBG("%s", ssdbg.str().c_str());

    return true;
}

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
70
src/addhead.h
Normal file
@ -0,0 +1,70 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_ADDHEAD_H_
#define S3FS_ADDHEAD_H_

#include <regex.h>

//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef struct add_header{
    regex_t*    pregex;     // not NULL means using regex, NULL means comparing suffix directly.
    std::string basestring;
    std::string headkey;
    std::string headvalue;
}ADDHEAD, *PADDHEAD;

typedef std::vector<PADDHEAD> addheadlist_t;

class AdditionalHeader
{
    private:
        static AdditionalHeader singleton;
        bool                    is_enable;
        addheadlist_t           addheadlist;

    protected:
        AdditionalHeader();
        ~AdditionalHeader();

    public:
        // Reference singleton
        static AdditionalHeader* get(void) { return &singleton; }

        bool Load(const char* file);
        void Unload(void);

        bool AddHeader(headers_t& meta, const char* path) const;
        struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
        bool Dump(void) const;
};

#endif // S3FS_ADDHEAD_H_

/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
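A minimal usage sketch (not part of this commit) of how a caller is expected to drive AdditionalHeader; the configuration file path below is a hypothetical example, not a default defined by s3fs:

// Sketch: load the ahbe_conf file once at startup, then ask the singleton
// to append any matching headers before issuing a request.
#include <curl/curl.h>
#include "common.h"
#include "addhead.h"

bool setup_additional_headers(void)
{
    // "/etc/ahbe.conf" is an assumed path passed via the ahbe_conf option
    return AdditionalHeader::get()->Load("/etc/ahbe.conf");
}

struct curl_slist* headers_for(struct curl_slist* list, const char* object_path)
{
    // appends e.g. "Content-Encoding: gzip" when object_path matches a rule
    return AdditionalHeader::get()->AddHeader(list, object_path);
}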
318
src/cache.cpp
@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -21,6 +21,9 @@
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef HAVE_CLOCK_GETTIME
#include <sys/time.h>
#endif
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
@ -29,15 +32,107 @@
#include <syslog.h>
#include <string>
#include <map>
#include <vector>
#include <algorithm>
#include <list>

#include "cache.h"
#include "s3fs.h"
#include "s3fs_util.h"
#include "string_util.h"

using namespace std;

//-------------------------------------------------------------------
// Utility
//-------------------------------------------------------------------
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME          0
#endif
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC         CLOCK_REALTIME
#endif
#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE  CLOCK_MONOTONIC
#endif

#ifdef HAVE_CLOCK_GETTIME
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
    return clock_gettime(static_cast<clockid_t>(clk_id), ts);
}
#else
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
    struct timeval now;
    if(0 != gettimeofday(&now, NULL)){
        return -1;
    }
    ts->tv_sec  = now.tv_sec;
    ts->tv_nsec = now.tv_usec * 1000;
    return 0;
}
#endif

inline void SetStatCacheTime(struct timespec& ts)
{
    if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){
        ts.tv_sec  = time(NULL);
        ts.tv_nsec = 0;
    }
}

inline void InitStatCacheTime(struct timespec& ts)
{
    ts.tv_sec  = 0;
    ts.tv_nsec = 0;
}

inline int CompareStatCacheTime(struct timespec& ts1, struct timespec& ts2)
{
    // return -1:  ts1 < ts2
    //         0:  ts1 == ts2
    //         1:  ts1 > ts2
    if(ts1.tv_sec < ts2.tv_sec){
        return -1;
    }else if(ts1.tv_sec > ts2.tv_sec){
        return 1;
    }else{
        if(ts1.tv_nsec < ts2.tv_nsec){
            return -1;
        }else if(ts1.tv_nsec > ts2.tv_nsec){
            return 1;
        }
    }
    return 0;
}

inline bool IsExpireStatCacheTime(const struct timespec& ts, const time_t& expire)
{
    struct timespec nowts;
    SetStatCacheTime(nowts);
    return ((ts.tv_sec + expire) < nowts.tv_sec);
}

//
// For cache out
//
typedef std::vector<stat_cache_t::iterator> statiterlist_t;

struct sort_statiterlist{
    // ascending order
    bool operator()(const stat_cache_t::iterator& src1, const stat_cache_t::iterator& src2) const
    {
        int result = CompareStatCacheTime(src1->second->cache_date, src2->second->cache_date);
        if(0 == result){
            if(src1->second->hit_count < src2->second->hit_count){
                result = -1;
            }
        }
        return (result < 0);
    }
};

//-------------------------------------------------------------------
// Static
//-------------------------------------------------------------------
@ -47,7 +142,7 @@ pthread_mutex_t StatCache::stat_cache_lock;
//-------------------------------------------------------------------
// Constructor/Destructor
//-------------------------------------------------------------------
StatCache::StatCache() : IsExpireTime(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
{
    if(this == StatCache::getStatCacheData()){
        stat_cache.clear();
@ -87,19 +182,21 @@ time_t StatCache::GetExpireTime(void) const
    return (IsExpireTime ? ExpireTime : (-1));
}

time_t StatCache::SetExpireTime(time_t expire)
time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
{
    time_t old   = ExpireTime;
    ExpireTime   = expire;
    IsExpireTime = true;
    time_t old           = ExpireTime;
    ExpireTime           = expire;
    IsExpireTime         = true;
    IsExpireIntervalType = is_interval;
    return old;
}

time_t StatCache::UnsetExpireTime(void)
{
    time_t old   = IsExpireTime ? ExpireTime : (-1);
    ExpireTime   = 0;
    IsExpireTime = false;
    time_t old           = IsExpireTime ? ExpireTime : (-1);
    ExpireTime           = 0;
    IsExpireTime         = false;
    IsExpireIntervalType = false;
    return old;
}

@ -143,7 +240,7 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove

    if(iter != stat_cache.end() && (*iter).second){
        stat_cache_entry* ent = (*iter).second;
        if(!IsExpireTime|| (ent->cache_date + ExpireTime) >= time(NULL)){
        if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
            if(ent->noobjcache){
                pthread_mutex_unlock(&StatCache::stat_cache_lock);
                if(!IsCacheNoObject){
@ -155,19 +252,28 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
                return false;
            }
            // hit without checking etag
            string stretag;
            if(petag){
                string stretag = ent->meta["ETag"];
                if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
                    is_delete_cache = true;
                // find & check ETag
                for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
                    string tag = lower(iter->first);
                    if(tag == "etag"){
                        stretag = iter->second;
                        if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
                            is_delete_cache = true;
                        }
                        break;
                    }
                }
            }
            if(is_delete_cache){
                // not hit by different ETag
                DPRNNN("stat cache not hit by ETag[path=%s][time=%jd][hit count=%lu][ETag(%s)!=(%s)]",
                    strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count, petag ? petag : "null", ent->meta["ETag"].c_str());
                S3FS_PRN_DBG("stat cache not hit by ETag[path=%s][time=%jd.%09ld][hit count=%lu][ETag(%s)!=(%s)]",
                    strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count, petag ? petag : "null", stretag.c_str());
            }else{
                // hit
                DPRNNN("stat cache hit [path=%s][time=%jd][hit count=%lu]", strpath.c_str(), (intmax_t)(ent->cache_date), ent->hit_count);
                S3FS_PRN_DBG("stat cache hit [path=%s][time=%jd.%09ld][hit count=%lu]",
                    strpath.c_str(), (intmax_t)(ent->cache_date.tv_sec), ent->cache_date.tv_nsec, ent->hit_count);

                if(pst!= NULL){
                    *pst= ent->stbuf;
@ -179,7 +285,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
                    (*pisforce) = ent->isforce;
                }
                ent->hit_count++;
                ent->cache_date = time(NULL);

                if(IsExpireIntervalType){
                    SetStatCacheTime(ent->cache_date);
                }
                pthread_mutex_unlock(&StatCache::stat_cache_lock);
                return true;
            }
@ -219,10 +328,10 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
    }

    if(iter != stat_cache.end() && (*iter).second) {
        if(!IsExpireTime|| ((*iter).second->cache_date + ExpireTime) >= time(NULL)){
        if(!IsExpireTime || !IsExpireStatCacheTime((*iter).second->cache_date, ExpireTime)){
            if((*iter).second->noobjcache){
                // noobjcache = true means no object.
                (*iter).second->cache_date = time(NULL);
                SetStatCacheTime((*iter).second->cache_date);
                pthread_mutex_unlock(&StatCache::stat_cache_lock);
                return true;
            }
@ -239,17 +348,24 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
    return false;
}

bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool no_truncate)
{
    if(CacheSize< 1){
    if(!no_truncate && CacheSize< 1){
        return true;
    }
    DPRNNN("add stat cache entry[path=%s]", key.c_str());
    S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());

    if(stat_cache.end() != stat_cache.find(key)){
    pthread_mutex_lock(&StatCache::stat_cache_lock);

    bool found       = stat_cache.end() != stat_cache.find(key);
    bool do_truncate = stat_cache.size() > CacheSize;

    pthread_mutex_unlock(&StatCache::stat_cache_lock);

    if(found){
        DelStat(key.c_str());
    }else{
        if(stat_cache.size() > CacheSize){
        if(do_truncate){
            if(!TruncateCache()){
                return false;
            }
@ -263,35 +379,40 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir)
        return false;
    }
    ent->hit_count  = 0;
    ent->cache_date = time(NULL); // Set time.
    ent->isforce    = forcedir;
    ent->noobjcache = false;
    ent->notruncate = (no_truncate ? 1L : 0L);
    ent->meta.clear();
    SetStatCacheTime(ent->cache_date); // Set time.
    //copy only some keys
    for(headers_t::iterator iter = meta.begin(); iter != meta.end(); ++iter){
        string tag   = (*iter).first;
        string value = (*iter).second;
        if(tag == "Content-Type"){
            ent->meta[tag] = value;
        }else if(tag == "Content-Length"){
            ent->meta[tag] = value;
        }else if(tag == "ETag"){
            ent->meta[tag] = value;
        }else if(tag == "Last-Modified"){
            ent->meta[tag] = value;
        string tag   = lower(iter->first);
        string value = iter->second;
        if(tag == "content-type"){
            ent->meta[iter->first] = value;
        }else if(tag == "content-length"){
            ent->meta[iter->first] = value;
        }else if(tag == "etag"){
            ent->meta[iter->first] = value;
        }else if(tag == "last-modified"){
            ent->meta[iter->first] = value;
        }else if(tag.substr(0, 5) == "x-amz"){
            ent->meta[tag] = value;
        }else{
            // Check for upper case
            transform(tag.begin(), tag.end(), tag.begin(), static_cast<int (*)(int)>(std::tolower));
            if(tag.substr(0, 5) == "x-amz"){
                ent->meta[tag] = value;
            }
            ent->meta[tag] = value; // key is lower case for "x-amz"
        }
    }

    // add
    pthread_mutex_lock(&StatCache::stat_cache_lock);

    stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
    if(stat_cache.end() != iter){
        if(iter->second){
            delete iter->second;
        }
        stat_cache.erase(iter);
    }
    stat_cache[key] = ent;

    pthread_mutex_unlock(&StatCache::stat_cache_lock);

    return true;
@ -305,12 +426,19 @@ bool StatCache::AddNoObjectCache(string& key)
    if(CacheSize < 1){
        return true;
    }
    DPRNNN("add no object cache entry[path=%s]", key.c_str());
    S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());

    if(stat_cache.end() != stat_cache.find(key)){
    pthread_mutex_lock(&StatCache::stat_cache_lock);

    bool found       = stat_cache.end() != stat_cache.find(key);
    bool do_truncate = stat_cache.size() > CacheSize;

    pthread_mutex_unlock(&StatCache::stat_cache_lock);

    if(found){
        DelStat(key.c_str());
    }else{
        if(stat_cache.size() > CacheSize){
        if(do_truncate){
            if(!TruncateCache()){
                return false;
            }
@ -321,47 +449,109 @@ bool StatCache::AddNoObjectCache(string& key)
    stat_cache_entry* ent = new stat_cache_entry();
    memset(&(ent->stbuf), 0, sizeof(struct stat));
    ent->hit_count  = 0;
    ent->cache_date = time(NULL); // Set time.
    ent->isforce    = false;
    ent->noobjcache = true;
    ent->notruncate = 0L;
    ent->meta.clear();
    SetStatCacheTime(ent->cache_date); // Set time.

    // add
    pthread_mutex_lock(&StatCache::stat_cache_lock);

    stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
    if(stat_cache.end() != iter){
        if(iter->second){
            delete iter->second;
        }
        stat_cache.erase(iter);
    }
    stat_cache[key] = ent;

    pthread_mutex_unlock(&StatCache::stat_cache_lock);

    return true;
}

void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
{
    pthread_mutex_lock(&StatCache::stat_cache_lock);

    stat_cache_t::iterator iter = stat_cache.find(key);

    if(stat_cache.end() != iter){
        stat_cache_entry* ent = iter->second;
        if(ent){
            if(no_truncate){
                ++(ent->notruncate);
            }else{
                if(0L < ent->notruncate){
                    --(ent->notruncate);
                }
            }
        }
    }
    pthread_mutex_unlock(&StatCache::stat_cache_lock);
}

bool StatCache::TruncateCache(void)
{
    if(0 == stat_cache.size()){
    if(stat_cache.empty()){
        return true;
    }

    pthread_mutex_lock(&StatCache::stat_cache_lock);

    time_t lowest_time = time(NULL) + 1;
    stat_cache_t::iterator iter_to_delete = stat_cache.end();
    stat_cache_t::iterator iter;

    for(iter = stat_cache.begin(); iter != stat_cache.end(); iter++) {
        if((*iter).second){
            if(lowest_time > (*iter).second->cache_date){
                lowest_time    = (*iter).second->cache_date;
                iter_to_delete = iter;
    // 1) erase over expire time
    if(IsExpireTime){
        for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
            stat_cache_entry* entry = iter->second;
            if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
                if(entry){
                    delete entry;
                }
                stat_cache.erase(iter++);
            }else{
                ++iter;
            }
        }
    }
    if(stat_cache.end() != iter_to_delete){
        DPRNNN("truncate stat cache[path=%s]", (*iter_to_delete).first.c_str());
        if((*iter_to_delete).second){
            delete (*iter_to_delete).second;
        }
        stat_cache.erase(iter_to_delete);
        S3FS_MALLOCTRIM(0);

    // 2) check stat cache count
    if(stat_cache.size() < CacheSize){
        pthread_mutex_unlock(&StatCache::stat_cache_lock);
        return true;
    }

    // 3) erase from the old cache in order
    size_t erase_count= stat_cache.size() - CacheSize + 1;
    statiterlist_t erase_iters;
    for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
        // check no truncate
        stat_cache_entry* ent = iter->second;
        if(ent && 0L < ent->notruncate){
            // skip for no truncate entry
            if(0 < erase_count){
                --erase_count; // decrement
            }
        }else{
            // this entry does not have the notruncate flag
            erase_iters.push_back(iter);
            sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
            if(erase_count < erase_iters.size()){
                erase_iters.pop_back();
            }
        }
    }
    for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
        stat_cache_t::iterator siter = *iiter;

        S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
        if(siter->second){
            delete siter->second;
        }
        stat_cache.erase(siter);
    }
    S3FS_MALLOCTRIM(0);

    pthread_mutex_unlock(&StatCache::stat_cache_lock);

    return true;
@ -372,7 +562,7 @@ bool StatCache::DelStat(const char* key)
    if(!key){
        return false;
    }
    DPRNNN("delete stat cache entry[path=%s]", key);
    S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);

    pthread_mutex_lock(&StatCache::stat_cache_lock);

34
src/cache.h
@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,6 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#ifndef S3FS_CACHE_H_
#define S3FS_CACHE_H_

@ -26,15 +27,18 @@
// Struct
//
struct stat_cache_entry {
    struct stat   stbuf;
    unsigned long hit_count;
    time_t        cache_date;
    headers_t     meta;
    bool          isforce;
    bool          noobjcache; // Flag: cache is no object for no listing.
    struct stat     stbuf;
    unsigned long   hit_count;
    struct timespec cache_date;
    headers_t       meta;
    bool            isforce;
    bool            noobjcache; // Flag: cache is no object for no listing.
    unsigned long   notruncate; // 0<: not removed automatically at checking truncate

    stat_cache_entry() : hit_count(0), cache_date(0), isforce(false), noobjcache(false) {
    stat_cache_entry() : hit_count(0), isforce(false), noobjcache(false), notruncate(0L) {
        memset(&stbuf, 0, sizeof(struct stat));
        cache_date.tv_sec  = 0;
        cache_date.tv_nsec = 0;
        meta.clear();
    }
};
@ -51,20 +55,21 @@ class StatCache
    static pthread_mutex_t stat_cache_lock;
    stat_cache_t  stat_cache;
    bool          IsExpireTime;
    bool          IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
    time_t        ExpireTime;
    unsigned long CacheSize;
    bool          IsCacheNoObject;

  private:
    StatCache();
    ~StatCache();

    void Clear(void);
    bool GetStat(std::string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce);
    // Truncate stat cache
    bool TruncateCache(void);

  public:
    StatCache();
    ~StatCache();

    // Reference singleton
    static StatCache* getStatCacheData(void) {
        return &singleton;
@ -74,7 +79,7 @@ class StatCache
    unsigned long GetCacheSize(void) const;
    unsigned long SetCacheSize(unsigned long size);
    time_t GetExpireTime(void) const;
    time_t SetExpireTime(time_t expire);
    time_t SetExpireTime(time_t expire, bool is_interval = false);
    time_t UnsetExpireTime(void);
    bool SetCacheNoObject(bool flag);
    bool EnableCacheNoObject(void) {
@ -109,7 +114,10 @@ class StatCache
    bool AddNoObjectCache(std::string& key);

    // Add stat cache
    bool AddStat(std::string& key, headers_t& meta, bool forcedir = false);
    bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);

    // Change no truncate flag
    void ChangeNoTruncateFlag(std::string key, bool no_truncate);

    // Delete stat cache
    bool DelStat(const char* key);

186
src/common.h
@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -17,83 +17,159 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_

#include "../config.h"

//
// Extended attribute
//
#ifdef HAVE_SYS_EXTATTR_H
#include <sys/extattr.h>
#elif HAVE_ATTR_XATTR_H
#include <attr/xattr.h>
#elif HAVE_SYS_XATTR_H
#include <sys/xattr.h>
#endif

//
// Macro
//
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }

// for debug
#define FPRINT_NEST_SPACE_0 ""
#define FPRINT_NEST_SPACE_1 "  "
#define FPRINT_NEST_SPACE_2 "    "
#define FPRINT_NEST_CHECK(NEST) \
    (0 == NEST ? FPRINT_NEST_SPACE_0 : 1 == NEST ? FPRINT_NEST_SPACE_1 : FPRINT_NEST_SPACE_2)
//
// Debug level
//
enum s3fs_log_level{
    S3FS_LOG_CRIT = 0,  // LOG_CRIT
    S3FS_LOG_ERR  = 1,  // LOG_ERR
    S3FS_LOG_WARN = 3,  // LOG_WARNING
    S3FS_LOG_INFO = 7,  // LOG_INFO
    S3FS_LOG_DBG  = 15  // LOG_DEBUG
};

#define LOWFPRINT(NEST, ...) \
    printf("%s%s(%d): ", FPRINT_NEST_CHECK(NEST), __func__, __LINE__); \
    printf(__VA_ARGS__); \
    printf("\n"); \
//
// Debug macros
//
#define IS_S3FS_LOG_CRIT()   (S3FS_LOG_CRIT == debug_level)
#define IS_S3FS_LOG_ERR()    (S3FS_LOG_ERR  == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_WARN()   (S3FS_LOG_WARN == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_INFO()   (S3FS_LOG_INFO == (debug_level & S3FS_LOG_DBG))
#define IS_S3FS_LOG_DBG()    (S3FS_LOG_DBG  == (debug_level & S3FS_LOG_DBG))

#define FPRINT(NEST, ...) \
    if(foreground){ \
        LOWFPRINT(NEST, __VA_ARGS__); \
    }
#define S3FS_LOG_LEVEL_TO_SYSLOG(level) \
    ( S3FS_LOG_DBG  == (level & S3FS_LOG_DBG) ? LOG_DEBUG   : \
      S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? LOG_INFO    : \
      S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? LOG_WARNING : \
      S3FS_LOG_ERR  == (level & S3FS_LOG_DBG) ? LOG_ERR     : LOG_CRIT )

#define FPRINT2(NEST, ...) \
    if(foreground2){ \
        LOWFPRINT(NEST, __VA_ARGS__); \
    }
#define S3FS_LOG_LEVEL_STRING(level) \
    ( S3FS_LOG_DBG  == (level & S3FS_LOG_DBG) ? "[DBG] " : \
      S3FS_LOG_INFO == (level & S3FS_LOG_DBG) ? "[INF] " : \
      S3FS_LOG_WARN == (level & S3FS_LOG_DBG) ? "[WAN] " : \
      S3FS_LOG_ERR  == (level & S3FS_LOG_DBG) ? "[ERR] " : "[CRT] " )

#define LOWSYSLOGPRINT(LEVEL, ...) \
    syslog(LEVEL, __VA_ARGS__);
#define S3FS_LOG_NEST_MAX    4
#define S3FS_LOG_NEST(nest)  (nest < S3FS_LOG_NEST_MAX ? s3fs_log_nest[nest] : s3fs_log_nest[S3FS_LOG_NEST_MAX - 1])

#define SYSLOGPRINT(LEVEL, ...) \
    if(LEVEL <= LOG_CRIT || debug){ \
        LOWSYSLOGPRINT(LEVEL, __VA_ARGS__); \
    }
#define S3FS_LOW_LOGPRN(level, fmt, ...) \
    if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
        if(foreground){ \
            fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
        }else{ \
            syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
        } \
    }

#define DPRINT(LEVEL, NEST, ...) \
    FPRINT(NEST, __VA_ARGS__); \
    SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN2(level, nest, fmt, ...) \
    if(S3FS_LOG_CRIT == level || (S3FS_LOG_CRIT != debug_level && level == (debug_level & level))){ \
        if(foreground){ \
            fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
        }else{ \
            syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
        } \
    }

#define DPRINT2(LEVEL, ...) \
    FPRINT2(2, __VA_ARGS__); \
    SYSLOGPRINT(LEVEL, __VA_ARGS__);
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
    if(foreground){ \
        fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
    }else{ \
        fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
        syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
    }

// print debug message
#define FPRN(...)     FPRINT(0, __VA_ARGS__)
#define FPRNN(...)    FPRINT(1, __VA_ARGS__)
#define FPRNNN(...)   FPRINT(2, __VA_ARGS__)
#define FPRNINFO(...) FPRINT2(2, __VA_ARGS__)
// Special macro for init message
#define S3FS_PRN_INIT_INFO(fmt, ...) \
    if(foreground){ \
        fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
    }else{ \
        syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s" fmt "%s", S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
    }

// print debug message with putting syslog
#define DPRNCRIT(...) DPRINT(LOG_CRIT, 0, __VA_ARGS__)
#define DPRN(...)     DPRINT(LOG_ERR, 0, __VA_ARGS__)
#define DPRNN(...)    DPRINT(LOG_DEBUG, 1, __VA_ARGS__)
#define DPRNNN(...)   DPRINT(LOG_DEBUG, 2, __VA_ARGS__)
#define DPRNINFO(...) DPRINT2(LOG_INFO, __VA_ARGS__)
// [NOTE]
// small trick for VA_ARGS
//
#define S3FS_PRN_EXIT(fmt, ...)  S3FS_LOW_LOGPRN_EXIT(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CRIT(fmt, ...)  S3FS_LOW_LOGPRN(S3FS_LOG_CRIT, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_ERR(fmt, ...)   S3FS_LOW_LOGPRN(S3FS_LOG_ERR, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_WARN(fmt, ...)  S3FS_LOW_LOGPRN(S3FS_LOG_WARN, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_DBG(fmt, ...)   S3FS_LOW_LOGPRN(S3FS_LOG_DBG, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO(fmt, ...)  S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO0(fmt, ...) S3FS_LOG_INFO(fmt, __VA_ARGS__)
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...)  S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")

//
// Typedef
//
typedef std::map<std::string, std::string> headers_t;
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>{
    bool operator()(const std::string &strleft, const std::string &strright) const
    {
        return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
    }
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;

//
// Global valiables
// Header "x-amz-meta-xattr" is for extended attributes.
// This header is url encoded string which is json formatted.
//   x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
extern bool debug;
extern bool foreground;
extern bool foreground2;
extern bool nomultipart;
extern bool pathrequeststyle;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
typedef struct xattr_value{
    unsigned char* pvalue;
    size_t         length;

    explicit xattr_value(unsigned char* pval = NULL, size_t len = 0) : pvalue(pval), length(len) {}
    ~xattr_value()
    {
        if(pvalue){
            free(pvalue);
        }
    }
}XATTRVAL, *PXATTRVAL;

typedef std::map<std::string, PXATTRVAL> xattrs_t;

//
// Global variables
//
extern bool foreground;
extern bool nomultipart;
extern bool pathrequeststyle;
extern bool complement_stat;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern std::string cipher_suites;
extern s3fs_log_level debug_level;
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];

#endif // S3FS_COMMON_H_

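A self-contained illustration (not part of the diff) of what the case-insensitive comparator introduced above buys: "ETag" and "etag" address the same map entry, which is what lets the cache look up headers regardless of the case the server used:

#include <map>
#include <string>
#include <strings.h>   // strcasecmp

struct header_nocase_cmp {
    bool operator()(const std::string& l, const std::string& r) const {
        return strcasecmp(l.c_str(), r.c_str()) < 0;
    }
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;

int main() {
    headers_t h;
    h["ETag"] = "\"abc123\"";
    return h.count("etag") == 1 ? 0 : 1;   // finds the entry despite different case
}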
@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -18,49 +18,20 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>

#include "s3fs_auth.h"
#include "string_util.h"

using namespace std;

//-------------------------------------------------------------------
// Utility Function
//-------------------------------------------------------------------
char* s3fs_base64(unsigned char* input, size_t length)
{
    static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
    char* result;

    if(!input || 0 >= length){
        return NULL;
    }
    if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
        return NULL; // ENOMEM
    }

    unsigned char parts[4];
    size_t        rpos;
    size_t        wpos;
    for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
        parts[0] = (input[rpos] & 0xfc) >> 2;
        parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
        parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
        parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;

        result[wpos++] = base[parts[0]];
        result[wpos++] = base[parts[1]];
        result[wpos++] = base[parts[2]];
        result[wpos++] = base[parts[3]];
    }
    result[wpos] = '\0';

    return result;
}

string s3fs_get_content_md5(int fd)
{
    unsigned char* md5hex;
@ -84,22 +55,37 @@ string s3fs_get_content_md5(int fd)
string s3fs_md5sum(int fd, off_t start, ssize_t size)
{
    size_t digestlen = get_md5_digest_length();
    char md5[2 * digestlen + 1];
    char hexbuf[3];
    unsigned char* md5hex;

    if(NULL == (md5hex = s3fs_md5hexsum(fd, start, size))){
        return string("");
    }

    memset(md5, 0, 2 * digestlen + 1);
    for(size_t pos = 0; pos < digestlen; pos++){
        snprintf(hexbuf, 3, "%02x", md5hex[pos]);
        strncat(md5, hexbuf, 2);
    }
    std::string md5 = s3fs_hex(md5hex, digestlen);
    free(md5hex);

    return string(md5);
    return md5;
}

string s3fs_sha256sum(int fd, off_t start, ssize_t size)
{
    size_t digestlen = get_sha256_digest_length();
    char sha256[2 * digestlen + 1];
    char hexbuf[3];
    unsigned char* sha256hex;

    if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
        return string("");
    }

    memset(sha256, 0, 2 * digestlen + 1);
    for(size_t pos = 0; pos < digestlen; pos++){
        snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
        strncat(sha256, hexbuf, 2);
    }
    free(sha256hex);

    return string(sha256);
}

/*
2579 src/curl.cpp (file diff suppressed because it is too large)
210 src/curl.h
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -17,9 +17,17 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef S3FS_CURL_H_
#define S3FS_CURL_H_

#include <cassert>

//----------------------------------------------
// Symbols
//----------------------------------------------
static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;

//----------------------------------------------
// class BodyData
//----------------------------------------------
@@ -65,7 +73,7 @@ struct filepart
{
  bool uploaded;          // does finish uploading
  std::string etag;       // expected etag value
  int fd;                 // base file(temporary full file) discriptor
  int fd;                 // base file(temporary full file) descriptor
  off_t startpos;         // seek fd point for uploading
  ssize_t size;           // uploading size
  etaglist_t* etaglist;   // use only parallel upload
@@ -115,6 +123,35 @@ typedef std::map<CURL*, progress_t> curlprogress_t;

class S3fsMultiCurl;

//----------------------------------------------
// class CurlHandlerPool
//----------------------------------------------

class CurlHandlerPool
{
public:
  explicit CurlHandlerPool(int maxHandlers)
    : mMaxHandlers(maxHandlers)
    , mHandlers(NULL)
    , mIndex(-1)
  {
    assert(maxHandlers > 0);
  }

  bool Init();
  bool Destroy();

  CURL* GetHandler();
  void ReturnHandler(CURL* h);

private:
  int mMaxHandlers;

  pthread_mutex_t mLock;
  CURL** mHandlers;
  int mIndex;
};
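The pool amortizes curl_easy_init() cost by recycling easy handles behind a mutex. A hedged sketch of a call site (hypothetical function, assuming only the class declaration above; error handling kept minimal):

#include <curl/curl.h>

// Illustrative use of CurlHandlerPool; not code from this diff.
void do_request_with_pool(CurlHandlerPool& pool)
{
    CURL* hCurl = pool.GetHandler();   // borrow a cached easy handle
    if(!hCurl){
        return;                        // pool exhausted or not initialized
    }
    curl_easy_setopt(hCurl, CURLOPT_URL, "https://example.com/");
    curl_easy_perform(hCurl);
    curl_easy_reset(hCurl);            // clear per-request options before recycling
    pool.ReturnHandler(hCurl);         // hand the handle back for reuse
}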
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
@@ -122,10 +159,27 @@ typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;

// storage class(rrs)
enum storage_class_t {
  STANDARD,
  STANDARD_IA,
  REDUCED_REDUNDANCY
};

// sse type
enum sse_type_t {
  SSE_DISABLE = 0, // not use server side encrypting
  SSE_S3,          // server side encrypting by S3 key
  SSE_C,           // server side encrypting by custom key
  SSE_KMS          // server side encrypting by kms id
};
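Each SSE mode maps onto different x-amz-server-side-encryption request headers. An illustrative sketch of the dispatch this enum implies; the header names are the documented S3 ones, but the helper itself is hypothetical, not the code in curl.cpp:

#include <string>

// Illustrative only: map an sse_type_t (declared above) to the primary
// request header for that encryption mode.
static std::string sse_header_for(sse_type_t type)
{
    switch(type){
        case SSE_S3:
            return "x-amz-server-side-encryption: AES256";
        case SSE_KMS:
            // a real request also carries x-amz-server-side-encryption-aws-kms-key-id
            return "x-amz-server-side-encryption: aws:kms";
        case SSE_C:
            // SSE-C instead sends the customer key, algorithm, and key MD5
            // in x-amz-server-side-encryption-customer-* headers
            return "x-amz-server-side-encryption-customer-algorithm: AES256";
        case SSE_DISABLE:
        default:
            return "";
    }
}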
// share
#define SHARE_MUTEX_DNS 0
#define SHARE_MUTEX_SSL_SESSION 1
#define SHARE_MUTEX_MAX 2
enum {
  SHARE_MUTEX_DNS = 0,
  SHARE_MUTEX_SSL_SESSION = 1,
  SHARE_MUTEX_MAX = 2,
};

// Class for lapping curl
//
@@ -149,14 +203,18 @@ class S3fsCurl
    REQTYPE_COPYMULTIPOST,
    REQTYPE_MULTILIST,
    REQTYPE_IAMCRED,
    REQTYPE_ABORTMULTIUPLOAD
    REQTYPE_ABORTMULTIUPLOAD,
    REQTYPE_IAMROLE
  };

  // class variables
  static pthread_mutex_t curl_handles_lock;
  static pthread_mutex_t curl_share_lock[SHARE_MUTEX_MAX];
  static bool is_initglobal_done;
  static CurlHandlerPool* sCurlPool;
  static int sCurlPoolSize;
  static CURLSH* hCurlShare;
  static bool is_cert_check;
  static bool is_dns_cache;
  static bool is_ssl_session_cache;
  static long connect_timeout;
@@ -164,23 +222,33 @@ class S3fsCurl
  static int retries;
  static bool is_public_bucket;
  static std::string default_acl; // TODO: to enum
  static bool is_use_rrs;
  static storage_class_t storage_class;
  static sseckeylist_t sseckeys;
  static bool is_use_sse;
  static std::string ssekmsid;
  static sse_type_t ssetype;
  static bool is_content_md5;
  static bool is_verbose;
  static std::string AWSAccessKeyId;
  static std::string AWSSecretAccessKey;
  static std::string AWSAccessToken;
  static time_t AWSAccessTokenExpire;
  static bool is_ecs;
  static bool is_ibm_iam_auth;
  static std::string IAM_cred_url;
  static size_t IAM_field_count;
  static std::string IAM_token_field;
  static std::string IAM_expiry_field;
  static std::string IAM_role;
  static long ssl_verify_hostname;
  static curltime_t curl_times;
  static curlprogress_t curl_progress;
  static std::string curl_ca_bundle;
  static mimes_t mimeTypes;
  static std::string userAgent;
  static int max_parallel_cnt;
  static off_t multipart_size;
  static bool is_sigv4;
  static bool is_ua; // User-Agent

  // variables
  CURL* hCurl;
@@ -204,12 +272,15 @@ class S3fsCurl
  int b_postdata_remaining;  // backup for retrying
  off_t b_partdata_startpos; // backup for retrying
  ssize_t b_partdata_size;   // backup for retrying
  bool b_ssekey_pos;         // backup for retrying
  std::string b_ssekey_md5;  // backup for retrying
  int b_ssekey_pos;          // backup for retrying
  std::string b_ssevalue;    // backup for retrying
  sse_type_t b_ssetype;      // backup for retrying
  std::string op;            // the HTTP verb of the request ("PUT", "GET", etc.)
  std::string query_string;  // request query string

public:
  // constructor/destructor
  S3fsCurl(bool ahbe = false);
  explicit S3fsCurl(bool ahbe = false);
  ~S3fsCurl();

private:
@@ -238,21 +309,31 @@ class S3fsCurl

  static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
  static bool SetIAMCredentials(const char* response);
  static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
  static bool SetIAMRoleFromMetaData(const char* response);
  static bool LoadEnvSseCKeys(void);
  static bool LoadEnvSseKmsid(void);
  static bool PushbackSseKeys(std::string& onekey);
  static bool AddUserAgent(CURL* hCurl);

  static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);

  // methods
  bool ResetHandle(void);
  bool RemakeHandle(void);
  bool ClearInternalData(void);
  std::string CalcSignature(std::string method, std::string strMD5, std::string content_type, std::string date, std::string resource);
  void insertV4Headers();
  void insertV2Headers();
  void insertIBMIAMHeaders();
  void insertAuthHeaders();
  std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
  std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
  bool GetUploadId(std::string& upload_id);
  int GetIAMCredentials(void);

  int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
  int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
  int UploadMultipartPostSetup(const char* tpath, int part_num, std::string& upload_id);
  int UploadMultipartPostRequest(const char* tpath, int part_num, std::string& upload_id);
  int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
  int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
  bool UploadMultipartPostComplete();

public:
  // class methods
@@ -262,8 +343,9 @@ class S3fsCurl
  static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
  static bool CheckIAMCredentialUpdate(void);

  // class methods(valiables)
  static std::string LookupMimeType(std::string name);
  // class methods(variables)
  static std::string LookupMimeType(const std::string& name);
  static bool SetCheckCertificate(bool isCertCheck);
  static bool SetDnsCache(bool isCache);
  static bool SetSslSessionCache(bool isCache);
  static long SetConnectTimeout(long timeout);
@@ -273,37 +355,60 @@ class S3fsCurl
  static bool SetPublicBucket(bool flag);
  static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
  static std::string SetDefaultAcl(const char* acl);
  static bool SetUseRrs(bool flag);
  static bool GetUseRrs(void) { return S3fsCurl::is_use_rrs; }
  static bool SetSseKeys(const char* filepath);
  static bool LoadEnvSseKeys(void);
  static std::string GetDefaultAcl();
  static storage_class_t SetStorageClass(storage_class_t storage_class);
  static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
  static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
  static sse_type_t SetSseType(sse_type_t type);
  static sse_type_t GetSseType(void) { return S3fsCurl::ssetype; }
  static bool IsSseDisable(void) { return (SSE_DISABLE == S3fsCurl::ssetype); }
  static bool IsSseS3Type(void) { return (SSE_S3 == S3fsCurl::ssetype); }
  static bool IsSseCType(void) { return (SSE_C == S3fsCurl::ssetype); }
  static bool IsSseKmsType(void) { return (SSE_KMS == S3fsCurl::ssetype); }
  static bool FinalCheckSse(void);
  static bool SetSseCKeys(const char* filepath);
  static bool SetSseKmsid(const char* kmsid);
  static bool IsSetSseKmsId(void) { return !S3fsCurl::ssekmsid.empty(); }
  static const char* GetSseKmsId(void) { return S3fsCurl::ssekmsid.c_str(); }
  static bool GetSseKey(std::string& md5, std::string& ssekey);
  static bool GetSseKeyMd5(int pos, std::string& md5);
  static int GetSseKeyCount(void);
  static bool IsSseCustomMode(void);
  static bool SetUseSse(bool flag);
  static bool GetUseSse(void) { return S3fsCurl::is_use_sse; }
  static bool SetContentMd5(bool flag);
  static bool SetVerbose(bool flag);
  static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
  static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
  static bool IsSetAccessKeyId(void){
    return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
  static bool IsSetAccessKeyID(void){
    return (0 < S3fsCurl::AWSAccessKeyId.size());
  }
  static bool IsSetAccessKeys(void){
    return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
  }
  static long SetSslVerifyHostname(long value);
  static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
  static int SetMaxParallelCount(int value);
  static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
  static bool SetIsECS(bool flag);
  static bool SetIsIBMIAMAuth(bool flag);
  static size_t SetIAMFieldCount(size_t field_count);
  static std::string SetIAMCredentialsURL(const char* url);
  static std::string SetIAMTokenField(const char* token_field);
  static std::string SetIAMExpiryField(const char* expiry_field);
  static std::string SetIAMRole(const char* role);
  static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
  static bool SetMultipartSize(off_t size);
  static off_t GetMultipartSize(void) { return S3fsCurl::multipart_size; }
  static bool SetSignatureV4(bool isset) { bool bresult = S3fsCurl::is_sigv4; S3fsCurl::is_sigv4 = isset; return bresult; }
  static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
  static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
  static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
  static void InitUserAgent(void);

  // methods
  bool CreateCurlHandle(bool force = false);
  bool DestroyCurlHandle(void);

  bool AddSseKeyRequestHead(std::string& md5, bool is_copy);
  bool LoadIAMRoleFromMetaData(void);
  bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
  bool GetResponseCode(long& responseCode);
  int RequestPerform(void);
  int DeleteRequest(const char* tpath);
@@ -314,17 +419,21 @@ class S3fsCurl
  int HeadRequest(const char* tpath, headers_t& meta);
  int PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy);
  int PutRequest(const char* tpath, headers_t& meta, int fd);
  int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, std::string& ssekeymd5);
  int PreGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size, sse_type_t ssetype, std::string& ssevalue);
  int GetObjectRequest(const char* tpath, int fd, off_t start = -1, ssize_t size = -1);
  int CheckBucket(void);
  int ListBucketRequest(const char* tpath, const char* query);
  int PreMultipartPostRequest(const char* tpath, headers_t& meta, std::string& upload_id, bool is_copy);
  int CompleteMultipartPostRequest(const char* tpath, std::string& upload_id, etaglist_t& parts);
  int UploadMultipartPostRequest(const char* tpath, int part_num, const std::string& upload_id);
  int MultipartListRequest(std::string& body);
  int AbortMultipartUpload(const char* tpath, std::string& upload_id);
  int MultipartHeadRequest(const char* tpath, off_t size, headers_t& meta, bool is_copy);
  int MultipartUploadRequest(const char* tpath, headers_t& meta, int fd, bool is_copy);
  int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
  int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);

  // methods(valiables)
  // methods(variables)
  CURL* GetCurlHandle(void) const { return hCurl; }
  std::string GetPath(void) const { return path; }
  std::string GetBasePath(void) const { return base_path; }
@@ -351,14 +460,13 @@ class S3fsCurl
//
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl);    // callback for succeed multi request
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failuer and retrying
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying

class S3fsMultiCurl
{
private:
  static int max_multireq;

  CURLM* hMulti;
  s3fscurlmap_t cMap_all; // all of curl requests
  s3fscurlmap_t cMap_req; // curl requests are sent

@@ -370,6 +478,8 @@ class S3fsMultiCurl
  int MultiPerform(void);
  int MultiRead(void);

  static void* RequestPerformWrapper(void* arg);

public:
  S3fsMultiCurl();
  ~S3fsMultiCurl();
@@ -384,36 +494,6 @@ class S3fsMultiCurl
  int Request(void);
};

//----------------------------------------------
// class AdditionalHeader
//----------------------------------------------
typedef std::list<int> charcnt_list_t;
typedef std::map<std::string, std::string> headerpair_t;
typedef std::map<std::string, headerpair_t> addheader_t;

class AdditionalHeader
{
private:
  static AdditionalHeader singleton;
  bool is_enable;
  charcnt_list_t charcntlist;
  addheader_t addheader;

public:
  // Reference singleton
  static AdditionalHeader* get(void) { return &singleton; }

  AdditionalHeader();
  ~AdditionalHeader();

  bool Load(const char* file);
  void Unload(void);

  bool AddHeader(headers_t& meta, const char* path) const;
  struct curl_slist* AddHeader(struct curl_slist* list, const char* path) const;
  bool Dump(void) const;
};
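AdditionalHeader is a process-wide singleton that injects extra headers per object path, loaded from a config file. A hedged sketch of the expected call pattern (hypothetical caller and config path, assuming only the declaration above):

// Illustrative use of the AdditionalHeader singleton; "/etc/s3fs/ahbe.conf"
// is a hypothetical config path, not one mandated by s3fs.
bool setup_and_apply(headers_t& meta, const char* object_path)
{
    AdditionalHeader* ah = AdditionalHeader::get();  // singleton accessor
    if(!ah->Load("/etc/s3fs/ahbe.conf")){
        return false;                                // no extra headers configured
    }
    return ah->AddHeader(meta, object_path);         // merge per-path headers into meta
}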
//----------------------------------------------
// Utility Functions
//----------------------------------------------
@@ -421,7 +501,13 @@ std::string GetContentMD5(int fd);
unsigned char* md5hexsum(int fd, off_t start, ssize_t size);
std::string md5sum(int fd, off_t start, ssize_t size);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* data);
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
std::string get_sorted_header_keys(const struct curl_slist* list);
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
std::string get_header_value(const struct curl_slist* list, const std::string &key);
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
std::string prepare_url(const char* url);
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp

#endif // S3FS_CURL_H_
1908 src/fdcache.cpp (file diff suppressed because it is too large)
143 src/fdcache.h
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -20,6 +20,9 @@
#ifndef FD_CACHE_H_
#define FD_CACHE_H_

#include <sys/statvfs.h>
#include "curl.h"

//------------------------------------------------
// CacheFileStat
//------------------------------------------------
@@ -34,8 +37,10 @@ class CacheFileStat

public:
  static bool DeleteCacheFileStat(const char* path);
  static bool CheckCacheFileStatTopDir(void);
  static bool DeleteCacheFileStatDirectory(void);

  CacheFileStat(const char* tpath = NULL);
  explicit CacheFileStat(const char* tpath = NULL);
  ~CacheFileStat();

  bool Open(void);
@@ -52,40 +57,49 @@ struct fdpage
{
  off_t offset;
  size_t bytes;
  bool init;
  bool loaded;

  fdpage(off_t start = 0, size_t size = 0, bool is_init = false)
    : offset(start), bytes(size), init(is_init) {}
  fdpage(off_t start = 0, size_t size = 0, bool is_loaded = false)
    : offset(start), bytes(size), loaded(is_loaded) {}

  off_t next(void) const { return (offset + bytes); }
  off_t end(void) const { return (0 < bytes ? offset + bytes - 1 : 0); }
};
typedef std::list<struct fdpage*> fdpage_list_t;

class FdEntity;

//
// Management of loading area/modifying
//
class PageList
{
  friend class FdEntity; // only one method access directly pages.

private:
  fdpage_list_t pages;

private:
  void Clear(void);
  bool Compress(void);
  bool Parse(off_t new_pos);

public:
  static void FreeList(fdpage_list_t& list);

  PageList(off_t size = 0, bool is_init = false);
  explicit PageList(size_t size = 0, bool is_loaded = false);
  ~PageList();

  off_t Size(void) const;
  int Resize(off_t size, bool is_init);
  int Init(off_t size, bool is_init);
  bool IsInit(off_t start, off_t size);
  bool SetInit(off_t start, off_t size, bool is_init = true);
  bool FindUninitPage(off_t start, off_t& resstart, size_t& ressize);
  int GetUninitPages(fdpage_list_t& uninit_list, off_t start = 0);
  bool Init(size_t size, bool is_loaded);
  size_t Size(void) const;
  bool Resize(size_t size, bool is_loaded);

  bool IsPageLoaded(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
  bool SetPageLoadedStatus(off_t start, size_t size, bool is_loaded = true, bool is_compress = true);
  bool FindUnloadedPage(off_t start, off_t& resstart, size_t& ressize) const;
  size_t GetTotalUnloadedPageSize(off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list
  int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, size_t size = 0) const; // size=0 is checking to end of list

  bool Serialize(CacheFileStat& file, bool is_output);
  void Dump(void);
};
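The rename from init/uninit to loaded/unloaded makes the intent of PageList clearer: each page is a byte range of the cache file that either has or has not been fetched from S3 yet. A hedged sketch of how a read path might consult it (hypothetical caller, assuming only the declaration above):

#include <sys/types.h>
#include <cstddef>

// Illustrative read-side check against the PageList declared above.
// Returns true when [start, start+size) still needs to be fetched from S3.
bool needs_download(PageList& pagelist, off_t start, size_t size)
{
    if(pagelist.IsPageLoaded(start, size)){
        return false;                 // whole range already in the local cache
    }
    // After fetching the range, the caller would mark it loaded:
    // pagelist.SetPageLoadedStatus(start, size, true);
    return true;
}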
@@ -99,40 +113,67 @@ class FdEntity
  pthread_mutex_t fdent_lock;
  bool is_lock_init;
  PageList pagelist;
  int refcnt;             // reference count
  std::string path;       // object path
  std::string cachepath;  // local cache file path
  int fd;                 // file discriptor(tmp file or cache file)
  FILE* file;             // file pointer(tmp file or cache file)
  bool is_modify;         // if file is changed, this flag is true
  int refcnt;             // reference count
  std::string path;       // object path
  std::string cachepath;  // local cache file path
                          // (if this is empty, does not load/save pagelist.)
  std::string mirrorpath; // mirror file path to local cache file path
  int fd;                 // file descriptor(tmp file or cache file)
  FILE* pfile;            // file pointer(tmp file or cache file)
  bool is_modify;         // if file is changed, this flag is true
  headers_t orgmeta;      // original headers at opening
  size_t size_orgmeta;    // original file size in original headers

  std::string upload_id;  // for no cached multipart uploading when no disk space
  etaglist_t etaglist;    // for no cached multipart uploading when no disk space
  off_t mp_start;         // start position for no cached multipart(write method only)
  size_t mp_size;         // size for no cached multipart(write method only)

private:
  static int FillFile(int fd, unsigned char byte, size_t size, off_t start);

  void Clear(void);
  int Dup(void);
  bool SetAllStatus(bool is_enable);
  int OpenMirrorFile(void);
  bool SetAllStatus(bool is_loaded);  // [NOTE] not locking
  //bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
  bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }

public:
  FdEntity(const char* tpath = NULL, const char* cpath = NULL);
  explicit FdEntity(const char* tpath = NULL, const char* cpath = NULL);
  ~FdEntity();

  void Close(void);
  bool IsOpen(void) const { return (-1 != fd); }
  int Open(off_t size = -1, time_t time = -1);
  const char* GetPath(void) const { return path.c_str(); }
  int GetFd(void) const { return fd; }
  int SetMtime(time_t time);
  bool GetSize(off_t& size);
  bool GetMtime(time_t& time);
  bool GetStats(struct stat& st);
  int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
  bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
  int Dup(bool no_fd_lock_wait = false);

  const char* GetPath(void) const { return path.c_str(); }
  void SetPath(const std::string &newpath) { path = newpath; }
  int GetFd(void) const { return fd; }

  bool GetStats(struct stat& st);
  int SetMtime(time_t time);
  bool UpdateMtime(void);
  bool GetSize(size_t& size);
  bool SetMode(mode_t mode);
  bool SetUId(uid_t uid);
  bool SetGId(gid_t gid);
  bool SetContentType(const char* path);

  int Load(off_t start = 0, size_t size = 0);               // size=0 means loading to end
  int NoCacheLoadAndPost(off_t start = 0, size_t size = 0); // size=0 means loading to end
  int NoCachePreMultipartPost(void);
  int NoCacheMultipartPost(int tgfd, off_t start, size_t size);
  int NoCacheCompleteMultipartPost(void);

  int RowFlush(const char* tpath, bool force_sync = false);
  int Flush(bool force_sync = false) { return RowFlush(NULL, force_sync); }

  bool SetAllEnable(void) { return SetAllStatus(true); }
  bool SetAllDisable(void) { return SetAllStatus(false); }
  bool LoadFull(off_t* size = NULL, bool force_load = false);
  int Load(off_t start, off_t size);
  int RowFlush(const char* tpath, headers_t& meta, bool force_sync = false);
  int Flush(headers_t& meta, bool force_sync = false) { return RowFlush(NULL, meta, force_sync); }
  ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
  ssize_t Write(const char* bytes, off_t start, size_t size);

  void CleanupCache();
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*

@@ -144,11 +185,17 @@ class FdManager
private:
  static FdManager singleton;
  static pthread_mutex_t fd_manager_lock;
  static pthread_mutex_t cache_cleanup_lock;
  static bool is_lock_init;
  static std::string cache_dir;
  static size_t page_size;
  static bool check_cache_dir_exist;
  static size_t free_disk_space; // limit free disk space

  fdent_map_t fent;
  fdent_map_t fent;

private:
  static uint64_t GetFreeDiskSpace(const char* path);
  void CleanupCacheDirInternal(const std::string &path = "");

public:
  FdManager();
@@ -162,14 +209,24 @@ class FdManager
  static bool SetCacheDir(const char* dir);
  static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
  static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
  static size_t SetPageSize(size_t size);
  static size_t GetPageSize(void) { return FdManager::page_size; }
  static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
  static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
  static bool CheckCacheTopDir(void);
  static bool MakeRandomTempPath(const char* path, std::string& tmppath);
  static bool SetCheckCacheDirExist(bool is_check);
  static bool CheckCacheDirExist(void);

  FdEntity* GetFdEntity(const char* path);
  FdEntity* Open(const char* path, off_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
  FdEntity* ExistOpen(const char* path) { return Open(path, -1, -1, false, false); }
  static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
  static size_t SetEnsureFreeDiskSpace(size_t size);
  static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
  static bool IsSafeDiskSpace(const char* path, size_t size);

  FdEntity* GetFdEntity(const char* path, int existfd = -1);
  FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
  FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
  void Rename(const std::string &from, const std::string &to);
  bool Close(FdEntity* ent);
  bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
  void CleanupCacheDir();
};

#endif // FD_CACHE_H_
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -103,7 +103,7 @@ bool s3fs_destroy_crypt_mutex(void)

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
  if(!key || !data || !digest || !digestlen){
    return false;
  }

@@ -120,15 +120,34 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
  return true;
}

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  if(!key || !data || !digest || !digestlen){
    return false;
  }

  if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
    return false;
  }

  struct hmac_sha256_ctx ctx_hmac;
  hmac_sha256_set_key(&ctx_hmac, keylen, reinterpret_cast<const uint8_t*>(key));
  hmac_sha256_update(&ctx_hmac, datalen, reinterpret_cast<const uint8_t*>(data));
  hmac_sha256_digest(&ctx_hmac, SHA256_DIGEST_SIZE, reinterpret_cast<uint8_t*>(*digest));
  *digestlen = SHA256_DIGEST_SIZE;

  return true;
}

#else // USE_GNUTLS_NETTLE

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
  if(!key || !data || !digest || !digestlen){
    return false;
  }

  if(0 >= (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
  if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
    return false;
  }
  if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
@@ -142,16 +161,34 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
  return true;
}

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  if(!key || !data || !digest || !digestlen){
    return false;
  }

  if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
    return false;
  }
  if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
    return false;
  }
  if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
    free(*digest);
    *digest = NULL;
    return false;
  }
  return true;
}

#endif // USE_GNUTLS_NETTLE

//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
#define MD5_DIGEST_LENGTH 16

size_t get_md5_digest_length(void)
{
  return MD5_DIGEST_LENGTH;
  return 16;
}

#ifdef USE_GNUTLS_NETTLE
@@ -178,7 +215,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
      break;
    }else if(-1 == bytes){
      // error
      DPRNNN("file read error(%d)", errno);
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    md5_update(&ctx_md5, bytes, buf);
@@ -222,7 +259,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)

  memset(buf, 0, 512);
  if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_md5, GCRY_MD_MD5, 0))){
    DPRNN("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
    S3FS_PRN_ERR("MD5 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
    return NULL;
  }

@@ -234,7 +271,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
      break;
    }else if(-1 == bytes){
      // error
      DPRNNN("file read error(%d)", errno);
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    gcry_md_write(ctx_md5, buf, bytes);
@@ -256,6 +293,152 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)

#endif // USE_GNUTLS_NETTLE

//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
{
  return 32;
}

#ifdef USE_GNUTLS_NETTLE
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
  (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
  if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
    return false;
  }

  struct sha256_ctx ctx_sha256;
  sha256_init(&ctx_sha256);
  sha256_update(&ctx_sha256, datalen, data);
  sha256_digest(&ctx_sha256, *digestlen, *digest);

  return true;
}

unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
  struct sha256_ctx ctx_sha256;
  unsigned char buf[512];
  ssize_t bytes;
  unsigned char* result;

  // seek to top of file.
  if(-1 == lseek(fd, start, SEEK_SET)){
    return NULL;
  }

  memset(buf, 0, 512);
  sha256_init(&ctx_sha256);

  for(ssize_t total = 0; total < size; total += bytes){
    bytes = 512 < (size - total) ? 512 : (size - total);
    bytes = read(fd, buf, bytes);
    if(0 == bytes){
      // end of file
      break;
    }else if(-1 == bytes){
      // error
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    sha256_update(&ctx_sha256, bytes, buf);
    memset(buf, 0, 512);
  }
  if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
    return NULL;
  }
  sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);

  if(-1 == lseek(fd, start, SEEK_SET)){
    free(result);
    return NULL;
  }

  return result;
}

#else // USE_GNUTLS_NETTLE

bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
  (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
  if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
    return false;
  }

  gcry_md_hd_t ctx_sha256;
  gcry_error_t err;
  if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
    S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
    free(*digest);
    return false;
  }
  gcry_md_write(ctx_sha256, data, datalen);
  memcpy(*digest, gcry_md_read(ctx_sha256, 0), *digestlen);
  gcry_md_close(ctx_sha256);

  return true;
}

unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
  gcry_md_hd_t ctx_sha256;
  gcry_error_t err;
  char buf[512];
  ssize_t bytes;
  unsigned char* result;

  if(-1 == size){
    struct stat st;
    if(-1 == fstat(fd, &st)){
      return NULL;
    }
    size = static_cast<ssize_t>(st.st_size);
  }

  // seek to top of file.
  if(-1 == lseek(fd, start, SEEK_SET)){
    return NULL;
  }

  memset(buf, 0, 512);
  if(GPG_ERR_NO_ERROR != (err = gcry_md_open(&ctx_sha256, GCRY_MD_SHA256, 0))){
    S3FS_PRN_ERR("SHA256 context creation failure: %s/%s", gcry_strsource(err), gcry_strerror(err));
    return NULL;
  }

  for(ssize_t total = 0; total < size; total += bytes){
    bytes = 512 < (size - total) ? 512 : (size - total);
    bytes = read(fd, buf, bytes);
    if(0 == bytes){
      // end of file
      break;
    }else if(-1 == bytes){
      // error
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    gcry_md_write(ctx_sha256, buf, bytes);
    memset(buf, 0, 512);
  }
  if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
    return NULL;
  }
  memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());
  gcry_md_close(ctx_sha256);

  if(-1 == lseek(fd, start, SEEK_SET)){
    free(result);
    return NULL;
  }

  return result;
}

#endif // USE_GNUTLS_NETTLE

/*
 * Local variables:
 * tab-width: 4
105 src/nss_auth.cpp
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -83,9 +83,9 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
  if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
  if(!key || !data || !digest || !digestlen){
    return false;
  }

@@ -94,17 +94,17 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
  PK11Context* Context;
  SECStatus SecStatus;
  unsigned char tmpdigest[64];
  SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), keylen};
  SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
  SECItem NullSecItem = {siBuffer, NULL, 0};

  if(NULL == (Slot = PK11_GetInternalKeySlot())){
    return false;
  }
  if(NULL == (pKey = PK11_ImportSymKey(Slot, CKM_SHA_1_HMAC, PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
  if(NULL == (pKey = PK11_ImportSymKey(Slot, (is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), PK11_OriginUnwrap, CKA_SIGN, &KeySecItem, NULL))){
    PK11_FreeSlot(Slot);
    return false;
  }
  if(NULL == (Context = PK11_CreateContextBySymKey(CKM_SHA_1_HMAC, CKA_SIGN, pKey, &NullSecItem))){
  if(NULL == (Context = PK11_CreateContextBySymKey((is_sha256 ? CKM_SHA256_HMAC : CKM_SHA_1_HMAC), CKA_SIGN, pKey, &NullSecItem))){
    PK11_FreeSymKey(pKey);
    PK11_FreeSlot(Slot);
    return false;
@@ -132,6 +132,16 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t
  return true;
}

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
}

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
}

//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
@@ -172,7 +182,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
      break;
    }else if(-1 == bytes){
      // error
      DPRNNN("file read error(%d)", errno);
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    PK11_DigestOp(md5ctx, buf, bytes);
@@ -193,6 +203,87 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
  return result;
}

//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
{
  return SHA256_LENGTH;
}

bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
  (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
  if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
    return false;
  }

  PK11Context* sha256ctx;
  unsigned int sha256outlen;
  sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);

  PK11_DigestOp(sha256ctx, data, datalen);
  PK11_DigestFinal(sha256ctx, *digest, &sha256outlen, *digestlen);
  PK11_DestroyContext(sha256ctx, PR_TRUE);
  *digestlen = sha256outlen;

  return true;
}

unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
  PK11Context* sha256ctx;
  unsigned char buf[512];
  ssize_t bytes;
  unsigned char* result;
  unsigned int sha256outlen;

  if(-1 == size){
    struct stat st;
    if(-1 == fstat(fd, &st)){
      return NULL;
    }
    size = static_cast<ssize_t>(st.st_size);
  }

  // seek to top of file.
  if(-1 == lseek(fd, start, SEEK_SET)){
    return NULL;
  }

  memset(buf, 0, 512);
  sha256ctx = PK11_CreateDigestContext(SEC_OID_SHA256);

  for(ssize_t total = 0; total < size; total += bytes){
    bytes = 512 < (size - total) ? 512 : (size - total);
    bytes = read(fd, buf, bytes);
    if(0 == bytes){
      // end of file
      break;
    }else if(-1 == bytes){
      // error
      S3FS_PRN_ERR("file read error(%d)", errno);
      PK11_DestroyContext(sha256ctx, PR_TRUE);
      return NULL;
    }
    PK11_DigestOp(sha256ctx, buf, bytes);
    memset(buf, 0, 512);
  }
  if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
    PK11_DestroyContext(sha256ctx, PR_TRUE);
    return NULL;
  }
  PK11_DigestFinal(sha256ctx, result, &sha256outlen, get_sha256_digest_length());
  PK11_DestroyContext(sha256ctx, PR_TRUE);

  if(-1 == lseek(fd, start, SEEK_SET)){
    free(result);
    return NULL;
  }

  return result;
}

/*
 * Local variables:
 * tab-width: 4
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -32,6 +32,7 @@
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <string>
@@ -94,7 +95,9 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)

static unsigned long s3fs_crypt_get_threadid(void)
{
  return static_cast<unsigned long>(pthread_self());
  // For FreeBSD etc, some system's pthread_t is structure pointer.
  // Then we use cast like C style(not C++) instead of ifdef.
  return (unsigned long)(pthread_self());
}

static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int line)
@@ -102,7 +105,7 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
  struct CRYPTO_dynlock_value* dyndata;

  if(NULL == (dyndata = static_cast<struct CRYPTO_dynlock_value*>(malloc(sizeof(struct CRYPTO_dynlock_value))))){
    DPRNCRIT("Could not allocate memory for CRYPTO_dynlock_value");
    S3FS_PRN_CRIT("Could not allocate memory for CRYPTO_dynlock_value");
    return NULL;
  }
  pthread_mutex_init(&(dyndata->dyn_mutex), NULL);
@@ -131,14 +134,14 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
bool s3fs_init_crypt_mutex(void)
{
  if(s3fs_crypt_mutex){
    FPRNNN("s3fs_crypt_mutex is not NULL, destory it.");
    S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
    if(!s3fs_destroy_crypt_mutex()){
      DPRN("Failed to s3fs_crypt_mutex");
      S3FS_PRN_ERR("Failed to s3fs_crypt_mutex");
      return false;
    }
  }
  if(NULL == (s3fs_crypt_mutex = static_cast<pthread_mutex_t*>(malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t))))){
    DPRNCRIT("Could not allocate memory for s3fs_crypt_mutex");
    S3FS_PRN_CRIT("Could not allocate memory for s3fs_crypt_mutex");
    return false;
  }
  for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
@@ -180,20 +183,34 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
// Utility Function for HMAC
//-------------------------------------------------------------------
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
  if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
  if(!key || !data || !digest || !digestlen){
    return false;
  }
  (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
  if(NULL == ((*digest) = (unsigned char*)malloc(*digestlen))){
    return false;
  }
  HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
  if(is_sha256){
    HMAC(EVP_sha256(), key, keylen, data, datalen, *digest, digestlen);
  }else{
    HMAC(EVP_sha1(), key, keylen, data, datalen, *digest, digestlen);
  }

  return true;
}

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, false);
}

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
  return s3fs_HMAC_RAW(key, keylen, data, datalen, digest, digestlen, true);
}
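All three crypto backends now expose the same contract: on success the function mallocs *digest and the caller must free it. A hedged sketch of a caller built on that contract (illustrative only, assuming just the s3fs_HMAC256 declaration above; not actual s3fs signing code):

#include <cstdlib>
#include <cstring>

// Demonstrates the malloc'd-digest ownership rule for s3fs_HMAC256().
bool hmac_step(const void* key, size_t keylen, const char* msg)
{
    unsigned char* digest    = NULL;
    unsigned int   digestlen = 0;
    if(!s3fs_HMAC256(key, keylen, reinterpret_cast<const unsigned char*>(msg), strlen(msg), &digest, &digestlen)){
        return false;
    }
    // ... feed digest/digestlen into the next derivation round ...
    free(digest);   // the callee allocated it; the caller releases it
    return true;
}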
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
@@ -233,7 +250,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
      break;
    }else if(-1 == bytes){
      // error
      DPRNNN("file read error(%d)", errno);
      S3FS_PRN_ERR("file read error(%d)", errno);
      return NULL;
    }
    MD5_Update(&md5ctx, buf, bytes);
@@ -253,6 +270,85 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
  return result;
}

//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
size_t get_sha256_digest_length(void)
{
  return SHA256_DIGEST_LENGTH;
}

bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
{
  (*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);
  if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
    return false;
  }

  const EVP_MD* md = EVP_get_digestbyname("sha256");
  EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
  EVP_DigestInit_ex(mdctx, md, NULL);
  EVP_DigestUpdate(mdctx, data, datalen);
  EVP_DigestFinal_ex(mdctx, *digest, digestlen);
  EVP_MD_CTX_destroy(mdctx);

  return true;
}

unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
{
  const EVP_MD* md = EVP_get_digestbyname("sha256");
  EVP_MD_CTX* sha256ctx;
  char buf[512];
  ssize_t bytes;
  unsigned char* result;

  if(-1 == size){
    struct stat st;
    if(-1 == fstat(fd, &st)){
      return NULL;
    }
    size = static_cast<ssize_t>(st.st_size);
  }

  // seek to top of file.
  if(-1 == lseek(fd, start, SEEK_SET)){
    return NULL;
  }

  sha256ctx = EVP_MD_CTX_create();
  EVP_DigestInit_ex(sha256ctx, md, NULL);

  memset(buf, 0, 512);
  for(ssize_t total = 0; total < size; total += bytes){
    bytes = 512 < (size - total) ? 512 : (size - total);
    bytes = read(fd, buf, bytes);
    if(0 == bytes){
      // end of file
      break;
    }else if(-1 == bytes){
      // error
      S3FS_PRN_ERR("file read error(%d)", errno);
      EVP_MD_CTX_destroy(sha256ctx);
      return NULL;
    }
    EVP_DigestUpdate(sha256ctx, buf, bytes);
    memset(buf, 0, 512);
  }
  if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
    EVP_MD_CTX_destroy(sha256ctx);
    return NULL;
  }
  EVP_DigestFinal_ex(sha256ctx, result, NULL);
  EVP_MD_CTX_destroy(sha256ctx);

  if(-1 == lseek(fd, start, SEEK_SET)){
    free(result);
    return NULL;
  }
  return result;
}

/*
 * Local variables:
 * tab-width: 4
2709 src/s3fs.cpp (file diff suppressed because it is too large)
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -21,7 +21,8 @@
#define S3FS_S3_H_

#define FUSE_USE_VERSION 26
#define FIVE_GB 5368709120LL

static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
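Both forms denote the same value, since 5 * 1024^3 = 5368709120, so the change swaps a magic literal for a typed constant without altering behavior. An illustrative compile-time check of that equivalence (a sketch assuming C++11 static_assert; not part of the actual header):

#include <stdint.h>

// Hypothetical check only: the computed form equals the old literal.
static_assert(5LL * 1024LL * 1024LL * 1024LL == 5368709120LL,
              "FIVE_GB literal and computed form must agree");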
#include <fuse.h>

@@ -84,8 +85,6 @@

#endif // HAVE_MALLOC_TRIM

char* get_object_sseckey_md5(const char* path);

#endif // S3FS_S3_H_

/*
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -20,15 +20,18 @@
#ifndef S3FS_AUTH_H_
#define S3FS_AUTH_H_

#include <string>
#include <sys/types.h>

//-------------------------------------------------------------------
// Utility functions for Authentication
//-------------------------------------------------------------------
//
// in common_auth.cpp
//
char* s3fs_base64(unsigned char* input, size_t length);
std::string s3fs_get_content_md5(int fd);
std::string s3fs_md5sum(int fd, off_t start, ssize_t size);
std::string s3fs_sha256sum(int fd, off_t start, ssize_t size);

//
// in xxxxxx_auth.cpp
@@ -39,8 +42,12 @@ bool s3fs_destroy_global_ssl(void);
bool s3fs_init_crypt_mutex(void);
bool s3fs_destroy_crypt_mutex(void);
bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_md5_digest_length(void);
unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size);
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen);
size_t get_sha256_digest_length(void);
unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size);

#endif // S3FS_AUTH_H_
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2013 Takeshi Nakatani <ggtakec.com>
+ * Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -46,7 +46,7 @@
 using namespace std;

 //-------------------------------------------------------------------
-// Global valiables
+// Global variables
 //-------------------------------------------------------------------
 std::string mount_prefix = "";

@@ -110,7 +110,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
   if(objects.end() != (iter = objects.find(chkname))){
     // found "dir/" object --> not add new object.
     // and add normalization
-    return insert_nomalized(orgname.c_str(), chkname.c_str(), true);
+    return insert_normalized(orgname.c_str(), chkname.c_str(), true);
   }
 }

@@ -135,10 +135,10 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
 }

   // add normalization
-  return insert_nomalized(orgname.c_str(), newname.c_str(), is_dir);
+  return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
 }

-bool S3ObjList::insert_nomalized(const char* name, const char* normalized, bool is_dir)
+bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
 {
   if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
     return false;
@@ -233,7 +233,7 @@ bool S3ObjList::GetLastName(std::string& lastname) const
 {
   bool result = false;
   lastname = "";
-  for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); iter++){
+  for(s3obj_t::const_iterator iter = objects.begin(); iter != objects.end(); ++iter){
     if((*iter).second.orgname.length()){
       if(0 > strcmp(lastname.c_str(), (*iter).second.orgname.c_str())){
         lastname = (*iter).second.orgname;
@@ -253,7 +253,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
 {
   s3obj_t::const_iterator iter;

-  for(iter = objects.begin(); objects.end() != iter; iter++){
+  for(iter = objects.begin(); objects.end() != iter; ++iter){
     if(OnlyNormalized && 0 != (*iter).second.normalname.length()){
       continue;
     }
@@ -275,7 +275,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
   s3obj_h_t::iterator hiter;
   s3obj_list_t::const_iterator liter;

-  for(liter = list.begin(); list.end() != liter; liter++){
+  for(liter = list.begin(); list.end() != liter; ++liter){
     string strtmp = (*liter);
     if(1 < strtmp.length() && '/' == strtmp[strtmp.length() - 1]){
       strtmp = strtmp.substr(0, strtmp.length() - 1);
@@ -425,51 +425,25 @@ void free_mvnodes(MVNODE *head)
 //-------------------------------------------------------------------
 // Class AutoLock
 //-------------------------------------------------------------------
-AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex), is_locked(false)
+AutoLock::AutoLock(pthread_mutex_t* pmutex, bool no_wait) : auto_mutex(pmutex)
 {
-  Lock();
+  if (no_wait) {
+    is_lock_acquired = pthread_mutex_trylock(auto_mutex) == 0;
+  } else {
+    is_lock_acquired = pthread_mutex_lock(auto_mutex) == 0;
+  }
 }

+bool AutoLock::isLockAcquired() const
+{
+  return is_lock_acquired;
+}
+
 AutoLock::~AutoLock()
 {
-  Unlock();
+  if (is_lock_acquired) {
+    pthread_mutex_unlock(auto_mutex);
+  }
 }
-
-bool AutoLock::Lock(void)
-{
-  if(!auto_mutex){
-    return false;
-  }
-  if(is_locked){
-    // already locked
-    return true;
-  }
-  try{
-    pthread_mutex_lock(auto_mutex);
-    is_locked = true;
-  }catch(exception& e){
-    is_locked = false;
-    return false;
-  }
-  return true;
-}
-
-bool AutoLock::Unlock(void)
-{
-  if(!auto_mutex){
-    return false;
-  }
-  if(!is_locked){
-    // already unlocked
-    return true;
-  }
-  try{
-    pthread_mutex_unlock(auto_mutex);
-    is_locked = false;
-  }catch(exception& e){
-    return false;
-  }
-  return true;
-}

 //-------------------------------------------------------------------
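The rework above replaces the old Lock()/Unlock() pair with RAII-style acquisition: the constructor takes the mutex (via pthread_mutex_trylock() when no_wait is true), isLockAcquired() reports whether it got it, and the destructor releases it. A minimal usage sketch of that pattern; the cache_lock mutex and the flush work are hypothetical, not part of this patch:

    #include <pthread.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;  // hypothetical mutex

    void try_flush_cache()
    {
      AutoLock lock(&cache_lock, true);   // no_wait = true: probe instead of blocking
      if(!lock.isLockAcquired()){
        return;                           // another thread holds it; skip this round
      }
      // ... protected work here; the destructor unlocks on scope exit ...
    }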
@@ -478,27 +452,28 @@ bool AutoLock::Unlock(void)
 // get user name from uid
 string get_username(uid_t uid)
 {
-  static size_t maxlen = 0;   // set onece
-  int result;
+  static size_t maxlen = 0;   // set once
   char* pbuf;
   struct passwd pwinfo;
   struct passwd* ppwinfo = NULL;

   // make buffer
   if(0 == maxlen){
-    if(0 > (maxlen = (size_t)sysconf(_SC_GETPW_R_SIZE_MAX))){
-      DPRNNN("could not get max pw length.");
+    long res = sysconf(_SC_GETPW_R_SIZE_MAX);
+    if(0 > res){
+      S3FS_PRN_WARN("could not get max pw length.");
       maxlen = 0;
       return string("");
     }
+    maxlen = res;
   }
   if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
-    DPRNCRIT("failed to allocate memory.");
+    S3FS_PRN_CRIT("failed to allocate memory.");
     return string("");
   }
-  // get group infomation
-  if(0 != (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
-    DPRNNN("could not get pw infomation.");
+  // get group information
+  if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){
+    S3FS_PRN_WARN("could not get pw information.");
     free(pbuf);
     return string("");
   }
@@ -512,9 +487,9 @@ string get_username(uid_t uid)
   return name;
 }

-int is_uid_inculde_group(uid_t uid, gid_t gid)
+int is_uid_include_group(uid_t uid, gid_t gid)
 {
-  static size_t maxlen = 0;   // set onece
+  static size_t maxlen = 0;   // set once
   int result;
   char* pbuf;
   struct group ginfo;
@@ -522,19 +497,30 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)

   // make buffer
   if(0 == maxlen){
-    if(0 > (maxlen = (size_t)sysconf(_SC_GETGR_R_SIZE_MAX))){
-      DPRNNN("could not get max name length.");
+    long res = sysconf(_SC_GETGR_R_SIZE_MAX);
+    if(0 > res){
+      S3FS_PRN_ERR("could not get max name length.");
       maxlen = 0;
       return -ERANGE;
     }
+    maxlen = res;
   }
   if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
-    DPRNCRIT("failed to allocate memory.");
+    S3FS_PRN_CRIT("failed to allocate memory.");
     return -ENOMEM;
   }
-  // get group infomation
-  if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
-    DPRNNN("could not get group infomation.");
+  // get group information
+  while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
+    free(pbuf);
+    maxlen *= 2;
+    if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
+      S3FS_PRN_CRIT("failed to allocate memory.");
+      return -ENOMEM;
+    }
+  }
+
+  if(0 != result){
+    S3FS_PRN_ERR("could not get group information(%d).", result);
     free(pbuf);
     return -result;
   }
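The group lookup now retries getgrgid_r() in a loop, doubling the buffer whenever the call reports ERANGE, because sysconf(_SC_GETGR_R_SIZE_MAX) is only a hint and entries for large groups can exceed it. The same grow-and-retry idiom in isolation, as a hedged sketch (the helper name, starting size and simplified error handling are assumptions):

    #include <errno.h>
    #include <grp.h>
    #include <stdlib.h>

    // Look up the group entry for gid, growing the scratch buffer on ERANGE.
    // Returns the entry, or NULL when the lookup or an allocation fails.
    struct group* lookup_group(gid_t gid, struct group* ginfo, char** pbuf)
    {
      size_t len = 4096;                 // arbitrary starting guess
      struct group* result = NULL;
      *pbuf = (char*)malloc(len);
      while(*pbuf && ERANGE == getgrgid_r(gid, ginfo, *pbuf, len, &result)){
        free(*pbuf);
        len *= 2;                        // same doubling strategy as the patch
        *pbuf = (char*)malloc(len);
      }
      return result;
    }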
@@ -565,6 +551,14 @@ int is_uid_inculde_group(uid_t uid, gid_t gid)
 //-------------------------------------------------------------------
 // safe variant of dirname
 // dirname clobbers path so let it operate on a tmp copy
+string mydirname(const char* path)
+{
+  if(!path || '\0' == path[0]){
+    return string("");
+  }
+  return mydirname(string(path));
+}
+
 string mydirname(string path)
 {
   return string(dirname((char*)path.c_str()));
@@ -572,6 +566,14 @@ string mydirname(string path)

 // safe variant of basename
 // basename clobbers path so let it operate on a tmp copy
+string mybasename(const char* path)
+{
+  if(!path || '\0' == path[0]){
+    return string("");
+  }
+  return mybasename(string(path));
+}
+
 string mybasename(string path)
 {
   return string(basename((char*)path.c_str()));
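These overloads exist because POSIX dirname() and basename() are allowed to modify the buffer they are given; copying the argument into a temporary std::string keeps the caller's data intact. A short illustration of the hazard (the path literal is just an example):

    #include <libgen.h>
    #include <cstdio>

    int main()
    {
      char raw[] = "/bucket/dir/file.txt";
      printf("%s\n", dirname(raw));   // prints "/bucket/dir", and raw is now clobbered
      // mydirname("/bucket/dir/file.txt") leaves the original untouched,
      // because it operates on an internal copy.
      return 0;
    }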
@@ -580,23 +582,102 @@ string mybasename(string path)
 // mkdir --parents
 int mkdirp(const string& path, mode_t mode)
 {
   string base;
   string component;
   stringstream ss(path);
   while (getline(ss, component, '/')) {
     base += "/" + component;
-    mkdir(base.c_str(), mode);
+
+    struct stat st;
+    if(0 == stat(base.c_str(), &st)){
+      if(!S_ISDIR(st.st_mode)){
+        return EPERM;
+      }
+    }else{
+      if(0 != mkdir(base.c_str(), mode)){
+        return errno;
+      }
+    }
   }
   return 0;
 }

+// get existed directory path
+string get_exist_directory_path(const string& path)
+{
+  string existed("/");    // "/" is existed.
+  string base;
+  string component;
+  stringstream ss(path);
+  while (getline(ss, component, '/')) {
+    if(base != "/"){
+      base += "/";
+    }
+    base += component;
+    struct stat st;
+    if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){
+      existed = base;
+    }else{
+      break;
+    }
+  }
+  return existed;
+}
+
+bool check_exist_dir_permission(const char* dirpath)
+{
+  if(!dirpath || '\0' == dirpath[0]){
+    return false;
+  }
+
+  // exists
+  struct stat st;
+  if(0 != stat(dirpath, &st)){
+    if(ENOENT == errno){
+      // dir does not exist
+      return true;
+    }
+    if(EACCES == errno){
+      // could not access directory
+      return false;
+    }
+    // something error occurred
+    return false;
+  }
+
+  // check type
+  if(!S_ISDIR(st.st_mode)){
+    // path is not directory
+    return false;
+  }
+
+  // check permission
+  uid_t myuid = geteuid();
+  if(myuid == st.st_uid){
+    if(S_IRWXU != (st.st_mode & S_IRWXU)){
+      return false;
+    }
+  }else{
+    if(1 == is_uid_include_group(myuid, st.st_gid)){
+      if(S_IRWXG != (st.st_mode & S_IRWXG)){
+        return false;
+      }
+    }else{
+      if(S_IRWXO != (st.st_mode & S_IRWXO)){
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
 bool delete_files_in_dir(const char* dir, bool is_remove_own)
 {
   DIR* dp;
   struct dirent* dent;

   if(NULL == (dp = opendir(dir))){
-    DPRNINFO("could not open dir(%s) - errno(%d)", dir, errno);
+    S3FS_PRN_ERR("could not open dir(%s) - errno(%d)", dir, errno);
     return false;
   }
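mkdirp() now checks each component with stat() and returns EPERM when something on the path exists but is not a directory, instead of calling mkdir() unconditionally and discarding its result. Together with get_exist_directory_path() and check_exist_dir_permission() this is the machinery behind the cache-directory checks described in the help text below. A hedged sketch of how a caller might combine them; the cache-directory wrapper and path are illustrative only:

    #include <string>

    // Ensure a hypothetical local cache directory exists and is usable.
    bool prepare_cache_dir(const std::string& dir)
    {
      if(0 != mkdirp(dir, 0700)){                      // creates missing components
        return false;                                  // EPERM or an errno from mkdir
      }
      return check_exist_dir_permission(dir.c_str());  // owner/group/other rwx check
    }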
@@ -609,20 +690,20 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
     fullpath += dent->d_name;
     struct stat st;
     if(0 != lstat(fullpath.c_str(), &st)){
-      DPRN("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
+      S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
       closedir(dp);
       return false;
     }
     if(S_ISDIR(st.st_mode)){
       // dir -> Reentrant
       if(!delete_files_in_dir(fullpath.c_str(), true)){
-        DPRNINFO("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
+        S3FS_PRN_ERR("could not remove sub dir(%s) - errno(%d)", fullpath.c_str(), errno);
         closedir(dp);
         return false;
       }
     }else{
       if(0 != unlink(fullpath.c_str())){
-        DPRN("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
+        S3FS_PRN_ERR("could not remove file(%s) - errno(%d)", fullpath.c_str(), errno);
         closedir(dp);
         return false;
       }
@@ -631,7 +712,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
   closedir(dp);

   if(is_remove_own && 0 != rmdir(dir)){
-    DPRN("could not remove dir(%s) - errno(%d)", dir, errno);
+    S3FS_PRN_ERR("could not remove dir(%s) - errno(%d)", dir, errno);
     return false;
   }
   return true;
@@ -664,8 +745,8 @@ off_t get_size(const char *s)

 off_t get_size(headers_t& meta)
 {
-  headers_t::const_iterator iter;
-  if(meta.end() == (iter = meta.find("Content-Length"))){
+  headers_t::const_iterator iter = meta.find("Content-Length");
+  if(meta.end() == iter){
     return 0;
   }
   return get_size((*iter).second.c_str());
@@ -701,13 +782,30 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
   }else{
     if(meta.end() != (iter = meta.find("Content-Type"))){
       string strConType = (*iter).second;
+      // Leave just the mime type, remove any optional parameters (eg charset)
+      string::size_type pos = strConType.find(";");
+      if(string::npos != pos){
+        strConType = strConType.substr(0, pos);
+      }
       if(strConType == "application/x-directory"){
         mode |= S_IFDIR;
       }else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
         if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
           mode |= S_IFDIR;
         }else{
-          mode |= S_IFREG;
+          if(complement_stat){
+            // If complement lack stat mode, when the object has '/' character at end of name
+            // and content type is text/plain and the object's size is 0 or 1, it should be
+            // directory.
+            off_t size = get_size(meta);
+            if(strConType == "text/plain" && (0 == size || 1 == size)){
+              mode |= S_IFDIR;
+            }else{
+              mode |= S_IFREG;
+            }
+          }else{
+            mode |= S_IFREG;
+          }
         }
       }else{
         mode |= S_IFREG;
@@ -717,6 +815,11 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
       }
     }
   }
+  // If complement lack stat mode, when it's mode is not set any permission,
+  // the object is added minimal mode only for read permission.
+  if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
+    mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
+  }
 }else{
   if(!checkdir){
     // cut dir/reg flag.
@@ -789,8 +892,8 @@ time_t get_lastmodified(const char* s)

 time_t get_lastmodified(headers_t& meta)
 {
-  headers_t::const_iterator iter;
-  if(meta.end() == (iter = meta.find("Last-Modified"))){
+  headers_t::const_iterator iter = meta.find("Last-Modified");
+  if(meta.end() == iter){
     return 0;
   }
   return get_lastmodified((*iter).second.c_str());
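get_mode() now trims optional MIME parameters before comparing content types, so a header like "application/x-directory; charset=UTF-8" still matches the directory check. The normalization step on its own, as a small self-contained sketch:

    #include <string>

    // Reduce "type/subtype; charset=..." to "type/subtype".
    std::string mime_type_only(std::string conType)
    {
      std::string::size_type pos = conType.find(";");
      if(std::string::npos != pos){
        conType = conType.substr(0, pos);
      }
      return conType;
    }
    // mime_type_only("application/x-directory; charset=UTF-8") == "application/x-directory"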
@@ -847,6 +950,17 @@ void show_help (void)
 "\n"
 "Mount an Amazon S3 bucket as a file system.\n"
 "\n"
+"Usage:\n"
+" mounting\n"
+" s3fs bucket[:/path] mountpoint [options]\n"
+" s3fs mountpoint [options(must specify bucket= option)]\n"
+"\n"
+" umounting\n"
+" umount mountpoint\n"
+"\n"
+" utility mode (remove interrupted multipart uploading objects)\n"
+" s3fs -u bucket\n"
+"\n"
 " General forms for s3fs and FUSE/mount options:\n"
 " -o opt[,opt...]\n"
 " -o opt [-o opt] ...\n"
@@ -857,10 +971,15 @@ void show_help (void)
 "\n"
 " <option_name>=<option_value>\n"
 "\n"
+" bucket\n"
+" - if it is not specified bucket name(and path) in command line,\n"
+" must specify this option after -o option for bucket name.\n"
+"\n"
 " default_acl (default=\"private\")\n"
-" - the default canned acl to apply to all written s3 objects\n"
-" see http://aws.amazon.com/documentation/s3/ for the \n"
-" full list of canned acls\n"
+" - the default canned acl to apply to all written s3 objects,\n"
+" e.g., private, public-read. empty string means do not send\n"
+" header. see http://aws.amazon.com/documentation/s3/ for the\n"
+" full list of canned acls\n"
 "\n"
 " retries (default=\"2\")\n"
 " - number of times to retry a failed s3 transaction\n"
@@ -868,34 +987,69 @@ void show_help (void)
 " use_cache (default=\"\" which means disabled)\n"
 " - local folder to use for local file cache\n"
 "\n"
+" check_cache_dir_exist (default is disable)\n"
+" - if use_cache is set, check if the cache directory exists.\n"
+" if this option is not specified, it will be created at runtime\n"
+" when the cache directory does not exist.\n"
+"\n"
+" del_cache (delete local file cache)\n"
+" - delete local file cache when s3fs starts and exits.\n"
+"\n"
-" use_rrs (default is disable)\n"
-" - this option makes Amazon's Reduced Redundancy Storage enable.\n"
+" storage_class (default=\"standard\")\n"
+" - store object with specified storage class. Possible values:\n"
+" standard, standard_ia, and reduced_redundancy.\n"
 "\n"
 " use_sse (default is disable)\n"
-" - use Amazon's Server-Side Encryption or Server-Side Encryption\n"
-" with Customer-Provided Encryption Keys.\n"
-" this option can not be specified with use_rrs. specifying only \n"
-" \"use_sse\" or \"use_sse=1\" enables Server-Side Encryption.\n"
-" (use_sse=1 for old version)\n"
-" specifying this option with file path which has some SSE-C\n"
-" secret key enables Server-Side Encryption with Customer-Provided\n"
-" Encryption Keys.(use_sse=file)\n"
-" the file must be 600 permission. the file can have some lines,\n"
-" each line is one SSE-C key. the first line in file is used as\n"
-" Customer-Provided Encryption Keys for uploading and changing\n"
-" headers etc.\n"
-" if there are some keys after first line, those are used\n"
-" downloading object which are encrypted by not first key.\n"
-" so that, you can keep all SSE-C keys in file, that is SSE-C\n"
-" key history.\n"
-" if AWSSSECKEYS environment is set, you can set SSE-C key instead\n"
+" - Specify three type Amazon's Server-Side Encryption: SSE-S3,\n"
+" SSE-C or SSE-KMS. SSE-S3 uses Amazon S3-managed encryption\n"
+" keys, SSE-C uses customer-provided encryption keys, and\n"
+" SSE-KMS uses the master key which you manage in AWS KMS.\n"
+" You can specify \"use_sse\" or \"use_sse=1\" enables SSE-S3\n"
+" type(use_sse=1 is old type parameter).\n"
+" Case of setting SSE-C, you can specify \"use_sse=custom\",\n"
+" \"use_sse=custom:<custom key file path>\" or\n"
+" \"use_sse=<custom key file path>\"(only <custom key file path>\n"
+" specified is old type parameter). You can use \"c\" for\n"
+" short \"custom\".\n"
+" The custom key file must be 600 permission. The file can\n"
+" have some lines, each line is one SSE-C key. The first line\n"
+" in file is used as Customer-Provided Encryption Keys for\n"
+" uploading and changing headers etc. If there are some keys\n"
+" after first line, those are used downloading object which\n"
+" are encrypted by not first key. So that, you can keep all\n"
+" SSE-C keys in file, that is SSE-C key history.\n"
+" If you specify \"custom\"(\"c\") without file path, you\n"
+" need to set custom key by load_sse_c option or AWSSSECKEYS\n"
+" environment.(AWSSSECKEYS environment has some SSE-C keys\n"
+" with \":\" separator.) This option is used to decide the\n"
+" SSE type. So that if you do not want to encrypt a object\n"
+" at uploading, but you need to decrypt encrypted\n"
+" object at downloading, you can use load_sse_c option instead\n"
+" of this option.\n"
+" For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
+" \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
+" If you can specify SSE-KMS type with your <kms id> in AWS\n"
+" KMS, you can set it after \"kmsid:\"(or \"k:\"). If you\n"
+" specify only \"kmsid\"(\"k\"), you need to set AWSSSEKMSID\n"
+" environment which value is <kms id>. You must be careful\n"
+" about that you can not use the KMS id which is not same EC2\n"
+" region.\n"
 "\n"
+" load_sse_c - specify SSE-C keys\n"
+" Specify the custom-provided encryption keys file path for decrypting\n"
+" at downloading.\n"
+" If you use the custom-provided encryption key at uploading, you\n"
+" specify with \"use_sse=custom\". The file has many lines, one line\n"
+" means one custom key. So that you can keep all SSE-C keys in file,\n"
+" that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"
+" file contents.\n"
+"\n"
 " public_bucket (default=\"\" which means disabled)\n"
-" - anonymously mount a public bucket when set to 1\n"
+" - anonymously mount a public bucket when set to 1, ignores the \n"
+" $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
+" S3 does not allow copy object api for anonymous users, then\n"
+" s3fs sets nocopyapi option automatically when public_bucket=1\n"
+" option is specified.\n"
 "\n"
 " passwd_file (default=\"\")\n"
 " - specify which s3fs password file to use\n"
@@ -905,26 +1059,28 @@ void show_help (void)
 " file is the additional HTTP header by file(object) extension.\n"
 " The configuration file format is below:\n"
 " -----------\n"
-" line = [file suffix] HTTP-header [HTTP-values]\n"
+" line = [file suffix or regex] HTTP-header [HTTP-values]\n"
 " file suffix = file(object) suffix, if this field is empty,\n"
-" it means \"*\"(all object).\n"
+" it means \"reg:(.*)\".(=all object).\n"
+" regex = regular expression to match the file(object) path.\n"
+" this type starts with \"reg:\" prefix.\n"
 " HTTP-header = additional HTTP header name\n"
 " HTTP-values = additional HTTP header value\n"
 " -----------\n"
 " Sample:\n"
 " -----------\n"
-" .gz Content-Encoding gzip\n"
-" .Z Content-Encoding compress\n"
-" X-S3FS-MYHTTPHEAD myvalue\n"
+" .gz Content-Encoding gzip\n"
+" .Z Content-Encoding compress\n"
+" reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2\n"
 " -----------\n"
 " A sample configuration file is uploaded in \"test\" directory.\n"
 " If you specify this option for set \"Content-Encoding\" HTTP \n"
 " header, please take care for RFC 2616.\n"
 "\n"
-" connect_timeout (default=\"10\" seconds)\n"
+" connect_timeout (default=\"300\" seconds)\n"
 " - time to wait for connection before giving up\n"
 "\n"
-" readwrite_timeout (default=\"30\" seconds)\n"
+" readwrite_timeout (default=\"60\" seconds)\n"
 " - time to wait between read/write activity before giving up\n"
 "\n"
 " max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
@@ -932,6 +1088,7 @@ void show_help (void)
 "\n"
 " stat_cache_expire (default is no expire)\n"
 " - specify expire time(seconds) for entries in the stat cache.\n"
+" This expire time indicates the time since stat cached.\n"
 "\n"
 " enable_noobj_cache (default is disable)\n"
 " - enable cache entries for the object which does not exist.\n"
@@ -943,6 +1100,10 @@ void show_help (void)
 " You can specify this option for performance, s3fs memorizes \n"
 " in stat cache that the object(file or directory) does not exist.\n"
 "\n"
+" no_check_certificate\n"
+" - server certificate won't be checked against the available \n"
+" certificate authorities.\n"
+"\n"
 " nodnscache (disable dns cache)\n"
 " - s3fs is always using dns cache, this option make dns cache disable.\n"
 "\n"
@@ -961,28 +1122,75 @@ void show_help (void)
 " at once. It is necessary to set this value depending on a CPU \n"
 " and a network band.\n"
 "\n"
-" fd_page_size (default=\"52428800\"(50MB))\n"
-" - number of internal management page size for each file descriptor.\n"
-" For delayed reading and writing by s3fs, s3fs manages pages which \n"
-" is separated from object. Each pages has a status that data is \n"
-" already loaded(or not loaded yet).\n"
-" This option should not be changed when you don't have a trouble \n"
-" with performance.\n"
+" multipart_size (default=\"10\")\n"
+" - part size, in MB, for each multipart request.\n"
 "\n"
-" url (default=\"http://s3.amazonaws.com\")\n"
-" - sets the url to use to access amazon s3\n"
+" ensure_diskfree (default same multipart_size value)\n"
+" - sets MB to ensure disk free space. s3fs makes file for\n"
+" downloading, uploading and caching files. If the disk free\n"
+" space is smaller than this value, s3fs do not use diskspace\n"
+" as possible in exchange for the performance.\n"
+"\n"
+" singlepart_copy_limit (default=\"5120\")\n"
+" - maximum size, in MB, of a single-part copy before trying \n"
+" multipart copy.\n"
+"\n"
+" url (default=\"https://s3.amazonaws.com\")\n"
+" - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
+" then you can set \"url=http://s3.amazonaws.com\".\n"
+" If you do not use https, please specify the URL with the url\n"
+" option.\n"
+"\n"
+" endpoint (default=\"us-east-1\")\n"
+" - sets the endpoint to use on signature version 4\n"
+" If this option is not specified, s3fs uses \"us-east-1\" region as\n"
+" the default. If the s3fs could not connect to the region specified\n"
+" by this option, s3fs could not run. But if you do not specify this\n"
+" option, and if you can not connect with the default region, s3fs\n"
+" will retry to automatically connect to the other region. So s3fs\n"
+" can know the correct region name, because s3fs can find it in an\n"
+" error from the S3 server.\n"
+"\n"
+" sigv2 (default is signature version 4)\n"
+" - sets signing AWS requests by using Signature Version 2\n"
+"\n"
+" mp_umask (default is \"0000\")\n"
+" - sets umask for the mount point directory.\n"
+" If allow_other option is not set, s3fs allows access to the mount\n"
+" point only to the owner. In the opposite case s3fs allows access\n"
+" to all users as the default. But if you set the allow_other with\n"
+" this option, you can control the permissions of the\n"
+" mount point by this option like umask.\n"
+"\n"
 " nomultipart (disable multipart uploads)\n"
 "\n"
 " enable_content_md5 (default is disable)\n"
-" - verifying uploaded object without multipart by content-md5 header.\n"
+" - ensure data integrity during writes with MD5 hash.\n"
 "\n"
-" iam_role (default is no role)\n"
-" - set the IAM Role that will supply the credentials from the \n"
-" instance meta-data.\n"
+" ecs\n"
+" - This option instructs s3fs to query the ECS container credential\n"
+" metadata address instead of the instance metadata address.\n"
+"\n"
+" iam_role (default is no IAM role)\n"
+" - This option requires the IAM role name or \"auto\". If you specify\n"
+" \"auto\", s3fs will automatically use the IAM role names that are set\n"
+" to an instance. If you specify this option without any argument, it\n"
+" is the same as that you have specified the \"auto\".\n"
+"\n"
+" ibm_iam_auth\n"
+" - This option instructs s3fs to use IBM IAM authentication.\n"
+" In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
+" IBM's Service-Instance-ID and APIKey, respectively.\n"
+"\n"
+" use_xattr (default is not handling the extended attribute)\n"
+" Enable to handle the extended attribute(xattrs).\n"
+" If you set this option, you can use the extended attribute.\n"
+" For example, encfs and ecryptfs need to support the extended attribute.\n"
+" Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
+" copy command with preserve=mode.\n"
 "\n"
-" noxmlns (disable registing xml name space)\n"
-" disable registing xml name space for response of \n"
+" noxmlns (disable registering xml name space)\n"
+" disable registering xml name space for response of \n"
 " ListBucketResult and ListVersionsResult etc. Default name \n"
 " space is looked up from \"http://s3.amazonaws.com/doc/2006-03-01\".\n"
 " This option should not be specified now, because s3fs looks up\n"
@@ -1002,13 +1210,61 @@ void show_help (void)
 " option does not use copy-api for all command(ex. chmod, chown,\n"
 " touch, mv, etc), but this option does not use copy-api for\n"
 " only rename command(ex. mv). If this option is specified with\n"
-" nocopapi, the s3fs ignores it.\n"
+" nocopyapi, then s3fs ignores it.\n"
 "\n"
 " use_path_request_style (use legacy API calling style)\n"
-" Enble compatibility with S3-like APIs which do not support\n"
+" Enable compatibility with S3-like APIs which do not support\n"
 " the virtual-host request style, by using the older path request\n"
 " style.\n"
 "\n"
+" noua (suppress User-Agent header)\n"
+" Usually s3fs outputs of the User-Agent in \"s3fs/<version> (commit\n"
+" hash <hash>; <using ssl library name>)\" format.\n"
+" If this option is specified, s3fs suppresses the output of the\n"
+" User-Agent.\n"
+"\n"
+" dbglevel (default=\"crit\")\n"
+" Set the debug message level. set value as crit(critical), err\n"
+" (error), warn(warning), info(information) to debug level.\n"
+" default debug level is critical. If s3fs run with \"-d\" option,\n"
+" the debug level is set information. When s3fs catch the signal\n"
+" SIGUSR2, the debug level is bumpup.\n"
+"\n"
+" curldbg - put curl debug message\n"
+" Put the debug message from libcurl when this option is specified.\n"
+"\n"
+" cipher_suites - customize TLS cipher suite list\n"
+" Customize the list of TLS cipher suites.\n"
+" Expects a colon separated list of cipher suite names.\n"
+" A list of available cipher suites, depending on your TLS engine,\n"
+" can be found on the CURL library documentation:\n"
+" https://curl.haxx.se/docs/ssl-ciphers.html\n"
+"\n"
+" complement_stat (complement lack of file/directory mode)\n"
+" s3fs complements lack of information about file/directory mode\n"
+" if a file or a directory object does not have x-amz-meta-mode\n"
+" header. As default, s3fs does not complements stat information\n"
+" for a object, then the object will not be able to be allowed to\n"
+" list/modify.\n"
+"\n"
+" notsup_compat_dir (not support compatibility directory types)\n"
+" As a default, s3fs supports objects of the directory type as\n"
+" much as possible and recognizes them as directories.\n"
+" Objects that can be recognized as directory objects are \"dir/\",\n"
+" \"dir\", \"dir_$folder$\", and there is a file object that does\n"
+" not have a directory object but contains that directory path.\n"
+" s3fs needs redundant communication to support all these\n"
+" directory types. The object as the directory created by s3fs\n"
+" is \"dir/\". By restricting s3fs to recognize only \"dir/\" as\n"
+" a directory, communication traffic can be reduced. This option\n"
+" is used to give this restriction to s3fs.\n"
+" However, if there is a directory object other than \"dir/\" in\n"
+" the bucket, specifying this option is not recommended. s3fs may\n"
+" not be able to recognize the object correctly if an object\n"
+" created by s3fs exists in the bucket.\n"
+" Please use this option when the directory in the bucket is\n"
+" only \"dir/\" object.\n"
+"\n"
 "FUSE/mount Options:\n"
 "\n"
 " Most of the generic mount options described in 'man mount' are\n"
@@ -1027,12 +1283,11 @@ void show_help (void)
 " -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
 " twice turns on FUSE debug messages to STDOUT.\n"
 " -f FUSE foreground option - do not run as daemon.\n"
-" -s FUSE singlethread option\n"
+" -s FUSE singlethreaded option\n"
 " disable multi-threaded operation\n"
 "\n"
 "\n"
 "Report bugs to <s3fs-devel@googlegroups.com>\n"
-"s3fs home page: <http://code.google.com/p/s3fs/>\n"
+"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
 );
 return;
 }
@@ -1040,12 +1295,12 @@ void show_help (void)
 void show_version(void)
 {
   printf(
-  "Amazon Simple Storage Service File System V%s with %s\n"
+  "Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
   "Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
   "License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
   "This is free software: you are free to change and redistribute it.\n"
   "There is NO WARRANTY, to the extent permitted by law.\n",
-  VERSION, s3fs_crypt_lib_name());
+  VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
   return;
 }
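show_version() now reports the build's commit hash next to the version string. COMMIT_HASH_VAL is presumably injected by the build system, for example on the compiler command line; a defensive fallback such as the following keeps the file compiling without it. This guard is an assumption about the build wiring, not part of the patch:

    // Assumed fallback when the build system does not define the macro.
    #ifndef COMMIT_HASH_VAL
    #define COMMIT_HASH_VAL "unknown"
    #endif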
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -27,7 +27,7 @@
 // Struct
 //
 struct s3obj_entry{
-  std::string normalname; // normalized name: if empty, object is nomalized name.
+  std::string normalname; // normalized name: if empty, object is normalized name.
   std::string orgname;    // original name: if empty, object is original name.
   std::string etag;
   bool        is_dir;
@@ -47,7 +47,7 @@ class S3ObjList
   s3obj_t objects;

 private:
-  bool insert_nomalized(const char* name, const char* normalized, bool is_dir);
+  bool insert_normalized(const char* name, const char* normalized, bool is_dir);
   const s3obj_entry* GetS3Obj(const char* name) const;

   s3obj_t::const_iterator begin(void) const {
@@ -88,14 +88,12 @@ class AutoLock
 {
 private:
   pthread_mutex_t* auto_mutex;
-  bool is_locked;
+  bool is_lock_acquired;

 public:
-  AutoLock(pthread_mutex_t* pmutex = NULL);
+  explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
+  bool isLockAcquired() const;
   ~AutoLock();
-
-  bool Lock(void);
-  bool Unlock(void);
 };

 //-------------------------------------------------------------------
@@ -108,11 +106,15 @@ MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const cha
 void free_mvnodes(MVNODE *head);

 std::string get_username(uid_t uid);
-int is_uid_inculde_group(uid_t uid, gid_t gid);
+int is_uid_include_group(uid_t uid, gid_t gid);

+std::string mydirname(const char* path);
 std::string mydirname(std::string path);
+std::string mybasename(const char* path);
 std::string mybasename(std::string path);
 int mkdirp(const std::string& path, mode_t mode);
+std::string get_exist_directory_path(const std::string& path);
+bool check_exist_dir_permission(const char* dirpath);
 bool delete_files_in_dir(const char* dir, bool is_remove_own);

 time_t get_mtime(const char *s);
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -17,7 +17,9 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+#include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <syslog.h>

@@ -30,6 +32,21 @@

 using namespace std;

+template <class T> std::string str(T value) {
+  std::stringstream s;
+  s << value;
+  return s.str();
+}
+
+template std::string str(short value);
+template std::string str(unsigned short value);
+template std::string str(int value);
+template std::string str(unsigned int value);
+template std::string str(long value);
+template std::string str(unsigned long value);
+template std::string str(long long value);
+template std::string str(unsigned long long value);
+
 static const char hexAlphabet[] = "0123456789ABCDEF";

 off_t s3fs_strtoofft(const char* str, bool is_base_16)
@@ -84,13 +101,6 @@ string lower(string s)
   return s;
 }

-string IntToStr(int n)
-{
-  stringstream result;
-  result << n;
-  return result.str();
-}
-
 string trim_left(const string &s, const string &t /* = SPACES */)
 {
   string d(s);
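Moving the body of str() into the .cpp file compiles only because every instantiation the project needs is listed explicitly below the definition; the header (shown further down) keeps just the declaration. Usage is unchanged, as in this small sketch:

    #include <string>
    // declared in string_util.h: template <class T> std::string str(T value);

    std::string example()
    {
      return str(42) + "/" + str(3000000000UL);   // "42/3000000000"
    }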
@@ -123,26 +133,98 @@ string urlEncode(const string &s)
 {
   string result;
   for (unsigned i = 0; i < s.length(); ++i) {
-    if (s[i] == '/') { // Note- special case for fuse paths...
-      result += s[i];
-    } else if (isalnum(s[i])) {
-      result += s[i];
-    } else if (s[i] == '.' || s[i] == '-' || s[i] == '*' || s[i] == '_') {
-      result += s[i];
-    } else if (s[i] == ' ') {
-      result += '%';
-      result += '2';
-      result += '0';
+    char c = s[i];
+    if (c == '/' // Note- special case for fuse paths...
+        || c == '.'
+        || c == '-'
+        || c == '_'
+        || c == '~'
+        || (c >= 'a' && c <= 'z')
+        || (c >= 'A' && c <= 'Z')
+        || (c >= '0' && c <= '9')) {
+      result += c;
     } else {
       result += "%";
-      result += hexAlphabet[static_cast<unsigned char>(s[i]) / 16];
-      result += hexAlphabet[static_cast<unsigned char>(s[i]) % 16];
+      result += hexAlphabet[static_cast<unsigned char>(c) / 16];
+      result += hexAlphabet[static_cast<unsigned char>(c) % 16];
     }
   }

   return result;
 }

+/**
+ * urlEncode a fuse path,
+ * taking into special consideration "/",
+ * otherwise regular urlEncode.
+ */
+string urlEncode2(const string &s)
+{
+  string result;
+  for (unsigned i = 0; i < s.length(); ++i) {
+    char c = s[i];
+    if (c == '=' // Note- special case for fuse paths...
+        || c == '&' // Note- special case for s3...
+        || c == '%'
+        || c == '.'
+        || c == '-'
+        || c == '_'
+        || c == '~'
+        || (c >= 'a' && c <= 'z')
+        || (c >= 'A' && c <= 'Z')
+        || (c >= '0' && c <= '9')) {
+      result += c;
+    } else {
+      result += "%";
+      result += hexAlphabet[static_cast<unsigned char>(c) / 16];
+      result += hexAlphabet[static_cast<unsigned char>(c) % 16];
+    }
+  }
+  return result;
+}
+
+string urlDecode(const string& s)
+{
+  string result;
+  for(unsigned i = 0; i < s.length(); ++i){
+    if(s[i] != '%'){
+      result += s[i];
+    }else{
+      char ch = 0;
+      if(s.length() <= ++i){
+        break; // wrong format.
+      }
+      ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
+      if(s.length() <= ++i){
+        break; // wrong format.
+      }
+      ch *= 16;
+      ch += ('0' <= s[i] && s[i] <= '9') ? (s[i] - '0') : ('A' <= s[i] && s[i] <= 'F') ? (s[i] - 'A' + 0x0a) : ('a' <= s[i] && s[i] <= 'f') ? (s[i] - 'a' + 0x0a) : 0x00;
+      result += ch;
+    }
+  }
+  return result;
+}
+
 bool takeout_str_dquart(string& str)
 {
   size_t pos;

   // '"' for start
   if(string::npos != (pos = str.find_first_of("\""))){
     str = str.substr(pos + 1);

     // '"' for end
     if(string::npos == (pos = str.find_last_of("\""))){
       return false;
     }
     str = str.substr(0, pos);
     if(string::npos != str.find_first_of("\"")){
       return false;
     }
   }
   return true;
 }

 //
 // ex. target="http://......?keyword=value&..."
 //
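The rewritten encoders whitelist the RFC 3986 unreserved characters, plus '/' for urlEncode() (FUSE paths) and '=', '&' and '%' for urlEncode2(), which runs over strings that already contain query syntax; every other byte is percent-encoded through hexAlphabet. For example:

    // urlEncode("/dir/hello world+") == "/dir/hello%20world%2B"
    // urlEncode2("acl&key=a b")      == "acl&key=a%20b"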
@@ -169,48 +251,11 @@ bool get_keyword_value(string& target, const char* keyword, string& value)
   return true;
 }

-string prepare_url(const char* url)
-{
-  FPRNINFO("URL is %s", url);
-
-  string uri;
-  string host;
-  string path;
-  string url_str = str(url);
-  string token = str("/" + bucket);
-  int bucket_pos = url_str.find(token);
-  int bucket_length = token.size();
-  int uri_length = 7;
-
-  if(!strncasecmp(url_str.c_str(), "https://", 8)){
-    uri_length = 8;
-  }
-  uri = url_str.substr(0, uri_length);
-
-  if(!pathrequeststyle){
-    host = bucket + "." + url_str.substr(uri_length, bucket_pos - uri_length).c_str();
-    path = url_str.substr((bucket_pos + bucket_length));
-  }else{
-    host = url_str.substr(uri_length, bucket_pos - uri_length).c_str();
-    string part = url_str.substr((bucket_pos + bucket_length));
-    if('/' != part[0]){
-      part = "/" + part;
-    }
-    path = "/" + bucket + part;
-  }
-
-  url_str = uri + host + path;
-
-  FPRNINFO("URL changed is %s", url_str.c_str());
-
-  return str(url_str);
-}
-
 /**
  * Returns the current date
  * in a format suitable for a HTTP request header.
  */
-string get_date()
+string get_date_rfc850()
 {
   char buf[100];
   time_t t = time(NULL);
@@ -218,6 +263,125 @@ string get_date()
   return buf;
 }

+void get_date_sigv3(string& date, string& date8601)
+{
+  time_t tm = time(NULL);
+  date = get_date_string(tm);
+  date8601 = get_date_iso8601(tm);
+}
+
+string get_date_string(time_t tm)
+{
+  char buf[100];
+  strftime(buf, sizeof(buf), "%Y%m%d", gmtime(&tm));
+  return buf;
+}
+
+string get_date_iso8601(time_t tm)
+{
+  char buf[100];
+  strftime(buf, sizeof(buf), "%Y%m%dT%H%M%SZ", gmtime(&tm));
+  return buf;
+}
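get_date_string() and get_date_iso8601() emit the two timestamp forms used when signing requests: the YYYYMMDD credential-scope date and the compact ISO 8601 timestamp. Since both go through gmtime(), the output is deterministic for a fixed epoch:

    #include <ctime>
    #include <string>

    void date_example()
    {
      time_t t = 0;                              // 1970-01-01T00:00:00Z
      std::string scope = get_date_string(t);    // "19700101"
      std::string amz   = get_date_iso8601(t);   // "19700101T000000Z"
    }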
+std::string s3fs_hex(const unsigned char* input, size_t length)
+{
+  std::string hex;
+  for(size_t pos = 0; pos < length; ++pos){
+    char hexbuf[3];
+    snprintf(hexbuf, 3, "%02x", input[pos]);
+    hex += hexbuf;
+  }
+  return hex;
+}
+
+char* s3fs_base64(const unsigned char* input, size_t length)
+{
+  static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
+  char* result;
+
+  if(!input || 0 >= length){
+    return NULL;
+  }
+  if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
+    return NULL; // ENOMEM
+  }
+
+  unsigned char parts[4];
+  size_t rpos;
+  size_t wpos;
+  for(rpos = 0, wpos = 0; rpos < length; rpos += 3){
+    parts[0] = (input[rpos] & 0xfc) >> 2;
+    parts[1] = ((input[rpos] & 0x03) << 4) | ((((rpos + 1) < length ? input[rpos + 1] : 0x00) & 0xf0) >> 4);
+    parts[2] = (rpos + 1) < length ? (((input[rpos + 1] & 0x0f) << 2) | ((((rpos + 2) < length ? input[rpos + 2] : 0x00) & 0xc0) >> 6)) : 0x40;
+    parts[3] = (rpos + 2) < length ? (input[rpos + 2] & 0x3f) : 0x40;
+
+    result[wpos++] = base[parts[0]];
+    result[wpos++] = base[parts[1]];
+    result[wpos++] = base[parts[2]];
+    result[wpos++] = base[parts[3]];
+  }
+  result[wpos] = '\0';
+
+  return result;
+}
+
+inline unsigned char char_decode64(const char ch)
+{
+  unsigned char by;
+  if('A' <= ch && ch <= 'Z'){ // A - Z
+    by = static_cast<unsigned char>(ch - 'A');
+  }else if('a' <= ch && ch <= 'z'){ // a - z
+    by = static_cast<unsigned char>(ch - 'a' + 26);
+  }else if('0' <= ch && ch <= '9'){ // 0 - 9
+    by = static_cast<unsigned char>(ch - '0' + 52);
+  }else if('+' == ch){ // +
+    by = 62;
+  }else if('/' == ch){ // /
+    by = 63;
+  }else if('=' == ch){ // =
+    by = 64;
+  }else{ // something wrong
+    by = UCHAR_MAX;
+  }
+  return by;
+}
+
+unsigned char* s3fs_decode64(const char* input, size_t* plength)
+{
+  unsigned char* result;
+  if(!input || 0 == strlen(input) || !plength){
+    return NULL;
+  }
+  if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
+    return NULL; // ENOMEM
+  }
+
+  unsigned char parts[4];
+  size_t input_len = strlen(input);
+  size_t rpos;
+  size_t wpos;
+  for(rpos = 0, wpos = 0; rpos < input_len; rpos += 4){
+    parts[0] = char_decode64(input[rpos]);
+    parts[1] = (rpos + 1) < input_len ? char_decode64(input[rpos + 1]) : 64;
+    parts[2] = (rpos + 2) < input_len ? char_decode64(input[rpos + 2]) : 64;
+    parts[3] = (rpos + 3) < input_len ? char_decode64(input[rpos + 3]) : 64;
+
+    result[wpos++] = ((parts[0] << 2) & 0xfc) | ((parts[1] >> 4) & 0x03);
+    if(64 == parts[2]){
+      break;
+    }
+    result[wpos++] = ((parts[1] << 4) & 0xf0) | ((parts[2] >> 2) & 0x0f);
+    if(64 == parts[3]){
+      break;
+    }
+    result[wpos++] = ((parts[2] << 6) & 0xc0) | (parts[3] & 0x3f);
+  }
+  result[wpos] = '\0';
+  *plength = wpos;
+  return result;
+}

 /*
  * Local variables:
  * tab-width: 4
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,18 +25,15 @@
  */
 #include <string.h>
 #include <syslog.h>
 #include <sys/types.h>

 #include <string>
 #include <sstream>

-#define SPACES " \t\r\n"
-#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
+static const std::string SPACES = " \t\r\n";

-template<typename T> std::string str(T value) {
-  std::stringstream s;
-  s << value;
-  return s.str();
-}
+static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }

+template <class T> std::string str(T value);

 off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);

@@ -44,12 +41,20 @@ std::string trim_left(const std::string &s, const std::string &t = SPACES);
 std::string trim_right(const std::string &s, const std::string &t = SPACES);
 std::string trim(const std::string &s, const std::string &t = SPACES);
 std::string lower(std::string s);
-std::string IntToStr(int);
-std::string get_date();
+std::string get_date_rfc850(void);
+void get_date_sigv3(std::string& date, std::string& date8601);
+std::string get_date_string(time_t tm);
+std::string get_date_iso8601(time_t tm);
 std::string urlEncode(const std::string &s);
-std::string prepare_url(const char* url);
+std::string urlEncode2(const std::string &s);
+std::string urlDecode(const std::string& s);
 bool takeout_str_dquart(std::string& str);
 bool get_keyword_value(std::string& target, const char* keyword, std::string& value);

+std::string s3fs_hex(const unsigned char* input, size_t length);
 char* s3fs_base64(const unsigned char* input, size_t length);
 unsigned char* s3fs_decode64(const char* input, size_t* plength);

 #endif // S3FS_STRING_UTIL_H_

 /*
src/test_string_util.cpp (new file, 83 lines)
@@ -0,0 +1,83 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <limits>
#include <stdint.h>
#include <string>

#include "string_util.h"
#include "test_util.h"

void test_trim()
{
  ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));
  ASSERT_EQUALS(std::string("1234"), trim("1234 "));
  ASSERT_EQUALS(std::string("1234"), trim(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim("1234"));

  ASSERT_EQUALS(std::string("1234 "), trim_left(" 1234 "));
  ASSERT_EQUALS(std::string("1234 "), trim_left("1234 "));
  ASSERT_EQUALS(std::string("1234"), trim_left(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim_left("1234"));

  ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234 "));
  ASSERT_EQUALS(std::string("1234"), trim_right("1234 "));
  ASSERT_EQUALS(std::string(" 1234"), trim_right(" 1234"));
  ASSERT_EQUALS(std::string("1234"), trim_right("1234"));

  ASSERT_EQUALS(std::string("0"), str(0));
  ASSERT_EQUALS(std::string("1"), str(1));
  ASSERT_EQUALS(std::string("-1"), str(-1));
  ASSERT_EQUALS(std::string("9223372036854775807"), str(std::numeric_limits<int64_t>::max()));
  ASSERT_EQUALS(std::string("-9223372036854775808"), str(std::numeric_limits<int64_t>::min()));
  ASSERT_EQUALS(std::string("0"), str(std::numeric_limits<uint64_t>::min()));
  ASSERT_EQUALS(std::string("18446744073709551615"), str(std::numeric_limits<uint64_t>::max()));
}

void test_base64()
{
  size_t len;
  ASSERT_STREQUALS(s3fs_base64(NULL, 0), NULL);
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64(NULL, &len)), NULL);
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>(""), 0), NULL);
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("", &len)), NULL);

  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1"), 1), "MQ==");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MQ==", &len)), "1");
  ASSERT_EQUALS(len, static_cast<size_t>(1));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("12"), 2), "MTI=");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTI=", &len)), "12");
  ASSERT_EQUALS(len, static_cast<size_t>(2));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("123"), 3), "MTIz");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIz", &len)), "123");
  ASSERT_EQUALS(len, static_cast<size_t>(3));
  ASSERT_STREQUALS(s3fs_base64(reinterpret_cast<const unsigned char *>("1234"), 4), "MTIzNA==");
  ASSERT_STREQUALS(reinterpret_cast<const char *>(s3fs_decode64("MTIzNA==", &len)), "1234");
  ASSERT_EQUALS(len, static_cast<size_t>(4));

  // TODO: invalid input
}

int main(int argc, char *argv[])
{
  test_trim();
  test_base64();
  return 0;
}
src/test_util.h (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <cstdlib>
#include <iostream>

template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
{
  if (x != y) {
    std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
    std::exit(1);
  }
}

void assert_strequals(const char *x, const char *y, const char *file, int line)
{
  if(x == NULL && y == NULL){
    return;
  // cppcheck-suppress nullPointerRedundantCheck
  } else if(x == NULL || y == NULL || strcmp(x, y) != 0){
    std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
    std::exit(1);
  }
}

#define ASSERT_EQUALS(x, y) \
  assert_equals((x), (y), __FILE__, __LINE__)

#define ASSERT_STREQUALS(x, y) \
  assert_strequals((x), (y), __FILE__, __LINE__)
@@ -27,3 +27,4 @@ EXTRA_DIST = \
 sample_delcache.sh \
 sample_ahbe.conf

+testdir = test
@ -1,14 +1,239 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Common code for starting an s3fs-fuse mountpoint and an S3Proxy instance
|
||||
# to run tests against S3Proxy locally.
|
||||
#
|
||||
# To run against an Amazon S3 or other S3 provider, specify the following
|
||||
# environment variables:
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
|
||||
# TEST_BUCKET_1=bucketname Name of bucket to use
|
||||
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
|
||||
# S3_URL="http://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
|
||||
#
|
||||
# Example of running against Amazon S3 using a bucket named "bucket:
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" ./small-integration-test.sh
|
||||
#
|
||||
# To change the s3fs-fuse debug level:
|
||||
#
|
||||
# DBGLEVEL=debug ./small-integration-test.sh
|
||||
#
|
||||
# To stop and wait after the mount point is up for manual interaction. This allows you to
|
||||
# explore the mounted file system exactly as it would have been started for the test case
|
||||
#
|
||||
# INTERACT=1 DBGLEVEL=debug ./small-integration-test.sh
|
||||
#
|
||||
# Run all of the tests from the makefile
|
||||
#
|
||||
# S3FS_CREDENTIALS_FILE=keyfile TEST_BUCKET_1=bucket S3PROXY_BINARY="" S3_URL="http://s3.amazonaws.com" make check
|
||||
#
|
||||
# Run the tests with request auth turned off in both S3Proxy and s3fs-fuse. This can be
|
||||
# useful for poking around with plain old curl
|
||||
#
|
||||
# PUBLIC=1 INTERACT=1 ./small-integration-test.sh
|
||||
#
|
||||
# A valgrind tool can be specified
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
|
||||
set -o errexit
|
||||
S3FS=../src/s3fs
|
||||
|
||||
S3FS_CREDENTIALS_FILE=$(eval echo ~${SUDO_USER}/.passwd-s3fs)
|
||||
# Allow these defaulted values to be overridden
|
||||
: ${S3_URL:="https://127.0.0.1:8080"}
|
||||
: ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
|
||||
: ${TEST_BUCKET_1:="s3fs-integration-test"}
|
||||
|
||||
TEST_BUCKET_1=${SUDO_USER}-s3fs-integration-test
|
||||
TEST_BUCKET_MOUNT_POINT_1=/mnt/${TEST_BUCKET_1}
|
||||
export TEST_BUCKET_1
|
||||
export S3_URL
|
||||
export TEST_SCRIPT_DIR=`pwd`
|
||||
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
|
||||
|
||||
S3PROXY_VERSION="1.5.3"
|
||||
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
|
||||
|
||||
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
|
||||
then
|
||||
echo "Missing credentials file: $S3FS_CREDENTIALS_FILE"
|
||||
exit 1
|
||||
fi
|
||||
chmod 600 "$S3FS_CREDENTIALS_FILE"
|
||||
|
||||
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
|
||||
then
|
||||
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
|
||||
fi
|
||||
|
||||
# This function execute the function parameters $1 times
|
||||
# before giving up, with 1 second delays.
|
||||
function retry {
    set +o errexit
    N=$1; shift;
    status=0
    for i in $(seq $N); do
        echo "Trying: $@"
        $@
        status=$?
        if [ $status == 0 ]; then
            break
        fi
        sleep 1
        echo "Retrying: $@"
    done

    if [ $status != 0 ]; then
        echo "timeout waiting for $@"
    fi
    set -o errexit
    return $status
}
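A usage sketch for retry (this is the same invocation start_s3fs uses further down for its mount check). Note that because `$@` is expanded unquoted, only a simple command can be retried; a pipeline or redirection would apply to the retry call itself rather than being re-run on each attempt:

    retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1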

# Proxy is not started if S3PROXY_BINARY is an empty string
# PUBLIC unset: use s3proxy.conf
# PUBLIC=1:     use s3proxy-noauth.conf (no request signing)
#
function start_s3proxy {
    if [ -n "${PUBLIC}" ]; then
        S3PROXY_CONFIG="s3proxy-noauth.conf"
    else
        S3PROXY_CONFIG="s3proxy.conf"
    fi

    if [ -n "${S3PROXY_BINARY}" ]
    then
        if [ ! -e "${S3PROXY_BINARY}" ]; then
            wget "https://github.com/andrewgaul/s3proxy/releases/download/s3proxy-${S3PROXY_VERSION}/s3proxy" \
                --quiet -O "${S3PROXY_BINARY}"
            chmod +x "${S3PROXY_BINARY}"
        fi

        stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
        S3PROXY_PID=$!

        # wait for S3Proxy to start
        for i in $(seq 30);
        do
            if exec 3<>"/dev/tcp/127.0.0.1/8080";
            then
                exec 3<&-  # Close for read
                exec 3>&-  # Close for write
                break
            fi
            sleep 1
        done
    fi
}
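The wait loop above leans on bash's /dev/tcp pseudo-device: opening it succeeds only once something is listening on the port. The same probe as a standalone sketch (wait_for_port is a hypothetical helper, not part of this harness; 127.0.0.1:8080 matches the S3Proxy endpoint used above):

    function wait_for_port {
        local host=$1 port=$2 tries=${3:-30}
        for i in $(seq $tries); do
            # the subshell opens fd 3 on the socket and closes it on exit
            if (exec 3<>"/dev/tcp/${host}/${port}") 2>/dev/null; then
                return 0
            fi
            sleep 1
        done
        return 1
    }
    wait_for_port 127.0.0.1 8080 && echo "S3Proxy is up"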

function stop_s3proxy {
    if [ -n "${S3PROXY_PID}" ]
    then
        kill $S3PROXY_PID
    fi
}

# Mount the bucket, function arguments passed to s3fs in addition to
# a set of common arguments.
function start_s3fs {

    # Public bucket if PUBLIC is set
    if [ -n "${PUBLIC}" ]; then
        AUTH_OPT="-o public_bucket=1"
    else
        AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    fi

    # If VALGRIND is set, its value is passed as options to valgrind,
    # eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
    # Start valgrind-listener in another shell to receive the logs
    # (its default port is 1500).
    if [ -n "${VALGRIND}" ]; then
        VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
    fi

    # Common s3fs options:
    #
    # TODO: Allow all these options to be overridden with env variables
    #
    # use_path_request_style
    #     The test env doesn't have virtual hosts
    # createbucket
    #     S3Proxy always starts with no buckets, this tests the s3fs-fuse
    #     automatic bucket creation path.
    # $AUTH_OPT
    #     Will be either "-o public_bucket=1" or
    #     "-o passwd_file=${S3FS_CREDENTIALS_FILE}"
    # dbglevel
    #     info by default. Override with the DBGLEVEL env variable.
    # -f
    #     Keep s3fs in foreground instead of daemonizing
    #

    # subshell with set -x to log exact invocation of s3fs-fuse
    (
        set -x
        stdbuf -oL -eL \
            ${VALGRIND_EXEC} ${S3FS} \
            $TEST_BUCKET_1 \
            $TEST_BUCKET_MOUNT_POINT_1 \
            -o use_path_request_style \
            -o url=${S3_URL} \
            -o no_check_certificate \
            -o ssl_verify_hostname=0 \
            -o createbucket \
            ${AUTH_OPT} \
            -o dbglevel=${DBGLEVEL:=info} \
            -o retries=3 \
            -f \
            ${@} | stdbuf -oL -eL sed -u "s/^/s3fs: /" &
    )

    if [ `uname` = "Darwin" ]; then
        set +o errexit
        TRYCOUNT=0
        while [ $TRYCOUNT -le 20 ]; do
            df | grep -q $TEST_BUCKET_MOUNT_POINT_1
            if [ $? -eq 0 ]; then
                break;
            fi
            sleep 1
            TRYCOUNT=`expr ${TRYCOUNT} + 1`
        done
        # the loop exhausted its tries without ever seeing the mount
        if [ $TRYCOUNT -gt 20 ]; then
            exit 1
        fi
        set -o errexit
    else
        retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
    fi

    # Quick way to start system up for manual testing with options under test
    if [[ -n ${INTERACT} ]]; then
        echo "Mountpoint $TEST_BUCKET_MOUNT_POINT_1 is ready"
        echo "control-C to quit"
        sleep infinity
        exit 0
    fi
}

function stop_s3fs {
    # Retry in case file system is in use
    if [ `uname` = "Darwin" ]; then
        df | grep -q $TEST_BUCKET_MOUNT_POINT_1
        if [ $? -eq 0 ]; then
            retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1
        fi
    else
        if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
            retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
        fi
    fi
}

# trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
function common_exit_handler {
    stop_s3fs
    stop_s3proxy
}
trap common_exit_handler EXIT

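Per the note above, a test script that needs its own cleanup should wrap the common handler rather than replace it. A sketch (my_cleanup and its scratch file are hypothetical):

    function my_cleanup {
        rm -f /tmp/my-test-scratch     # test-specific cleanup
        common_exit_handler            # chain to the shared handler
    }
    trap my_cleanup EXIT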
467  test/integration-test-main.sh  Executable file
@@ -0,0 +1,467 @@
#!/bin/bash

set -o errexit

source test-utils.sh

function test_append_file {
    describe "Testing append to file ..."

    # Write a small test file
    if [ `uname` = "Darwin" ]; then
        cat /dev/null > ${TEST_TEXT_FILE}
    fi
    for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
    do
        echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
    done > ${TEST_TEXT_FILE}

    # Verify contents of file
    echo "Verifying length of test file"
    FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
    if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
    then
        echo "error: expected $TEST_TEXT_FILE_LENGTH, got $FILE_LENGTH"
        return 1
    fi

    rm_test_file
}

function test_truncate_file {
    describe "Testing truncate file ..."
    # Write a small test file
    echo "${TEST_TEXT}" > ${TEST_TEXT_FILE}

    # Truncate file to 0 length. This should trigger open(path, O_RDWR | O_TRUNC...)
    : > ${TEST_TEXT_FILE}

    # Verify file is zero length
    if [ -s ${TEST_TEXT_FILE} ]
    then
        echo "error: expected ${TEST_TEXT_FILE} to be zero length"
        return 1
    fi
    rm_test_file
}

function test_truncate_empty_file {
    describe "Testing truncate empty file ..."
    # Write an empty test file
    touch ${TEST_TEXT_FILE}

    # Truncate the file to 1024 length
    t_size=1024
    truncate ${TEST_TEXT_FILE} -s $t_size

    # Verify file is $t_size length
    if [ `uname` = "Darwin" ]; then
        size=$(stat -f "%z" ${TEST_TEXT_FILE})
    else
        size=$(stat -c %s ${TEST_TEXT_FILE})
    fi
    if [ $t_size -ne $size ]
    then
        echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
        return 1
    fi
    rm_test_file
}
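The `uname`-based stat branching above (BSD stat on Darwin, GNU stat elsewhere) recurs in several tests below; a small wrapper could centralize it. A sketch under that assumption (get_size is hypothetical and not defined in test-utils.sh):

    function get_size {
        if [ `uname` = "Darwin" ]; then
            stat -f "%z" "$1"    # BSD stat: %z = size in bytes
        else
            stat -c %s "$1"      # GNU stat: %s = size in bytes
        fi
    }
    size=$(get_size ${TEST_TEXT_FILE})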

function test_mv_file {
    describe "Testing mv file function ..."
    # if the rename file exists, delete it
    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        rm $ALT_TEST_TEXT_FILE
    fi

    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
        return 1
    fi

    # create the test file again
    mk_test_file

    # save file length
    ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`

    # rename the test file
    mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    if [ ! -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not move file"
        return 1
    fi

    # Check the contents of the alt file
    ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
    if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
    then
        echo "moved file length is not as expected. expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
        return 1
    fi

    # clean up
    rm_test_file $ALT_TEST_TEXT_FILE
}

function test_mv_directory {
    describe "Testing mv directory function ..."
    if [ -e $TEST_DIR ]; then
        echo "Unexpected, this file/directory exists: ${TEST_DIR}"
        return 1
    fi

    mk_test_dir

    mv ${TEST_DIR} ${TEST_DIR}_rename

    if [ ! -d "${TEST_DIR}_rename" ]; then
        echo "Directory ${TEST_DIR} was not renamed"
        return 1
    fi

    rmdir ${TEST_DIR}_rename
    if [ -e "${TEST_DIR}_rename" ]; then
        echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
        return 1
    fi
}

function test_redirects {
    describe "Testing redirects ..."

    mk_test_file ABCDEF

    CONTENT=`cat $TEST_TEXT_FILE`

    if [ "${CONTENT}" != "ABCDEF" ]; then
        echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
        return 1
    fi

    echo XYZ > $TEST_TEXT_FILE

    CONTENT=`cat $TEST_TEXT_FILE`

    if [ ${CONTENT} != "XYZ" ]; then
        echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
        return 1
    fi

    echo 123456 >> $TEST_TEXT_FILE

    LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
    LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`

    if [ ${LINE1} != "XYZ" ]; then
        echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
        return 1
    fi

    if [ ${LINE2} != "123456" ]; then
        echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
        return 1
    fi

    # clean up
    rm_test_file
}

function test_mkdir_rmdir {
    describe "Testing creation/removal of a directory"

    if [ -e $TEST_DIR ]; then
        echo "Unexpected, this file/directory exists: ${TEST_DIR}"
        return 1
    fi

    mk_test_dir
    rm_test_dir
}

function test_chmod {
    describe "Testing chmod file function ..."

    # create the test file again
    mk_test_file

    if [ `uname` = "Darwin" ]; then
        ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
    else
        ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    fi

    chmod 777 $TEST_TEXT_FILE;

    # if they're the same, we have a problem.
    if [ `uname` = "Darwin" ]; then
        CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
    else
        CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
    fi
    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
    then
        echo "Could not modify $TEST_TEXT_FILE permissions"
        return 1
    fi

    # clean up
    rm_test_file
}

function test_chown {
    describe "Testing chown file function ..."

    # create the test file again
    mk_test_file

    if [ `uname` = "Darwin" ]; then
        ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
    else
        ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    fi

    chown 1000:1000 $TEST_TEXT_FILE;

    # if they're the same, we have a problem.
    if [ `uname` = "Darwin" ]; then
        CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
    else
        CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
    fi
    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
    then
        if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
        then
            echo "Skipping strict check because the file already had ownership 1000:1000"
        else
            echo "Could not modify $TEST_TEXT_FILE ownership ($ORIGINAL_PERMISSIONS to 1000:1000)"
            return 1
        fi
    fi

    # clean up
    rm_test_file
}

function test_list {
    describe "Testing list"
    mk_test_file
    mk_test_dir

    file_cnt=$(ls -1 | wc -l)
    if [ $file_cnt != 2 ]; then
        echo "Expected 2 entries but got $file_cnt"
        return 1
    fi

    rm_test_file
    rm_test_dir
}

function test_remove_nonempty_directory {
    describe "Testing removing a non-empty directory"
    mk_test_dir
    touch "${TEST_DIR}/file"
    rmdir "${TEST_DIR}" 2>&1 | grep -q "Directory not empty"
    rm "${TEST_DIR}/file"
    rm_test_dir
}

function test_rename_before_close {
    describe "Testing rename before close ..."
    (
        echo foo
        mv $TEST_TEXT_FILE ${TEST_TEXT_FILE}.new
    ) > $TEST_TEXT_FILE

    if ! cmp <(echo foo) ${TEST_TEXT_FILE}.new; then
        echo "rename before close failed"
        return 1
    fi

    rm_test_file ${TEST_TEXT_FILE}.new
    rm -f ${TEST_TEXT_FILE}
}

function test_multipart_upload {
    describe "Testing multi-part upload ..."

    if [ `uname` = "Darwin" ]; then
        cat /dev/null > $BIG_FILE
    fi
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1

    # Verify contents of file
    echo "Comparing test file"
    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}"
    then
        return 1
    fi

    rm -f "/tmp/${BIG_FILE}"
    rm_test_file "${BIG_FILE}"
}

function test_multipart_copy {
    describe "Testing multi-part copy ..."

    if [ `uname` = "Darwin" ]; then
        cat /dev/null > $BIG_FILE
    fi
    dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
    mv "${BIG_FILE}" "${BIG_FILE}-copy"

    # Verify contents of file
    echo "Comparing test file"
    if ! cmp "/tmp/${BIG_FILE}" "${BIG_FILE}-copy"
    then
        return 1
    fi

    rm -f "/tmp/${BIG_FILE}"
    rm_test_file "${BIG_FILE}-copy"
}

function test_special_characters {
    describe "Testing special characters ..."

    ls 'special' 2>&1 | grep -q 'No such file or directory'
    ls 'special?' 2>&1 | grep -q 'No such file or directory'
    ls 'special*' 2>&1 | grep -q 'No such file or directory'
    ls 'special~' 2>&1 | grep -q 'No such file or directory'
    ls 'specialµ' 2>&1 | grep -q 'No such file or directory'
}

function test_symlink {
    describe "Testing symlinks ..."

    rm -f $TEST_TEXT_FILE
    rm -f $ALT_TEST_TEXT_FILE
    echo foo > $TEST_TEXT_FILE

    ln -s $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    cmp $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE

    rm -f $TEST_TEXT_FILE

    [ -L $ALT_TEST_TEXT_FILE ]
    [ ! -f $ALT_TEST_TEXT_FILE ]
}

function test_extended_attributes {
    command -v setfattr >/dev/null 2>&1 || \
        { echo "Skipping extended attribute tests" ; return; }

    describe "Testing extended attributes ..."

    rm -f $TEST_TEXT_FILE
    touch $TEST_TEXT_FILE

    # set value
    setfattr -n key1 -v value1 $TEST_TEXT_FILE
    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'

    # append value
    setfattr -n key2 -v value2 $TEST_TEXT_FILE
    getfattr -n key1 --only-values $TEST_TEXT_FILE | grep -q '^value1$'
    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'

    # remove value
    setfattr -x key1 $TEST_TEXT_FILE
    ! getfattr -n key1 --only-values $TEST_TEXT_FILE
    getfattr -n key2 --only-values $TEST_TEXT_FILE | grep -q '^value2$'
}

function test_mtime_file {
    describe "Testing mtime preservation function ..."

    # if the rename file exists, delete it
    if [ -e $ALT_TEST_TEXT_FILE -o -L $ALT_TEST_TEXT_FILE ]
    then
        rm $ALT_TEST_TEXT_FILE
    fi

    if [ -e $ALT_TEST_TEXT_FILE ]
    then
        echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
        return 1
    fi

    # create the test file again
    mk_test_file
    sleep 2 # allow for some time to pass to compare the timestamps between test & alt

    # copy the test file with preserve mode
    cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
    if [ `uname` = "Darwin" ]; then
        testmtime=`stat -f "%m" $TEST_TEXT_FILE`
        altmtime=`stat -f "%m" $ALT_TEST_TEXT_FILE`
    else
        testmtime=`stat -c %Y $TEST_TEXT_FILE`
        altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
    fi
    if [ "$testmtime" -ne "$altmtime" ]
    then
        echo "File times do not match: $testmtime != $altmtime"
        return 1
    fi
}

function test_rm_rf_dir {
    describe "Test that rm -rf will remove directory with contents"
    # Create a dir with some files and directories
    mkdir dir1
    mkdir dir1/dir2
    touch dir1/file1
    touch dir1/dir2/file2

    # Remove the dir with recursive rm
    rm -rf dir1

    if [ -e dir1 ]; then
        echo "rm -rf did not remove $PWD/dir1"
        return 1
    fi
}

function test_write_after_seek_ahead {
    describe "Test writes succeed after a seek ahead"
    dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
    rm testfile
}


function add_all_tests {
    add_tests test_append_file
    add_tests test_truncate_file
    add_tests test_truncate_empty_file
    add_tests test_mv_file
    add_tests test_mv_directory
    add_tests test_redirects
    add_tests test_mkdir_rmdir
    add_tests test_chmod
    add_tests test_chown
    add_tests test_list
    add_tests test_remove_nonempty_directory
    # TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
    #add_tests test_rename_before_close
    add_tests test_multipart_upload
    add_tests test_multipart_copy
    add_tests test_special_characters
    add_tests test_symlink
    add_tests test_extended_attributes
    add_tests test_mtime_file
    add_tests test_rm_rf_dir
    add_tests test_write_after_seek_ahead
}

init_suite
add_all_tests
run_suite
BIN  test/keystore.jks  Normal file
Binary file not shown.
1  test/passwd-s3fs  Normal file
@@ -0,0 +1 @@
local-identity:local-credential
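For reference, each line of an s3fs passwd file is an `identity:credential` pair (above, the dummy S3Proxy identity matching s3proxy.conf). Creating one for a real provider might look like this sketch (the key values are placeholders):

    echo "AKIAEXAMPLEKEY:exampleSecretKey" > ${HOME}/.passwd-s3fs
    chmod 600 ${HOME}/.passwd-s3fs    # s3fs rejects credential files readable by others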
10  test/s3proxy.conf  Normal file
@@ -0,0 +1,10 @@
s3proxy.secure-endpoint=http://127.0.0.1:8080
s3proxy.authorization=aws-v4
s3proxy.identity=local-identity
s3proxy.credential=local-credential
s3proxy.keystore-path=keystore.jks
s3proxy.keystore-password=password

jclouds.provider=transient
jclouds.identity=remote-identity
jclouds.credential=remote-credential
@@ -1,24 +1,30 @@
# S3FS: Samlpe ahbe_conf parameter file.
# S3FS: Sample ahbe_conf parameter file.
#
# This file is configuration file for additional header by extension(ahbe).
# s3fs loads this file at starting.
#
# Format:
# line = [file suffix] HTTP-header [HTTP-header-values]
# line = [file suffix or regex] HTTP-header [HTTP-header-values]
# file suffix = file(object) suffix, if this field is empty,
# it means "*"(all object).
# it means "reg:(.*)".(=all object).
# regex = regular expression to match the file(object) path.
# this type starts with "reg:" prefix.
# HTTP-header = additional HTTP header name
# HTTP-header-values = additional HTTP header value
#
# <suffix(extension)> <HTTP header> <HTTP header values>
#
# Verification is done in the order in which they are described in the file.
# That order is very important.
#
# Example:
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# " Content-Encoding gzip" --> all object
# ".gz Content-Encoding gzip" --> only ".gz" extension file
# "reg:^/DIR/(.*).t2$ Content-Encoding text2" --> "/DIR/*.t2" extension file
#
# Notice:
# If you need to set all object, you can specify without "suffix".
# Then all of object(file) is added additional header.
# If you need to set all object, you can specify without "suffix" or regex
# type "reg:(.*)". Then all of object(file) is added additional header.
# If you have this configuration file for Content-Encoding, you should
# know about RFC 2616.
#
@@ -27,15 +33,20 @@
# Encoding header, and SHOULD NOT be used in the Content-Encoding
# header."
#
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip

# file suffix type
.gz Content-Encoding gzip
.Z Content-Encoding compress
.bz2 Content-Encoding bzip2
.svgz Content-Encoding gzip
.svg.gz Content-Encoding gzip
.tgz Content-Encoding gzip
.tar.gz Content-Encoding gzip
.taz Content-Encoding gzip
.tz Content-Encoding gzip
.tbz2 Content-Encoding gzip
gz.js Content-Encoding gzip

# regex type(test)
reg:^/MYDIR/(.*)[.]t2$ Content-Encoding text2


@@ -1,284 +1,30 @@
#!/bin/bash -e
COMMON=integration-test-common.sh
source $COMMON
#!/bin/bash

#
# Test s3fs-fuse file system operations with
#

set -o errexit

# Require root
REQUIRE_ROOT=require-root.sh
source $REQUIRE_ROOT
#source $REQUIRE_ROOT

# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
source integration-test-common.sh

# Mount the bucket
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
    mkdir -p $TEST_BUCKET_MOUNT_POINT_1
fi
$S3FS $TEST_BUCKET_1 $TEST_BUCKET_MOUNT_POINT_1 -o passwd_file=$S3FS_CREDENTIALS_FILE
CUR_DIR=`pwd`
cd $TEST_BUCKET_MOUNT_POINT_1
start_s3proxy

if [ -e $TEST_TEXT_FILE ]
then
    rm -f $TEST_TEXT_FILE
fi
#
# enable_content_md5
#    Causes s3fs to validate file contents. This isn't included in the common
#    options used by start_s3fs because tests may be performance tests
# singlepart_copy_limit
#    Appeared in upstream s3fs-fuse tests, possibly a limitation of S3Proxy
#    TODO: github archaeology to see why it was added.
#
start_s3fs -o enable_content_md5 \
    -o singlepart_copy_limit=$((10 * 1024))

# Write a small test file
for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
do
    echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
    echo $TEST_TEXT >> $TEST_TEXT_FILE
done
./integration-test-main.sh
# Verify contents of file
echo "Verifying length of test file"
FILE_LENGTH=`wc -l $TEST_TEXT_FILE | awk '{print $1}'`
if [ "$FILE_LENGTH" -ne "$TEST_TEXT_FILE_LENGTH" ]
then
    echo "error: expected $TEST_TEXT_FILE_LENGTH , got $FILE_LENGTH"
    exit 1
fi

# Delete the test file
rm $TEST_TEXT_FILE
if [ -e $TEST_TEXT_FILE ]
then
    echo "Could not delete file, it still exists"
    exit 1
fi

##########################################################
# Rename test (individual file)
##########################################################
echo "Testing mv file function ..."

# if the rename file exists, delete it
if [ -e $ALT_TEST_TEXT_FILE ]
then
    rm $ALT_TEST_TEXT_FILE
fi

if [ -e $ALT_TEST_TEXT_FILE ]
then
    echo "Could not delete file ${ALT_TEST_TEXT_FILE}, it still exists"
    exit 1
fi

# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
    echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
    exit 1
fi

#rename the test file
mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
if [ ! -e $ALT_TEST_TEXT_FILE ]
then
    echo "Could not move file"
    exit 1
fi

# Check the contents of the alt file
ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
then
    echo "moved file length is not as expected expected: $ALT_TEXT_LENGTH got: $ALT_FILE_LENGTH"
    exit 1
fi

# clean up
rm $ALT_TEST_TEXT_FILE

if [ -e $ALT_TEST_TEXT_FILE ]
then
    echo "Could not cleanup file ${ALT_TEST_TEXT_FILE}, it still exists"
    exit 1
fi

##########################################################
# Rename test (individual directory)
##########################################################
echo "Testing mv directory function ..."
if [ -e $TEST_DIR ]; then
    echo "Unexpected, this file/directory exists: ${TEST_DIR}"
    exit 1
fi

mkdir ${TEST_DIR}

if [ ! -d ${TEST_DIR} ]; then
    echo "Directory ${TEST_DIR} was not created"
    exit 1
fi

mv ${TEST_DIR} ${TEST_DIR}_rename

if [ ! -d "${TEST_DIR}_rename" ]; then
    echo "Directory ${TEST_DIR} was not renamed"
    exit 1
fi

rmdir ${TEST_DIR}_rename
if [ -e "${TEST_DIR}_rename" ]; then
    echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
    exit 1
fi

###################################################################
# test redirects > and >>
###################################################################
echo "Testing redirects ..."

echo ABCDEF > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
    echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
    exit 1
fi

CONTENT=`cat $TEST_TEXT_FILE`

if [ ${CONTENT} != "ABCDEF" ]; then
    echo "CONTENT read is unexpected, got ${CONTENT}, expected ABCDEF"
    exit 1
fi

echo XYZ > $TEST_TEXT_FILE

CONTENT=`cat $TEST_TEXT_FILE`

if [ ${CONTENT} != "XYZ" ]; then
    echo "CONTENT read is unexpected, got ${CONTENT}, expected XYZ"
    exit 1
fi

echo 123456 >> $TEST_TEXT_FILE

LINE1=`sed -n '1,1p' $TEST_TEXT_FILE`
LINE2=`sed -n '2,2p' $TEST_TEXT_FILE`

if [ ${LINE1} != "XYZ" ]; then
    echo "LINE1 was not as expected, got ${LINE1}, expected XYZ"
    exit 1
fi

if [ ${LINE2} != "123456" ]; then
    echo "LINE2 was not as expected, got ${LINE2}, expected 123456"
    exit 1
fi


# clean up
rm $TEST_TEXT_FILE

if [ -e $TEST_TEXT_FILE ]
then
    echo "Could not cleanup file ${TEST_TEXT_FILE}, it still exists"
    exit 1
fi

#####################################################################
# Simple directory test mkdir/rmdir
#####################################################################
echo "Testing creation/removal of a directory"

if [ -e $TEST_DIR ]; then
    echo "Unexpected, this file/directory exists: ${TEST_DIR}"
    exit 1
fi

mkdir ${TEST_DIR}

if [ ! -d ${TEST_DIR} ]; then
    echo "Directory ${TEST_DIR} was not created"
    exit 1
fi

rmdir ${TEST_DIR}
if [ -e $TEST_DIR ]; then
    echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
    exit 1
fi

##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chmod file function ..."

# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
    echo "Could not create file ${TEST_TEXT_FILE}"
    exit 1
fi

ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)

chmod 777 $TEST_TEXT_FILE;

# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
    echo "Could not modify $TEST_TEXT_FILE permissions"
    exit 1
fi

# clean up
rm $TEST_TEXT_FILE

if [ -e $TEST_TEXT_FILE ]
then
    echo "Could not cleanup file ${TEST_TEXT_FILE}"
    exit 1
fi

##########################################################
# File permissions test (individual file)
##########################################################
echo "Testing chown file function ..."

# create the test file again
echo $TEST_TEXT > $TEST_TEXT_FILE
if [ ! -e $TEST_TEXT_FILE ]
then
    echo "Could not create file ${TEST_TEXT_FILE}"
    exit 1
fi

ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)

chown 1000:1000 $TEST_TEXT_FILE;

# if they're the same, we have a problem.
if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
then
    echo "Could not modify $TEST_TEXT_FILE ownership"
    exit 1
fi

# clean up
rm $TEST_TEXT_FILE

if [ -e $TEST_TEXT_FILE ]
then
    echo "Could not cleanup file ${TEST_TEXT_FILE}"
    exit 1
fi

#####################################################################
# Tests are finished
#####################################################################

# Unmount the bucket
cd $CUR_DIR
umount $TEST_BUCKET_MOUNT_POINT_1

echo "All tests complete."
echo "$0: tests complete."

171  test/test-utils.sh  Normal file
@@ -0,0 +1,171 @@
#### Test utils

set -o errexit

# Configuration
TEST_TEXT="HELLO WORLD"
TEST_TEXT_FILE=test-s3fs.txt
TEST_DIR=testdir
ALT_TEST_TEXT_FILE=test-s3fs-ALT.txt
TEST_TEXT_FILE_LENGTH=15
BIG_FILE=big-file-s3fs.txt
BIG_FILE_LENGTH=$((25 * 1024 * 1024))
export RUN_DIR

function mk_test_file {
    if [ $# == 0 ]; then
        TEXT=$TEST_TEXT
    else
        TEXT=$1
    fi
    echo $TEXT > $TEST_TEXT_FILE
    if [ ! -e $TEST_TEXT_FILE ]
    then
        echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
        exit 1
    fi

    # wait & check
    BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
    TRY_COUNT=10
    while true; do
        MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
        if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
            break
        fi
        TRY_COUNT=`expr $TRY_COUNT - 1`
        if [ $TRY_COUNT -le 0 ]; then
            echo "Could not create file ${TEST_TEXT_FILE}, its file size is wrong"
            exit 1
        fi
        sleep 1
    done
}

function rm_test_file {
    if [ $# == 0 ]; then
        FILE=$TEST_TEXT_FILE
    else
        FILE=$1
    fi
    rm -f $FILE

    if [ -e $FILE ]
    then
        echo "Could not cleanup file ${FILE}"
        exit 1
    fi
}

function mk_test_dir {
    mkdir ${TEST_DIR}

    if [ ! -d ${TEST_DIR} ]; then
        echo "Directory ${TEST_DIR} was not created"
        exit 1
    fi
}

function rm_test_dir {
    rmdir ${TEST_DIR}
    if [ -e $TEST_DIR ]; then
        echo "Could not remove the test directory, it still exists: ${TEST_DIR}"
        exit 1
    fi
}

# Create and cd to a unique directory for this test run
# Sets RUN_DIR to the name of the created directory
function cd_run_dir {
    if [ "$TEST_BUCKET_MOUNT_POINT_1" == "" ]; then
        echo "TEST_BUCKET_MOUNT_POINT_1 variable not set"
        exit 1
    fi
    RUN_DIR=$(mktemp -d ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
    cd ${RUN_DIR}
}

function clean_run_dir {
    if [ -d ${RUN_DIR} ]; then
        rm -rf ${RUN_DIR} || echo "Error removing ${RUN_DIR}"
    fi
}

# Resets test suite
function init_suite {
    TEST_LIST=()
    TEST_FAILED_LIST=()
    TEST_PASSED_LIST=()
}

# Report a passing test case
#   report_pass TEST_NAME
function report_pass {
    echo "$1 passed"
    TEST_PASSED_LIST+=($1)
}

# Report a failing test case
#   report_fail TEST_NAME
function report_fail {
    echo "$1 failed"
    TEST_FAILED_LIST+=($1)
}

# Add tests to the suite
#   add_tests TEST_NAME...
function add_tests {
    TEST_LIST+=("$@")
}

# Log test name and description
#   describe [DESCRIPTION]
function describe {
    echo "${FUNCNAME[1]}: \"$@\""
}
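A minimal sketch of a test written against these helpers (test_example is hypothetical; the real tests in integration-test-main.sh follow the same pattern):

    function test_example {
        describe "Testing example ..."
        echo hello > example.txt    # run_suite runs this inside RUN_DIR, i.e. in the bucket
        grep -q hello example.txt
        rm -f example.txt
    }

    init_suite
    add_tests test_example
    run_suite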

# Runs each test in a suite and summarizes results. The list of
# tests added by add_tests() is called with CWD set to a tmp
# directory in the bucket. An attempt to clean this directory is
# made after the test run.
function run_suite {
    orig_dir=$PWD
    cd_run_dir
    for t in "${TEST_LIST[@]}"; do
        # The following sequence runs tests in a subshell to allow continuation
        # on test failure, but still allowing errexit to be in effect during
        # the test.
        #
        # See:
        # https://groups.google.com/d/msg/gnu.bash.bug/NCK_0GmIv2M/dkeZ9MFhPOIJ
        # Other ways of trying to capture the return value will also disable
        # errexit in the function due to bash... compliance with POSIX?
        set +o errexit
        (set -o errexit; $t)
        if [[ $? == 0 ]]; then
            report_pass $t
        else
            report_fail $t
        fi
        set -o errexit
    done
    cd ${orig_dir}
    clean_run_dir

    for t in "${TEST_PASSED_LIST[@]}"; do
        echo "PASS: $t"
    done
    for t in "${TEST_FAILED_LIST[@]}"; do
        echo "FAIL: $t"
    done

    passed=${#TEST_PASSED_LIST[@]}
    failed=${#TEST_FAILED_LIST[@]}

    echo "SUMMARY for $0: $passed tests passed. $failed tests failed."

    if [[ $failed != 0 ]]; then
        return 1
    else
        return 0
    fi
}
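The subshell trick in run_suite can be seen in isolation in this sketch (the failing command is illustrative): errexit stays in force inside the subshell, so the test aborts at the first failure, while the parent shell merely inspects the subshell's exit status:

    set +o errexit
    (set -o errexit; false; echo "never reached")
    if [[ $? == 0 ]]; then echo "PASS"; else echo "FAIL"; fi
    set -o errexit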