Compare commits
311 Commits
| SHA1 | Author | Date |
|---|---|---|
| 06032aa661 | |||
| e8fb2aefb3 | |||
| 3cb6c5e161 | |||
| 7e0c53dfe9 | |||
| c2ca7e43b6 | |||
| ae47d5d349 | |||
| 35d3fce7a0 | |||
| 4177d8bd3b | |||
| ad5349a488 | |||
| 6b57a8c1fc | |||
| 92a4034c5e | |||
| 3e4002df0d | |||
| 1b9ec7f4fc | |||
| 4a7c4a9e9d | |||
| 0d3fb0658a | |||
| 73cf2ba95d | |||
| 5a481e6a01 | |||
| d8e12839af | |||
| 3bf05dabea | |||
| d4e86a17d1 | |||
| 6555e7ebb0 | |||
| ae9d8eb734 | |||
| e49d594db4 | |||
| 66bb0898db | |||
| b323312312 | |||
| 58e52bad4f | |||
| 57b2a60172 | |||
| 212bbbbdf0 | |||
| a0e62b5588 | |||
| e9831dd772 | |||
| da95afba8a | |||
| 0bd875eb9e | |||
| af63a42773 | |||
| ad9a374229 | |||
| 1b86e4d414 | |||
| 86b0921ac4 | |||
| dbe98dcbd2 | |||
| 4a72b60707 | |||
| 7a4696fc17 | |||
| e3de6ea458 | |||
| 1db4739ed8 | |||
| 25375a6b48 | |||
| ca87df7d44 | |||
| d052dc0b9d | |||
| 3f542e9cf5 | |||
| 04493de767 | |||
| 4fdab46617 | |||
| 1a23b880d5 | |||
| b3c376afbe | |||
| adcf5754ae | |||
| 0863672e27 | |||
| 0f503ced25 | |||
| 987a166bf4 | |||
| 57b6f0eeaf | |||
| f71a28f9b9 | |||
| 45c7ea9194 | |||
| c9f4312588 | |||
| 8b657eee41 | |||
| b9c9de7f97 | |||
| e559f05326 | |||
| 824124fedc | |||
| be9d407fa0 | |||
| c494e54320 | |||
| b52b6f3fc5 | |||
| 82c9733101 | |||
| a45ff6cdaa | |||
| 960d45c853 | |||
| 246b767b64 | |||
| 0edf056e95 | |||
| 88819af2d8 | |||
| b048c981ad | |||
| e1dafe76dd | |||
| 1a2e63ecff | |||
| a60b32cb80 | |||
| 6b58220009 | |||
| a841057679 | |||
| ee6abea956 | |||
| 8b0acd75e0 | |||
| cea7d44717 | |||
| 0da87e75fe | |||
| 566961c7a5 | |||
| ac65258d30 | |||
| 35261e6dba | |||
| 2818f23ba5 | |||
| 88f071ea22 | |||
| bd4bc0e7f1 | |||
| 890c1d53ff | |||
| 026260e7a1 | |||
| 99fe93b7f1 | |||
| b764c53020 | |||
| 11bd7128d2 | |||
| 7cda32664b | |||
| 4c73a0ae56 | |||
| 97fc845a6a | |||
| 7d9ac0163b | |||
| d903e064e0 | |||
| e1928288fe | |||
| 6ab6412dd3 | |||
| 30b7a69d3d | |||
| ccd0a446d8 | |||
| 0418e53b3c | |||
| bad48ab59a | |||
| bbad76bb71 | |||
| 6c1bd98c14 | |||
| b95e4acaeb | |||
| c238701d09 | |||
| 60d2ac3c7a | |||
| 967ef4d56b | |||
| ad57bdda6c | |||
| a0b69d1d3d | |||
| 5df94d7e33 | |||
| 1cbe9fb7a3 | |||
| 395f736753 | |||
| 065516c5f3 | |||
| 8660abaea2 | |||
| 366f0705a0 | |||
| ccea87ca68 | |||
| 5d54883e2f | |||
| 662f65c3c8 | |||
| 259f028490 | |||
| 5db550a298 | |||
| e3c77d2906 | |||
| ba00e79253 | |||
| c1791f920e | |||
| df3803c7b7 | |||
| 384b4cbafa | |||
| 40501a7a73 | |||
| ab89b4cd4a | |||
| 48e0d55c8e | |||
| 1eba27a50a | |||
| 41206fa0e2 | |||
| 21cf1d64e5 | |||
| ae91b6f673 | |||
| f4515b5cfa | |||
| 6c57cde7f9 | |||
| 5014c1827b | |||
| f531e6aff2 | |||
| c5c110137b | |||
| 5957d9ead0 | |||
| 5675df2a44 | |||
| 00bc9142c4 | |||
| 5653ab39fc | |||
| 473dd7c940 | |||
| ee824d52ba | |||
| 7c5fba9890 | |||
| f214cb03b2 | |||
| 416c51799b | |||
| cf6f665f03 | |||
| 20da0e4dd3 | |||
| fa8c417526 | |||
| 2c65aec6c8 | |||
| 96d8e6d823 | |||
| 62b8084300 | |||
| 907aff5de4 | |||
| bc09129ec5 | |||
| cd94f638e2 | |||
| b1fe419870 | |||
| 98b724391f | |||
| 620f6ec616 | |||
| 0c6a3882a2 | |||
| a08880ae15 | |||
| f48826dfe9 | |||
| 9c3551478e | |||
| cc94e1da26 | |||
| 2b7ea5813c | |||
| 185192be67 | |||
| ae4caa96a0 | |||
| af13ae82c1 | |||
| 13503c063b | |||
| 337da59368 | |||
| b0681246b9 | |||
| 52853f6b47 | |||
| f6eb841a24 | |||
| caea087aec | |||
| d2ae14d8b7 | |||
| 7115835834 | |||
| 551c6acf67 | |||
| 24df69f688 | |||
| 23a10dd644 | |||
| 034042f511 | |||
| 465c15ef40 | |||
| a22675bafd | |||
| 0e0ae38f6d | |||
| 7b30d5d15b | |||
| 4a5c9bef89 | |||
| 9d10a5aa70 | |||
| 107757f11d | |||
| a12e0d5ec4 | |||
| 42cdcbc2dc | |||
| eef549dac7 | |||
| c8ee132813 | |||
| d07c3f38b7 | |||
| 73da168b93 | |||
| 1fe0334c08 | |||
| 7d09914f1f | |||
| 3ac39d61f8 | |||
| c5677b4726 | |||
| 67685c3d49 | |||
| 864e20e1f2 | |||
| 51b3183cba | |||
| f02b1bc352 | |||
| 758b92e823 | |||
| df0ff3a2fd | |||
| edcf4c6218 | |||
| 28efff5986 | |||
| efba9bcbc1 | |||
| 6bd179c92b | |||
| 96764b7410 | |||
| ff3eb1971f | |||
| 94ddcb8d4f | |||
| b4c90d6957 | |||
| 75b59a7c16 | |||
| 3bcca75a88 | |||
| 79ea1a1561 | |||
| f0f61b3b55 | |||
| b955391621 | |||
| 8de992d42d | |||
| fef3fbc225 | |||
| acb61880b9 | |||
| 8ee95ff7ab | |||
| 95578cad43 | |||
| 465bbd3729 | |||
| 0fa895594e | |||
| 15573cd21e | |||
| 43df94719b | |||
| 980ba398bc | |||
| 0d59ac51c1 | |||
| 523043a2aa | |||
| 277da2c64a | |||
| 03217baa99 | |||
| 6affefff5b | |||
| 2506fe73fa | |||
| 25a03c370a | |||
| d40da2c68b | |||
| 7d6312ac78 | |||
| e26c69a327 | |||
| ff196e4257 | |||
| 19f0d498aa | |||
| 97a806447e | |||
| a00af2385b | |||
| 6fc972972f | |||
| 989d403b1f | |||
| 7b307601b5 | |||
| d731ab3a8e | |||
| 174d934d52 | |||
| b428f68acf | |||
| 5350e03147 | |||
| 28c7888a50 | |||
| 915a1321c7 | |||
| 8a11d7bc2f | |||
| 7aae4782d9 | |||
| aba9e29471 | |||
| d375bca0d0 | |||
| cd0c8599cc | |||
| 20878a1618 | |||
| edd0a11fb5 | |||
| 5e4bafeab7 | |||
| 67a836223a | |||
| 7e2d6a3eed | |||
| 1ee5a468f4 | |||
| 81e209bdd1 | |||
| 90eda81624 | |||
| cafe6015e3 | |||
| 2492dc60ce | |||
| 6f688770fd | |||
| 8c0b1d9c5b | |||
| efde0ec9de | |||
| 632495374b | |||
| 15b797f3ee | |||
| a7a64d954a | |||
| cca217f613 | |||
| 1a9cf6f66d | |||
| 02d7296210 | |||
| a688df813e | |||
| 164424bc89 | |||
| f38aaa3d0e | |||
| 7fabd18b1f | |||
| 5db369d67e | |||
| dba32fdf78 | |||
| 716baada22 | |||
| 1a93897e85 | |||
| 9fd1368611 | |||
| 9f174d7614 | |||
| 65d52506c4 | |||
| a56fe0ea28 | |||
| ec110bb0f3 | |||
| 232befb52a | |||
| f363c21ff5 | |||
| 1a96f40a10 | |||
| 6be3236b28 | |||
| ccefd835d0 | |||
| 1ddc14d59d | |||
| 87f617374a | |||
| b76fc350b0 | |||
| 4deb6fdd84 | |||
| 2d5be2157a | |||
| a19206cf0f | |||
| 0f9428ad5a | |||
| d748b333ee | |||
| e8a8019a71 | |||
| e8680b485d | |||
| ab4b92074c | |||
| d57c12d3c3 | |||
| 676b2090fb | |||
| 6005929a96 | |||
| 49ffaa1d94 | |||
| 9fb3fd1a4d | |||
| 28b2b5cac3 | |||
| 320b8e1171 | |||
| 95cb5d201f | |||
| 880708ab5f |
.gitattributes (1 changed line, vendored, new file)
@@ -0,0 +1 @@
* text eol=lf
.github/ISSUE_TEMPLATE.md (27 changed lines, vendored, new file)
@@ -0,0 +1,27 @@
### Additional Information
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._

#### Version of s3fs being used (s3fs --version)
_example: 1.00_

#### Version of fuse being used (pkg-config --modversion fuse)
_example: 2.9.4_

#### System information (uname -r)
_command result: uname -r_

#### Distro (cat /etc/issue)
_command result: cat /etc/issue_

#### s3fs command line used (if applicable)
```
```
#### /etc/fstab entry (if applicable):
```
```
#### s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
```
```
### Details about issue
.github/PULL_REQUEST_TEMPLATE.md (5 changed lines, vendored, new file)
@@ -0,0 +1,5 @@
### Relevant Issue (if applicable)
_If there are Issues related to this PullRequest, please list it._

### Details
_Please describe the details of PullRequest._
.gitignore (2 changed lines, vendored)
@@ -10,6 +10,7 @@
/stamp-h1
/config.h
/config.h.in
/config.h.in~
/configure
/depcomp
/test-driver
@@ -26,5 +27,6 @@
/test/.deps/
/test/Makefile
/test/Makefile.in
/test/s3proxy-*
/test/*.log
/default_commit_hash
.travis.yml (57 changed lines)
@@ -1,16 +1,43 @@
language: cpp
sudo: required
dist: trusty
cache: apt
before_install:
  - sudo apt-get update -qq
  - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
script:
  - ./autogen.sh
  - ./configure
  - make
  - make cppcheck
  - make check -C src
  - modprobe fuse
  - make check -C test
  - cat test/test-suite.log

matrix:
  include:
    - os: linux
      sudo: required
      dist: trusty
      cache: apt
      before_install:
        - sudo apt-get update -qq
        - sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
        - sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
      script:
        - ./autogen.sh
        - ./configure CPPFLAGS='-I/usr/local/opt/openssl/include'
        - make
        - make cppcheck
        - make check -C src
        - modprobe fuse
        - make check -C test
        - cat test/test-suite.log

    - os: osx
      osx_image: xcode8.3
      before_install:
        - brew update
        - brew install truncate
        - brew tap caskroom/cask
        - brew cask install osxfuse
        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
        - brew install gnu-sed
        - sudo ln -s /usr/local/opt/gnu-sed/bin/gsed /usr/local/bin/sed
        - sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf
        - brew install cppcheck
      script:
        - ./autogen.sh
        - PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
        - make
        - make cppcheck
        - make check -C src
        - if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
        - make check -C test
        - cat test/test-suite.log
ChangeLog (137 changed lines)
@@ -1,6 +1,142 @@
ChangeLog for S3FS
------------------

Version 1.84 -- Jul 8, 2018
#704 - Update README.md with details about .passwd-s3fs
#710 - add disk space reservation
#712 - Added Cygwin build options
#714 - reduce lock contention on file open
#724 - don't fail multirequest on single thread error
#726 - add an instance_name option for logging
#727 - Fixed Travis CI error about cppcheck - #713
#729 - FreeBSD build fixes
#733 - More useful error message for dupe entries in passwd file
#739 - cleanup curl handle state on retries
#745 - don't fail mkdir when directory exists
#753 - fix xpath selector in bucket listing
#754 - Validate the URL format for http/https
#755 - Added reset curl handle when returning to handle pool
#756 - Optimize defaults
#761 - Simplify installation for Ubuntu 16.04
#762 - Upgrade to S3Proxy 1.6.0
#763 - cleanup curl handles before curl share
#764 - Remove false multihead warnings
#765 - Add Debian installation instructions
#766 - Remove s3fs-python
#768 - Fixed memory leak
#769 - Revert "enable FUSE read_sync by default"
#774 - Option for IAM authentication endpoint
#780 - gnutls_auth: initialize libgcrypt
#781 - Fixed an error by cppcheck on OSX
#786 - Log messages for 5xx and 4xx HTTP response code
#789 - Instructions for SUSE and openSUSE prebuilt packages
#793 - Added list_object_max_keys option based on #783 PR

Version 1.83 -- Dec 17, 2017
#606 - Add Homebrew instructions
#608 - Fix chown_nocopy losing existing uid/gid if unspecified
#609 - Group permission checks sometimes fail with large number of groups
#611 - Fixed clock_gettime build failure on macOS 10.12 Sierra - #600
#621 - Upgrade to S3Proxy 1.5.3
#627 - Update README.md
#630 - Added travis test on osx for #601
#631 - Merged macosx branch into master branch #601
#636 - Fix intermittent upload failures on macOS
#637 - Add blurb about non-Amazon S3 implementations
#638 - Minor fixes to README
#639 - Update Homebrew instructions
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
#644 - Fixed with unnecessary equal in POST uploads url argment - #643
#645 - Configure S3Proxy for SSL
#646 - Simplify S3Proxy PID handling
#652 - Fix s3fs_init message
#659 - Do not fail updating directory when removing old-style object(ref #658)
#660 - Refixed s3fs_init message(ref #652)
#663 - Lock FdEntity when mutating orgmeta
#664 - auth headers insertion refactoring
#668 - Changed .travis.yml for fixing not found gpg2 on osx
#669 - add IBM IAM authentication support
#670 - Fixed a bug in S3fsCurl::LocateBundle
#671 - Add support for ECS metadata endpoint
#675 - Reduce use of preprocessor
#676 - Move str definition from header to implementation
#677 - Add s3proxy to .gitignore
#679 - README.md Addition
#681 - Changed functions about reading passwd file
#684 - Correct signedness warning
#686 - remove use of jsoncpp
#688 - Improved use of temporary files - #678
#690 - Added option ecs description to man page
#692 - Updated template md files for issue and pr
#695 - fix condition for parallel download
#697 - Fixing race condition in FdEntity::GetStats
#699 - Fix dbglevel usage

Version 1.82 -- May 13, 2017
#597 - Not fallback to HTTP - #596
#598 - Updated ChangeLog and configure.ac for release 1.82

Version 1.81 -- May 13, 2017
#426 - Updated to correct ChangeLog
#431 - fix typo s/controll/control/
#432 - Include location constraint when creating bucket
#433 - Correct search and replace typo
#440 - Handled all curl error without exiting process - #437
#443 - Fix for leaks during stat cache entry expiry / truncation (#340)
#444 - Add mirror file logic for removing cache file
#447 - added fuse package for mounting via /etc/fstab, fixes #417
#449 - Accept mount options compatible with mtab
#451 - Correct path in README
#454 - Changed for accepting mount options compatible with mtab - #449
#466 - Fixed a bug about could not copy file mode from org file
#471 - Added use_xattr option for #467 and #460
#477 - OS-specific correspondence of the extended attribute header
#483 - Trim symbolic link original path in file
#487 - Split header debugging onto multiple lines for easier reading
#488 - Fixed searching Content-Length without case sensitive - #480
#489 - Changed headers_t map using nocase compare function - #488
#494 - Fix typo s/destroied/destroyed/
#495 - Fix invalid V4 signature on multipart copy requests
#498 - Upgrade to S3Proxy 1.5.1
#502 - Fixed issue#435 branch codes for remaining bugs(2)
#503 - Add missing call to mtime test
#504 - Use describe helper function
#505 - Correct typos
#509 - Use server-provided ETag during complete upload
#511 - Fixed a bug about uploading NULL to some part of the file contents
#512 - Changed clock_gettime func to s3fs_clock_gettime for homebrew - #468
#513 - Added issue and PR templates.
#517 - Update s3fs.1 - removed duplicated word
#520 - Added links for eventual consistency in README.md - #515
#539 - Upgrade to S3Proxy 1.5.2
#540 - Address cppcheck 1.77 warnings
#545 - Changed base cached time of stat_cache_expire option - #523
#546 - Fixed double initialization of SSL library at foreground
#550 - Add umount instruction for unplivileged user
#551 - Updated stat_cache_expire option description - #545
#552 - switch S3fsMultiCurl to use foreground threads
#553 - add TLS cipher suites customization
#554 - cleanup cache directory when running out of disk space
#555 - don't sign empty headers (as they are discarded
#556 - fix multipart upload handling without cache
#557 - Added check_cache_dir_exist option(refixed #347) - #538
#558 - Fixed a bug in logic about truncating stat cache
#560 - Fixed about multipart uploading at no free space related to #509
#567 - Do not send ACL unless overridden
#576 - Added option for complementing lack of stat mode
#578 - Refactored the get_object_attribute function
#579 - Added notsup_compat_dir option
#580 - Enhanced bucket/path parameter check
#582 - Check errors returned in 200 OK responses for put header request
#583 - Updated limit object size in s3fs man page
#585 - Fixed failure to upload/copy with SSE_C and SSE_KMS
#587 - Changed copyright year format for debian pkg
#588 - Default transport to HTTPS
#590 - Updated man page for default_acl option - #567
#593 - Backward compatible for changing default transport to HTTPS
#594 - Check bucket at public bucket and add nocopyapi option automatically
#595 - Updated ChangeLog and configure.ac for release 1.81

Version 1.80 -- May 29, 2016
#213 - Parse ETag from copy multipart correctly
#215 - Fix mem leak in openssl_auth.cpp:s3fs_sha256hexsum
@@ -103,6 +239,7 @@ Version 1.80 -- May 29, 2016
#420 - Skip early credential checks when iam_role=auto
#422 - Fixes for iam_role=auto
#424 - Added travis CI badge in README.md
#425 - Updated ChangeLog and configure.ac for release 1.80

Version 1.79 -- Jul 19, 2015
issue #60 - Emit user-friendly log messages on failed CheckBucket requests
Makefile.am (12 changed lines)
@@ -30,10 +30,14 @@ release : dist ../utils/release.sh

cppcheck:
	cppcheck --quiet --error-exitcode=1 \
	    --inline-suppr \
	    --std=c++03 \
	    -D HAVE_ATTR_XATTR_H \
	    -D HAVE_SYS_EXTATTR_H \
	    -D HAVE_MALLOC_TRIM \
	    -U CURLE_PEER_FAILED_VERIFICATION \
	    --enable=all \
	    -U P_tmpdir \
	    -U ENOATTR \
	    --enable=warning,style,information,missingInclude \
	    --suppress=missingIncludeSystem \
	    --suppress=unsignedLessThanZero \
	    --suppress=unusedFunction \
	    --suppress=variableScope \
	    src/ test/
README.md (80 changed lines)
@@ -22,21 +22,45 @@ Features
Installation
------------

Ensure you have all the dependencies:
Some systems provide pre-built packages:

* On Debian 9 and Ubuntu 16.04 or newer:

```
sudo apt-get install s3fs
```

* On SUSE 12 or newer and openSUSE 42.1 or newer:

```
sudo zypper in s3fs
```

* On Mac OS X, install via [Homebrew](http://brew.sh/):

```ShellSession
$ brew cask install osxfuse
$ brew install s3fs
```

Compilation
-----------

* On Linux, ensure you have all the dependencies:

On Ubuntu 14.04:

```
sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
sudo apt-get install automake autotools-dev fuse g++ git libcurl4-openssl-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
```

On CentOS 7:

```
sudo yum install automake fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
```

Compile from master via the following commands:
Then compile from master via the following commands:

```
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
@@ -50,45 +74,65 @@ sudo make install
Examples
--------

Enter your S3 identity and credential in a file `/path/to/passwd`:
The default location for the s3fs password file can be created:

* using a .passwd-s3fs file in the users home directory (i.e. ~/.passwd-s3fs)
* using the system-wide /etc/passwd-s3fs file

Enter your S3 identity and credential in a file `~/.passwd-s3fs` and set
owner-only permissions:

```
echo MYIDENTITY:MYCREDENTIAL > /path/to/passwd
```

Make sure the file has proper permissions (if you get 'permissions' error when mounting) `/path/to/passwd`:

```
chmod 600 /path/to/passwd
echo MYIDENTITY:MYCREDENTIAL > ~/.passwd-s3fs
chmod 600 ~/.passwd-s3fs
```

Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs
```

If you encounter any errors, enable debug output:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=/path/to/passwd -d -d -f -o f2 -o curldbg
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs -o dbglevel=info -f -o curldbg
```

You can also mount on boot by entering the following line to `/etc/fstab`:

```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
```

or

```
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```

If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:

```
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs -o url=http://url.to.s3/ -o use_path_request_style
```

or(fstab)
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=http://url.to.s3/ 0 0
```

To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
```
echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
```
The Service Instance ID is only required when using the `-o create_bucket` option.

Note: You may also want to create the global credential file first

```
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
chmod 600 /path/to/passwd
chmod 600 /etc/passwd-s3fs
```

Note2: You may also need to make sure `netfs` service is start on boot
@@ -101,7 +145,7 @@ Generally S3 cannot offer the same performance or semantics as a local file syst

* random writes or appends to files require rewriting the entire file
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories
* no coordination between multiple clients mounting the same bucket
* no hard links
@@ -111,8 +155,7 @@ References

* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
* [s3fs-python](https://fedorahosted.org/s3fs/) - an older and less complete implementation written in Python
* [S3Proxy](https://github.com/andrewgaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket

@@ -126,3 +169,4 @@ License
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>

Licensed under the GNU GPL version 2
configure.ac (15 changed lines)
@@ -20,7 +20,7 @@
dnl Process this file with autoconf to produce a configure script.

AC_PREREQ(2.59)
AC_INIT(s3fs, 1.80)
AC_INIT(s3fs, 1.84)
AC_CONFIG_HEADER([config.h])

AC_CANONICAL_SYSTEM
@@ -29,12 +29,21 @@ AM_INIT_AUTOMAKE([foreign])
AC_PROG_CXX
AC_PROG_CC

AC_CHECK_HEADERS([sys/xattr.h])
AC_CHECK_HEADERS([attr/xattr.h])
AC_CHECK_HEADERS([sys/extattr.h])

CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"

dnl ----------------------------------------------
dnl For OSX
dnl ----------------------------------------------
case "$target" in
    *-cygwin* )
        # Do something specific for windows using winfsp
        CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE=1"
        min_fuse_version=2.8
        ;;
    *-darwin* )
        # Do something specific for mac
        min_fuse_version=2.7.3
@@ -172,13 +181,13 @@ dnl
dnl For PKG_CONFIG before checking nss/gnutls.
dnl this is redundant checking, but we need checking before following.
dnl
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6])
PKG_CHECK_MODULES([common_lib_checking], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 ])

AC_MSG_CHECKING([compile s3fs with])
case "${auth_lib}" in
openssl)
    AC_MSG_RESULT(OpenSSL)
    PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9])
    PKG_CHECK_MODULES([DEPS], [fuse >= ${min_fuse_version} libcurl >= 7.0 libxml-2.0 >= 2.6 libcrypto >= 0.9 ])
    ;;
gnutls)
    AC_MSG_RESULT(GnuTLS-gcrypt)
@@ -5,9 +5,15 @@ S3FS \- FUSE-based file system backed by Amazon S3
.SS mounting
.TP
\fBs3fs bucket[:/path] mountpoint \fP [options]
.TP
\fBs3fs mountpoint \fP [options(must specify bucket= option)]
.SS unmounting
.TP
\fBumount mountpoint
For root.
.TP
\fBfusermount -u mountpoint
For unprivileged user.
.SS utility mode ( remove interrupted multipart uploading objects )
.TP
\fBs3fs \-u bucket
@@ -48,17 +54,24 @@ FUSE singlethreaded option (disables multi-threaded operation)
All s3fs options must given in the form where "opt" is:
<option_name>=<option_value>
.TP
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written S3 objects, e.g., "public-read".
Any created files will have this canned acl.
Any updated files will also have this canned acl applied!
\fB\-o\fR bucket
if it is not specified bucket name(and path) in command line, must specify this option after \-o option for bucket name.
.TP
\fB\-o\fR retries (default="2")
\fB\-o\fR default_acl (default="private")
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
empty string means do not send header.
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
.TP
\fB\-o\fR retries (default="5")
number of times to retry a failed S3 transaction.
.TP
\fB\-o\fR use_cache (default="" which means disabled)
local folder to use for local file cache.
.TP
\fB\-o\fR check_cache_dir_exist (default is disable)
If use_cache is set, check if the cache directory exists.
If this option is not specified, it will be created at runtime when the cache directory does not exist.
.TP
\fB\-o\fR del_cache - delete local file cache
delete local file cache when s3fs starts and exits.
.TP
@@ -84,7 +97,7 @@ If there are some keys after first line, those are used downloading object which
So that, you can keep all SSE-C keys in file, that is SSE-C key history.
If you specify "custom"("c") without file path, you need to set custom key by load_sse_c option or AWSSSECKEYS environment.(AWSSSECKEYS environment has some SSE-C keys with ":" separator.)
This option is used to decide the SSE type.
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloaing, you can use load_sse_c option instead of this option.
So that if you do not want to encrypt a object at uploading, but you need to decrypt encrypted object at downloading, you can use load_sse_c option instead of this option.
For setting SSE-KMS, specify "use_sse=kmsid" or "use_sse=kmsid:<kms id>".
You can use "k" for short "kmsid".
If you can specify SSE-KMS type with your <kms id> in AWS KMS, you can set it after "kmsid:"(or "k:").
@@ -122,6 +135,7 @@ If you specify this option for set "Content-Encoding" HTTP header, please take c
.TP
\fB\-o\fR public_bucket (default="" which means disabled)
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
.TP
\fB\-o\fR connect_timeout (default="300" seconds)
time to wait for connection before giving up.
@@ -129,11 +143,18 @@ time to wait for connection before giving up.
\fB\-o\fR readwrite_timeout (default="60" seconds)
time to wait between read/write activity before giving up.
.TP
\fB\-o\fR max_stat_cache_size (default="1000" entries (about 4MB))
\fB\-o\fR list_object_max_keys (default="1000")
specify the maximum number of keys returned by S3 list object API. The default is 1000. you can set this value to 1000 or more.
.TP
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
maximum number of entries in the stat cache
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache
specify expire time(seconds) for entries in the stat cache. This expire time indicates the time since stat cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
specify expire time(seconds) for entries in the stat cache. This expire time is based on the time from the last access time of the stat cache.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
\fB\-o\fR enable_noobj_cache (default is disable)
enable cache entries for the object which does not exist.
@@ -165,13 +186,14 @@ number of one part size in multipart uploading request.
The default size is 10MB(10485760byte), minimum value is 5MB(5242880byte).
Specify number of MB and over 5(MB).
.TP
\fB\-o\fR ensure_diskfree(default the same as multipart_size value)
\fB\-o\fR ensure_diskfree(default 0)
sets MB to ensure disk free space. This option means the threshold of free space size on disk which is used for the cache file by s3fs.
s3fs makes file for downloading, and uploading and caching files.
If the disk free space is smaller than this value, s3fs do not use diskspace as possible in exchange for the performance.
.TP
\fB\-o\fR url (default="http://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTPS, then you can set url=https://s3.amazonaws.com
\fB\-o\fR url (default="https://s3.amazonaws.com")
sets the url to use to access Amazon S3. If you want to use HTTP, then you can set "url=http://s3.amazonaws.com".
If you do not use https, please specify the URL with the url option.
.TP
\fB\-o\fR endpoint (default="us-east-1")
sets the endpoint to use.
@@ -187,7 +209,7 @@ sets signing AWS requests by sing Signature Version 2.
sets umask for the mount point directory.
If allow_other option is not set, s3fs allows access to the mount point only to the owner.
In the opposite case s3fs allows access to all users as the default.
But if you set the allow_other with this option, you can controll the permission permissions of the mount point by this option like umask.
But if you set the allow_other with this option, you can control permissions of the mount point by this option like umask.
.TP
\fB\-o\fR nomultipart - disable multipart uploads
.TP
@@ -197,11 +219,26 @@ Enable to send "Content-MD5" header when uploading a object without multipart po
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
.TP
\fB\-o\fR ecs ( default is disable )
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
.TP
\fB\-o\fR iam_role ( default is no IAM role )
This option requires the IAM role name or "auto". If you specify "auto", s3fs will automatically use the IAM role names that are set to an instance. If you specify this option without any argument, it is the same as that you have specified the "auto".
.TP
\fB\-o\fR noxmlns - disable registing xml name space.
disable registing xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
\fB\-o\fR ibm_iam_auth ( default is not using IBM IAM authentication )
This option instructs s3fs to use IBM IAM authentication. In this mode, the AWSAccessKey and AWSSecretKey will be used as IBM's Service-Instance-ID and APIKey, respectively.
.TP
\fB\-o\fR ibm_iam_endpoint ( default is https://iam.bluemix.net )
Set the URL to use for IBM IAM authentication.
.TP
\fB\-o\fR use_xattr ( default is not handling the extended attribute )
Enable to handle the extended attribute(xattrs).
If you set this option, you can use the extended attribute.
For example, encfs and ecryptfs need to support the extended attribute.
Notice: if s3fs handles the extended attribute, s3fs can not work to copy command with preserve=mode.
.TP
\fB\-o\fR noxmlns - disable registering xml name space.
disable registering xml name space for response of ListBucketResult and ListVersionsResult etc. Default name space is looked up from "http://s3.amazonaws.com/doc/2006-03-01".
This option should not be specified now, because s3fs looks up xmlns automatically after v1.66.
.TP
\fB\-o\fR nocopyapi - for other incomplete compatibility object storage.
@@ -211,15 +248,39 @@ If you set this option, s3fs do not use PUT with "x-amz-copy-source"(copy api).
\fB\-o\fR norenameapi - for other incomplete compatibility object storage.
For a distributed object storage which is compatibility S3 API without PUT(copy api).
This option is a subset of nocopyapi option. The nocopyapi option does not use copy-api for all command(ex. chmod, chown, touch, mv, etc), but this option does not use copy-api for only rename command(ex. mv).
If this option is specified with nocopapi, the s3fs ignores it.
If this option is specified with nocopyapi, then s3fs ignores it.
.TP
\fB\-o\fR use_path_request_style (use legacy API calling style)
Enble compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
Enable compatibility with S3-like APIs which do not support the virtual-host request style, by using the older path request style.
.TP
\fB\-o\fR noua (suppress User-Agent header)
Usually s3fs outputs of the User-Agent in "s3fs/<version> (commit hash <hash>; <using ssl library name>)" format.
If this option is specified, s3fs suppresses the output of the User-Agent.
.TP
\fB\-o\fR cipher_suites
Customize TLS cipher suite list. Expects a colon separated list of cipher suite names.
A list of available cipher suites, depending on your TLS engine, can be found on the CURL library documentation:
https://curl.haxx.se/docs/ssl-ciphers.html
.TP
\fB\-o\fR instance_name
The instance name of the current s3fs mountpoint.
This name will be added to logging messages and user agent headers sent by s3fs.
.TP
\fB\-o\fR complement_stat (complement lack of file/directory mode)
s3fs complements lack of information about file/directory mode if a file or a directory object does not have x-amz-meta-mode header.
As default, s3fs does not complements stat information for a object, then the object will not be able to be allowed to list/modify.
.TP
\fB\-o\fR notsup_compat_dir (not support compatibility directory types)
As a default, s3fs supports objects of the directory type as much as possible and recognizes them as directories.
Objects that can be recognized as directory objects are "dir/", "dir", "dir_$folder$", and there is a file object that does not have a directory object but contains that directory path.
s3fs needs redundant communication to support all these directory types.
The object as the directory created by s3fs is "dir/".
By restricting s3fs to recognize only "dir/" as a directory, communication traffic can be reduced.
This option is used to give this restriction to s3fs.
However, if there is a directory object other than "dir/" in the bucket, specifying this option is not recommended.
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
Please use this option when the directory in the bucket is only "dir/" object.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. set value as crit(critical), err(error), warn(warning), info(information) to debug level. default debug level is critical.
If s3fs run with "-d" option, the debug level is set information.
@@ -234,7 +295,7 @@ Most of the generic mount options described in 'man mount' are supported (ro, rw
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
.SH NOTES
.TP
Maximum file size=64GB (limited by s3fs, not Amazon).
The maximum size of objects that s3fs can handle depends on Amazone S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
.TP
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -130,8 +130,8 @@ bool AdditionalHeader::Load(const char* file)
  // compile
  regex_t* preg = new regex_t;
  int result;
  char errbuf[256];
  if(0 != (result = regcomp(preg, key.c_str(), REG_EXTENDED | REG_NOSUB))){ // we do not need matching info
    char errbuf[256];
    regerror(result, preg, errbuf, sizeof(errbuf));
    S3FS_PRN_ERR("failed to compile regex from %s key by %s.", key.c_str(), errbuf);
    delete preg;
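The hunk above narrows errbuf to the failure branch of regcomp(). For readers unfamiliar with the POSIX regex API involved, here is a minimal self-contained sketch of the same compile-and-report pattern; it is an illustration, not s3fs code:

```cpp
#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t     preg;
    const char* key = "^x-amz-meta-[";  // deliberately malformed pattern
    // REG_NOSUB: no capture groups are needed, as in AdditionalHeader::Load
    int result = regcomp(&preg, key, REG_EXTENDED | REG_NOSUB);
    if(0 != result){
        char errbuf[256];  // only needed on the error path, hence the narrowed scope
        regerror(result, &preg, errbuf, sizeof(errbuf));
        fprintf(stderr, "failed to compile regex from %s key by %s.\n", key, errbuf);
        return 1;
    }
    regfree(&preg);  // release the compiled pattern on the success path
    return 0;
}
```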
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -56,8 +56,13 @@ using namespace std;
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif

#ifndef HAVE_CLOCK_GETTIME
static int clock_gettime(int clk_id, struct timespec* ts)
#ifdef HAVE_CLOCK_GETTIME
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
  return clock_gettime(static_cast<clockid_t>(clk_id), ts);
}
#else
static int s3fs_clock_gettime(int clk_id, struct timespec* ts)
{
  struct timeval now;
  if(0 != gettimeofday(&now, NULL)){
@@ -71,7 +76,7 @@ static int clock_gettime(int clk_id, struct timespec* ts)

inline void SetStatCacheTime(struct timespec& ts)
{
  if(-1 == clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){
  if(-1 == s3fs_clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)){
    ts.tv_sec = time(NULL);
    ts.tv_nsec = 0;
  }
@@ -137,7 +142,7 @@ pthread_mutex_t StatCache::stat_cache_lock;
//-------------------------------------------------------------------
// Constructor/Destructor
//-------------------------------------------------------------------
StatCache::StatCache() : IsExpireTime(false), ExpireTime(0), CacheSize(1000), IsCacheNoObject(false)
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(100000), IsCacheNoObject(false)
{
  if(this == StatCache::getStatCacheData()){
    stat_cache.clear();
@@ -177,19 +182,21 @@ time_t StatCache::GetExpireTime(void) const
  return (IsExpireTime ? ExpireTime : (-1));
}

time_t StatCache::SetExpireTime(time_t expire)
time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
{
  time_t old = ExpireTime;
  ExpireTime = expire;
  IsExpireTime = true;
  time_t old           = ExpireTime;
  ExpireTime           = expire;
  IsExpireTime         = true;
  IsExpireIntervalType = is_interval;
  return old;
}

time_t StatCache::UnsetExpireTime(void)
{
  time_t old = IsExpireTime ? ExpireTime : (-1);
  ExpireTime = 0;
  IsExpireTime = false;
  time_t old           = IsExpireTime ? ExpireTime : (-1);
  ExpireTime           = 0;
  IsExpireTime         = false;
  IsExpireIntervalType = false;
  return old;
}

@@ -278,7 +285,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
      (*pisforce) = ent->isforce;
    }
    ent->hit_count++;
    SetStatCacheTime(ent->cache_date);

    if(IsExpireIntervalType){
      SetStatCacheTime(ent->cache_date);
    }
    pthread_mutex_unlock(&StatCache::stat_cache_lock);
    return true;
  }
@@ -495,7 +505,10 @@ bool StatCache::TruncateCache(void)
  if(IsExpireTime){
    for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
      stat_cache_entry* entry = iter->second;
      if(!entry || (0L < entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
      if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
        if(entry){
          delete entry;
        }
        stat_cache.erase(iter++);
      }else{
        ++iter;
@@ -532,6 +545,9 @@ bool StatCache::TruncateCache(void)
    stat_cache_t::iterator siter = *iiter;

    S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
    if(siter->second){
      delete siter->second;
    }
    stat_cache.erase(siter);
  }
  S3FS_MALLOCTRIM(0);
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -55,6 +55,7 @@ class StatCache
    static pthread_mutex_t stat_cache_lock;
    stat_cache_t stat_cache;
    bool IsExpireTime;
    bool IsExpireIntervalType; // if this flag is true, cache data is updated at last access time.
    time_t ExpireTime;
    unsigned long CacheSize;
    bool IsCacheNoObject;
@@ -78,7 +79,7 @@ class StatCache
    unsigned long GetCacheSize(void) const;
    unsigned long SetCacheSize(unsigned long size);
    time_t GetExpireTime(void) const;
    time_t SetExpireTime(time_t expire);
    time_t SetExpireTime(time_t expire, bool is_interval = false);
    time_t UnsetExpireTime(void);
    bool SetCacheNoObject(bool flag);
    bool EnableCacheNoObject(void) {
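Taken together, the two stat-cache hunks above add a second expiry policy: plain stat_cache_expire measures from the moment the entry was cached, while the interval type (IsExpireIntervalType) re-stamps the entry on every cache hit, so frequently accessed entries never age out. A minimal sketch of the difference, using illustrative names rather than the real StatCache class:

```cpp
#include <ctime>
#include <iostream>

// Illustrative-only model of the two expiry policies.
struct Entry {
    time_t cache_date;  // last time the entry was stamped
};

static bool IsExpired(const Entry& ent, time_t expire_seconds)
{
    return (ent.cache_date + expire_seconds) < time(NULL);
}

// Interval type: refresh the stamp on every hit (the same effect as calling
// SetStatCacheTime() inside GetStat). Absolute type: leave the stamp alone,
// so the entry expires a fixed time after creation regardless of use.
static void OnCacheHit(Entry& ent, bool is_interval_type)
{
    if(is_interval_type){
        ent.cache_date = time(NULL);
    }
}

int main()
{
    Entry ent = { time(NULL) };
    OnCacheHit(ent, /*is_interval_type=*/true);
    std::cout << (IsExpired(ent, 900) ? "expired" : "fresh") << std::endl;
    return 0;
}
```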
src/common.h (47 changed lines)
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -21,12 +21,24 @@
#ifndef S3FS_COMMON_H_
#define S3FS_COMMON_H_

#include <stdlib.h>
#include "../config.h"

//
// Extended attribute
//
#ifdef HAVE_SYS_EXTATTR_H
#include <sys/extattr.h>
#elif HAVE_ATTR_XATTR_H
#include <attr/xattr.h>
#elif HAVE_SYS_XATTR_H
#include <sys/xattr.h>
#endif

//
// Macro
//
#define SAFESTRPTR(strptr) (strptr ? strptr : "")
static inline const char *SAFESTRPTR(const char *strptr) { return strptr ? strptr : ""; }

//
// Debug level
//
@@ -68,7 +80,7 @@ enum s3fs_log_level{
    if(foreground){ \
      fprintf(stdout, "%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), __FILE__, __func__, __LINE__, __VA_ARGS__); \
    }else{ \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s:%s(%d): " fmt "%s", __FILE__, __func__, __LINE__, __VA_ARGS__); \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s:%s(%d): " fmt "%s", instance_name.c_str(), __FILE__, __func__, __LINE__, __VA_ARGS__); \
    } \
  }

@@ -77,7 +89,7 @@ enum s3fs_log_level{
    if(foreground){ \
      fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(level), S3FS_LOG_NEST(nest), __FILE__, __func__, __LINE__, __VA_ARGS__); \
    }else{ \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s" fmt "%s", S3FS_LOG_NEST(nest), __VA_ARGS__); \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(level), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(nest), __VA_ARGS__); \
    } \
  }

@@ -86,7 +98,15 @@ enum s3fs_log_level{
      fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
    }else{ \
      fprintf(stderr, "s3fs: " fmt "%s\n", __VA_ARGS__); \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "s3fs: " fmt "%s", __VA_ARGS__); \
      syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%ss3fs: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
    }

// Special macro for init message
#define S3FS_PRN_INIT_INFO(fmt, ...) \
  if(foreground){ \
    fprintf(stdout, "%s%s%s:%s(%d): " fmt "%s\n", S3FS_LOG_LEVEL_STRING(S3FS_LOG_INFO), S3FS_LOG_NEST(0), __FILE__, __func__, __LINE__, __VA_ARGS__, ""); \
  }else{ \
    syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s%s" fmt "%s", instance_name.c_str(), S3FS_LOG_NEST(0), __VA_ARGS__, ""); \
  }

// [NOTE]
@@ -107,12 +127,18 @@ enum s3fs_log_level{
//
// Typedef
//
typedef std::map<std::string, std::string> headers_t;
struct header_nocase_cmp : public std::binary_function<std::string, std::string, bool>{
  bool operator()(const std::string &strleft, const std::string &strright) const
  {
    return (strcasecmp(strleft.c_str(), strright.c_str()) < 0);
  }
};
typedef std::map<std::string, std::string, header_nocase_cmp> headers_t;

//
// Header "x-amz-meta-xattr" is for extended attributes.
// This header is url encoded string which is json formated.
// x-amz-meta-xattr:urlencod({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
// This header is url encoded string which is json formatted.
// x-amz-meta-xattr:urlencode({"xattr-1":"base64(value-1)","xattr-2":"base64(value-2)","xattr-3":"base64(value-3)"})
//
typedef struct xattr_value{
  unsigned char* pvalue;
@@ -130,17 +156,20 @@ typedef struct xattr_value{
typedef std::map<std::string, PXATTRVAL> xattrs_t;

//
// Global valiables
// Global variables
//
extern bool foreground;
extern bool nomultipart;
extern bool pathrequeststyle;
extern bool complement_stat;
extern std::string program_name;
extern std::string service_path;
extern std::string host;
extern std::string bucket;
extern std::string mount_prefix;
extern std::string endpoint;
extern std::string cipher_suites;
extern std::string instance_name;
extern s3fs_log_level debug_level;
extern const char* s3fs_log_nest[S3FS_LOG_NEST_MAX];
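The headers_t change above (from #488/#489 in the ChangeLog) replaces the map's default ordering with a case-insensitive comparator, so "Content-Length" and "content-length" resolve to the same entry. A small self-contained demonstration of the same idea (std::binary_function is omitted here since std::map does not require it and C++17 removed it):

```cpp
#include <strings.h>  // strcasecmp
#include <iostream>
#include <map>
#include <string>

// Same ordering rule as header_nocase_cmp: compare keys ignoring case.
struct nocase_cmp {
    bool operator()(const std::string& l, const std::string& r) const
    {
        return strcasecmp(l.c_str(), r.c_str()) < 0;
    }
};

typedef std::map<std::string, std::string, nocase_cmp> headers_t;

int main()
{
    headers_t meta;
    meta["Content-Length"] = "1024";

    // Both spellings hit the same entry because the comparator treats
    // the keys as equivalent; the map still holds a single element.
    std::cout << meta["content-length"] << std::endl;  // prints 1024
    std::cout << meta.size() << std::endl;             // prints 1
    return 0;
}
```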
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
src/curl.cpp (1254 changed lines)
File diff suppressed because it is too large
src/curl.h (57 changed lines)
@@ -1,7 +1,7 @@
/*
 * s3fs - FUSE-based file system backed by Amazon S3
 *
 * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
 * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -26,7 +26,7 @@
//----------------------------------------------
// Symbols
//----------------------------------------------
#define MIN_MULTIPART_SIZE 5242880 // 5MB
static const int MIN_MULTIPART_SIZE = 5 * 1024 * 1024;

//----------------------------------------------
// class BodyData
@@ -130,7 +130,7 @@ class S3fsMultiCurl;
class CurlHandlerPool
{
  public:
    CurlHandlerPool(int maxHandlers)
    explicit CurlHandlerPool(int maxHandlers)
      : mMaxHandlers(maxHandlers)
      , mHandlers(NULL)
      , mIndex(-1)
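The explicit added to CurlHandlerPool's constructor above prevents an integer from being implicitly converted into a pool object. A toy illustration of exactly what the one-word change forbids (not the real class):

```cpp
// Toy stand-in for the constructor change.
class Pool {
  public:
    explicit Pool(int maxHandlers) : mMax(maxHandlers) {}
  private:
    int mMax;
};

static void Use(const Pool&) {}

int main()
{
    Pool p(32);    // fine: direct initialization
    Use(Pool(8));  // fine: the conversion is spelled out
    // Use(8);     // compile error with `explicit`; accepted silently without it
    return 0;
}
```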
@@ -159,7 +159,7 @@ typedef std::map<std::string, std::string> iamcredmap_t;
typedef std::map<std::string, std::string> sseckeymap_t;
typedef std::list<sseckeymap_t> sseckeylist_t;

// strage class(rrs)
// storage class(rrs)
enum storage_class_t {
  STANDARD,
  STANDARD_IA,
@@ -175,9 +175,11 @@ enum sse_type_t {
};

// share
#define SHARE_MUTEX_DNS 0
#define SHARE_MUTEX_SSL_SESSION 1
#define SHARE_MUTEX_MAX 2
enum {
  SHARE_MUTEX_DNS = 0,
  SHARE_MUTEX_SSL_SESSION = 1,
  SHARE_MUTEX_MAX = 2,
};

// Class for lapping curl
//
@@ -230,12 +232,19 @@ class S3fsCurl
    static std::string AWSSecretAccessKey;
    static std::string AWSAccessToken;
    static time_t AWSAccessTokenExpire;
    static bool is_ecs;
    static bool is_ibm_iam_auth;
    static std::string IAM_cred_url;
    static size_t IAM_field_count;
    static std::string IAM_token_field;
    static std::string IAM_expiry_field;
    static std::string IAM_role;
    static long ssl_verify_hostname;
    static curltime_t curl_times;
    static curlprogress_t curl_progress;
    static std::string curl_ca_bundle;
    static mimes_t mimeTypes;
    static std::string userAgent;
    static int max_parallel_cnt;
    static off_t multipart_size;
    static bool is_sigv4;
@@ -266,6 +275,8 @@ class S3fsCurl
    int b_ssekey_pos; // backup for retrying
    std::string b_ssevalue; // backup for retrying
    sse_type_t b_ssetype; // backup for retrying
    std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
    std::string query_string; // request query string

  public:
    // constructor/destructor
@@ -311,7 +322,10 @@ class S3fsCurl
    bool ResetHandle(void);
    bool RemakeHandle(void);
    bool ClearInternalData(void);
    void insertV4Headers(const std::string &op, const std::string &path, const std::string &query_string, const std::string &payload_hash);
    void insertV4Headers();
    void insertV2Headers();
    void insertIBMIAMHeaders();
    void insertAuthHeaders();
    std::string CalcSignatureV2(const std::string& method, const std::string& strMD5, const std::string& content_type, const std::string& date, const std::string& resource);
    std::string CalcSignature(const std::string& method, const std::string& canonical_uri, const std::string& query_string, const std::string& strdate, const std::string& payload_hash, const std::string& date8601);
    bool GetUploadId(std::string& upload_id);
@@ -319,6 +333,7 @@ class S3fsCurl

    int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
    int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
    bool UploadMultipartPostComplete();

  public:
    // class methods
@@ -328,7 +343,7 @@ class S3fsCurl
    static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
    static bool CheckIAMCredentialUpdate(void);

    // class methods(valiables)
    // class methods(variables)
    static std::string LookupMimeType(const std::string& name);
    static bool SetCheckCertificate(bool isCertCheck);
    static bool SetDnsCache(bool isCache);
@@ -340,6 +355,7 @@ class S3fsCurl
    static bool SetPublicBucket(bool flag);
    static bool IsPublicBucket(void) { return S3fsCurl::is_public_bucket; }
    static std::string SetDefaultAcl(const char* acl);
    static std::string GetDefaultAcl();
    static storage_class_t SetStorageClass(storage_class_t storage_class);
    static storage_class_t GetStorageClass() { return S3fsCurl::storage_class; }
    static bool LoadEnvSse(void) { return (S3fsCurl::LoadEnvSseCKeys() && S3fsCurl::LoadEnvSseKmsid()); }
@@ -361,13 +377,22 @@ class S3fsCurl
    static bool SetVerbose(bool flag);
    static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
    static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
    static bool IsSetAccessKeyId(void){
      return (0 < S3fsCurl::IAM_role.size() || (0 < S3fsCurl::AWSAccessKeyId.size() && 0 < S3fsCurl::AWSSecretAccessKey.size()));
    static bool IsSetAccessKeyID(void){
      return (0 < S3fsCurl::AWSAccessKeyId.size());
    }
    static bool IsSetAccessKeys(void){
      return (0 < S3fsCurl::IAM_role.size() || ((0 < S3fsCurl::AWSAccessKeyId.size() || S3fsCurl::is_ibm_iam_auth) && 0 < S3fsCurl::AWSSecretAccessKey.size()));
    }
    static long SetSslVerifyHostname(long value);
    static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
    static int SetMaxParallelCount(int value);
    static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
    static bool SetIsECS(bool flag);
    static bool SetIsIBMIAMAuth(bool flag);
    static size_t SetIAMFieldCount(size_t field_count);
    static std::string SetIAMCredentialsURL(const char* url);
    static std::string SetIAMTokenField(const char* token_field);
    static std::string SetIAMExpiryField(const char* expiry_field);
    static std::string SetIAMRole(const char* role);
    static const char* GetIAMRole(void) { return S3fsCurl::IAM_role.c_str(); }
    static bool SetMultipartSize(off_t size);
@@ -376,10 +401,11 @@ class S3fsCurl
    static bool IsSignatureV4(void) { return S3fsCurl::is_sigv4; }
    static bool SetUserAgentFlag(bool isset) { bool bresult = S3fsCurl::is_ua; S3fsCurl::is_ua = isset; return bresult; }
    static bool IsUserAgentFlag(void) { return S3fsCurl::is_ua; }
    static void InitUserAgent(void);

    // methods
    bool CreateCurlHandle(bool force = false);
    bool DestroyCurlHandle(void);
    bool DestroyCurlHandle(bool force = false);

    bool LoadIAMRoleFromMetaData(void);
    bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
@@ -407,12 +433,13 @@ class S3fsCurl
    int MultipartUploadRequest(const std::string& upload_id, const char* tpath, int fd, off_t offset, size_t size, etaglist_t& list);
    int MultipartRenameRequest(const char* from, const char* to, headers_t& meta, off_t size);

    // methods(valiables)
    // methods(variables)
    CURL* GetCurlHandle(void) const { return hCurl; }
    std::string GetPath(void) const { return path; }
    std::string GetBasePath(void) const { return base_path; }
    std::string GetSpacialSavedPath(void) const { return saved_path; }
    std::string GetUrl(void) const { return url; }
    std::string GetOp(void) const { return op; }
    headers_t* GetResponseHeaders(void) { return &responseHeaders; }
    BodyData* GetBodyData(void) const { return bodydata; }
    BodyData* GetHeadData(void) const { return headdata; }
@@ -441,7 +468,6 @@ class S3fsMultiCurl
  private:
    static int max_multireq;

    CURLM* hMulti;
    s3fscurlmap_t cMap_all; // all of curl requests
    s3fscurlmap_t cMap_req; // curl requests are sent

@@ -453,6 +479,8 @@ class S3fsMultiCurl
    int MultiPerform(void);
    int MultiRead(void);

    static void* RequestPerformWrapper(void* arg);

  public:
    S3fsMultiCurl();
|
||||
~S3fsMultiCurl();
|
||||
@ -477,6 +505,7 @@ struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* d
|
||||
struct curl_slist* curl_slist_sort_insert(struct curl_slist* list, const char* key, const char* value);
|
||||
std::string get_sorted_header_keys(const struct curl_slist* list);
|
||||
std::string get_canonical_headers(const struct curl_slist* list, bool only_amz = false);
|
||||
std::string get_header_value(const struct curl_slist* list, const std::string &key);
|
||||
bool MakeUrlResource(const char* realpath, std::string& resourcepath, std::string& url);
|
||||
std::string prepare_url(const char* url);
|
||||
bool get_object_sse_type(const char* path, sse_type_t& ssetype, std::string& ssevalue); // implement in s3fs.cpp
|
||||
|
||||
540 src/fdcache.cpp
@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2013 Takeshi Nakatani <ggtakec.com>
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -31,6 +31,7 @@
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <dirent.h>
#include <curl/curl.h>
#include <string>
#include <iostream>
@@ -51,7 +52,7 @@ using namespace std;
//------------------------------------------------
// Symbols
//------------------------------------------------
#define MAX_MULTIPART_CNT 10000 // S3 multipart max count
static const int MAX_MULTIPART_CNT = 10 * 1000; // S3 multipart max count

//
// For cache directory top path
@@ -632,7 +633,7 @@ int FdEntity::FillFile(int fd, unsigned char byte, size_t size, off_t start)
// FdEntity methods
//------------------------------------------------
FdEntity::FdEntity(const char* tpath, const char* cpath)
: is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), cachepath(SAFESTRPTR(cpath)),
: is_lock_init(false), refcnt(0), path(SAFESTRPTR(tpath)), cachepath(SAFESTRPTR(cpath)), mirrorpath(""),
fd(-1), pfile(NULL), is_modify(false), size_orgmeta(0), upload_id(""), mp_start(0), mp_size(0)
{
try{
@@ -664,22 +665,31 @@ void FdEntity::Clear(void)
{
AutoLock auto_lock(&fdent_lock);

if(pfile){
if(-1 != fd){
if(0 != cachepath.size()){
CacheFileStat cfstat(path.c_str());
if(!pagelist.Serialize(cfstat, true)){
S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str());
}
}
fclose(pfile);
pfile = NULL;
fd = -1;
if(pfile){
fclose(pfile);
pfile = NULL;
}
fd = -1;

if(!mirrorpath.empty()){
if(-1 == unlink(mirrorpath.c_str())){
S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno);
}
mirrorpath.erase();
}
}
pagelist.Init(0, false);
refcnt = 0;
path = "";
cachepath = "";
is_modify = false;
refcnt = 0;
path = "";
cachepath = "";
is_modify = false;
}

void FdEntity::Close(void)
@@ -699,14 +709,23 @@ void FdEntity::Close(void)
S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str());
}
}
fclose(pfile);
pfile = NULL;
fd = -1;
if(pfile){
fclose(pfile);
pfile = NULL;
}
fd = -1;

if(!mirrorpath.empty()){
if(-1 == unlink(mirrorpath.c_str())){
S3FS_PRN_WARN("failed to remove mirror cache file(%s) by errno(%d).", mirrorpath.c_str(), errno);
}
mirrorpath.erase();
}
}
}
}

int FdEntity::Dup(void)
int FdEntity::Dup()
{
S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt + 1 : refcnt));

@@ -717,14 +736,73 @@ int FdEntity::Dup(void)
return fd;
}

// [NOTE]
// This method does not lock fdent_lock, because FdManager::fd_manager_lock
// is locked before calling.
//
int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
// Open mirror file which is linked cache file.
//
int FdEntity::OpenMirrorFile(void)
{
if(cachepath.empty()){
S3FS_PRN_ERR("cache path is empty, why come here");
return -EIO;
}

// make temporary directory
string bupdir;
if(!FdManager::MakeCachePath(NULL, bupdir, true, true)){
S3FS_PRN_ERR("could not make bup cache directory path or create it.");
return -EIO;
}

// create seed generating mirror file name
unsigned int seed = static_cast<unsigned int>(time(NULL));
int urandom_fd;
if(-1 != (urandom_fd = open("/dev/urandom", O_RDONLY))){
unsigned int rand_data;
if(sizeof(rand_data) == read(urandom_fd, &rand_data, sizeof(rand_data))){
seed ^= rand_data;
}
close(urandom_fd);
}

// try to link mirror file
while(true){
// make random(temp) file path
// (do not care for threading, because allowed any value returned.)
//
char szfile[NAME_MAX + 1];
sprintf(szfile, "%x.tmp", rand_r(&seed));
mirrorpath = bupdir + "/" + szfile;

// link mirror file to cache file
if(0 == link(cachepath.c_str(), mirrorpath.c_str())){
break;
}
if(EEXIST != errno){
S3FS_PRN_ERR("could not link mirror file(%s) to cache file(%s) by errno(%d).", mirrorpath.c_str(), cachepath.c_str(), errno);
return -errno;
}
++seed;
}

// open mirror file
int mirrorfd;
if(-1 == (mirrorfd = open(mirrorpath.c_str(), O_RDWR))){
S3FS_PRN_ERR("could not open mirror file(%s) by errno(%d).", mirrorpath.c_str(), errno);
return -errno;
}
return mirrorfd;
}
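
The mirror scheme above reduces to: hard-link the cache file under a random name in a per-bucket mirror directory, then open the link, so the cleaner can later unlink the original cache path without invalidating this entity's descriptor. A reduced sketch of the link-then-open core (error paths trimmed; bupdir and cachepath as in the function above):

    unsigned int seed = static_cast<unsigned int>(time(NULL));
    std::string mirror;
    for(;;){
        char name[NAME_MAX + 1];
        snprintf(name, sizeof(name), "%x.tmp", rand_r(&seed));   // random candidate name
        mirror = bupdir + "/" + name;
        if(0 == link(cachepath.c_str(), mirror.c_str())){
            break;                      // unique hard link created
        }
        if(EEXIST != errno){
            return -errno;              // real failure, not a name collision
        }
        ++seed;                         // collision: try the next name
    }
    int mirrorfd = open(mirror.c_str(), O_RDWR);  // both links now name the same inode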

int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time, bool no_fd_lock_wait)
{
S3FS_PRN_DBG("[path=%s][fd=%d][size=%jd][time=%jd]", path.c_str(), fd, (intmax_t)size, (intmax_t)time);

AutoLock auto_lock(&fdent_lock, no_fd_lock_wait);
if (!auto_lock.isLockAcquired()) {
// had to wait for fd lock, return
return -EIO;
}

if(-1 != fd){
// already opened, needs to increment refcnt.
Dup();
@@ -763,8 +841,9 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
// open cache and cache stat file, load page info.
CacheFileStat cfstat(path.c_str());

if(pagelist.Serialize(cfstat, false) && -1 != (fd = open(cachepath.c_str(), O_RDWR))){
// success to open cache file
// try to open cache file
if(-1 != (fd = open(cachepath.c_str(), O_RDWR)) && pagelist.Serialize(cfstat, false)){
// succeed to open cache file and to load stats data
struct stat st;
memset(&st, 0, sizeof(struct stat));
if(-1 == fstat(fd, &st)){
@@ -788,8 +867,9 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
is_truncate = true;
}
}

}else{
// could not load stat file or open file
// could not open cache file or could not load stats data, so initialize it.
if(-1 == (fd = open(cachepath.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0600))){
S3FS_PRN_ERR("failed to open file(%s). errno(%d)", cachepath.c_str(), errno);
return (0 == errno ? -EIO : -errno);
@@ -804,6 +884,16 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
}
}

// open mirror file
int mirrorfd;
if(0 >= (mirrorfd = OpenMirrorFile())){
S3FS_PRN_ERR("failed to open mirror file linked cache file(%s).", cachepath.c_str());
return (0 == mirrorfd ? -EIO : mirrorfd);
}
// switch fd
close(fd);
fd = mirrorfd;

// make file pointer(for being same tmpfile)
if(NULL == (pfile = fdopen(fd, "wb"))){
S3FS_PRN_ERR("failed to get fileno(%s). errno(%d)", cachepath.c_str(), errno);
@@ -880,7 +970,7 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time)
}

// [NOTE]
// This method is called from olny nocopapi functions.
// This method is called from only nocopyapi functions.
// So we do not check disk space for this option mode, if there is no enough
// disk space this method will be failed.
//
@@ -918,10 +1008,10 @@ bool FdEntity::OpenAndLoadAll(headers_t* pmeta, size_t* size, bool force_load)

bool FdEntity::GetStats(struct stat& st)
{
AutoLock auto_lock(&fdent_lock);
if(-1 == fd){
return false;
}
AutoLock auto_lock(&fdent_lock);

memset(&st, 0, sizeof(struct stat));
if(-1 == fstat(fd, &st)){
@@ -938,9 +1028,9 @@ int FdEntity::SetMtime(time_t time)
if(-1 == time){
return 0;
}
if(-1 != fd){
AutoLock auto_lock(&fdent_lock);

AutoLock auto_lock(&fdent_lock);
if(-1 != fd){
struct timeval tv[2];
tv[0].tv_sec = time;
tv[0].tv_usec= 0L;
@@ -967,6 +1057,7 @@ int FdEntity::SetMtime(time_t time)

bool FdEntity::UpdateMtime(void)
{
AutoLock auto_lock(&fdent_lock);
struct stat st;
if(!GetStats(st)){
return false;
@@ -988,18 +1079,21 @@ bool FdEntity::GetSize(size_t& size)

bool FdEntity::SetMode(mode_t mode)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-mode"] = str(mode);
return true;
}

bool FdEntity::SetUId(uid_t uid)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-uid"] = str(uid);
return true;
}

bool FdEntity::SetGId(gid_t gid)
{
AutoLock auto_lock(&fdent_lock);
orgmeta["x-amz-meta-gid"] = str(gid);
return true;
}
@@ -1009,6 +1103,7 @@ bool FdEntity::SetContentType(const char* path)
if(!path){
return false;
}
AutoLock auto_lock(&fdent_lock);
orgmeta["Content-Type"] = S3fsCurl::LookupMimeType(string(path));
return true;
}
@@ -1067,7 +1162,7 @@ int FdEntity::Load(off_t start, size_t size)
size_t over_size = (*iter)->bytes - need_load_size;

// download
if(static_cast<size_t>(2 * S3fsCurl::GetMultipartSize()) < need_load_size && !nomultipart){ // default 20MB
if(static_cast<size_t>(2 * S3fsCurl::GetMultipartSize()) <= need_load_size && !nomultipart){ // default 20MB
// parallel request
// Additional time is needed for large files
time_t backup = 0;
@@ -1135,6 +1230,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
FdManager::DeleteCacheFile(path.c_str());
// cache file path does not use no more.
cachepath.erase();
mirrorpath.erase();
}

// Change entity key in manager mapping
@@ -1159,7 +1255,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
if(0 != size && static_cast<size_t>(start + size) <= static_cast<size_t>((*iter)->offset)){
break;
}
// download earch multipart size(default 10MB) in unit
// download each multipart size(default 10MB) in unit
for(size_t oneread = 0, totalread = ((*iter)->offset < start ? start : 0); totalread < (*iter)->bytes; totalread += oneread){
int upload_fd = fd;
off_t offset = (*iter)->offset + totalread;
@@ -1207,7 +1303,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
// after this, file length is (offset + size), but file does not use any disk space.
//
if(-1 == ftruncate(tmpfd, 0) || -1 == ftruncate(tmpfd, (offset + oneread))){
S3FS_PRN_ERR("failed to tatic_cast<size_t>runcate temporary file(%d).", tmpfd);
S3FS_PRN_ERR("failed to truncate temporary file(%d).", tmpfd);
result = -EIO;
break;
}
@@ -1340,7 +1436,7 @@ int FdEntity::NoCacheCompleteMultipartPost(void)

int FdEntity::RowFlush(const char* tpath, bool force_sync)
{
int result;
int result = 0;

S3FS_PRN_INFO3("[tpath=%s][path=%s][fd=%d]", SAFESTRPTR(tpath), path.c_str(), fd);

@@ -1359,10 +1455,12 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
if(0 < restsize){
if(0 == upload_id.length()){
// check disk space
if(FdManager::IsSafeDiskSpace(NULL, restsize)){
if(ReserveDiskSpace(restsize)){
// enough disk space
// Load all unitialized area
if(0 != (result = Load())){
// Load all uninitialized area
result = Load();
FdManager::get()->FreeReservedDiskSpace(restsize);
if(0 != result){
S3FS_PRN_ERR("failed to upload all area(errno=%d)", result);
return static_cast<ssize_t>(result);
}
@@ -1375,7 +1473,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
}
}
}else{
// alreay start miltipart uploading
// already start multipart uploading
}
}

@@ -1465,6 +1563,32 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
return result;
}

// [NOTICE]
// Need to lock before calling this method.
bool FdEntity::ReserveDiskSpace(size_t size)
{
if(FdManager::get()->ReserveDiskSpace(size)){
return true;
}

if(!is_modify){
// try to clear all cache for this fd.
pagelist.Init(pagelist.Size(), false);
if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){
S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd);
return false;
}

if(FdManager::get()->ReserveDiskSpace(size)){
return true;
}
}

FdManager::get()->CleanupCacheDir();

return FdManager::get()->ReserveDiskSpace(size);
}
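
Callers are expected to bracket the download with the reservation, as RowFlush(), Read() and Write() above do: reserve first, load, then always release the reservation before acting on the result. A condensed sketch of that protocol, inferred from the call sites in this diff:

    if(!ent->ReserveDiskSpace(restsize)){
        return -ENOSPC;                                  // could not make room
    }
    int result = ent->Load();                            // may consume the reserved bytes
    FdManager::get()->FreeReservedDiskSpace(restsize);   // release regardless of outcome
    if(0 != result){
        return result;                                   // surface the load error
    }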

ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load)
{
S3FS_PRN_DBG("[path=%s][fd=%d][offset=%jd][size=%zu]", path.c_str(), fd, (intmax_t)start, size);
@@ -1478,28 +1602,10 @@ ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load)
pagelist.SetPageLoadedStatus(start, size, false);
}

int result;
ssize_t rsize;

// check disk space
if(0 < pagelist.GetTotalUnloadedPageSize(start, size)){
if(!FdManager::IsSafeDiskSpace(NULL, size)){
// [NOTE]
// If the area of this entity fd used can be released, try to do it.
// But If file data is updated, we can not even release of fd.
// Fundamentally, this method will fail as long as the disk capacity
// is not ensured.
//
if(!is_modify){
// try to clear all cache for this fd.
pagelist.Init(pagelist.Size(), false);
if(-1 == ftruncate(fd, 0) || -1 == ftruncate(fd, pagelist.Size())){
S3FS_PRN_ERR("failed to truncate temporary file(%d).", fd);
return -ENOSPC;
}
}
}

// load size(for prefetch)
size_t load_size = size;
if(static_cast<size_t>(start + size) < pagelist.Size()){
@@ -1511,8 +1617,25 @@ ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load)
load_size = static_cast<size_t>(pagelist.Size() - start);
}
}

if(!ReserveDiskSpace(load_size)){
S3FS_PRN_WARN("could not reserve disk space for pre-fetch download");
load_size = size;
if(!ReserveDiskSpace(load_size)){
S3FS_PRN_ERR("could not reserve disk space for pre-fetch download");
return -ENOSPC;
}
}

// Loading
if(0 < size && 0 != (result = Load(start, load_size))){
int result = 0;
if(0 < size){
result = Load(start, load_size);
}

FdManager::get()->FreeReservedDiskSpace(load_size);

if(0 != result){
S3FS_PRN_ERR("could not download. start(%jd), size(%zu), errno(%d)", (intmax_t)start, size, result);
return -EIO;
}
@@ -1532,6 +1655,10 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
if(-1 == fd){
return -EBADF;
}
// check if not enough disk space left BEFORE locking fd
if(FdManager::IsCacheDir() && !FdManager::IsSafeDiskSpace(NULL, size)){
FdManager::get()->CleanupCacheDir();
}
AutoLock auto_lock(&fdent_lock);

// check file size
@@ -1545,17 +1672,21 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
pagelist.SetPageLoadedStatus(static_cast<off_t>(pagelist.Size()), static_cast<size_t>(start) - pagelist.Size(), false);
}

int result;
int result = 0;
ssize_t wsize;

if(0 == upload_id.length()){
// check disk space
size_t restsize = pagelist.GetTotalUnloadedPageSize(0, start) + size;
if(FdManager::IsSafeDiskSpace(NULL, restsize)){
if(ReserveDiskSpace(restsize)){
// enough disk space

// Load unitialized area which starts from 0 to (start + size) before writing.
if(0 < start && 0 != (result = Load(0, static_cast<size_t>(start)))){
// Load uninitialized area which starts from 0 to (start + size) before writing.
if(0 < start){
result = Load(0, static_cast<size_t>(start));
}
FdManager::get()->FreeReservedDiskSpace(restsize);
if(0 != result){
S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result);
return static_cast<ssize_t>(result);
}
@@ -1574,7 +1705,7 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
mp_size = 0;
}
}else{
// alreay start miltipart uploading
// already start multipart uploading
}

// Writing
@@ -1613,6 +1744,22 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
return wsize;
}

void FdEntity::CleanupCache()
{
AutoLock auto_lock(&fdent_lock, true);

if (!auto_lock.isLockAcquired()) {
return;
}

if (is_modify) {
// cache is not commited to s3, cannot cleanup
return;
}

FdManager::DeleteCacheFile(path.c_str());
}

//------------------------------------------------
// FdManager symbol
//------------------------------------------------
@@ -1632,12 +1779,15 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
#define NOCACHE_PATH_PREFIX_FORM " __S3FS_UNEXISTED_PATH_%lx__ / " // important space words for simply

//------------------------------------------------
// FdManager class valiable
// FdManager class variable
//------------------------------------------------
FdManager FdManager::singleton;
pthread_mutex_t FdManager::fd_manager_lock;
pthread_mutex_t FdManager::cache_cleanup_lock;
pthread_mutex_t FdManager::reserved_diskspace_lock;
bool FdManager::is_lock_init(false);
string FdManager::cache_dir("");
bool FdManager::check_cache_dir_exist(false);
size_t FdManager::free_disk_space = 0;

//------------------------------------------------
@@ -1648,16 +1798,6 @@ bool FdManager::SetCacheDir(const char* dir)
if(!dir || '\0' == dir[0]){
cache_dir = "";
}else{
// check the directory
struct stat st;
if(0 != stat(dir, &st)){
S3FS_PRN_ERR("could not access to cache directory(%s) by errno(%d).", cache_dir.c_str(), errno);
return false;
}
if(!S_ISDIR(st.st_mode)){
S3FS_PRN_ERR("the cache directory(%s) is not directory.", cache_dir.c_str());
return false;
}
cache_dir = dir;
}
return true;
@@ -1713,13 +1853,23 @@ int FdManager::DeleteCacheFile(const char* path)
return result;
}

bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_create_dir)
bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_create_dir, bool is_mirror_path)
{
if(0 == FdManager::cache_dir.size()){
cache_path = "";
return true;
}
string resolved_path(FdManager::cache_dir + "/" + bucket);

string resolved_path(FdManager::cache_dir);
if(!is_mirror_path){
resolved_path += "/";
resolved_path += bucket;
}else{
resolved_path += "/.";
resolved_path += bucket;
resolved_path += ".mirror";
}

if(is_create_dir){
int result;
if(0 != (result = mkdirp(resolved_path + mydirname(path), 0777))){
@@ -1749,37 +1899,57 @@ bool FdManager::MakeRandomTempPath(const char* path, string& tmppath)
{
char szBuff[64];

sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // warry for performance, but maybe don't warry.
sprintf(szBuff, NOCACHE_PATH_PREFIX_FORM, random()); // worry for performance, but maybe don't worry.
tmppath = szBuff;
tmppath += path ? path : "";
return true;
}

bool FdManager::SetCheckCacheDirExist(bool is_check)
{
bool old = FdManager::check_cache_dir_exist;
FdManager::check_cache_dir_exist = is_check;
return old;
}

bool FdManager::CheckCacheDirExist(void)
{
if(!FdManager::check_cache_dir_exist){
return true;
}
if(0 == FdManager::cache_dir.size()){
return true;
}
// check the directory
struct stat st;
if(0 != stat(cache_dir.c_str(), &st)){
S3FS_PRN_ERR("could not access to cache directory(%s) by errno(%d).", cache_dir.c_str(), errno);
return false;
}
if(!S_ISDIR(st.st_mode)){
S3FS_PRN_ERR("the cache directory(%s) is not directory.", cache_dir.c_str());
return false;
}
return true;
}

size_t FdManager::SetEnsureFreeDiskSpace(size_t size)
{
size_t old = FdManager::free_disk_space;
if(0 == size){
if(0 == FdManager::free_disk_space){
FdManager::free_disk_space = static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount());
}
}else{
if(0 == FdManager::free_disk_space){
FdManager::free_disk_space = max(size, static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()));
}else{
if(static_cast<size_t>(S3fsCurl::GetMultipartSize() * S3fsCurl::GetMaxParallelCount()) <= size){
FdManager::free_disk_space = size;
}
}
}
FdManager::free_disk_space = size;
return old;
}

fsblkcnt_t FdManager::GetFreeDiskSpace(const char* path)
uint64_t FdManager::GetFreeDiskSpace(const char* path)
{
struct statvfs vfsbuf;
string ctoppath;
if(0 < FdManager::cache_dir.size()){
ctoppath = FdManager::cache_dir + "/";
ctoppath = get_exist_directory_path(ctoppath); // existed directory
if(ctoppath != "/"){
ctoppath += "/";
}
}else{
ctoppath = TMPFILE_DIR_0PATH "/";
}
@@ -1792,12 +1962,12 @@ fsblkcnt_t FdManager::GetFreeDiskSpace(const char* path)
S3FS_PRN_ERR("could not get vfs stat by errno(%d)", errno);
return 0;
}
return (vfsbuf.f_bavail * vfsbuf.f_bsize);
return (vfsbuf.f_bavail * vfsbuf.f_frsize);
}
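
The switch from f_bsize to f_frsize matters because POSIX defines f_bavail (and the other block counts in struct statvfs) in units of f_frsize, the fundamental file system block size; f_bsize is only the preferred I/O size and can differ. A minimal sketch of the corrected calculation, with a hypothetical helper name:

    #include <sys/statvfs.h>
    #include <stdint.h>

    uint64_t free_bytes_under(const char* dir)       // hypothetical helper
    {
        struct statvfs vfsbuf;
        if(0 != statvfs(dir, &vfsbuf)){
            return 0;                                // treat errors as "no space known"
        }
        return static_cast<uint64_t>(vfsbuf.f_bavail) * vfsbuf.f_frsize;
    }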

bool FdManager::IsSafeDiskSpace(const char* path, size_t size)
{
fsblkcnt_t fsize = FdManager::GetFreeDiskSpace(path);
uint64_t fsize = FdManager::GetFreeDiskSpace(path);
return ((size + FdManager::GetEnsureFreeDiskSpace()) <= fsize);
}

@@ -1809,6 +1979,8 @@ FdManager::FdManager()
if(this == FdManager::get()){
try{
pthread_mutex_init(&FdManager::fd_manager_lock, NULL);
pthread_mutex_init(&FdManager::cache_cleanup_lock, NULL);
pthread_mutex_init(&FdManager::reserved_diskspace_lock, NULL);
FdManager::is_lock_init = true;
}catch(exception& e){
FdManager::is_lock_init = false;
@@ -1831,6 +2003,8 @@ FdManager::~FdManager()
if(FdManager::is_lock_init){
try{
pthread_mutex_destroy(&FdManager::fd_manager_lock);
pthread_mutex_destroy(&FdManager::cache_cleanup_lock);
pthread_mutex_destroy(&FdManager::reserved_diskspace_lock);
}catch(exception& e){
S3FS_PRN_CRIT("failed to init mutex");
}
@@ -1858,7 +2032,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
if(-1 != existfd){
for(iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->GetFd() == existfd){
// found opend fd in map
// found opened fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
return (*iter).second;
}
@@ -1871,52 +2045,69 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
return NULL;
}

FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time_t time, bool force_tmpfile, bool is_create)
FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time_t time, bool force_tmpfile, bool is_create, bool no_fd_lock_wait)
{
S3FS_PRN_DBG("[path=%s][size=%jd][time=%jd]", SAFESTRPTR(path), (intmax_t)size, (intmax_t)time);

if(!path || '\0' == path[0]){
return NULL;
}
AutoLock auto_lock(&FdManager::fd_manager_lock);
FdEntity* ent;
{
AutoLock auto_lock(&FdManager::fd_manager_lock);

fdent_map_t::iterator iter = fent.find(string(path));
FdEntity* ent;
if(fent.end() != iter){
// found
ent = (*iter).second;
// search in mapping by key(path)
fdent_map_t::iterator iter = fent.find(string(path));

}else if(is_create){
// not found
string cache_path = "";
if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){
S3FS_PRN_ERR("failed to make cache path for object(%s).", path);
if(fent.end() == iter && !force_tmpfile && !FdManager::IsCacheDir()){
// If the cache directory is not specified, s3fs opens a temporary file
// when the file is opened.
// Then if it could not find a entity in map for the file, s3fs should
// search a entity in all which opened the temporary file.
//
for(iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->IsOpen() && 0 == strcmp((*iter).second->GetPath(), path)){
break; // found opened fd in mapping
}
}
}

if(fent.end() != iter){
// found
ent = (*iter).second;

}else if(is_create){
// not found
string cache_path = "";
if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){
S3FS_PRN_ERR("failed to make cache path for object(%s).", path);
return NULL;
}
// make new obj
ent = new FdEntity(path, cache_path.c_str());

if(0 < cache_path.size()){
// using cache
fent[string(path)] = ent;
}else{
// not using cache, so the key of fdentity is set not really existing path.
// (but not strictly unexisting path.)
//
// [NOTE]
// The reason why this process here, please look at the definition of the
// comments of NOCACHE_PATH_PREFIX_FORM symbol.
//
string tmppath("");
FdManager::MakeRandomTempPath(path, tmppath);
fent[tmppath] = ent;
}
}else{
return NULL;
}
// make new obj
ent = new FdEntity(path, cache_path.c_str());

if(0 < cache_path.size()){
// using cache
fent[string(path)] = ent;
}else{
// not using cache, so the key of fdentity is set not really existsing path.
// (but not strictly unexisting path.)
//
// [NOTE]
// The reason why this process here, please look at the definition of the
// comments of NOCACHE_PATH_PREFIX_FORM symbol.
//
string tmppath("");
FdManager::MakeRandomTempPath(path, tmppath);
fent[tmppath] = ent;
}
}else{
return NULL;
}

// open
if(-1 == ent->Open(pmeta, size, time)){
if(0 != ent->Open(pmeta, size, time, no_fd_lock_wait)){
return NULL;
}
return ent;
@@ -1935,7 +2126,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existf

for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second && (*iter).second->IsOpen() && (ignore_existfd || ((*iter).second->GetFd() == existfd))){
// found opend fd in map
// found opened fd in map
if(0 == strcmp((*iter).second->GetPath(), path)){
ent = (*iter).second;
ent->Dup();
@@ -1952,6 +2143,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd, bool ignore_existf

void FdManager::Rename(const std::string &from, const std::string &to)
{
AutoLock auto_lock(&FdManager::fd_manager_lock);
fdent_map_t::iterator iter = fent.find(from);
if(fent.end() != iter){
// found
@@ -1967,16 +2159,30 @@ bool FdManager::Close(FdEntity* ent)
{
S3FS_PRN_DBG("[ent->file=%s][ent->fd=%d]", ent ? ent->GetPath() : "", ent ? ent->GetFd() : -1);

if(!ent){
return true; // returns success
}

AutoLock auto_lock(&FdManager::fd_manager_lock);

for(fdent_map_t::iterator iter = fent.begin(); iter != fent.end(); ++iter){
if((*iter).second == ent){
ent->Close();
if(!ent->IsOpen()){
delete (*iter).second;
fent.erase(iter);
return true;
// remove found entity from map.
fent.erase(iter++);

// check another key name for entity value to be on the safe side
for(; iter != fent.end(); ){
if((*iter).second == ent){
fent.erase(iter++);
}else{
++iter;
}
}
delete ent;
}
return true;
}
}
return false;
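
The erase(iter++) form in the rewritten Close() is the pre-C++11 idiom for erasing from a std::map while walking it: the post-increment hands erase() the old position after the iterator has already moved past it, so the invalidated iterator is never touched again. A minimal sketch, with a hypothetical match condition:

    for(fdent_map_t::iterator it = fent.begin(); it != fent.end(); ){
        if(it->second == ent){          // hypothetical match condition
            fent.erase(it++);           // advance first, then erase the old slot
        }else{
            ++it;
        }
    }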
@@ -2000,6 +2206,88 @@ bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path)
return false;
}

void FdManager::CleanupCacheDir()
{
S3FS_PRN_INFO("cache cleanup requested");

if(!FdManager::IsCacheDir()){
return;
}

AutoLock auto_lock_no_wait(&FdManager::cache_cleanup_lock, true);

if(auto_lock_no_wait.isLockAcquired()){
S3FS_PRN_INFO("cache cleanup started");
CleanupCacheDirInternal("");
S3FS_PRN_INFO("cache cleanup ended");
}else{
// wait for other thread to finish cache cleanup
AutoLock auto_lock(&FdManager::cache_cleanup_lock);
}
}
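
The shape of CleanupCacheDir() is a "single worker" pattern: the first thread wins the trylock and performs the cleanup, while latecomers block on the same mutex solely to wait until that pass completes, then return without repeating the work. Compressed to its essentials:

    AutoLock no_wait(&FdManager::cache_cleanup_lock, true);   // trylock
    if(no_wait.isLockAcquired()){
        CleanupCacheDirInternal("");                          // we are the worker
    }else{
        AutoLock wait(&FdManager::cache_cleanup_lock);        // just wait for the worker
    }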

void FdManager::CleanupCacheDirInternal(const std::string &path)
{
DIR* dp;
struct dirent* dent;
std::string abs_path = cache_dir + "/" + bucket + path;

if(NULL == (dp = opendir(abs_path.c_str()))){
S3FS_PRN_ERR("could not open cache dir(%s) - errno(%d)", abs_path.c_str(), errno);
return;
}

for(dent = readdir(dp); dent; dent = readdir(dp)){
if(0 == strcmp(dent->d_name, "..") || 0 == strcmp(dent->d_name, ".")){
continue;
}
string fullpath = abs_path;
fullpath += "/";
fullpath += dent->d_name;
struct stat st;
if(0 != lstat(fullpath.c_str(), &st)){
S3FS_PRN_ERR("could not get stats of file(%s) - errno(%d)", fullpath.c_str(), errno);
closedir(dp);
return;
}
string next_path = path + "/" + dent->d_name;
if(S_ISDIR(st.st_mode)){
CleanupCacheDirInternal(next_path);
}else{
FdEntity* ent;
if(NULL == (ent = FdManager::get()->Open(next_path.c_str(), NULL, -1, -1, false, true, true))){
S3FS_PRN_DBG("skipping locked file: %s", next_path.c_str());
continue;
}

if(ent->IsMultiOpened()){
S3FS_PRN_DBG("skipping opened file: %s", next_path.c_str());
}else{
ent->CleanupCache();
S3FS_PRN_DBG("cleaned up: %s", next_path.c_str());
}
Close(ent);
}
}
closedir(dp);
}

bool FdManager::ReserveDiskSpace(size_t size)
{
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
if(IsSafeDiskSpace(NULL, size)){
free_disk_space += size;
return true;
}
return false;
}

void FdManager::FreeReservedDiskSpace(size_t size)
{
AutoLock auto_lock(&FdManager::reserved_diskspace_lock);
free_disk_space -= size;
}

/*
* Local variables:
* tab-width: 4

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -117,6 +117,7 @@ class FdEntity
std::string path; // object path
std::string cachepath; // local cache file path
// (if this is empty, does not load/save pagelist.)
std::string mirrorpath; // mirror file path to local cache file path
int fd; // file descriptor(tmp file or cache file)
FILE* pfile; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true
@@ -132,6 +133,7 @@ class FdEntity
static int FillFile(int fd, unsigned char byte, size_t size, off_t start);

void Clear(void);
int OpenMirrorFile(void);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
//bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
bool SetAllStatusUnloaded(void) { return SetAllStatus(false); }
@@ -142,9 +144,10 @@ class FdEntity

void Close(void);
bool IsOpen(void) const { return (-1 != fd); }
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1);
bool IsMultiOpened(void) const { return refcnt > 1; }
int Open(headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool no_fd_lock_wait = false);
bool OpenAndLoadAll(headers_t* pmeta = NULL, size_t* size = NULL, bool force_load = false);
int Dup(void);
int Dup();

const char* GetPath(void) const { return path.c_str(); }
void SetPath(const std::string &newpath) { path = newpath; }
@@ -170,6 +173,9 @@ class FdEntity

ssize_t Read(char* bytes, off_t start, size_t size, bool force_load = false);
ssize_t Write(const char* bytes, off_t start, size_t size);

bool ReserveDiskSpace(size_t size);
void CleanupCache();
};
typedef std::map<std::string, class FdEntity*> fdent_map_t; // key=path, value=FdEntity*

@@ -181,14 +187,18 @@ class FdManager
private:
static FdManager singleton;
static pthread_mutex_t fd_manager_lock;
static pthread_mutex_t cache_cleanup_lock;
static pthread_mutex_t reserved_diskspace_lock;
static bool is_lock_init;
static std::string cache_dir;
static bool check_cache_dir_exist;
static size_t free_disk_space; // limit free disk space

fdent_map_t fent;

private:
static fsblkcnt_t GetFreeDiskSpace(const char* path);
static uint64_t GetFreeDiskSpace(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");

public:
FdManager();
@@ -202,21 +212,25 @@ class FdManager
static bool SetCacheDir(const char* dir);
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true);
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
static bool CheckCacheTopDir(void);
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
static bool SetCheckCacheDirExist(bool is_check);
static bool CheckCacheDirExist(void);

static size_t GetEnsureFreeDiskSpace(void) { return FdManager::free_disk_space; }
static size_t SetEnsureFreeDiskSpace(size_t size);
static size_t InitEnsureFreeDiskSpace(void) { return SetEnsureFreeDiskSpace(0); }
static bool IsSafeDiskSpace(const char* path, size_t size);
static void FreeReservedDiskSpace(size_t size);
bool ReserveDiskSpace(size_t size);

FdEntity* GetFdEntity(const char* path, int existfd = -1);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true);
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
void Rename(const std::string &from, const std::string &to);
bool Close(FdEntity* ent);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
void CleanupCacheDir();
};

#endif // FD_CACHE_H_

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -74,6 +74,11 @@ bool s3fs_init_global_ssl(void)
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
return false;
}
#ifndef USE_GNUTLS_NETTLE
if(NULL == gcry_check_version(NULL)){
return false;
}
#endif // USE_GNUTLS_NETTLE
return true;
}

@@ -103,11 +108,11 @@ bool s3fs_destroy_crypt_mutex(void)

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}

if(NULL == (*digest = (unsigned char*)malloc(SHA1_DIGEST_SIZE))){
if(NULL == (*digest = reinterpret_cast<unsigned char*>(malloc(SHA1_DIGEST_SIZE)))){
return false;
}

@@ -122,11 +127,11 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}

if(NULL == (*digest = (unsigned char*)malloc(SHA256_DIGEST_SIZE))){
if(NULL == (*digest = reinterpret_cast<unsigned char*>(malloc(SHA256_DIGEST_SIZE)))){
return false;
}

@@ -143,14 +148,14 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz

bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}

if(0 >= (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA1))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
if(NULL == (*digest = reinterpret_cast<unsigned char*>(malloc(*digestlen + 1)))){
return false;
}
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA1, key, keylen, data, datalen, *digest)){
@@ -163,14 +168,14 @@ bool s3fs_HMAC(const void* key, size_t keylen, const unsigned char* data, size_t

bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}

if(0 >= (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
if(0 == (*digestlen = gnutls_hmac_get_len(GNUTLS_MAC_SHA256))){
return false;
}
if(NULL == (*digest = (unsigned char*)malloc(*digestlen + 1))){
if(NULL == (*digest = reinterpret_cast<unsigned char*>(malloc(*digestlen + 1)))){
return false;
}
if(0 > gnutls_hmac_fast(GNUTLS_MAC_SHA256, key, keylen, data, datalen, *digest)){
@@ -186,11 +191,9 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
//-------------------------------------------------------------------
// Utility Function for MD5
//-------------------------------------------------------------------
#define MD5_DIGEST_LENGTH 16

size_t get_md5_digest_length(void)
{
return MD5_DIGEST_LENGTH;
return 16;
}

#ifdef USE_GNUTLS_NETTLE
@@ -223,7 +226,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
md5_update(&ctx_md5, bytes, buf);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_md5_digest_length())))){
return NULL;
}
md5_digest(&ctx_md5, get_md5_digest_length(), result);
@@ -274,12 +277,14 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_md5);
return NULL;
}
gcry_md_write(ctx_md5, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_md5_digest_length())))){
gcry_md_close(ctx_md5);
return NULL;
}
memcpy(result, gcry_md_read(ctx_md5, 0), get_md5_digest_length());
@@ -298,11 +303,9 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
//-------------------------------------------------------------------
// Utility Function for SHA256
//-------------------------------------------------------------------
#define SHA256_DIGEST_LENGTH 32

size_t get_sha256_digest_length(void)
{
return SHA256_DIGEST_LENGTH;
return 32;
}

#ifdef USE_GNUTLS_NETTLE
@@ -350,7 +353,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
sha256_update(&ctx_sha256, bytes, buf);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_sha256_digest_length())))){
return NULL;
}
sha256_digest(&ctx_sha256, get_sha256_digest_length(), result);
@@ -422,12 +425,14 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
gcry_md_close(ctx_sha256);
return NULL;
}
gcry_md_write(ctx_sha256, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_sha256_digest_length())))){
gcry_md_close(ctx_sha256);
return NULL;
}
memcpy(result, gcry_md_read(ctx_sha256, 0), get_sha256_digest_length());

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -54,8 +54,12 @@ const char* s3fs_crypt_lib_name(void)
//-------------------------------------------------------------------
bool s3fs_init_global_ssl(void)
{
NSS_Init(NULL);
NSS_NoDB_Init(NULL);
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);

if(SECSuccess != NSS_NoDB_Init(NULL)){
S3FS_PRN_ERR("Failed NSS_NoDB_Init call.");
return false;
}
return true;
}

@@ -85,7 +89,7 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}

@@ -124,7 +128,7 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
PK11_FreeSymKey(pKey);
PK11_FreeSlot(Slot);

if(NULL == (*digest = (unsigned char*)malloc(*digestlen))){
if(NULL == (*digest = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
return false;
}
memcpy(*digest, tmpdigest, *digestlen);
@@ -183,12 +187,13 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
}else if(-1 == bytes){
// error
S3FS_PRN_ERR("file read error(%d)", errno);
PK11_DestroyContext(md5ctx, PR_TRUE);
return NULL;
}
PK11_DigestOp(md5ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_md5_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_md5_digest_length())))){
PK11_DestroyContext(md5ctx, PR_TRUE);
return NULL;
}
@@ -269,7 +274,7 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
PK11_DigestOp(sha256ctx, buf, bytes);
memset(buf, 0, 512);
}
if(NULL == (result = (unsigned char*)malloc(get_sha256_digest_length()))){
if(NULL == (result = reinterpret_cast<unsigned char*>(malloc(get_sha256_digest_length())))){
PK11_DestroyContext(sha256ctx, PR_TRUE);
return NULL;
}

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -185,7 +185,7 @@ bool s3fs_destroy_crypt_mutex(void)
//-------------------------------------------------------------------
static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* data, size_t datalen, unsigned char** digest, unsigned int* digestlen, bool is_sha256)
{
if(!key || 0 >= keylen || !data || 0 >= datalen || !digest || !digestlen){
if(!key || !data || !digest || !digestlen){
return false;
}
(*digestlen) = EVP_MAX_MD_SIZE * sizeof(unsigned char);

1119 src/s3fs.cpp
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -21,7 +21,8 @@
#define S3FS_S3_H_

#define FUSE_USE_VERSION 26
#define FIVE_GB 5368709120LL

static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;

#include <fuse.h>

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License

@@ -1,7 +1,7 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright 2007-2013 Takeshi Nakatani <ggtakec.com>
* Copyright(C) 2007 Takeshi Nakatani <ggtakec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -46,7 +46,7 @@
using namespace std;

//-------------------------------------------------------------------
// Global valiables
// Global variables
//-------------------------------------------------------------------
std::string mount_prefix = "";

@@ -60,20 +60,6 @@ string get_realpath(const char *path) {
return realpath;
}

inline headers_t::const_iterator find_content_type(headers_t& meta)
{
headers_t::const_iterator iter;

if(meta.end() == (iter = meta.find("Content-Type"))){
if(meta.end() == (iter = meta.find("Content-type"))){
if(meta.end() == (iter = meta.find("content-type"))){
iter = meta.find("content-Type");
}
}
}
return iter;
}

//-------------------------------------------------------------------
// Class S3ObjList
//-------------------------------------------------------------------
@@ -124,7 +110,7 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
if(objects.end() != (iter = objects.find(chkname))){
// found "dir/" object --> not add new object.
// and add normalization
return insert_nomalized(orgname.c_str(), chkname.c_str(), true);
return insert_normalized(orgname.c_str(), chkname.c_str(), true);
}
}

@@ -149,10 +135,10 @@ bool S3ObjList::insert(const char* name, const char* etag, bool is_dir)
}

// add normalization
return insert_nomalized(orgname.c_str(), newname.c_str(), is_dir);
return insert_normalized(orgname.c_str(), newname.c_str(), is_dir);
}

bool S3ObjList::insert_nomalized(const char* name, const char* normalized, bool is_dir)
bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool is_dir)
{
if(!name || '\0' == name[0] || !normalized || '\0' == normalized[0]){
return false;
@@ -439,14 +425,25 @@ void free_mvnodes(MVNODE *head)
//-------------------------------------------------------------------
// Class AutoLock
//-------------------------------------------------------------------
AutoLock::AutoLock(pthread_mutex_t* pmutex) : auto_mutex(pmutex)
AutoLock::AutoLock(pthread_mutex_t* pmutex, bool no_wait) : auto_mutex(pmutex)
{
pthread_mutex_lock(auto_mutex);
if (no_wait) {
is_lock_acquired = pthread_mutex_trylock(auto_mutex) == 0;
} else {
is_lock_acquired = pthread_mutex_lock(auto_mutex) == 0;
}
}

bool AutoLock::isLockAcquired() const
{
return is_lock_acquired;
}

AutoLock::~AutoLock()
{
pthread_mutex_unlock(auto_mutex);
if (is_lock_acquired) {
pthread_mutex_unlock(auto_mutex);
}
}
|
||||
|
||||
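The new no_wait flag turns the RAII guard into a try-lock, and the destructor now only unlocks what was actually acquired. A minimal usage sketch against the AutoLock declared in this changeset; trim_cache_if_idle is a hypothetical caller, for illustration only:

#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

void trim_cache_if_idle()
{
  // no_wait == true: if another thread holds the mutex, skip the work
  // instead of blocking on it.
  AutoLock lock(&cache_lock, true);
  if(!lock.isLockAcquired()){
    return;
  }
  // ... mutate the shared structure; ~AutoLock unlocks on scope exit ...
}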
//-------------------------------------------------------------------
|
||||
@ -455,7 +452,8 @@ AutoLock::~AutoLock()
|
||||
// get user name from uid
|
||||
string get_username(uid_t uid)
|
||||
{
|
||||
static size_t maxlen = 0; // set onece
|
||||
static size_t maxlen = 0; // set once
|
||||
int result;
|
||||
char* pbuf;
|
||||
struct passwd pwinfo;
|
||||
struct passwd* ppwinfo = NULL;
|
||||
@@ -464,9 +462,17 @@ string get_username(uid_t uid)
   if(0 == maxlen){
     long res = sysconf(_SC_GETPW_R_SIZE_MAX);
     if(0 > res){
-      S3FS_PRN_WARN("could not get max pw length.");
-      maxlen = 0;
-      return string("");
+      // SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and
+      // _SC_GETPW_R_SIZE_MAX:
+      // Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if
+      // there is no hard limit on the size of the buffer needed to
+      // store all the groups returned.
+      if (errno != 0){
+        S3FS_PRN_WARN("could not get max pw length.");
+        maxlen = 0;
+        return string("");
+      }
+      res = 1024; // default initial length
     }
     maxlen = res;
   }
@@ -474,12 +480,22 @@ string get_username(uid_t uid)
     S3FS_PRN_CRIT("failed to allocate memory.");
     return string("");
   }
-  // get group information
-  if(0 != getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo)){
-    S3FS_PRN_WARN("could not get pw information.");
+  // get pw information
+  while(ERANGE == (result = getpwuid_r(uid, &pwinfo, pbuf, maxlen, &ppwinfo))){
+    free(pbuf);
+    maxlen *= 2;
+    if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
+      S3FS_PRN_CRIT("failed to allocate memory.");
+      return string("");
+    }
+  }
+
+  if(0 != result){
+    S3FS_PRN_ERR("could not get pw information(%d).", result);
     free(pbuf);
     return string("");
   }
+
   // check pw
   if(NULL == ppwinfo){
     free(pbuf);
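The ERANGE loop is the standard idiom for getpwuid_r: the size suggested by sysconf is only a hint, so the buffer is doubled until the call stops failing for lack of space. The same pattern in a standalone sketch (username_of is an illustrative helper, not the s3fs function):

#include <errno.h>
#include <pwd.h>
#include <stdlib.h>
#include <string>
#include <sys/types.h>

std::string username_of(uid_t uid)
{
  size_t len = 1024;  // initial guess; _SC_GETPW_R_SIZE_MAX is only a hint
  char* buf = static_cast<char*>(malloc(len));
  if(!buf){
    return std::string("");
  }
  struct passwd pw;
  struct passwd* found = NULL;
  int rc;
  // getpwuid_r reports ERANGE when the caller's buffer is too small,
  // so double it and retry until the lookup succeeds or really fails.
  while(ERANGE == (rc = getpwuid_r(uid, &pw, buf, len, &found))){
    free(buf);
    len *= 2;
    if(NULL == (buf = static_cast<char*>(malloc(len)))){
      return std::string("");
    }
  }
  std::string name = (0 == rc && found) ? found->pw_name : std::string("");
  free(buf);
  return name;
}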
@@ -490,9 +506,9 @@ string get_username(uid_t uid)
   return name;
 }

-int is_uid_inculde_group(uid_t uid, gid_t gid)
+int is_uid_include_group(uid_t uid, gid_t gid)
 {
-  static size_t maxlen = 0; // set onece
+  static size_t maxlen = 0; // set once
   int result;
   char* pbuf;
   struct group ginfo;
@@ -501,10 +517,18 @@ int is_uid_include_group(uid_t uid, gid_t gid)
   // make buffer
   if(0 == maxlen){
     long res = sysconf(_SC_GETGR_R_SIZE_MAX);
-    if(0 > res){
-      S3FS_PRN_ERR("could not get max name length.");
-      maxlen = 0;
-      return -ERANGE;
+    if(0 > res) {
+      // SUSv4tc1 says the following about _SC_GETGR_R_SIZE_MAX and
+      // _SC_GETPW_R_SIZE_MAX:
+      // Note that sysconf(_SC_GETGR_R_SIZE_MAX) may return -1 if
+      // there is no hard limit on the size of the buffer needed to
+      // store all the groups returned.
+      if (errno != 0) {
+        S3FS_PRN_ERR("could not get max name length.");
+        maxlen = 0;
+        return -ERANGE;
+      }
+      res = 1024; // default initial length
     }
     maxlen = res;
   }
@@ -513,8 +537,17 @@ int is_uid_include_group(uid_t uid, gid_t gid)
     return -ENOMEM;
   }
   // get group information
-  if(0 != (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
-    S3FS_PRN_ERR("could not get group information.");
+  while(ERANGE == (result = getgrgid_r(gid, &ginfo, pbuf, maxlen, &pginfo))){
+    free(pbuf);
+    maxlen *= 2;
+    if(NULL == (pbuf = (char*)malloc(sizeof(char) * maxlen))){
+      S3FS_PRN_CRIT("failed to allocate memory.");
+      return -ENOMEM;
+    }
+  }
+
+  if(0 != result){
+    S3FS_PRN_ERR("could not get group information(%d).", result);
     free(pbuf);
     return -result;
   }
@@ -545,6 +578,14 @@ int is_uid_include_group(uid_t uid, gid_t gid)
 //-------------------------------------------------------------------
 // safe variant of dirname
 // dirname clobbers path so let it operate on a tmp copy
+string mydirname(const char* path)
+{
+  if(!path || '\0' == path[0]){
+    return string("");
+  }
+  return mydirname(string(path));
+}
+
 string mydirname(string path)
 {
   return string(dirname((char*)path.c_str()));
@@ -552,6 +593,14 @@ string mydirname(string path)

 // safe variant of basename
 // basename clobbers path so let it operate on a tmp copy
+string mybasename(const char* path)
+{
+  if(!path || '\0' == path[0]){
+    return string("");
+  }
+  return mybasename(string(path));
+}
+
 string mybasename(string path)
 {
   return string(basename((char*)path.c_str()));
@@ -572,7 +621,7 @@ int mkdirp(const string& path, mode_t mode)
       return EPERM;
     }
   }else{
-    if(0 != mkdir(base.c_str(), mode)){
+    if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){
      return errno;
     }
   }
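Tolerating EEXIST makes the directory creation idempotent: a component that already exists (perhaps created by a concurrent caller) is no longer reported as an error. A self-contained sketch of the recursive-create loop this one-line fix belongs to, assuming an absolute path; mkdirp_sketch is illustrative, not the s3fs code:

#include <errno.h>
#include <sstream>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

int mkdirp_sketch(const std::string& path, mode_t mode)
{
  std::string base;
  std::string component;
  std::stringstream ss(path);
  // Create each prefix of the path in turn: /a, /a/b, /a/b/c, ...
  while(std::getline(ss, component, '/')){
    if(component.empty()){
      continue;  // leading '/' or doubled slashes
    }
    base += "/" + component;
    if(0 != mkdir(base.c_str(), mode) && errno != EEXIST){
      return errno;  // a real failure such as EACCES or ENOTDIR
    }
  }
  return 0;
}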
@@ -580,6 +629,28 @@ int mkdirp(const string& path, mode_t mode)
   return 0;
 }

+// get existed directory path
+string get_exist_directory_path(const string& path)
+{
+  string existed("/"); // "/" is existed.
+  string base;
+  string component;
+  stringstream ss(path);
+  while (getline(ss, component, '/')) {
+    if(base != "/"){
+      base += "/";
+    }
+    base += component;
+    struct stat st;
+    if(0 == stat(base.c_str(), &st) && S_ISDIR(st.st_mode)){
+      existed = base;
+    }else{
+      break;
+    }
+  }
+  return existed;
+}
+
 bool check_exist_dir_permission(const char* dirpath)
 {
   if(!dirpath || '\0' == dirpath[0]){
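get_exist_directory_path walks the path component by component and returns the deepest prefix that already exists as a directory, so a cache-directory check can report exactly where a configured path stops being valid. A small usage sketch; the path and main() are illustrative assumptions:

#include <iostream>
#include <string>

std::string get_exist_directory_path(const std::string& path);  // from the hunk above

int main()
{
  // Assuming only /var and /var/cache exist on this machine, the deepest
  // existing directory prefix of the argument is printed: "/var/cache".
  std::cout << get_exist_directory_path("/var/cache/s3fs/bucket") << std::endl;
  return 0;
}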
@@ -597,7 +668,7 @@ bool check_exist_dir_permission(const char* dirpath)
       // could not access directory
       return false;
     }
-    // somthing error occured
+    // something error occurred
     return false;
   }
@@ -614,7 +685,7 @@ bool check_exist_dir_permission(const char* dirpath)
       return false;
     }
   }else{
-    if(1 == is_uid_inculde_group(myuid, st.st_gid)){
+    if(1 == is_uid_include_group(myuid, st.st_gid)){
       if(S_IRWXG != (st.st_mode & S_IRWXG)){
         return false;
       }
@@ -701,8 +772,8 @@ off_t get_size(const char *s)

 off_t get_size(headers_t& meta)
 {
-  headers_t::const_iterator iter;
-  if(meta.end() == (iter = meta.find("Content-Length"))){
+  headers_t::const_iterator iter = meta.find("Content-Length");
+  if(meta.end() == iter){
     return 0;
   }
   return get_size((*iter).second.c_str());
@@ -736,7 +807,7 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
   if(forcedir){
     mode |= S_IFDIR;
   }else{
-    if(meta.end() != (iter = find_content_type(meta))){
+    if(meta.end() != (iter = meta.find("Content-Type"))){
       string strConType = (*iter).second;
       // Leave just the mime type, remove any optional parameters (eg charset)
       string::size_type pos = strConType.find(";");
@@ -749,7 +820,19 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
       if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
         mode |= S_IFDIR;
       }else{
-        mode |= S_IFREG;
+        if(complement_stat){
+          // If complement lack stat mode, when the object has '/' charactor at end of name
+          // and content type is text/plain and the object's size is 0 or 1, it should be
+          // directory.
+          off_t size = get_size(meta);
+          if(strConType == "text/plain" && (0 == size || 1 == size)){
+            mode |= S_IFDIR;
+          }else{
+            mode |= S_IFREG;
+          }
+        }else{
+          mode |= S_IFREG;
+        }
       }
     }else{
       mode |= S_IFREG;
@@ -759,6 +842,11 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
         }
       }
     }
+    // If complement lack stat mode, when it's mode is not set any permission,
+    // the object is added minimal mode only for read permission.
+    if(complement_stat && 0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
+      mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
+    }
   }else{
     if(!checkdir){
       // cut dir/reg flag.
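The complement rule, isolated: when an object carries no permission bits at all, the owner gets read access, plus execute when the object is a directory so it stays traversable. S_IFDIR|0000 becomes S_IFDIR|0500 and S_IFREG|0000 becomes S_IFREG|0400. A worked sketch; complement_minimal_mode is an illustrative name:

#include <sys/stat.h>

mode_t complement_minimal_mode(mode_t mode)
{
  // Only fires when no user/group/other permission bit is set.
  if(0 == (mode & (S_IRWXU | S_IRWXG | S_IRWXO))){
    // Owner read always; owner execute only for directories.
    mode |= (S_IRUSR | (0 == (mode & S_IFDIR) ? 0 : S_IXUSR));
  }
  return mode;
}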
@@ -831,8 +919,8 @@ time_t get_lastmodified(const char* s)

 time_t get_lastmodified(headers_t& meta)
 {
-  headers_t::const_iterator iter;
-  if(meta.end() == (iter = meta.find("Last-Modified"))){
+  headers_t::const_iterator iter = meta.find("Last-Modified");
+  if(meta.end() == iter){
     return 0;
   }
   return get_lastmodified((*iter).second.c_str());
@@ -864,7 +952,7 @@ bool is_need_check_obj_detail(headers_t& meta)
   }
   // if there is not Content-Type, or Content-Type is "x-directory",
   // checking is no more.
-  if(meta.end() == (iter = find_content_type(meta))){
+  if(meta.end() == (iter = meta.find("Content-Type"))){
     return false;
   }
   if("application/x-directory" == (*iter).second){
@@ -889,6 +977,17 @@ void show_help (void)
    "\n"
    "Mount an Amazon S3 bucket as a file system.\n"
    "\n"
+   "Usage:\n"
+   " mounting\n"
+   "   s3fs bucket[:/path] mountpoint [options]\n"
+   "   s3fs mountpoint [options(must specify bucket= option)]\n"
+   "\n"
+   " umounting\n"
+   "   umount mountpoint\n"
+   "\n"
+   " utility mode (remove interrupted multipart uploading objects)\n"
+   "   s3fs -u bucket\n"
+   "\n"
    " General forms for s3fs and FUSE/mount options:\n"
    "   -o opt[,opt...]\n"
    "   -o opt [-o opt] ...\n"
@@ -899,17 +998,27 @@ void show_help (void)
    "\n"
    "   <option_name>=<option_value>\n"
    "\n"
-   "   default_acl (default=\"private\")\n"
-   "     - the default canned acl to apply to all written s3 objects\n"
-   "       see http://aws.amazon.com/documentation/s3/ for the \n"
-   "       full list of canned acls\n"
+   "   bucket\n"
+   "      - if it is not specified bucket name(and path) in command line,\n"
+   "        must specify this option after -o option for bucket name.\n"
    "\n"
-   "   retries (default=\"2\")\n"
+   "   default_acl (default=\"private\")\n"
+   "     - the default canned acl to apply to all written s3 objects,\n"
+   "       e.g., private, public-read. empty string means do not send\n"
+   "       header. see http://aws.amazon.com/documentation/s3/ for the\n"
+   "       full list of canned acls\n"
+   "\n"
+   "   retries (default=\"5\")\n"
    "      - number of times to retry a failed s3 transaction\n"
    "\n"
    "   use_cache (default=\"\" which means disabled)\n"
    "      - local folder to use for local file cache\n"
    "\n"
+   "   check_cache_dir_exist (default is disable)\n"
+   "      - if use_cache is set, check if the cache directory exists.\n"
+   "        if this option is not specified, it will be created at runtime\n"
+   "        when the cache directory does not exist.\n"
+   "\n"
    "   del_cache (delete local file cache)\n"
    "      - delete local file cache when s3fs starts and exits.\n"
    "\n"
@@ -942,7 +1051,7 @@ void show_help (void)
    "        with \":\" separator.) This option is used to decide the\n"
    "        SSE type. So that if you do not want to encrypt a object\n"
    "        object at uploading, but you need to decrypt encrypted\n"
-   "        object at downloaing, you can use load_sse_c option instead\n"
+   "        object at downloading, you can use load_sse_c option instead\n"
    "        of this option.\n"
    "        For setting SSE-KMS, specify \"use_sse=kmsid\" or\n"
    "        \"use_sse=kmsid:<kms id>\". You can use \"k\" for short \"kmsid\".\n"
@@ -954,16 +1063,20 @@ void show_help (void)
    "        region.\n"
    "\n"
    "   load_sse_c - specify SSE-C keys\n"
-   "        Specify the custom-provided encription keys file path for decrypting\n"
-   "        at duwnloading.\n"
-   "        If you use the custom-provided encription key at uploading, you\n"
+   "        Specify the custom-provided encryption keys file path for decrypting\n"
+   "        at downloading.\n"
+   "        If you use the custom-provided encryption key at uploading, you\n"
    "        specify with \"use_sse=custom\". The file has many lines, one line\n"
    "        means one custom key. So that you can keep all SSE-C keys in file,\n"
    "        that is SSE-C key history. AWSSSECKEYS environment is as same as this\n"
    "        file contents.\n"
    "\n"
    "   public_bucket (default=\"\" which means disabled)\n"
-   "        - anonymously mount a public bucket when set to 1\n"
+   "        - anonymously mount a public bucket when set to 1, ignores the \n"
+   "          $HOME/.passwd-s3fs and /etc/passwd-s3fs files.\n"
+   "          S3 does not allow copy object api for anonymous users, then\n"
+   "          s3fs sets nocopyapi option automatically when public_bucket=1\n"
+   "          option is specified.\n"
    "\n"
    "   passwd_file (default=\"\")\n"
    "      - specify which s3fs password file to use\n"
@@ -997,11 +1110,16 @@ void show_help (void)
    "   readwrite_timeout (default=\"60\" seconds)\n"
    "      - time to wait between read/write activity before giving up\n"
    "\n"
-   "   max_stat_cache_size (default=\"1000\" entries (about 4MB))\n"
+   "   list_object_max_keys (default=\"1000\")\n"
+   "      - specify the maximum number of keys returned by S3 list object\n"
+   "        API. The default is 1000. you can set this value to 1000 or more.\n"
+   "\n"
+   "   max_stat_cache_size (default=\"100,000\" entries (about 40MB))\n"
    "      - maximum number of entries in the stat cache\n"
    "\n"
    "   stat_cache_expire (default is no expire)\n"
    "      - specify expire time(seconds) for entries in the stat cache.\n"
+   "        This expire time indicates the time since stat cached.\n"
    "\n"
    "   enable_noobj_cache (default is disable)\n"
    "      - enable cache entries for the object which does not exist.\n"
@@ -1038,7 +1156,7 @@ void show_help (void)
    "   multipart_size (default=\"10\")\n"
    "      - part size, in MB, for each multipart request.\n"
    "\n"
-   "   ensure_diskfree (default same multipart_size value)\n"
+   "   ensure_diskfree (default 0)\n"
    "      - sets MB to ensure disk free space. s3fs makes file for\n"
    "        downloading, uploading and caching files. If the disk free\n"
    "        space is smaller than this value, s3fs do not use diskspace\n"
@@ -1048,8 +1166,11 @@ void show_help (void)
    "      - maximum size, in MB, of a single-part copy before trying \n"
    "        multipart copy.\n"
    "\n"
-   "   url (default=\"http://s3.amazonaws.com\")\n"
-   "      - sets the url to use to access amazon s3\n"
+   "   url (default=\"https://s3.amazonaws.com\")\n"
+   "      - sets the url to use to access Amazon S3. If you want to use HTTP,\n"
+   "        then you can set \"url=http://s3.amazonaws.com\".\n"
+   "        If you do not use https, please specify the URL with the url\n"
+   "        option.\n"
    "\n"
    "   endpoint (default=\"us-east-1\")\n"
    "      - sets the endpoint to use on signature version 4\n"
@@ -1077,12 +1198,31 @@ void show_help (void)
    "   enable_content_md5 (default is disable)\n"
    "      - ensure data integrity during writes with MD5 hash.\n"
    "\n"
+   "   ecs\n"
+   "      - This option instructs s3fs to query the ECS container credential\n"
+   "        metadata address instead of the instance metadata address.\n"
+   "\n"
+   "   iam_role (default is no IAM role)\n"
+   "      - This option requires the IAM role name or \"auto\". If you specify\n"
+   "        \"auto\", s3fs will automatically use the IAM role names that are set\n"
+   "        to an instance. If you specify this option without any argument, it\n"
+   "        is the same as that you have specified the \"auto\".\n"
+   "\n"
+   "   ibm_iam_auth\n"
+   "      - This option instructs s3fs to use IBM IAM authentication.\n"
+   "        In this mode, the AWSAccessKey and AWSSecretKey will be used as\n"
+   "        IBM's Service-Instance-ID and APIKey, respectively.\n"
+   "\n"
+   "   ibm_iam_endpoint (default is https://iam.bluemix.net)\n"
+   "      - sets the url to use for IBM IAM authentication.\n"
+   "\n"
    "   use_xattr (default is not handling the extended attribute)\n"
    "        Enable to handle the extended attribute(xattrs).\n"
    "        If you set this option, you can use the extended attribute.\n"
    "        For example, encfs and ecryptfs need to support the extended attribute.\n"
    "        Notice: if s3fs handles the extended attribute, s3fs can not work to\n"
    "        copy command with preserve=mode.\n"
    "\n"
    "   noxmlns (disable registering xml name space)\n"
    "        disable registering xml name space for response of \n"
    "        ListBucketResult and ListVersionsResult etc. Default name \n"
@@ -1107,7 +1247,7 @@ void show_help (void)
    "        nocopyapi, then s3fs ignores it.\n"
    "\n"
    "   use_path_request_style (use legacy API calling style)\n"
-   "        Enble compatibility with S3-like APIs which do not support\n"
+   "        Enable compatibility with S3-like APIs which do not support\n"
    "        the virtual-host request style, by using the older path request\n"
    "        style.\n"
    "\n"
@@ -1127,6 +1267,41 @@ void show_help (void)
    "   curldbg - put curl debug message\n"
    "        Put the debug message from libcurl when this option is specified.\n"
    "\n"
+   "   cipher_suites - customize TLS cipher suite list\n"
+   "        Customize the list of TLS cipher suites.\n"
+   "        Expects a colon separated list of cipher suite names.\n"
+   "        A list of available cipher suites, depending on your TLS engine,\n"
+   "        can be found on the CURL library documentation:\n"
+   "        https://curl.haxx.se/docs/ssl-ciphers.html\n"
+   "\n"
+   "   instance_name - The instance name of the current s3fs mountpoint.\n"
+   "        This name will be added to logging messages and user agent headers sent by s3fs.\n"
+   "\n"
+   "   complement_stat (complement lack of file/directory mode)\n"
+   "        s3fs complements lack of information about file/directory mode\n"
+   "        if a file or a directory object does not have x-amz-meta-mode\n"
+   "        header. As default, s3fs does not complements stat information\n"
+   "        for a object, then the object will not be able to be allowed to\n"
+   "        list/modify.\n"
+   "\n"
+   "   notsup_compat_dir (not support compatibility directory types)\n"
+   "        As a default, s3fs supports objects of the directory type as\n"
+   "        much as possible and recognizes them as directories.\n"
+   "        Objects that can be recognized as directory objects are \"dir/\",\n"
+   "        \"dir\", \"dir_$folder$\", and there is a file object that does\n"
+   "        not have a directory object but contains that directory path.\n"
+   "        s3fs needs redundant communication to support all these\n"
+   "        directory types. The object as the directory created by s3fs\n"
+   "        is \"dir/\". By restricting s3fs to recognize only \"dir/\" as\n"
+   "        a directory, communication traffic can be reduced. This option\n"
+   "        is used to give this restriction to s3fs.\n"
+   "        However, if there is a directory object other than \"dir/\" in\n"
+   "        the bucket, specifying this option is not recommended. s3fs may\n"
+   "        not be able to recognize the object correctly if an object\n"
+   "        created by s3fs exists in the bucket.\n"
+   "        Please use this option when the directory in the bucket is\n"
+   "        only \"dir/\" object.\n"
+   "\n"
    "FUSE/mount Options:\n"
    "\n"
    "   Most of the generic mount options described in 'man mount' are\n"
@@ -1145,7 +1320,7 @@ void show_help (void)
    "   -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
    "              twice turns on FUSE debug messages to STDOUT.\n"
    "   -f FUSE foreground option - do not run as daemon.\n"
-   "   -s FUSE singlethread option\n"
+   "   -s FUSE singlethreaded option\n"
    "      disable multi-threaded operation\n"
    "\n"
    "\n"
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -27,7 +27,7 @@
 // Struct
 //
 struct s3obj_entry{
-  std::string normalname; // normalized name: if empty, object is nomalized name.
+  std::string normalname; // normalized name: if empty, object is normalized name.
   std::string orgname;    // original name: if empty, object is original name.
   std::string etag;
   bool is_dir;
@@ -47,7 +47,7 @@ class S3ObjList
   s3obj_t objects;

 private:
-  bool insert_nomalized(const char* name, const char* normalized, bool is_dir);
+  bool insert_normalized(const char* name, const char* normalized, bool is_dir);
   const s3obj_entry* GetS3Obj(const char* name) const;

   s3obj_t::const_iterator begin(void) const {
@@ -88,9 +88,11 @@ class AutoLock
 {
 private:
   pthread_mutex_t* auto_mutex;
+  bool is_lock_acquired;

 public:
-  explicit AutoLock(pthread_mutex_t* pmutex);
+  explicit AutoLock(pthread_mutex_t* pmutex, bool no_wait = false);
+  bool isLockAcquired() const;
   ~AutoLock();
 };
@@ -104,11 +106,14 @@ MVNODE *add_mvnode(MVNODE** head, MVNODE** tail, const char *old_path, const cha
 void free_mvnodes(MVNODE *head);

 std::string get_username(uid_t uid);
-int is_uid_inculde_group(uid_t uid, gid_t gid);
+int is_uid_include_group(uid_t uid, gid_t gid);

+std::string mydirname(const char* path);
 std::string mydirname(std::string path);
+std::string mybasename(const char* path);
 std::string mybasename(std::string path);
 int mkdirp(const std::string& path, mode_t mode);
+std::string get_exist_directory_path(const std::string& path);
 bool check_exist_dir_permission(const char* dirpath);
 bool delete_files_in_dir(const char* dir, bool is_remove_own);
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -32,6 +32,21 @@

 using namespace std;

+template <class T> std::string str(T value) {
+  std::stringstream s;
+  s << value;
+  return s.str();
+}
+
+template std::string str(short value);
+template std::string str(unsigned short value);
+template std::string str(int value);
+template std::string str(unsigned int value);
+template std::string str(long value);
+template std::string str(unsigned long value);
+template std::string str(long long value);
+template std::string str(unsigned long long value);
+
 static const char hexAlphabet[] = "0123456789ABCDEF";

 off_t s3fs_strtoofft(const char* str, bool is_base_16)
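Moving the str() template body out of the header requires explicit instantiation: the definition is now visible only in this translation unit, so every type other files call str() with must be instantiated here, or the build fails at link time with an undefined reference. A minimal standalone illustration of the pattern, with hypothetical file names:

// util.h (declaration only; the body is no longer visible to other
// translation units):
//   template <class T> std::string str(T value);

// util.cpp: definition plus explicit instantiations.
#include <sstream>
#include <string>

template <class T> std::string str(T value) {
  std::stringstream s;
  s << value;
  return s.str();
}

// Each instantiation emits code for one T into this object file; calling
// str() with a type not listed here now fails at link time.
template std::string str(int value);
template std::string str(unsigned long value);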
@@ -288,7 +303,7 @@ char* s3fs_base64(const unsigned char* input, size_t length)
   if(!input || 0 >= length){
     return NULL;
   }
-  if(NULL == (result = (char*)malloc((((length / 3) + 1) * 4 + 1) * sizeof(char)))){
+  if(NULL == (result = reinterpret_cast<char*>(malloc((((length / 3) + 1) * 4 + 1) * sizeof(char))))){
     return NULL; // ENOMEM
   }
@@ -338,7 +353,7 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
   if(!input || 0 == strlen(input) || !plength){
     return NULL;
   }
-  if(NULL == (result = (unsigned char*)malloc((strlen(input) + 1)))){
+  if(NULL == (result = reinterpret_cast<unsigned char*>(malloc((strlen(input) + 1))))){
     return NULL; // ENOMEM
   }
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2007-2008 Randy Rizun <rrizun@gmail.com>
+ * Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
 *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -28,16 +28,12 @@
 #include <sys/types.h>

 #include <string>
 #include <sstream>

-#define SPACES " \t\r\n"
-#define STR2NCMP(str1, str2) strncmp(str1, str2, strlen(str2))
+static const std::string SPACES = " \t\r\n";

-template<typename T> std::string str(T value) {
-  std::stringstream s;
-  s << value;
-  return s.str();
-}
+static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(str1, str2, strlen(str2)); }

+template <class T> std::string str(T value);

 off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
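Turning STR2NCMP from a macro into an inline function gives it real parameter types, so passing a non-pointer is now a compile-time error, and each argument is evaluated exactly once. A small sketch of the difference; starts_with_use_sse is a hypothetical call site:

#include <string.h>

// Macro version: no type checking, and str2 is expanded twice (once for
// strncmp, once for strlen), so a side-effecting argument runs twice.
#define STR2NCMP_MACRO(str1, str2) strncmp(str1, str2, strlen(str2))

// Function version: typed, single evaluation, still inlinable.
static inline int STR2NCMP(const char *str1, const char *str2)
{
  return strncmp(str1, str2, strlen(str2));
}

int starts_with_use_sse(const char* arg)
{
  return 0 == STR2NCMP(arg, "use_sse=");  // 1 when arg begins with "use_sse="
}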
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2014 Andrew Gaul <andrew@gaul.org>
+ * Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
 *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -1,7 +1,7 @@
 /*
  * s3fs - FUSE-based file system backed by Amazon S3
  *
- * Copyright 2014 Andrew Gaul <andrew@gaul.org>
+ * Copyright(C) 2014 Andrew Gaul <andrew@gaul.org>
 *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -33,8 +33,9 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
 {
   if(x == NULL && y == NULL){
     return;
-  } else if((x == NULL || y == NULL) || strcmp(x, y) != 0){
-    std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
+  // cppcheck-suppress nullPointerRedundantCheck
+  } else if(x == NULL || y == NULL || strcmp(x, y) != 0){
+    std::cerr << (x ? x : "null") << " != " << (y ? y : "null") << " at " << file << ":" << line << std::endl;
     std::exit(1);
   }
 }
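Streaming a NULL char* into an ostream is undefined behavior, which the old code hit whenever exactly one argument was NULL; the fix substitutes a visible placeholder first. The guard in isolation (print_cstr is an illustrative helper):

#include <iostream>

static void print_cstr(std::ostream& os, const char* s)
{
  // Never pass a possibly-NULL char* straight to operator<<.
  os << (s ? s : "null");
}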
@@ -41,7 +41,7 @@ set -o errexit
 S3FS=../src/s3fs

 # Allow these defaulted values to be overridden
-: ${S3_URL:="http://127.0.0.1:8080"}
+: ${S3_URL:="https://127.0.0.1:8080"}
 : ${S3FS_CREDENTIALS_FILE:="passwd-s3fs"}
 : ${TEST_BUCKET_1:="s3fs-integration-test"}
@@ -50,7 +50,7 @@ export S3_URL
 export TEST_SCRIPT_DIR=`pwd`
 export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}

-S3PROXY_VERSION="1.4.0"
+S3PROXY_VERSION="1.6.0"
 S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}

 if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
@@ -108,7 +108,8 @@ function start_s3proxy {
         chmod +x "${S3PROXY_BINARY}"
     fi

-    stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG | stdbuf -oL -eL sed -u "s/^/s3proxy: /" &
+    stdbuf -oL -eL java -jar "$S3PROXY_BINARY" --properties $S3PROXY_CONFIG &
+    S3PROXY_PID=$!

     # wait for S3Proxy to start
     for i in $(seq 30);
@@ -121,8 +122,6 @@ function start_s3proxy {
         fi
         sleep 1
     done

-    S3PROXY_PID=$(netstat -lpnt | grep :8080 | awk '{ print $7 }' | sed -u 's|/java||')
     fi
 }
@@ -130,7 +129,6 @@ function stop_s3proxy {
     if [ -n "${S3PROXY_PID}" ]
     then
         kill $S3PROXY_PID
-        wait $S3PROXY_PID
     fi
 }
@@ -157,8 +155,6 @@ function start_s3fs {
    #
    # TODO: Allow all these options to be overriden with env variables
    #
-   # sigv2
-   #    Historically because S3Proxy only supports sigv2.
    # use_path_request_style
    #    The test env doesn't have virtual hosts
    # createbucket
@@ -181,18 +177,36 @@ function start_s3fs {
     ${VALGRIND_EXEC} ${S3FS} \
         $TEST_BUCKET_1 \
         $TEST_BUCKET_MOUNT_POINT_1 \
-        -o sigv2 \
         -o use_path_request_style \
         -o url=${S3_URL} \
         -o no_check_certificate \
         -o ssl_verify_hostname=0 \
         -o createbucket \
         ${AUTH_OPT} \
         -o dbglevel=${DBGLEVEL:=info} \
         -o retries=3 \
         -f \
-        ${@} \
-        |& stdbuf -oL -eL sed -u "s/^/s3fs: /" &
+        ${@} | stdbuf -oL -eL sed -u "s/^/s3fs: /" &
     )

-    retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
+    if [ `uname` = "Darwin" ]; then
+        set +o errexit
+        TRYCOUNT=0
+        while [ $TRYCOUNT -le 20 ]; do
+            df | grep -q $TEST_BUCKET_MOUNT_POINT_1
+            if [ $? -eq 0 ]; then
+                break;
+            fi
+            sleep 1
+            TRYCOUNT=`expr ${TRYCOUNT} + 1`
+        done
+        if [ $? -ne 0 ]; then
+            exit 1
+        fi
+        set -o errexit
+    else
+        retry 5 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts || exit 1
+    fi

     # Quick way to start system up for manual testing with options under test
     if [[ -n ${INTERACT} ]]; then
@@ -205,14 +219,21 @@ function start_s3fs {

 function stop_s3fs {
     # Retry in case file system is in use
-    if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
-        retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
+    if [ `uname` = "Darwin" ]; then
+        df | grep -q $TEST_BUCKET_MOUNT_POINT_1
+        if [ $? -eq 0 ]; then
+            retry 10 df | grep -q $TEST_BUCKET_MOUNT_POINT_1 && umount $TEST_BUCKET_MOUNT_POINT_1
+        fi
+    else
+        if grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts; then
+            retry 10 grep -q $TEST_BUCKET_MOUNT_POINT_1 /proc/mounts && fusermount -u $TEST_BUCKET_MOUNT_POINT_1
+        fi
     fi
 }

 # trap handlers do not stack. If a test sets its own, the new handler should call common_exit_handler
 function common_exit_handler {
-    stop_s3proxy
     stop_s3fs
+    stop_s3proxy
 }
 trap common_exit_handler EXIT
@@ -6,7 +6,11 @@ source test-utils.sh

 function test_append_file {
     describe "Testing append to file ..."
+
     # Write a small test file
+    if [ `uname` = "Darwin" ]; then
+        cat /dev/null > ${TEST_TEXT_FILE}
+    fi
     for x in `seq 1 $TEST_TEXT_FILE_LENGTH`
     do
         echo "echo ${TEST_TEXT} to ${TEST_TEXT_FILE}"
@@ -42,7 +46,7 @@ function test_truncate_file {
 }

 function test_truncate_empty_file {
-    echo "Testing truncate empty file ..."
+    describe "Testing truncate empty file ..."
     # Write an empty test file
     touch ${TEST_TEXT_FILE}

@@ -51,7 +55,11 @@ function test_truncate_empty_file {
     truncate ${TEST_TEXT_FILE} -s $t_size

     # Verify file is zero length
-    size=$(stat -c %s ${TEST_TEXT_FILE})
+    if [ `uname` = "Darwin" ]; then
+        size=$(stat -f "%z" ${TEST_TEXT_FILE})
+    else
+        size=$(stat -c %s ${TEST_TEXT_FILE})
+    fi
     if [ $t_size -ne $size ]
     then
         echo "error: expected ${TEST_TEXT_FILE} to be $t_size length, got $size"
@@ -77,6 +85,9 @@ function test_mv_file {
     # create the test file again
     mk_test_file

+    # save file length
+    ALT_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
+
     #rename the test file
     mv $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
     if [ ! -e $ALT_TEST_TEXT_FILE ]
@@ -86,7 +97,6 @@ function test_mv_file {
     fi

     # Check the contents of the alt file
-    ALT_TEXT_LENGTH=`echo $TEST_TEXT | wc -c | awk '{print $1}'`
     ALT_FILE_LENGTH=`wc -c $ALT_TEST_TEXT_FILE | awk '{print $1}'`
     if [ "$ALT_FILE_LENGTH" -ne "$ALT_TEXT_LENGTH" ]
     then
@@ -179,12 +189,21 @@ function test_chmod {
     # create the test file again
     mk_test_file

-    ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
+    if [ `uname` = "Darwin" ]; then
+        ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
+    else
+        ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
+    fi

     chmod 777 $TEST_TEXT_FILE;

     # if they're the same, we have a problem.
-    if [ $(stat --format=%a $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
+    if [ `uname` = "Darwin" ]; then
+        CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
+    else
+        CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
+    fi
+    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
     then
         echo "Could not modify $TEST_TEXT_FILE permissions"
         return 1
@@ -200,15 +219,29 @@ function test_chown {
     # create the test file again
     mk_test_file

-    ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
+    if [ `uname` = "Darwin" ]; then
+        ORIGINAL_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
+    else
+        ORIGINAL_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
+    fi

     chown 1000:1000 $TEST_TEXT_FILE;

     # if they're the same, we have a problem.
-    if [ $(stat --format=%u:%g $TEST_TEXT_FILE) == $ORIGINAL_PERMISSIONS ]
+    if [ `uname` = "Darwin" ]; then
+        CHANGED_PERMISSIONS=$(stat -f "%u:%g" $TEST_TEXT_FILE)
+    else
+        CHANGED_PERMISSIONS=$(stat --format=%u:%g $TEST_TEXT_FILE)
+    fi
+    if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
     then
-        echo "Could not modify $TEST_TEXT_FILE ownership"
-        return 1
+        if [ $ORIGINAL_PERMISSIONS == "1000:1000" ]
+        then
+            echo "Could not be strict check because original file permission 1000:1000"
+        else
+            echo "Could not modify $TEST_TEXT_FILE ownership($ORIGINAL_PERMISSIONS to 1000:1000)"
+            return 1
+        fi
     fi

     # clean up
@@ -257,6 +290,10 @@ function test_rename_before_close {

 function test_multipart_upload {
     describe "Testing multi-part upload ..."

+    if [ `uname` = "Darwin" ]; then
+        cat /dev/null > $BIG_FILE
+    fi
     dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
     dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
@@ -273,6 +310,10 @@ function test_multipart_upload {

 function test_multipart_copy {
     describe "Testing multi-part copy ..."

+    if [ `uname` = "Darwin" ]; then
+        cat /dev/null > $BIG_FILE
+    fi
     dd if=/dev/urandom of="/tmp/${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
     dd if="/tmp/${BIG_FILE}" of="${BIG_FILE}" bs=$BIG_FILE_LENGTH count=1
     mv "${BIG_FILE}" "${BIG_FILE}-copy"
@@ -359,8 +400,13 @@ function test_mtime_file {

     #copy the test file with preserve mode
     cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
-    testmtime=`stat -c %Y $TEST_TEXT_FILE`
-    altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
+    if [ `uname` = "Darwin" ]; then
+        testmtime=`stat -f "%m" $TEST_TEXT_FILE`
+        altmtime=`stat -f "%m" $ALT_TEST_TEXT_FILE`
+    else
+        testmtime=`stat -c %Y $TEST_TEXT_FILE`
+        altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
+    fi
     if [ "$testmtime" -ne "$altmtime" ]
     then
         echo "File times do not match: $testmtime != $altmtime"
@@ -407,11 +453,11 @@ function add_all_tests {
     # TODO: broken: https://github.com/s3fs-fuse/s3fs-fuse/issues/145
     #add_tests test_rename_before_close
     add_tests test_multipart_upload
-    # TODO: test disabled until S3Proxy 1.5.0 is released
-    #add_tests test_multipart_copy
+    add_tests test_multipart_copy
     add_tests test_special_characters
     add_tests test_symlink
     add_tests test_extended_attributes
     add_tests test_mtime_file
     add_tests test_rm_rf_dir
     add_tests test_write_after_seek_ahead
 }
BIN  test/keystore.jks  (new file; binary file not shown)
@@ -1,7 +1,9 @@
-s3proxy.endpoint=http://127.0.0.1:8080
-s3proxy.authorization=aws-v2
+s3proxy.secure-endpoint=http://127.0.0.1:8080
+s3proxy.authorization=aws-v4
 s3proxy.identity=local-identity
 s3proxy.credential=local-credential
+s3proxy.keystore-path=keystore.jks
+s3proxy.keystore-password=password

 jclouds.provider=transient
 jclouds.identity=remote-identity
@@ -1,4 +1,4 @@
-# S3FS: Samlpe ahbe_conf parameter file.
+# S3FS: Sample ahbe_conf parameter file.
 #
 # This file is configuration file for additional header by extension(ahbe).
 # s3fs loads this file at starting.
@@ -24,6 +24,21 @@ function mk_test_file {
         echo "Could not create file ${TEST_TEXT_FILE}, it does not exist"
         exit 1
     fi
+
+    # wait & check
+    BASE_TEXT_LENGTH=`echo $TEXT | wc -c | awk '{print $1}'`
+    TRY_COUNT=10
+    while true; do
+        MK_TEXT_LENGTH=`wc -c $TEST_TEXT_FILE | awk '{print $1}'`
+        if [ $BASE_TEXT_LENGTH -eq $MK_TEXT_LENGTH ]; then
+            break
+        fi
+        TRY_COUNT=`expr $TRY_COUNT - 1`
+        if [ $TRY_COUNT -le 0 ]; then
+            echo "Could not create file ${TEST_TEXT_FILE}, that file size is something wrong"
+        fi
+        sleep 1
+    done
 }

 function rm_test_file {
@@ -65,9 +80,9 @@ function cd_run_dir {
         echo "TEST_BUCKET_MOUNT_POINT variable not set"
         exit 1
     fi
-    RUN_DIR=$(mktemp --directory ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
+    RUN_DIR=$(mktemp -d ${TEST_BUCKET_MOUNT_POINT_1}/testrun-XXXXXX)
     cd ${RUN_DIR}
 }

 function clean_run_dir {
     if [ -d ${RUN_DIR} ]; then