Compare commits
194 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 99ec09f13a | |||
| 4a011d87e0 | |||
| c6edc2cd8f | |||
| cc196bfdf0 | |||
| 895d5006bb | |||
| 62dcda6a56 | |||
| cbf072bc55 | |||
| 1b4d2a32d2 | |||
| b71c90bbe1 | |||
| 80344aafd3 | |||
| b5ca400500 | |||
| 2e89439120 | |||
| 555410386c | |||
| 08b132ddb9 | |||
| 1e86cc643d | |||
| f53503438c | |||
| 0d43d070cc | |||
| 0791fdca2a | |||
| 6e8678d5e3 | |||
| 10d9f75366 | |||
| 77993e607e | |||
| 74d8671e54 | |||
| 4c41eac29c | |||
| 3c97c1b251 | |||
| 84c671a81a | |||
| f336bdebcc | |||
| e5b8377202 | |||
| 4f42f4ab0c | |||
| 11b385820d | |||
| f1a9eaee54 | |||
| ffee8d5f39 | |||
| eeb839242b | |||
| f7760976a5 | |||
| ca2d1d873d | |||
| 951761ee2c | |||
| 231fd001d9 | |||
| e00afa8128 | |||
| e9297f39ea | |||
| 314dc5a398 | |||
| e07cb020cc | |||
| 9f79b9e0da | |||
| e87e40b3b4 | |||
| f0f95478ec | |||
| bd66b57ad3 | |||
| a1d3ff9766 | |||
| 7f61a947c2 | |||
| 4d0bef1e90 | |||
| 960823fb40 | |||
| c04e8e7a9d | |||
| fb6debd986 | |||
| d8185a25aa | |||
| 53337a0a28 | |||
| ae51556d04 | |||
| b3de9195a7 | |||
| 055ecf6ea7 | |||
| c603680e02 | |||
| 814aadd7e3 | |||
| dce63d1529 | |||
| 8ff05d8e38 | |||
| dfa84b82a8 | |||
| 6ac8618381 | |||
| 8c527c3616 | |||
| 54a074647e | |||
| c5ebf5d328 | |||
| 43c6ef560e | |||
| 3076abc744 | |||
| 07636c8a8d | |||
| 35d55ee513 | |||
| a442e843be | |||
| c0cf90cf8b | |||
| 3b1cc3b197 | |||
| a0c1f30ae7 | |||
| 8822a86709 | |||
| 98f397de0e | |||
| fd4d23f8f7 | |||
| 4820f0a42b | |||
| 807a618cf7 | |||
| a93e500b44 | |||
| 92d3114584 | |||
| 5062d6fbd9 | |||
| 7d14ebaf09 | |||
| cd794a6985 | |||
| 84b421d6ef | |||
| 8316da5bbe | |||
| fa287aeef7 | |||
| caaf4cac55 | |||
| 010276ceab | |||
| f219817eb3 | |||
| d487348d21 | |||
| eb0b29708f | |||
| 877842a720 | |||
| 1fc25e8c3f | |||
| 61ecafd426 | |||
| 79bd3441eb | |||
| 5f5da4b2cb | |||
| dede19d8c0 | |||
| fada95f58e | |||
| 014b8c5982 | |||
| 46d79c5bc2 | |||
| 40ba3b44a1 | |||
| beadf95975 | |||
| 2887f8916b | |||
| 0c9a8932f7 | |||
| ac72431195 | |||
| 2a7877beff | |||
| 7a56459103 | |||
| 5292fa74d1 | |||
| f2184e34dd | |||
| 1d4867830b | |||
| 36a4903843 | |||
| c83a3e67c9 | |||
| 05014c49c8 | |||
| aa69107165 | |||
| d373b0eca3 | |||
| 6aa40b2747 | |||
| 34c3bfe408 | |||
| 6ac56e722d | |||
| 61dc7f0a70 | |||
| 9f000957dd | |||
| b2141313e2 | |||
| aa9bd1fa3c | |||
| 5a2dc03a1c | |||
| 508fafbe62 | |||
| e29548178b | |||
| ab2f36f202 | |||
| b8c9fcfd70 | |||
| 58ce544e83 | |||
| e98ce36301 | |||
| 6401b4ae92 | |||
| 25b49e1a2e | |||
| c7def35b54 | |||
| ddba1c63c5 | |||
| c512516e14 | |||
| 2c43b1e12b | |||
| b68d97c6bf | |||
| f1757e4343 | |||
| e2d5641d99 | |||
| 523fe1e309 | |||
| c985b5e4d0 | |||
| 786f1a8fc7 | |||
| 18cb2e2662 | |||
| 743c706b0a | |||
| 4ed0e5f35a | |||
| fd6b37d3da | |||
| 56e24de0d4 | |||
| 2780043a7d | |||
| 54c9e48bb7 | |||
| ed5795eead | |||
| 3d225163f8 | |||
| 0569cec3ea | |||
| a2f8ac535e | |||
| 29355d75b0 | |||
| d9e89deef6 | |||
| 6b051eac47 | |||
| da997de918 | |||
| d97094fb8d | |||
| b91fc5409e | |||
| 3c970646d1 | |||
| a92668ae78 | |||
| 88cd8feb05 | |||
| 91c16f826a | |||
| d4d60ff315 | |||
| e8033f96de | |||
| 5fba542a29 | |||
| 44de3ffa05 | |||
| 2efa6df028 | |||
| 9e530c86ae | |||
| 95857733a1 | |||
| 664f910083 | |||
| 735e4b0848 | |||
| e8d76a6f58 | |||
| 0a6926be54 | |||
| 830a971bde | |||
| 4779d14d7d | |||
| 8929a27a24 | |||
| eea624c171 | |||
| cdaf4a9674 | |||
| 6fe92d5ed6 | |||
| 8649a68766 | |||
| af005b6e5e | |||
| b19d2ae78f | |||
| 5634f9bdcd | |||
| c703fa15c0 | |||
| d9c106cfde | |||
| 203f78fdae | |||
| c5af62b023 | |||
| dcd70daf48 | |||
| 8263919b0e | |||
| 97488e603f | |||
| 41c23adb0e | |||
| a85183d42c | |||
| 45b67b9604 | |||
| c376efdd28 | |||
| 4c5f510207 |
26
.clang-tidy
Normal file
26
.clang-tidy
Normal file
@ -0,0 +1,26 @@
|
||||
Checks: '
|
||||
-*,
|
||||
bugprone-*,
|
||||
google-*,
|
||||
-google-build-using-namespace,
|
||||
-google-readability-casting,
|
||||
-google-readability-todo,
|
||||
-google-runtime-int,
|
||||
-google-runtime-references,
|
||||
misc-*,
|
||||
-misc-misplaced-const,
|
||||
-misc-redundant-expression,
|
||||
-misc-unused-parameters,
|
||||
modernize-*,
|
||||
-modernize-deprecated-headers,
|
||||
-modernize-loop-convert,
|
||||
-modernize-use-auto,
|
||||
-modernize-use-nullptr,
|
||||
-modernize-use-using,
|
||||
performance-*,
|
||||
portability-*,
|
||||
readability-*,
|
||||
-readability-else-after-return,
|
||||
-readability-implicit-bool-conversion,
|
||||
-readability-named-parameter,
|
||||
-readability-simplify-boolean-expr'
|
||||
31
.gitattributes
vendored
31
.gitattributes
vendored
@ -1 +1,32 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
* text eol=lf
|
||||
|
||||
*.png binary
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
15
.github/ISSUE_TEMPLATE.md
vendored
15
.github/ISSUE_TEMPLATE.md
vendored
@ -1,25 +1,26 @@
|
||||
### Additional Information
|
||||
_The following information is very important in order to help us to help you. Omission of the following details may delay your support request or receive no attention at all._
|
||||
_Keep in mind that the commands we provide to retrieve information are oriented to GNU/Linux Distributions, so you could need to use others if you use s3fs on macOS or BSD_
|
||||
|
||||
#### Version of s3fs being used (s3fs --version)
|
||||
_example: 1.00_
|
||||
|
||||
#### Version of fuse being used (pkg-config --modversion fuse)
|
||||
#### Version of fuse being used (pkg-config --modversion fuse, rpm -qi fuse, dpkg -s fuse)
|
||||
_example: 2.9.4_
|
||||
|
||||
#### System information (uname -r)
|
||||
#### Kernel information (uname -r)
|
||||
_command result: uname -r_
|
||||
|
||||
#### Distro (cat /etc/issue)
|
||||
_command result: cat /etc/issue_
|
||||
#### GNU/Linux Distribution, if applicable (cat /etc/os-release)
|
||||
_command result: cat /etc/os-release_
|
||||
|
||||
#### s3fs command line used (if applicable)
|
||||
#### s3fs command line used, if applicable
|
||||
```
|
||||
```
|
||||
#### /etc/fstab entry (if applicable):
|
||||
#### /etc/fstab entry, if applicable
|
||||
```
|
||||
```
|
||||
#### s3fs syslog messages (grep s3fs /var/log/syslog, or s3fs outputs)
|
||||
#### s3fs syslog messages (grep s3fs /var/log/syslog, journalctl | grep s3fs, or s3fs outputs)
|
||||
_if you execute s3fs with dbglevel, curldbg option, you can get detail debug messages_
|
||||
```
|
||||
```
|
||||
|
||||
116
.gitignore
vendored
116
.gitignore
vendored
@ -1,32 +1,86 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
#
|
||||
# Compiled Object files
|
||||
#
|
||||
*.slo
|
||||
*.lo
|
||||
*.o
|
||||
/Makefile
|
||||
/Makefile.in
|
||||
/aclocal.m4
|
||||
/autom4te.cache/
|
||||
/config.guess
|
||||
/config.log
|
||||
/config.status
|
||||
/config.sub
|
||||
/stamp-h1
|
||||
/config.h
|
||||
/config.h.in
|
||||
/config.h.in~
|
||||
/configure
|
||||
/depcomp
|
||||
/test-driver
|
||||
/compile
|
||||
/doc/Makefile
|
||||
/doc/Makefile.in
|
||||
/install-sh
|
||||
/missing
|
||||
/src/.deps/
|
||||
/src/Makefile
|
||||
/src/Makefile.in
|
||||
/src/s3fs
|
||||
/src/test_*
|
||||
/test/.deps/
|
||||
/test/Makefile
|
||||
/test/Makefile.in
|
||||
/test/s3proxy-*
|
||||
/test/*.log
|
||||
/default_commit_hash
|
||||
*.Po
|
||||
*.Plo
|
||||
|
||||
#
|
||||
# autotools/automake
|
||||
#
|
||||
aclocal.m4
|
||||
autom4te.cache
|
||||
autoscan.log
|
||||
config.guess
|
||||
config.h
|
||||
config.h.in
|
||||
config.h.in~
|
||||
config.log
|
||||
config.status
|
||||
config.sub
|
||||
configure
|
||||
configure.scan
|
||||
depcomp
|
||||
install-sh
|
||||
libtool
|
||||
ltmain.sh
|
||||
m4
|
||||
m4/*
|
||||
missing
|
||||
stamp-h1
|
||||
Makefile
|
||||
Makefile.in
|
||||
test-driver
|
||||
compile
|
||||
missing
|
||||
|
||||
#
|
||||
# object directories
|
||||
#
|
||||
.deps
|
||||
.libs
|
||||
*/.deps
|
||||
*/.deps/*
|
||||
*/.libs
|
||||
*/.libs/*
|
||||
|
||||
#
|
||||
# each directories
|
||||
#
|
||||
*.log
|
||||
*.trs
|
||||
default_commit_hash
|
||||
src/s3fs
|
||||
src/test_*
|
||||
test/s3proxy-*
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
61
.travis.yml
61
.travis.yml
@ -1,3 +1,23 @@
|
||||
#
|
||||
# s3fs - FUSE-based file system backed by Amazon S3
|
||||
#
|
||||
# Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
|
||||
language: cpp
|
||||
|
||||
matrix:
|
||||
@ -8,11 +28,11 @@ matrix:
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include'
|
||||
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
@ -34,10 +54,45 @@ matrix:
|
||||
- brew install cppcheck
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure
|
||||
- PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
|
||||
- make check -C test
|
||||
- cat test/test-suite.log
|
||||
|
||||
|
||||
- os: linux-ppc64le
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: apt
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:openjdk-r/ppa
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq attr libfuse-dev openjdk-7-jdk
|
||||
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-ppc64el/jre/bin/java
|
||||
- sudo git clone --branch 1.61 https://github.com/danmar/cppcheck.git
|
||||
- pwd
|
||||
- cd ./cppcheck
|
||||
- sudo make
|
||||
- sudo make install
|
||||
- cd ../
|
||||
script:
|
||||
- ./autogen.sh
|
||||
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03'
|
||||
- make
|
||||
- make cppcheck
|
||||
- make check -C src
|
||||
- modprobe fuse
|
||||
- make check -C test
|
||||
- cat test/test-suite.log
|
||||
|
||||
#
|
||||
# Local variables:
|
||||
# tab-width: 4
|
||||
# c-basic-offset: 4
|
||||
# End:
|
||||
# vim600: noet sw=4 ts=4 fdm=marker
|
||||
# vim<600: noet sw=4 ts=4
|
||||
#
|
||||
|
||||
4
AUTHORS
4
AUTHORS
@ -17,3 +17,7 @@ Bugfixes, performance and other improvements.
|
||||
5. Takeshi Nakatani <ggtakec@gmail.com>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
6. Andrew Gaul <gaul@gaul.org>
|
||||
|
||||
Bugfixes, performance and other improvements.
|
||||
|
||||
108
ChangeLog
108
ChangeLog
@ -1,6 +1,98 @@
|
||||
ChangeLog for S3FS
|
||||
ChangeLog for S3FS
|
||||
------------------
|
||||
|
||||
Version 1.85 -- 11 Mar, 2019
|
||||
#804 - add Backblaze B2
|
||||
#812 - Fix typo s/mutliple/multiple/
|
||||
#819 - #691: Made instructions for creating password file more obvious.
|
||||
#820 - Enable big writes if capable
|
||||
#826 - For RPM distributions fuse-libs is enough
|
||||
#831 - Add support for storage class ONEZONE_IA.
|
||||
#832 - Simplify hex conversion
|
||||
#833 - New installation instructions for Fedora >= 27 and CentOS7
|
||||
#834 - Improve template for issues
|
||||
#835 - Make the compilation instructions generic
|
||||
#840 - Replace all mentions to MacOS X to macOS
|
||||
#849 - Correct typo
|
||||
#851 - Correctly compare list_object_max_keys
|
||||
#852 - Allow credentials from ${HOME}/.aws/credentials
|
||||
#853 - Replace ~ with ${HOME} in examples
|
||||
#855 - Include StackOverflow in FAQs
|
||||
#856 - Add icon for s3fs
|
||||
#859 - Upload S3 parts without batching
|
||||
#861 - Add 'profile' option to command line help.
|
||||
#865 - fix multihead warning check
|
||||
#866 - Multi-arch support for ppc64le
|
||||
#870 - Correct typos in command-line parsing
|
||||
#874 - Address cppcheck 1.86 errors
|
||||
#877 - Check arguments and environment before .aws/creds
|
||||
#882 - [curl] Assume long encryption keys are base64 encoded
|
||||
#885 - Update s3fs_util.cpp for correspondence of Nextcloud contype
|
||||
#888 - Add Server Fault to FAQs
|
||||
#892 - Repair xattr tests
|
||||
#893 - Store and retrieve file change time
|
||||
#894 - Default uid/gid/mode when object lacks permissions
|
||||
#895 - Emit more friendly error for buckets with dots
|
||||
#898 - Flush file before renaming
|
||||
#899 - Tighten up HTTP response code check
|
||||
#900 - Plug memory leak
|
||||
#901 - Plug memory leaks
|
||||
#902 - Avoid pass-by-value when not necessary
|
||||
#903 - Prefer find(char) over find(const char *)
|
||||
#904 - Remove unnecessary calls to std::string::c_str
|
||||
#905 - Fix comparison in s3fs_strtoofft
|
||||
#906 - Prefer HTTPS links where possible
|
||||
#908 - Added an error message when HTTP 301 status
|
||||
#909 - Ignore after period character of floating point in x-amz-meta-mtime
|
||||
#910 - Added a missing extension to .gitignore, and formatted dot files
|
||||
#911 - Added detail error message when HTTP 301/307 status
|
||||
#912 - Automatic region change made possible other than us-east-1(default)
|
||||
#913 - Prefer abort over assert(false)
|
||||
#914 - Issue readdir HEAD requests without batching
|
||||
#917 - Reference better-known AWS CLI for compatibility
|
||||
#918 - Load tail range during overwrite
|
||||
#919 - Add test for mv non-empty directory
|
||||
#920 - Remove unnecessary string copies
|
||||
#921 - Remove redundant string initializations
|
||||
#923 - Reverted automatic region change and changed messages
|
||||
#924 - Prefer empty over size checks
|
||||
#925 - Remove redundant null checks before delete
|
||||
#926 - Accept paths with : in them
|
||||
#930 - Correct enable_content_md5 docs
|
||||
#931 - Correct sigv2 typo
|
||||
#932 - Prefer AutoLock for synchronization
|
||||
#933 - Remove mirror path when deleting cache
|
||||
#934 - Checked and corrected all typo
|
||||
#937 - Disable malloc_trim
|
||||
#938 - Remove unneeded void parameter
|
||||
#939 - Prefer specific [io]stringstream where possible
|
||||
#940 - Copy parts in parallel
|
||||
#942 - Ensure s3fs compiles with C++03
|
||||
#943 - Return not supported when hard linking
|
||||
#944 - Repair utility mode
|
||||
#946 - Simplify async request completion code
|
||||
#948 - Add logging for too many parts
|
||||
#949 - Implement exponential backoff for 503
|
||||
#950 - Added S3FS_MALLOC_TRIM build switch
|
||||
#951 - Added a non-interactive option to utility mode
|
||||
#952 - Automatically abort failed multipart requests
|
||||
#953 - Update s3ql link
|
||||
#954 - Clear containers instead of individual erases
|
||||
#955 - Address miscellaneous clang-tidy warnings
|
||||
#957 - Upgrade to S3Proxy 1.6.1
|
||||
#958 - Document lack of inotify support
|
||||
#959 - Fixed code for latest cppcheck error on OSX
|
||||
#960 - Wtf8
|
||||
#961 - Work around cppcheck warnings
|
||||
#965 - Improvement of curl session pool for multipart
|
||||
#967 - Increase FdEntity reference count when returning
|
||||
#969 - Fix lazy typo
|
||||
#970 - Remove from file from stat cache during rename
|
||||
#972 - Add instructions for Amazon Linux
|
||||
#974 - Changed the description order of man page options
|
||||
#975 - Fixed ref-count when error occurred.
|
||||
#977 - Make macOS instructions consistent with others
|
||||
|
||||
Version 1.84 -- Jul 8, 2018
|
||||
#704 - Update README.md with details about .passwd-s3fs
|
||||
#710 - add disk space reservation
|
||||
@ -46,7 +138,7 @@ Version 1.83 -- Dec 17, 2017
|
||||
#638 - Minor fixes to README
|
||||
#639 - Update Homebrew instructions
|
||||
#642 - Fixed potential atomic violation in S3fsCurl::AddUserAgent - #633
|
||||
#644 - Fixed with unnecessary equal in POST uploads url argment - #643
|
||||
#644 - Fixed with unnecessary equal in POST uploads url argument - #643
|
||||
#645 - Configure S3Proxy for SSL
|
||||
#646 - Simplify S3Proxy PID handling
|
||||
#652 - Fix s3fs_init message
|
||||
@ -112,7 +204,7 @@ Version 1.81 -- May 13, 2017
|
||||
#540 - Address cppcheck 1.77 warnings
|
||||
#545 - Changed base cached time of stat_cache_expire option - #523
|
||||
#546 - Fixed double initialization of SSL library at foreground
|
||||
#550 - Add umount instruction for unplivileged user
|
||||
#550 - Add umount instruction for unprivileged user
|
||||
#551 - Updated stat_cache_expire option description - #545
|
||||
#552 - switch S3fsMultiCurl to use foreground threads
|
||||
#553 - add TLS cipher suites customization
|
||||
@ -165,7 +257,7 @@ Version 1.80 -- May 29, 2016
|
||||
#250 - s3fs can print version with short commit hash - #228
|
||||
#251 - Skip xattr tests if utilities are missing
|
||||
#252 - This fixes an issue with caching when the creation of a subdirectory …
|
||||
#253 - Added chacking cache dir perms at starting.
|
||||
#253 - Added checking cache dir perms at starting.
|
||||
#256 - Add no atomic rename to limitations
|
||||
#257 - Update README.md: Bugfix password file permissions errors
|
||||
#258 - Update README.md to better explain mount upon boot
|
||||
@ -193,7 +285,7 @@ Version 1.80 -- May 29, 2016
|
||||
#306 - Fix read concurrency to work in parallel count
|
||||
#307 - Fix pthread portability problem
|
||||
#308 - Changed ensure free disk space as additional change for #306
|
||||
#309 - Check pthread prtability in configure as additional change for #307
|
||||
#309 - Check pthread portability in configure as additional change for #307
|
||||
#310 - Update integration-test-main.sh as additional change for #300
|
||||
#311 - Change error log to debug log in s3fs_read()
|
||||
#313 - fix gitignore
|
||||
@ -205,14 +297,14 @@ Version 1.80 -- May 29, 2016
|
||||
#330 - Pass by const reference where possible
|
||||
#331 - Address various clang warnings
|
||||
#334 - Bucket host should include port and not path
|
||||
#336 - update REAME.md for fstab
|
||||
#336 - update README.md for fstab
|
||||
#338 - Fixed a bug about IAMCRED type could not be retried.
|
||||
#339 - Updated README.md for fstab example.
|
||||
#341 - Fix the memory leak issue in fdcache.
|
||||
#346 - Fix empty directory check against AWS S3
|
||||
#348 - Integration test summary, continue on error
|
||||
#350 - Changed cache out logic for stat - #340
|
||||
#351 - Check cache dirctory path and attributes - #347
|
||||
#351 - Check cache directory path and attributes - #347
|
||||
#352 - Remove stat file cache dir if specified del_cache - #337
|
||||
#354 - Supported regex type for additional header format - #343
|
||||
#355 - Fixed codes about clock_gettime for osx
|
||||
@ -289,7 +381,7 @@ issue #184 - Add usage information for multipart_size
|
||||
issue #185 - Correct obvious typos in usage and README
|
||||
issue #190 - Add a no_check_certificate option.
|
||||
issue #194 - Tilda in a file-name breaks things (EPERM)
|
||||
issue #198 - Disasble integration tests for Travis
|
||||
issue #198 - Disable integration tests for Travis
|
||||
issue #199 - Supported extended attributes(retry)
|
||||
issue #200 - fixed fallback to sigv2 for bucket create and GCS
|
||||
issue #202 - Specialize {set,get}xattr for OS X
|
||||
|
||||
2
INSTALL
2
INSTALL
@ -124,7 +124,7 @@ architecture at a time in the source code directory. After you have
|
||||
installed the package for one architecture, use `make distclean' before
|
||||
reconfiguring for another architecture.
|
||||
|
||||
On MacOS X 10.5 and later systems, you can create libraries and
|
||||
On macOS 10.5 and later systems, you can create libraries and
|
||||
executables that work on multiple system types--known as "fat" or
|
||||
"universal" binaries--by specifying multiple `-arch' options to the
|
||||
compiler but only a single `-arch' option to the preprocessor. Like
|
||||
|
||||
@ -32,6 +32,7 @@ cppcheck:
|
||||
cppcheck --quiet --error-exitcode=1 \
|
||||
--inline-suppr \
|
||||
--std=c++03 \
|
||||
--xml \
|
||||
-D HAVE_ATTR_XATTR_H \
|
||||
-D HAVE_SYS_EXTATTR_H \
|
||||
-D HAVE_MALLOC_TRIM \
|
||||
@ -40,4 +41,5 @@ cppcheck:
|
||||
-U ENOATTR \
|
||||
--enable=warning,style,information,missingInclude \
|
||||
--suppress=missingIncludeSystem \
|
||||
--suppress=unmatchedSuppression \
|
||||
src/ test/
|
||||
|
||||
110
README.md
110
README.md
@ -1,8 +1,9 @@
|
||||
s3fs
|
||||
====
|
||||
|
||||
s3fs allows Linux and Mac OS X to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other tools like [s3cmd](http://s3tools.org/s3cmd).
|
||||
s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
|
||||
s3fs preserves the native object format for files, allowing use of other
|
||||
tools like [AWS CLI](https://github.com/aws/aws-cli).
|
||||
[](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
|
||||
|
||||
Features
|
||||
@ -19,8 +20,8 @@ Features
|
||||
* user-specified regions, including Amazon GovCloud
|
||||
* authenticate via v2 or v4 signatures
|
||||
|
||||
Installation
|
||||
------------
|
||||
Installation from pre-built packages
|
||||
------------------------------------
|
||||
|
||||
Some systems provide pre-built packages:
|
||||
|
||||
@ -36,67 +37,91 @@ Some systems provide pre-built packages:
|
||||
sudo zypper in s3fs
|
||||
```
|
||||
|
||||
* On Mac OS X, install via [Homebrew](http://brew.sh/):
|
||||
|
||||
```ShellSession
|
||||
$ brew cask install osxfuse
|
||||
$ brew install s3fs
|
||||
* On Fedora 27 and newer:
|
||||
```
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
Compilation
|
||||
-----------
|
||||
* On RHEL/CentOS 7 and newer through EPEL repositories:
|
||||
```
|
||||
sudo yum install epel-release
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
* On Linux, ensure you have all the dependencies:
|
||||
* On Amazon Linux through EPEL repositories:
|
||||
|
||||
On Ubuntu 14.04:
|
||||
```
|
||||
sudo amazon-linux-extras install epel
|
||||
sudo yum install s3fs-fuse
|
||||
```
|
||||
|
||||
```
|
||||
sudo apt-get install automake autotools-dev fuse g++ git libcurl4-openssl-dev libfuse-dev libssl-dev libxml2-dev make pkg-config
|
||||
```
|
||||
* On macOS, install via [Homebrew](https://brew.sh/):
|
||||
|
||||
On CentOS 7:
|
||||
```
|
||||
brew cask install osxfuse
|
||||
brew install s3fs
|
||||
```
|
||||
|
||||
```
|
||||
sudo yum install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel
|
||||
```
|
||||
Compilation and installation from sources
|
||||
-----------------------------------------
|
||||
|
||||
Then compile from master via the following commands:
|
||||
These are generic instructions to compile from the master branch, and should work on almost any GNU/Linux, macOS, BSD, or similar.
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
If you want specific instructions for some distributions, check the [wiki](https://github.com/s3fs-fuse/s3fs-fuse/wiki/Installation-Notes).
|
||||
|
||||
Keep in mind using the pre-built packages when available.
|
||||
|
||||
1. Ensure your system satisfies build and runtime dependencies for:
|
||||
|
||||
* fuse >= 2.8.4
|
||||
* automake
|
||||
* gcc-c++
|
||||
* make
|
||||
* libcurl
|
||||
* libxml2
|
||||
* openssl
|
||||
|
||||
2. Then compile from master via the following commands:
|
||||
|
||||
```
|
||||
git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||
cd s3fs-fuse
|
||||
./autogen.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
s3fs supports the standard
|
||||
[AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html)
|
||||
stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom passwd file.
|
||||
|
||||
The default location for the s3fs password file can be created:
|
||||
|
||||
* using a .passwd-s3fs file in the users home directory (i.e. ~/.passwd-s3fs)
|
||||
* using a .passwd-s3fs file in the users home directory (i.e. ${HOME}/.passwd-s3fs)
|
||||
* using the system-wide /etc/passwd-s3fs file
|
||||
|
||||
Enter your S3 identity and credential in a file `~/.passwd-s3fs` and set
|
||||
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
|
||||
owner-only permissions:
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > ~/.passwd-s3fs
|
||||
chmod 600 ~/.passwd-s3fs
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
|
||||
chmod 600 ${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
Run s3fs with an existing bucket `mybucket` and directory `/path/to/mountpoint`:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs
|
||||
```
|
||||
|
||||
If you encounter any errors, enable debug output:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs -o dbglevel=info -f -o curldbg
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbglevel=info -f -o curldbg
|
||||
```
|
||||
|
||||
You can also mount on boot by entering the following line to `/etc/fstab`:
|
||||
@ -114,12 +139,12 @@ mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
|
||||
If you use s3fs with a non-Amazon S3 implementation, specify the URL and path-style requests:
|
||||
|
||||
```
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=~/.passwd-s3fs -o url=http://url.to.s3/ -o use_path_request_style
|
||||
s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=https://url.to.s3/ -o use_path_request_style
|
||||
```
|
||||
|
||||
or(fstab)
|
||||
```
|
||||
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=http://url.to.s3/ 0 0
|
||||
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
|
||||
```
|
||||
|
||||
To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
|
||||
@ -131,7 +156,7 @@ The Service Instance ID is only required when using the `-o create_bucket` optio
|
||||
Note: You may also want to create the global credential file first
|
||||
|
||||
```
|
||||
echo MYIDENTITY:MYCREDENTIAL > /etc/passwd-s3fs
|
||||
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > /etc/passwd-s3fs
|
||||
chmod 600 /etc/passwd-s3fs
|
||||
```
|
||||
|
||||
@ -145,23 +170,26 @@ Generally S3 cannot offer the same performance or semantics as a local file syst
|
||||
|
||||
* random writes or appends to files require rewriting the entire file
|
||||
* metadata operations such as listing directories have poor performance due to network latency
|
||||
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](http://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
|
||||
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data([Amazon S3 Data Consistency Model](https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
|
||||
* no atomic renames of files or directories
|
||||
* no coordination between multiple clients mounting the same bucket
|
||||
* no hard links
|
||||
* inotify detects only local modifications, not external ones by other clients or tools
|
||||
|
||||
References
|
||||
----------
|
||||
|
||||
* [goofys](https://github.com/kahing/goofys) - similar to s3fs but has better performance and less POSIX compatibility
|
||||
* [s3backer](https://github.com/archiecobbs/s3backer) - mount an S3 bucket as a single file
|
||||
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://bitbucket.org/nikratio/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [S3Proxy](https://github.com/gaul/s3proxy) - combine with s3fs to mount Backblaze B2, EMC Atmos, Microsoft Azure, and OpenStack Swift buckets
|
||||
* [s3ql](https://github.com/s3ql/s3ql/) - similar to s3fs but uses its own object format
|
||||
* [YAS3FS](https://github.com/danilop/yas3fs) - similar to s3fs but uses SNS to allow multiple clients to mount a bucket
|
||||
|
||||
Frequently Asked Questions
|
||||
--------------------------
|
||||
* [FAQ wiki page](https://github.com/s3fs-fuse/s3fs-fuse/wiki/FAQ)
|
||||
* [s3fs on Stack Overflow](https://stackoverflow.com/questions/tagged/s3fs)
|
||||
* [s3fs on Server Fault](https://serverfault.com/questions/tagged/s3fs)
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
@ -20,7 +20,7 @@
|
||||
dnl Process this file with autoconf to produce a configure script.
|
||||
|
||||
AC_PREREQ(2.59)
|
||||
AC_INIT(s3fs, 1.84)
|
||||
AC_INIT(s3fs, 1.85)
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
|
||||
AC_CANONICAL_SYSTEM
|
||||
@ -36,7 +36,7 @@ AC_CHECK_HEADERS([sys/extattr.h])
|
||||
CXXFLAGS="$CXXFLAGS -Wall -D_FILE_OFFSET_BITS=64"
|
||||
|
||||
dnl ----------------------------------------------
|
||||
dnl For OSX
|
||||
dnl For macOS
|
||||
dnl ----------------------------------------------
|
||||
case "$target" in
|
||||
*-cygwin* )
|
||||
@ -237,7 +237,7 @@ dnl ----------------------------------------------
|
||||
dnl malloc_trim function
|
||||
AC_CHECK_FUNCS([malloc_trim])
|
||||
|
||||
dnl clock_gettime function(osx)
|
||||
dnl clock_gettime function(macos)
|
||||
AC_SEARCH_LIBS([clock_gettime],[rt posix4])
|
||||
AC_CHECK_FUNCS([clock_gettime])
|
||||
|
||||
|
||||
@ -16,10 +16,14 @@ For root.
|
||||
For unprivileged user.
|
||||
.SS utility mode ( remove interrupted multipart uploading objects )
|
||||
.TP
|
||||
\fBs3fs \-u bucket
|
||||
\fBs3fs --incomplete-mpu-list(-u) bucket
|
||||
.TP
|
||||
\fBs3fs --incomplete-mpu-abort[=all | =<expire date format>] bucket
|
||||
.SH DESCRIPTION
|
||||
s3fs is a FUSE filesystem that allows you to mount an Amazon S3 bucket as a local filesystem. It stores files natively and transparently in S3 (i.e., you can use other programs to access the same files).
|
||||
.SH AUTHENTICATION
|
||||
s3fs supports the standard AWS credentials (filehttps://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) stored in `${HOME}/.aws/credentials`.
|
||||
Alternatively, s3fs supports a custom passwd file.
|
||||
The s3fs password file has this format (use this format if you have only one set of credentials):
|
||||
.RS 4
|
||||
\fBaccessKeyId\fP:\fBsecretAccessKey\fP
|
||||
@ -60,7 +64,7 @@ if it is not specified bucket name(and path) in command line, must specify this
|
||||
\fB\-o\fR default_acl (default="private")
|
||||
the default canned acl to apply to all written s3 objects, e.g., "private", "public-read".
|
||||
empty string means do not send header.
|
||||
see http://aws.amazon.com/documentation/s3/ for the full list of canned acls.
|
||||
see https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for the full list of canned acls.
|
||||
.TP
|
||||
\fB\-o\fR retries (default="5")
|
||||
number of times to retry a failed S3 transaction.
|
||||
@ -78,7 +82,7 @@ delete local file cache when s3fs starts and exits.
|
||||
\fB\-o\fR storage_class (default is standard)
|
||||
store object with specified storage class.
|
||||
this option replaces the old option use_rrs.
|
||||
Possible values: standard, standard_ia, and reduced_redundancy.
|
||||
Possible values: standard, standard_ia, onezone_ia and reduced_redundancy.
|
||||
.TP
|
||||
\fB\-o\fR use_rrs (default is disable)
|
||||
use Amazon's Reduced Redundancy Storage.
|
||||
@ -133,6 +137,10 @@ This option specifies the configuration file path which file is the additional H
|
||||
A sample configuration file is uploaded in "test" directory.
|
||||
If you specify this option for set "Content-Encoding" HTTP header, please take care for RFC 2616.
|
||||
.TP
|
||||
\fB\-o\fR profile (default="default")
|
||||
Choose a profile from ${HOME}/.aws/credentials to authenticate against S3.
|
||||
Note that this format matches the AWS CLI format and differs from the s3fs passwd format.
|
||||
.TP
|
||||
\fB\-o\fR public_bucket (default="" which means disabled)
|
||||
anonymously mount a public bucket when set to 1, ignores the $HOME/.passwd-s3fs and /etc/passwd-s3fs files.
|
||||
S3 does not allow copy object api for anonymous users, then s3fs sets nocopyapi option automatically when public_bucket=1 option is specified.
|
||||
@ -203,7 +211,7 @@ But if you do not specify this option, and if you can not connect with the defau
|
||||
So s3fs can know the correct region name, because s3fs can find it in an error from the S3 server.
|
||||
.TP
|
||||
\fB\-o\fR sigv2 (default is signature version 4)
|
||||
sets signing AWS requests by sing Signature Version 2.
|
||||
sets signing AWS requests by using Signature Version 2.
|
||||
.TP
|
||||
\fB\-o\fR mp_umask (default is "0000")
|
||||
sets umask for the mount point directory.
|
||||
@ -214,10 +222,8 @@ But if you set the allow_other with this option, you can control permissions of
|
||||
\fB\-o\fR nomultipart - disable multipart uploads
|
||||
.TP
|
||||
\fB\-o\fR enable_content_md5 ( default is disable )
|
||||
verifying uploaded data without multipart by content-md5 header.
|
||||
Enable to send "Content-MD5" header when uploading a object without multipart posting.
|
||||
If this option is enabled, it has some influences on a performance of s3fs when uploading small object.
|
||||
Because s3fs always checks MD5 when uploading large object, this option does not affect on large object.
|
||||
Allow S3 server to check data integrity of uploads via the Content-MD5 header.
|
||||
This can add CPU overhead to transfers.
|
||||
.TP
|
||||
\fB\-o\fR ecs ( default is disable )
|
||||
This option instructs s3fs to query the ECS container credential metadata address instead of the instance metadata address.
|
||||
@ -281,6 +287,14 @@ However, if there is a directory object other than "dir/" in the bucket, specify
|
||||
s3fs may not be able to recognize the object correctly if an object created by s3fs exists in the bucket.
|
||||
Please use this option when the directory in the bucket is only "dir/" object.
|
||||
.TP
|
||||
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
|
||||
S3 requires all object names to be valid utf-8. But some
|
||||
clients, notably Windows NFS clients, use their own encoding.
|
||||
This option re-encodes invalid utf-8 object names into valid
|
||||
utf-8 by mapping offending codes into a 'private' codepage of the
|
||||
Unicode set.
|
||||
Useful on clients not using utf-8 as their file system encoding.
|
||||
.TP
|
||||
\fB\-o\fR dbglevel (default="crit")
|
||||
Set the debug message level. set value as crit(critical), err(error), warn(warning), info(information) to debug level. default debug level is critical.
|
||||
If s3fs run with "-d" option, the debug level is set information.
|
||||
@ -288,6 +302,18 @@ When s3fs catch the signal SIGUSR2, the debug level is bumpup.
|
||||
.TP
|
||||
\fB\-o\fR curldbg - put curl debug message
|
||||
Put the debug message from libcurl when this option is specified.
|
||||
.SS "utility mode options"
|
||||
.TP
|
||||
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
|
||||
Lists multipart incomplete objects uploaded to the specified bucket.
|
||||
.TP
|
||||
\fB\-\-incomplete\-mpu\-abort\fR all or date format(default="24H")
|
||||
Delete the multipart incomplete object uploaded to the specified bucket.
|
||||
If "all" is specified for this option, all multipart incomplete objects will be deleted.
|
||||
If you specify no argument as an option, objects older than 24 hours(24H) will be deleted(This is the default value).
|
||||
You can specify an optional date format.
|
||||
It can be specified as year, month, day, hour, minute, second, and it is expressed as "Y", "M", "D", "h", "m", "s" respectively.
|
||||
For example, "1Y6M10D12h30m30s".
|
||||
.SH FUSE/MOUNT OPTIONS
|
||||
.TP
|
||||
Most of the generic mount options described in 'man mount' are supported (ro, rw, suid, nosuid, dev, nodev, exec, noexec, atime, noatime, sync async, dirsync). Filesystems are mounted with '\-onodev,nosuid' by default, which can only be overridden by a privileged user.
|
||||
@ -295,7 +321,7 @@ Most of the generic mount options described in 'man mount' are supported (ro, rw
|
||||
There are many FUSE specific mount options that can be specified. e.g. allow_other. See the FUSE README for the full set.
|
||||
.SH NOTES
|
||||
.TP
|
||||
The maximum size of objects that s3fs can handle depends on Amazone S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
|
||||
.TP
|
||||
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
|
||||
.TP
|
||||
|
||||
BIN
doc/s3fs.png
Normal file
BIN
doc/s3fs.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 5.3 KiB |
@ -22,7 +22,6 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <assert.h>
|
||||
#include <curl/curl.h>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
@ -56,7 +55,7 @@ AdditionalHeader::AdditionalHeader()
|
||||
if(this == AdditionalHeader::get()){
|
||||
is_enable = false;
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -65,7 +64,7 @@ AdditionalHeader::~AdditionalHeader()
|
||||
if(this == AdditionalHeader::get()){
|
||||
Unload();
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -90,14 +89,14 @@ bool AdditionalHeader::Load(const char* file)
|
||||
if('#' == line[0]){
|
||||
continue;
|
||||
}
|
||||
if(0 == line.size()){
|
||||
if(line.empty()){
|
||||
continue;
|
||||
}
|
||||
// load a line
|
||||
stringstream ss(line);
|
||||
string key(""); // suffix(key)
|
||||
string head; // additional HTTP header
|
||||
string value; // header value
|
||||
istringstream ss(line);
|
||||
string key; // suffix(key)
|
||||
string head; // additional HTTP header
|
||||
string value; // header value
|
||||
if(0 == isblank(line[0])){
|
||||
ss >> key;
|
||||
}
|
||||
@ -109,8 +108,8 @@ bool AdditionalHeader::Load(const char* file)
|
||||
}
|
||||
|
||||
// check it
|
||||
if(0 == head.size()){
|
||||
if(0 == key.size()){
|
||||
if(head.empty()){
|
||||
if(key.empty()){
|
||||
continue;
|
||||
}
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) is no HTTP header value.", key.c_str());
|
||||
@ -123,6 +122,7 @@ bool AdditionalHeader::Load(const char* file)
|
||||
// regex
|
||||
if(key.size() <= strlen(ADD_HEAD_REGEX)){
|
||||
S3FS_PRN_ERR("file format error: %s key(suffix) does not have key string.", key.c_str());
|
||||
delete paddhead;
|
||||
continue;
|
||||
}
|
||||
key = key.substr(strlen(ADD_HEAD_REGEX));
|
||||
@ -164,7 +164,7 @@ bool AdditionalHeader::Load(const char* file)
|
||||
return true;
|
||||
}
|
||||
|
||||
void AdditionalHeader::Unload(void)
|
||||
void AdditionalHeader::Unload()
|
||||
{
|
||||
is_enable = false;
|
||||
|
||||
@ -239,14 +239,14 @@ struct curl_slist* AdditionalHeader::AddHeader(struct curl_slist* list, const ch
|
||||
return list;
|
||||
}
|
||||
|
||||
bool AdditionalHeader::Dump(void) const
|
||||
bool AdditionalHeader::Dump() const
|
||||
{
|
||||
if(!IS_S3FS_LOG_DBG()){
|
||||
return true;
|
||||
}
|
||||
|
||||
stringstream ssdbg;
|
||||
int cnt = 1;
|
||||
ostringstream ssdbg;
|
||||
int cnt = 1;
|
||||
|
||||
ssdbg << "Additional Header list[" << addheadlist.size() << "] = {" << endl;
|
||||
|
||||
|
||||
136
src/cache.cpp
136
src/cache.cpp
@ -28,7 +28,6 @@
|
||||
#include <stdint.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <syslog.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
@ -146,9 +145,12 @@ StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), Expir
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
stat_cache.clear();
|
||||
pthread_mutex_init(&(StatCache::stat_cache_lock), NULL);
|
||||
pthread_mutexattr_t attr;
|
||||
pthread_mutexattr_init(&attr);
|
||||
pthread_mutexattr_settype(&attr, S3FS_MUTEX_RECURSIVE);
|
||||
pthread_mutex_init(&StatCache::stat_cache_lock, &attr);
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -156,16 +158,16 @@ StatCache::~StatCache()
|
||||
{
|
||||
if(this == StatCache::getStatCacheData()){
|
||||
Clear();
|
||||
pthread_mutex_destroy(&(StatCache::stat_cache_lock));
|
||||
pthread_mutex_destroy(&StatCache::stat_cache_lock);
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Methods
|
||||
//-------------------------------------------------------------------
|
||||
unsigned long StatCache::GetCacheSize(void) const
|
||||
unsigned long StatCache::GetCacheSize() const
|
||||
{
|
||||
return CacheSize;
|
||||
}
|
||||
@ -177,7 +179,7 @@ unsigned long StatCache::SetCacheSize(unsigned long size)
|
||||
return old;
|
||||
}
|
||||
|
||||
time_t StatCache::GetExpireTime(void) const
|
||||
time_t StatCache::GetExpireTime() const
|
||||
{
|
||||
return (IsExpireTime ? ExpireTime : (-1));
|
||||
}
|
||||
@ -191,7 +193,7 @@ time_t StatCache::SetExpireTime(time_t expire, bool is_interval)
|
||||
return old;
|
||||
}
|
||||
|
||||
time_t StatCache::UnsetExpireTime(void)
|
||||
time_t StatCache::UnsetExpireTime()
|
||||
{
|
||||
time_t old = IsExpireTime ? ExpireTime : (-1);
|
||||
ExpireTime = 0;
|
||||
@ -207,18 +209,15 @@ bool StatCache::SetCacheNoObject(bool flag)
|
||||
return old;
|
||||
}
|
||||
|
||||
void StatCache::Clear(void)
|
||||
void StatCache::Clear()
|
||||
{
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); stat_cache.erase(iter++)){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
|
||||
delete (*iter).second;
|
||||
}
|
||||
stat_cache.clear();
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
}
|
||||
|
||||
bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool overcheck, const char* petag, bool* pisforce)
|
||||
@ -226,23 +225,22 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
bool is_delete_cache = false;
|
||||
string strpath = key;
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.end();
|
||||
if(overcheck && '/' != strpath[strpath.length() - 1]){
|
||||
strpath += "/";
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
if(iter == stat_cache.end()){
|
||||
strpath = key;
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second){
|
||||
stat_cache_entry* ent = (*iter).second;
|
||||
if(!IsExpireTime || !IsExpireStatCacheTime(ent->cache_date, ExpireTime)){
|
||||
if(ent->noobjcache){
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
if(!IsCacheNoObject){
|
||||
// need to delete this cache.
|
||||
DelStat(strpath);
|
||||
@ -255,10 +253,10 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
string stretag;
|
||||
if(petag){
|
||||
// find & check ETag
|
||||
for(headers_t::iterator iter = ent->meta.begin(); iter != ent->meta.end(); ++iter){
|
||||
string tag = lower(iter->first);
|
||||
for(headers_t::iterator hiter = ent->meta.begin(); hiter != ent->meta.end(); ++hiter){
|
||||
string tag = lower(hiter->first);
|
||||
if(tag == "etag"){
|
||||
stretag = iter->second;
|
||||
stretag = hiter->second;
|
||||
if('\0' != petag[0] && 0 != strcmp(petag, stretag.c_str())){
|
||||
is_delete_cache = true;
|
||||
}
|
||||
@ -289,7 +287,6 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
if(IsExpireIntervalType){
|
||||
SetStatCacheTime(ent->cache_date);
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -298,7 +295,6 @@ bool StatCache::GetStat(string& key, struct stat* pst, headers_t* meta, bool ove
|
||||
is_delete_cache = true;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath);
|
||||
@ -315,16 +311,16 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
return false;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.end();
|
||||
if(overcheck && '/' != strpath[strpath.length() - 1]){
|
||||
strpath += "/";
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
if(iter == stat_cache.end()){
|
||||
strpath = key;
|
||||
iter = stat_cache.find(strpath.c_str());
|
||||
iter = stat_cache.find(strpath);
|
||||
}
|
||||
|
||||
if(iter != stat_cache.end() && (*iter).second) {
|
||||
@ -332,7 +328,6 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
if((*iter).second->noobjcache){
|
||||
// noobjcache = true means no object.
|
||||
SetStatCacheTime((*iter).second->cache_date);
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
}else{
|
||||
@ -340,7 +335,6 @@ bool StatCache::IsNoObjectCache(string& key, bool overcheck)
|
||||
is_delete_cache = true;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(is_delete_cache){
|
||||
DelStat(strpath);
|
||||
@ -355,12 +349,13 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool n
|
||||
}
|
||||
S3FS_PRN_INFO3("add stat cache entry[path=%s]", key.c_str());
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
bool found = stat_cache.end() != stat_cache.find(key);
|
||||
bool do_truncate = stat_cache.size() > CacheSize;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
@ -402,19 +397,15 @@ bool StatCache::AddStat(std::string& key, headers_t& meta, bool forcedir, bool n
|
||||
}
|
||||
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
|
||||
if(stat_cache.end() != iter){
|
||||
if(iter->second){
|
||||
delete iter->second;
|
||||
}
|
||||
delete iter->second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
stat_cache[key] = ent;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -428,12 +419,13 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
}
|
||||
S3FS_PRN_INFO3("add no object cache entry[path=%s]", key.c_str());
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
bool found = stat_cache.end() != stat_cache.find(key);
|
||||
bool do_truncate = stat_cache.size() > CacheSize;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
bool found;
|
||||
bool do_truncate;
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
found = stat_cache.end() != stat_cache.find(key);
|
||||
do_truncate = stat_cache.size() > CacheSize;
|
||||
}
|
||||
|
||||
if(found){
|
||||
DelStat(key.c_str());
|
||||
@ -456,26 +448,21 @@ bool StatCache::AddNoObjectCache(string& key)
|
||||
SetStatCacheTime(ent->cache_date); // Set time.
|
||||
|
||||
// add
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter = stat_cache.find(key); // recheck for same key exists
|
||||
if(stat_cache.end() != iter){
|
||||
if(iter->second){
|
||||
delete iter->second;
|
||||
}
|
||||
delete iter->second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
stat_cache[key] = ent;
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
|
||||
void StatCache::ChangeNoTruncateFlag(const std::string& key, bool no_truncate)
|
||||
{
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
stat_cache_t::iterator iter = stat_cache.find(key);
|
||||
|
||||
if(stat_cache.end() != iter){
|
||||
@ -490,25 +477,22 @@ void StatCache::ChangeNoTruncateFlag(std::string key, bool no_truncate)
|
||||
}
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
}
|
||||
|
||||
bool StatCache::TruncateCache(void)
|
||||
bool StatCache::TruncateCache()
|
||||
{
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
if(stat_cache.empty()){
|
||||
return true;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
|
||||
// 1) erase over expire time
|
||||
if(IsExpireTime){
|
||||
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ){
|
||||
stat_cache_entry* entry = iter->second;
|
||||
if(!entry || (0L == entry->notruncate && IsExpireStatCacheTime(entry->cache_date, ExpireTime))){
|
||||
if(entry){
|
||||
delete entry;
|
||||
}
|
||||
delete entry;
|
||||
stat_cache.erase(iter++);
|
||||
}else{
|
||||
++iter;
|
||||
@ -518,7 +502,6 @@ bool StatCache::TruncateCache(void)
|
||||
|
||||
// 2) check stat cache count
|
||||
if(stat_cache.size() < CacheSize){
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -545,15 +528,11 @@ bool StatCache::TruncateCache(void)
|
||||
stat_cache_t::iterator siter = *iiter;
|
||||
|
||||
S3FS_PRN_DBG("truncate stat cache[path=%s]", siter->first.c_str());
|
||||
if(siter->second){
|
||||
delete siter->second;
|
||||
}
|
||||
delete siter->second;
|
||||
stat_cache.erase(siter);
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -564,13 +543,11 @@ bool StatCache::DelStat(const char* key)
|
||||
}
|
||||
S3FS_PRN_INFO3("delete stat cache entry[path=%s]", key);
|
||||
|
||||
pthread_mutex_lock(&StatCache::stat_cache_lock);
|
||||
AutoLock lock(&StatCache::stat_cache_lock);
|
||||
|
||||
stat_cache_t::iterator iter;
|
||||
if(stat_cache.end() != (iter = stat_cache.find(string(key)))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
delete (*iter).second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
if(0 < strlen(key) && 0 != strcmp(key, "/")){
|
||||
@ -582,17 +559,13 @@ bool StatCache::DelStat(const char* key)
|
||||
// If there is "path/" cache, delete it.
|
||||
strpath += "/";
|
||||
}
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath.c_str()))){
|
||||
if((*iter).second){
|
||||
delete (*iter).second;
|
||||
}
|
||||
if(stat_cache.end() != (iter = stat_cache.find(strpath))){
|
||||
delete (*iter).second;
|
||||
stat_cache.erase(iter);
|
||||
}
|
||||
}
|
||||
S3FS_MALLOCTRIM(0);
|
||||
|
||||
pthread_mutex_unlock(&StatCache::stat_cache_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -620,6 +593,9 @@ bool convert_header_to_stat(const char* path, headers_t& meta, struct stat* pst,
|
||||
// mtime
|
||||
pst->st_mtime = get_mtime(meta);
|
||||
|
||||
// ctime
|
||||
pst->st_ctime = get_ctime(meta);
|
||||
|
||||
// size
|
||||
pst->st_size = get_size(meta);
|
||||
|
||||
|
||||
@ -117,7 +117,7 @@ class StatCache
|
||||
bool AddStat(std::string& key, headers_t& meta, bool forcedir = false, bool no_truncate = false);
|
||||
|
||||
// Change no truncate flag
|
||||
void ChangeNoTruncateFlag(std::string key, bool no_truncate);
|
||||
void ChangeNoTruncateFlag(const std::string& key, bool no_truncate);
|
||||
|
||||
// Delete stat cache
|
||||
bool DelStat(const char* key);
|
||||
|
||||
@ -158,6 +158,7 @@ typedef std::map<std::string, PXATTRVAL> xattrs_t;
|
||||
//
|
||||
// Global variables
|
||||
//
|
||||
// TODO: namespace these
|
||||
extern bool foreground;
|
||||
extern bool nomultipart;
|
||||
extern bool pathrequeststyle;
|
||||
|
||||
@ -71,7 +71,6 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
{
|
||||
size_t digestlen = get_sha256_digest_length();
|
||||
char sha256[2 * digestlen + 1];
|
||||
char hexbuf[3];
|
||||
unsigned char* sha256hex;
|
||||
|
||||
if(NULL == (sha256hex = s3fs_sha256hexsum(fd, start, size))){
|
||||
@ -80,8 +79,7 @@ string s3fs_sha256sum(int fd, off_t start, ssize_t size)
|
||||
|
||||
memset(sha256, 0, 2 * digestlen + 1);
|
||||
for(size_t pos = 0; pos < digestlen; pos++){
|
||||
snprintf(hexbuf, 3, "%02x", sha256hex[pos]);
|
||||
strncat(sha256, hexbuf, 2);
|
||||
snprintf(sha256 + 2 * pos, 3, "%02x", sha256hex[pos]);
|
||||
}
|
||||
free(sha256hex);
|
||||
|
||||
|
||||
977
src/curl.cpp
977
src/curl.cpp
File diff suppressed because it is too large
Load Diff
66
src/curl.h
66
src/curl.h
@ -23,6 +23,8 @@
|
||||
|
||||
#include <cassert>
|
||||
|
||||
#include "psemaphore.h"
|
||||
|
||||
//----------------------------------------------
|
||||
// Symbols
|
||||
//----------------------------------------------
|
||||
@ -126,14 +128,12 @@ class S3fsMultiCurl;
|
||||
//----------------------------------------------
|
||||
// class CurlHandlerPool
|
||||
//----------------------------------------------
|
||||
typedef std::list<CURL*> hcurllist_t;
|
||||
|
||||
class CurlHandlerPool
|
||||
{
|
||||
public:
|
||||
explicit CurlHandlerPool(int maxHandlers)
|
||||
: mMaxHandlers(maxHandlers)
|
||||
, mHandlers(NULL)
|
||||
, mIndex(-1)
|
||||
explicit CurlHandlerPool(int maxHandlers) : mMaxHandlers(maxHandlers)
|
||||
{
|
||||
assert(maxHandlers > 0);
|
||||
}
|
||||
@ -141,20 +141,23 @@ public:
|
||||
bool Init();
|
||||
bool Destroy();
|
||||
|
||||
CURL* GetHandler();
|
||||
void ReturnHandler(CURL* h);
|
||||
CURL* GetHandler(bool only_pool);
|
||||
void ReturnHandler(CURL* hCurl, bool restore_pool);
|
||||
|
||||
private:
|
||||
int mMaxHandlers;
|
||||
|
||||
int mMaxHandlers;
|
||||
pthread_mutex_t mLock;
|
||||
CURL** mHandlers;
|
||||
int mIndex;
|
||||
hcurllist_t mPool;
|
||||
};
|
||||
|
||||
//----------------------------------------------
|
||||
// class S3fsCurl
|
||||
//----------------------------------------------
|
||||
class S3fsCurl;
|
||||
|
||||
// Prototype function for lazy setup options for curl handle
|
||||
typedef bool (*s3fscurl_lazy_setup)(S3fsCurl* s3fscurl);
|
||||
|
||||
typedef std::map<std::string, std::string> iamcredmap_t;
|
||||
typedef std::map<std::string, std::string> sseckeymap_t;
|
||||
typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
@ -163,6 +166,7 @@ typedef std::list<sseckeymap_t> sseckeylist_t;
|
||||
enum storage_class_t {
|
||||
STANDARD,
|
||||
STANDARD_IA,
|
||||
ONEZONE_IA,
|
||||
REDUCED_REDUNDANCY
|
||||
};
|
||||
|
||||
@ -246,6 +250,7 @@ class S3fsCurl
|
||||
static mimes_t mimeTypes;
|
||||
static std::string userAgent;
|
||||
static int max_parallel_cnt;
|
||||
static int max_multireq;
|
||||
static off_t multipart_size;
|
||||
static bool is_sigv4;
|
||||
static bool is_ua; // User-Agent
|
||||
@ -277,6 +282,10 @@ class S3fsCurl
|
||||
sse_type_t b_ssetype; // backup for retrying
|
||||
std::string op; // the HTTP verb of the request ("PUT", "GET", etc.)
|
||||
std::string query_string; // request query string
|
||||
Semaphore *sem;
|
||||
pthread_mutex_t *completed_tids_lock;
|
||||
std::vector<pthread_t> *completed_tids;
|
||||
s3fscurl_lazy_setup fpLazySetup; // curl options for lazy setting function
|
||||
|
||||
public:
|
||||
// constructor/destructor
|
||||
@ -304,9 +313,17 @@ class S3fsCurl
|
||||
static size_t DownloadWriteCallback(void* ptr, size_t size, size_t nmemb, void* userp);
|
||||
|
||||
static bool UploadMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl);
|
||||
static S3fsCurl* ParallelGetObjectRetryCallback(S3fsCurl* s3fscurl);
|
||||
|
||||
// lazy functions for set curl options
|
||||
static bool UploadMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool CopyMultipartPostSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreGetObjectRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
static bool PreHeadRequestSetCurlOpts(S3fsCurl* s3fscurl);
|
||||
|
||||
static bool ParseIAMCredentialResponse(const char* response, iamcredmap_t& keyval);
|
||||
static bool SetIAMCredentials(const char* response);
|
||||
static bool ParseIAMRoleFromMetaDataResponse(const char* response, std::string& rolename);
|
||||
@ -332,8 +349,9 @@ class S3fsCurl
|
||||
int GetIAMCredentials(void);
|
||||
|
||||
int UploadMultipartPostSetup(const char* tpath, int part_num, const std::string& upload_id);
|
||||
int CopyMultipartPostRequest(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
|
||||
int CopyMultipartPostSetup(const char* from, const char* to, int part_num, std::string& upload_id, headers_t& meta);
|
||||
bool UploadMultipartPostComplete();
|
||||
bool CopyMultipartPostComplete();
|
||||
|
||||
public:
|
||||
// class methods
|
||||
@ -385,8 +403,12 @@ class S3fsCurl
|
||||
}
|
||||
static long SetSslVerifyHostname(long value);
|
||||
static long GetSslVerifyHostname(void) { return S3fsCurl::ssl_verify_hostname; }
|
||||
// maximum parallel GET and PUT requests
|
||||
static int SetMaxParallelCount(int value);
|
||||
static int GetMaxParallelCount(void) { return S3fsCurl::max_parallel_cnt; }
|
||||
// maximum parallel HEAD requests
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsCurl::max_multireq; }
|
||||
static bool SetIsECS(bool flag);
|
||||
static bool SetIsIBMIAMAuth(bool flag);
|
||||
static size_t SetIAMFieldCount(size_t field_count);
|
||||
@ -404,12 +426,12 @@ class S3fsCurl
|
||||
static void InitUserAgent(void);
|
||||
|
||||
// methods
|
||||
bool CreateCurlHandle(bool force = false);
|
||||
bool DestroyCurlHandle(bool force = false);
|
||||
bool CreateCurlHandle(bool only_pool = false, bool remake = false);
|
||||
bool DestroyCurlHandle(bool restore_pool = true, bool clear_internal_data = true);
|
||||
|
||||
bool LoadIAMRoleFromMetaData(void);
|
||||
bool AddSseRequestHead(sse_type_t ssetype, std::string& ssevalue, bool is_only_c, bool is_copy);
|
||||
bool GetResponseCode(long& responseCode);
|
||||
bool GetResponseCode(long& responseCode, bool from_curl_handle = true);
|
||||
int RequestPerform(void);
|
||||
int DeleteRequest(const char* tpath);
|
||||
bool PreHeadRequest(const char* tpath, const char* bpath = NULL, const char* savedpath = NULL, int ssekey_pos = -1);
|
||||
@ -459,21 +481,24 @@ class S3fsCurl
|
||||
//----------------------------------------------
|
||||
// Class for lapping multi curl
|
||||
//
|
||||
typedef std::map<CURL*, S3fsCurl*> s3fscurlmap_t;
|
||||
typedef std::vector<S3fsCurl*> s3fscurllist_t;
|
||||
typedef bool (*S3fsMultiSuccessCallback)(S3fsCurl* s3fscurl); // callback for succeed multi request
|
||||
typedef S3fsCurl* (*S3fsMultiRetryCallback)(S3fsCurl* s3fscurl); // callback for failure and retrying
|
||||
|
||||
class S3fsMultiCurl
|
||||
{
|
||||
private:
|
||||
static int max_multireq;
|
||||
const int maxParallelism;
|
||||
|
||||
s3fscurlmap_t cMap_all; // all of curl requests
|
||||
s3fscurlmap_t cMap_req; // curl requests are sent
|
||||
s3fscurllist_t clist_all; // all of curl requests
|
||||
s3fscurllist_t clist_req; // curl requests are sent
|
||||
|
||||
S3fsMultiSuccessCallback SuccessCallback;
|
||||
S3fsMultiRetryCallback RetryCallback;
|
||||
|
||||
pthread_mutex_t completed_tids_lock;
|
||||
std::vector<pthread_t> completed_tids;
|
||||
|
||||
private:
|
||||
bool ClearEx(bool is_all);
|
||||
int MultiPerform(void);
|
||||
@ -482,11 +507,10 @@ class S3fsMultiCurl
|
||||
static void* RequestPerformWrapper(void* arg);
|
||||
|
||||
public:
|
||||
S3fsMultiCurl();
|
||||
explicit S3fsMultiCurl(int maxParallelism);
|
||||
~S3fsMultiCurl();
|
||||
|
||||
static int SetMaxMultiRequest(int max);
|
||||
static int GetMaxMultiRequest(void) { return S3fsMultiCurl::max_multireq; }
|
||||
int GetMaxParallelism() { return maxParallelism; }
|
||||
|
||||
S3fsMultiSuccessCallback SetSuccessCallback(S3fsMultiSuccessCallback function);
|
||||
S3fsMultiRetryCallback SetRetryCallback(S3fsMultiRetryCallback function);
|
||||
|
||||
152
src/fdcache.cpp
152
src/fdcache.cpp
@ -30,7 +30,6 @@
|
||||
#include <syslog.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <dirent.h>
|
||||
#include <curl/curl.h>
|
||||
#include <string>
|
||||
@ -89,7 +88,7 @@ bool CacheFileStat::MakeCacheFileStatPath(const char* path, string& sfile_path,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CacheFileStat::CheckCacheFileStatTopDir(void)
|
||||
bool CacheFileStat::CheckCacheFileStatTopDir()
|
||||
{
|
||||
if(!FdManager::IsCacheDir()){
|
||||
return true;
|
||||
@ -129,7 +128,7 @@ bool CacheFileStat::DeleteCacheFileStat(const char* path)
|
||||
// If remove stat file directory, it should do before removing
|
||||
// file cache directory.
|
||||
//
|
||||
bool CacheFileStat::DeleteCacheFileStatDirectory(void)
|
||||
bool CacheFileStat::DeleteCacheFileStatDirectory()
|
||||
{
|
||||
string top_path = FdManager::GetCacheDir();
|
||||
|
||||
@ -175,9 +174,9 @@ bool CacheFileStat::SetPath(const char* tpath, bool is_open)
|
||||
return Open();
|
||||
}
|
||||
|
||||
bool CacheFileStat::Open(void)
|
||||
bool CacheFileStat::Open()
|
||||
{
|
||||
if(0 == path.size()){
|
||||
if(path.empty()){
|
||||
return false;
|
||||
}
|
||||
if(-1 != fd){
|
||||
@ -215,7 +214,7 @@ bool CacheFileStat::Open(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CacheFileStat::Release(void)
|
||||
bool CacheFileStat::Release()
|
||||
{
|
||||
if(-1 == fd){
|
||||
// already release
|
||||
@ -258,7 +257,7 @@ PageList::~PageList()
|
||||
Clear();
|
||||
}
|
||||
|
||||
void PageList::Clear(void)
|
||||
void PageList::Clear()
|
||||
{
|
||||
PageList::FreeList(pages);
|
||||
}
|
||||
@ -271,7 +270,7 @@ bool PageList::Init(size_t size, bool is_loaded)
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t PageList::Size(void) const
|
||||
size_t PageList::Size() const
|
||||
{
|
||||
if(pages.empty()){
|
||||
return 0;
|
||||
@ -280,7 +279,7 @@ size_t PageList::Size(void) const
|
||||
return static_cast<size_t>((*riter)->next());
|
||||
}
|
||||
|
||||
bool PageList::Compress(void)
|
||||
bool PageList::Compress()
|
||||
{
|
||||
bool is_first = true;
|
||||
bool is_last_loaded = false;
|
||||
@ -504,7 +503,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
//
|
||||
// put to file
|
||||
//
|
||||
stringstream ssall;
|
||||
ostringstream ssall;
|
||||
ssall << Size();
|
||||
|
||||
for(fdpage_list_t::iterator iter = pages.begin(); iter != pages.end(); ++iter){
|
||||
@ -544,8 +543,8 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
free(ptmp);
|
||||
return false;
|
||||
}
|
||||
string oneline;
|
||||
stringstream ssall(ptmp);
|
||||
string oneline;
|
||||
istringstream ssall(ptmp);
|
||||
|
||||
// loaded
|
||||
Clear();
|
||||
@ -561,8 +560,8 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
// load each part
|
||||
bool is_err = false;
|
||||
while(getline(ssall, oneline, '\n')){
|
||||
string part;
|
||||
stringstream ssparts(oneline);
|
||||
string part;
|
||||
istringstream ssparts(oneline);
|
||||
// offset
|
||||
if(!getline(ssparts, part, ':')){
|
||||
is_err = true;
|
||||
@ -601,7 +600,7 @@ bool PageList::Serialize(CacheFileStat& file, bool is_output)
|
||||
return true;
|
||||
}
|
||||
|
||||
void PageList::Dump(void)
|
||||
void PageList::Dump()
|
||||
{
|
||||
int cnt = 0;
|
||||
|
||||
@ -661,12 +660,12 @@ FdEntity::~FdEntity()
|
||||
}
|
||||
}
|
||||
|
||||
void FdEntity::Clear(void)
|
||||
void FdEntity::Clear()
|
||||
{
|
||||
AutoLock auto_lock(&fdent_lock);
|
||||
|
||||
if(-1 != fd){
|
||||
if(0 != cachepath.size()){
|
||||
if(!cachepath.empty()){
|
||||
CacheFileStat cfstat(path.c_str());
|
||||
if(!pagelist.Serialize(cfstat, true)){
|
||||
S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str());
|
||||
@ -692,7 +691,7 @@ void FdEntity::Clear(void)
|
||||
is_modify = false;
|
||||
}
|
||||
|
||||
void FdEntity::Close(void)
|
||||
void FdEntity::Close()
|
||||
{
|
||||
S3FS_PRN_DBG("[path=%s][fd=%d][refcnt=%d]", path.c_str(), fd, (-1 != fd ? refcnt - 1 : refcnt));
|
||||
|
||||
@ -701,9 +700,12 @@ void FdEntity::Close(void)
|
||||
|
||||
if(0 < refcnt){
|
||||
refcnt--;
|
||||
}else{
|
||||
S3FS_PRN_EXIT("reference count underflow");
|
||||
abort();
|
||||
}
|
||||
if(0 == refcnt){
|
||||
if(0 != cachepath.size()){
|
||||
if(!cachepath.empty()){
|
||||
CacheFileStat cfstat(path.c_str());
|
||||
if(!pagelist.Serialize(cfstat, true)){
|
||||
S3FS_PRN_WARN("failed to save cache stat file(%s).", path.c_str());
|
||||
@ -739,7 +741,7 @@ int FdEntity::Dup()
|
||||
//
|
||||
// Open mirror file which is linked cache file.
|
||||
//
|
||||
int FdEntity::OpenMirrorFile(void)
|
||||
int FdEntity::OpenMirrorFile()
|
||||
{
|
||||
if(cachepath.empty()){
|
||||
S3FS_PRN_ERR("cache path is empty, why come here");
|
||||
@ -812,11 +814,17 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time, bool no_fd_lock_
|
||||
// truncate temporary file size
|
||||
if(-1 == ftruncate(fd, static_cast<size_t>(size))){
|
||||
S3FS_PRN_ERR("failed to truncate temporary file(%d) by errno(%d).", fd, errno);
|
||||
if(0 < refcnt){
|
||||
refcnt--;
|
||||
}
|
||||
return -EIO;
|
||||
}
|
||||
// resize page list
|
||||
if(!pagelist.Resize(static_cast<size_t>(size), false)){
|
||||
S3FS_PRN_ERR("failed to truncate temporary file information(%d).", fd);
|
||||
if(0 < refcnt){
|
||||
refcnt--;
|
||||
}
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
@ -835,7 +843,7 @@ int FdEntity::Open(headers_t* pmeta, ssize_t size, time_t time, bool no_fd_lock_
|
||||
bool need_save_csf = false; // need to save(reset) cache stat file
|
||||
bool is_truncate = false; // need to truncate
|
||||
|
||||
if(0 != cachepath.size()){
|
||||
if(!cachepath.empty()){
|
||||
// using cache
|
||||
|
||||
// open cache and cache stat file, load page info.
|
||||
@ -1040,7 +1048,7 @@ int FdEntity::SetMtime(time_t time)
|
||||
S3FS_PRN_ERR("futimes failed. errno(%d)", errno);
|
||||
return -errno;
|
||||
}
|
||||
}else if(0 < cachepath.size()){
|
||||
}else if(!cachepath.empty()){
|
||||
// not opened file yet.
|
||||
struct utimbuf n_mtime;
|
||||
n_mtime.modtime = time;
|
||||
@ -1050,18 +1058,31 @@ int FdEntity::SetMtime(time_t time)
|
||||
return -errno;
|
||||
}
|
||||
}
|
||||
orgmeta["x-amz-meta-ctime"] = str(time);
|
||||
orgmeta["x-amz-meta-mtime"] = str(time);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool FdEntity::UpdateMtime(void)
|
||||
bool FdEntity::UpdateCtime()
|
||||
{
|
||||
AutoLock auto_lock(&fdent_lock);
|
||||
struct stat st;
|
||||
if(!GetStats(st)){
|
||||
return false;
|
||||
}
|
||||
orgmeta["x-amz-meta-ctime"] = str(st.st_ctime);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FdEntity::UpdateMtime()
|
||||
{
|
||||
AutoLock auto_lock(&fdent_lock);
|
||||
struct stat st;
|
||||
if(!GetStats(st)){
|
||||
return false;
|
||||
}
|
||||
orgmeta["x-amz-meta-ctime"] = str(st.st_ctime);
|
||||
orgmeta["x-amz-meta-mtime"] = str(st.st_mtime);
|
||||
return true;
|
||||
}
|
||||
@ -1225,7 +1246,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
|
||||
// [NOTE]
|
||||
// This method calling means that the cache file is never used no more.
|
||||
//
|
||||
if(0 != cachepath.size()){
|
||||
if(!cachepath.empty()){
|
||||
// remove cache files(and cache stat file)
|
||||
FdManager::DeleteCacheFile(path.c_str());
|
||||
// cache file path does not use no more.
|
||||
@ -1378,7 +1399,7 @@ int FdEntity::NoCacheLoadAndPost(off_t start, size_t size)
|
||||
// At no disk space for caching object.
|
||||
// This method is starting multipart uploading.
|
||||
//
|
||||
int FdEntity::NoCachePreMultipartPost(void)
|
||||
int FdEntity::NoCachePreMultipartPost()
|
||||
{
|
||||
// initialize multipart upload values
|
||||
upload_id.erase();
|
||||
@ -1411,7 +1432,7 @@ int FdEntity::NoCacheMultipartPost(int tgfd, off_t start, size_t size)
|
||||
// At no disk space for caching object.
|
||||
// This method is finishing multipart uploading.
|
||||
//
|
||||
int FdEntity::NoCacheCompleteMultipartPost(void)
|
||||
int FdEntity::NoCacheCompleteMultipartPost()
|
||||
{
|
||||
if(upload_id.empty() || etaglist.empty()){
|
||||
S3FS_PRN_ERR("There is no upload id or etag list.");
|
||||
@ -1459,7 +1480,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
|
||||
// enough disk space
|
||||
// Load all uninitialized area
|
||||
result = Load();
|
||||
FdManager::get()->FreeReservedDiskSpace(restsize);
|
||||
FdManager::FreeReservedDiskSpace(restsize);
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("failed to upload all area(errno=%d)", result);
|
||||
return static_cast<ssize_t>(result);
|
||||
@ -1496,6 +1517,7 @@ int FdEntity::RowFlush(const char* tpath, bool force_sync)
|
||||
*/
|
||||
if(pagelist.Size() > static_cast<size_t>(MAX_MULTIPART_CNT * S3fsCurl::GetMultipartSize())){
|
||||
// close f ?
|
||||
S3FS_PRN_ERR("Part count exceeds %d. Increase multipart size and try again.", MAX_MULTIPART_CNT);
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
@ -1633,7 +1655,7 @@ ssize_t FdEntity::Read(char* bytes, off_t start, size_t size, bool force_load)
|
||||
result = Load(start, load_size);
|
||||
}
|
||||
|
||||
FdManager::get()->FreeReservedDiskSpace(load_size);
|
||||
FdManager::FreeReservedDiskSpace(load_size);
|
||||
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("could not download. start(%jd), size(%zu), errno(%d)", (intmax_t)start, size, result);
|
||||
@ -1685,7 +1707,7 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
|
||||
if(0 < start){
|
||||
result = Load(0, static_cast<size_t>(start));
|
||||
}
|
||||
FdManager::get()->FreeReservedDiskSpace(restsize);
|
||||
FdManager::FreeReservedDiskSpace(restsize);
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("failed to load uninitialized area before writing(errno=%d)", result);
|
||||
return static_cast<ssize_t>(result);
|
||||
@ -1720,6 +1742,15 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
|
||||
pagelist.SetPageLoadedStatus(start, static_cast<size_t>(wsize), true);
|
||||
}
|
||||
|
||||
// Load uninitialized area which starts from (start + size) to EOF after writing.
|
||||
if(pagelist.Size() > static_cast<size_t>(start) + size){
|
||||
result = Load(static_cast<size_t>(start + size), pagelist.Size());
|
||||
if(0 != result){
|
||||
S3FS_PRN_ERR("failed to load uninitialized area after writing(errno=%d)", result);
|
||||
return static_cast<ssize_t>(result);
|
||||
}
|
||||
}
|
||||
|
||||
// check multipart uploading
|
||||
if(0 < upload_id.length()){
|
||||
mp_size += static_cast<size_t>(wsize);
|
||||
@ -1753,7 +1784,7 @@ void FdEntity::CleanupCache()
|
||||
}
|
||||
|
||||
if (is_modify) {
|
||||
// cache is not commited to s3, cannot cleanup
|
||||
// cache is not committed to s3, cannot cleanup
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1786,7 +1817,7 @@ pthread_mutex_t FdManager::fd_manager_lock;
|
||||
pthread_mutex_t FdManager::cache_cleanup_lock;
|
||||
pthread_mutex_t FdManager::reserved_diskspace_lock;
|
||||
bool FdManager::is_lock_init(false);
|
||||
string FdManager::cache_dir("");
|
||||
string FdManager::cache_dir;
|
||||
bool FdManager::check_cache_dir_exist(false);
|
||||
size_t FdManager::free_disk_space = 0;
|
||||
|
||||
@ -1803,16 +1834,26 @@ bool FdManager::SetCacheDir(const char* dir)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FdManager::DeleteCacheDirectory(void)
|
||||
bool FdManager::DeleteCacheDirectory()
|
||||
{
|
||||
if(0 == FdManager::cache_dir.size()){
|
||||
if(FdManager::cache_dir.empty()){
|
||||
return true;
|
||||
}
|
||||
string cache_dir;
|
||||
if(!FdManager::MakeCachePath(NULL, cache_dir, false)){
|
||||
|
||||
string cache_path;
|
||||
if(!FdManager::MakeCachePath(NULL, cache_path, false)){
|
||||
return false;
|
||||
}
|
||||
return delete_files_in_dir(cache_dir.c_str(), true);
|
||||
if(!delete_files_in_dir(cache_path.c_str(), true)){
|
||||
return false;
|
||||
}
|
||||
|
||||
string mirror_path = FdManager::cache_dir + "/." + bucket + ".mirror";
|
||||
if(!delete_files_in_dir(mirror_path.c_str(), true)){
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int FdManager::DeleteCacheFile(const char* path)
|
||||
@ -1822,10 +1863,10 @@ int FdManager::DeleteCacheFile(const char* path)
|
||||
if(!path){
|
||||
return -EIO;
|
||||
}
|
||||
if(0 == FdManager::cache_dir.size()){
|
||||
if(FdManager::cache_dir.empty()){
|
||||
return 0;
|
||||
}
|
||||
string cache_path = "";
|
||||
string cache_path;
|
||||
if(!FdManager::MakeCachePath(path, cache_path, false)){
|
||||
return 0;
|
||||
}
|
||||
@ -1855,7 +1896,7 @@ int FdManager::DeleteCacheFile(const char* path)
|
||||
|
||||
bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_create_dir, bool is_mirror_path)
|
||||
{
|
||||
if(0 == FdManager::cache_dir.size()){
|
||||
if(FdManager::cache_dir.empty()){
|
||||
cache_path = "";
|
||||
return true;
|
||||
}
|
||||
@ -1885,9 +1926,9 @@ bool FdManager::MakeCachePath(const char* path, string& cache_path, bool is_crea
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FdManager::CheckCacheTopDir(void)
|
||||
bool FdManager::CheckCacheTopDir()
|
||||
{
|
||||
if(0 == FdManager::cache_dir.size()){
|
||||
if(FdManager::cache_dir.empty()){
|
||||
return true;
|
||||
}
|
||||
string toppath(FdManager::cache_dir + "/" + bucket);
|
||||
@ -1912,12 +1953,12 @@ bool FdManager::SetCheckCacheDirExist(bool is_check)
|
||||
return old;
|
||||
}
|
||||
|
||||
bool FdManager::CheckCacheDirExist(void)
|
||||
bool FdManager::CheckCacheDirExist()
|
||||
{
|
||||
if(!FdManager::check_cache_dir_exist){
|
||||
return true;
|
||||
}
|
||||
if(0 == FdManager::cache_dir.size()){
|
||||
if(FdManager::cache_dir.empty()){
|
||||
return true;
|
||||
}
|
||||
// check the directory
|
||||
@ -1944,7 +1985,7 @@ uint64_t FdManager::GetFreeDiskSpace(const char* path)
|
||||
{
|
||||
struct statvfs vfsbuf;
|
||||
string ctoppath;
|
||||
if(0 < FdManager::cache_dir.size()){
|
||||
if(!FdManager::cache_dir.empty()){
|
||||
ctoppath = FdManager::cache_dir + "/";
|
||||
ctoppath = get_exist_directory_path(ctoppath); // existed directory
|
||||
if(ctoppath != "/"){
|
||||
@ -1987,7 +2028,7 @@ FdManager::FdManager()
|
||||
S3FS_PRN_CRIT("failed to init mutex");
|
||||
}
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -2011,7 +2052,7 @@ FdManager::~FdManager()
|
||||
FdManager::is_lock_init = false;
|
||||
}
|
||||
}else{
|
||||
assert(false);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
@ -2026,6 +2067,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
|
||||
|
||||
fdent_map_t::iterator iter = fent.find(string(path));
|
||||
if(fent.end() != iter && (-1 == existfd || (*iter).second->GetFd() == existfd)){
|
||||
iter->second->Dup();
|
||||
return (*iter).second;
|
||||
}
|
||||
|
||||
@ -2034,6 +2076,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
|
||||
if((*iter).second && (*iter).second->GetFd() == existfd){
|
||||
// found opened fd in map
|
||||
if(0 == strcmp((*iter).second->GetPath(), path)){
|
||||
iter->second->Dup();
|
||||
return (*iter).second;
|
||||
}
|
||||
// found fd, but it is used another file(file descriptor is recycled)
|
||||
@ -2052,6 +2095,7 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
|
||||
if(!path || '\0' == path[0]){
|
||||
return NULL;
|
||||
}
|
||||
bool close = false;
|
||||
FdEntity* ent;
|
||||
{
|
||||
AutoLock auto_lock(&FdManager::fd_manager_lock);
|
||||
@ -2075,10 +2119,12 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
|
||||
if(fent.end() != iter){
|
||||
// found
|
||||
ent = (*iter).second;
|
||||
ent->Dup();
|
||||
close = true;
|
||||
|
||||
}else if(is_create){
|
||||
// not found
|
||||
string cache_path = "";
|
||||
string cache_path;
|
||||
if(!force_tmpfile && !FdManager::MakeCachePath(path, cache_path, true)){
|
||||
S3FS_PRN_ERR("failed to make cache path for object(%s).", path);
|
||||
return NULL;
|
||||
@ -2086,7 +2132,7 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
|
||||
// make new obj
|
||||
ent = new FdEntity(path, cache_path.c_str());
|
||||
|
||||
if(0 < cache_path.size()){
|
||||
if(!cache_path.empty()){
|
||||
// using cache
|
||||
fent[string(path)] = ent;
|
||||
}else{
|
||||
@ -2097,7 +2143,7 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
|
||||
// The reason why this process here, please look at the definition of the
|
||||
// comments of NOCACHE_PATH_PREFIX_FORM symbol.
|
||||
//
|
||||
string tmppath("");
|
||||
string tmppath;
|
||||
FdManager::MakeRandomTempPath(path, tmppath);
|
||||
fent[tmppath] = ent;
|
||||
}
|
||||
@ -2108,8 +2154,14 @@ FdEntity* FdManager::Open(const char* path, headers_t* pmeta, ssize_t size, time
|
||||
|
||||
// open
|
||||
if(0 != ent->Open(pmeta, size, time, no_fd_lock_wait)){
|
||||
if(close){
|
||||
ent->Close();
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
if(close){
|
||||
ent->Close();
|
||||
}
|
||||
return ent;
|
||||
}
|
||||
|
||||
@ -2196,7 +2248,7 @@ bool FdManager::ChangeEntityToTempPath(FdEntity* ent, const char* path)
|
||||
if((*iter).second == ent){
|
||||
fent.erase(iter++);
|
||||
|
||||
string tmppath("");
|
||||
string tmppath;
|
||||
FdManager::MakeRandomTempPath(path, tmppath);
|
||||
fent[tmppath] = ent;
|
||||
}else{
|
||||
|
||||
@ -155,6 +155,7 @@ class FdEntity
|
||||
|
||||
bool GetStats(struct stat& st);
|
||||
int SetMtime(time_t time);
|
||||
bool UpdateCtime(void);
|
||||
bool UpdateMtime(void);
|
||||
bool GetSize(size_t& size);
|
||||
bool SetMode(mode_t mode);
|
||||
@ -224,6 +225,7 @@ class FdManager
|
||||
static void FreeReservedDiskSpace(size_t size);
|
||||
bool ReserveDiskSpace(size_t size);
|
||||
|
||||
// Return FdEntity associated with path, returning NULL on error. This operation increments the reference count; callers must decrement via Close after use.
|
||||
FdEntity* GetFdEntity(const char* path, int existfd = -1);
|
||||
FdEntity* Open(const char* path, headers_t* pmeta = NULL, ssize_t size = -1, time_t time = -1, bool force_tmpfile = false, bool is_create = true, bool no_fd_lock_wait = false);
|
||||
FdEntity* ExistOpen(const char* path, int existfd = -1, bool ignore_existfd = false);
|
||||
|
||||
@ -57,7 +57,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
|
||||
#else // USE_GNUTLS_NETTLE
|
||||
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "GnuTLS(gcrypt)";
|
||||
|
||||
@ -69,7 +69,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
if(GNUTLS_E_SUCCESS != gnutls_global_init()){
|
||||
return false;
|
||||
@ -82,7 +82,7 @@ bool s3fs_init_global_ssl(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
gnutls_global_deinit();
|
||||
return true;
|
||||
@ -91,12 +91,12 @@ bool s3fs_destroy_global_ssl(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@ -191,7 +191,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return 16;
|
||||
}
|
||||
@ -303,7 +303,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
@ -370,8 +370,8 @@ unsigned char* s3fs_sha256hexsum(int fd, off_t start, ssize_t size)
|
||||
|
||||
bool s3fs_sha256(const unsigned char* data, unsigned int datalen, unsigned char** digest, unsigned int* digestlen)
|
||||
{
|
||||
(*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(*digestlen)))){
|
||||
size_t len = (*digestlen) = static_cast<unsigned int>(get_sha256_digest_length());
|
||||
if(NULL == ((*digest) = reinterpret_cast<unsigned char*>(malloc(len)))){
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@ -42,7 +42,7 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "NSS";
|
||||
|
||||
@ -52,7 +52,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
|
||||
|
||||
@ -63,7 +63,7 @@ bool s3fs_init_global_ssl(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
NSS_Shutdown();
|
||||
PL_ArenaFinish();
|
||||
@ -74,12 +74,12 @@ bool s3fs_destroy_global_ssl(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for crypt lock
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
@ -96,7 +96,6 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
PK11SlotInfo* Slot;
|
||||
PK11SymKey* pKey;
|
||||
PK11Context* Context;
|
||||
SECStatus SecStatus;
|
||||
unsigned char tmpdigest[64];
|
||||
SECItem KeySecItem = {siBuffer, reinterpret_cast<unsigned char*>(const_cast<void*>(key)), static_cast<unsigned int>(keylen)};
|
||||
SECItem NullSecItem = {siBuffer, NULL, 0};
|
||||
@ -115,9 +114,9 @@ static bool s3fs_HMAC_RAW(const void* key, size_t keylen, const unsigned char* d
|
||||
}
|
||||
|
||||
*digestlen = 0;
|
||||
if(SECSuccess != (SecStatus = PK11_DigestBegin(Context)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestOp(Context, data, datalen)) ||
|
||||
SECSuccess != (SecStatus = PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest))) )
|
||||
if(SECSuccess != PK11_DigestBegin(Context) ||
|
||||
SECSuccess != PK11_DigestOp(Context, data, datalen) ||
|
||||
SECSuccess != PK11_DigestFinal(Context, tmpdigest, digestlen, sizeof(tmpdigest)) )
|
||||
{
|
||||
PK11_DestroyContext(Context, PR_TRUE);
|
||||
PK11_FreeSymKey(pKey);
|
||||
@ -149,7 +148,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_LENGTH;
|
||||
}
|
||||
@ -211,7 +210,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_LENGTH;
|
||||
}
|
||||
|
||||
@ -46,7 +46,7 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for version
|
||||
//-------------------------------------------------------------------
|
||||
const char* s3fs_crypt_lib_name(void)
|
||||
const char* s3fs_crypt_lib_name()
|
||||
{
|
||||
static const char version[] = "OpenSSL";
|
||||
|
||||
@ -56,7 +56,7 @@ const char* s3fs_crypt_lib_name(void)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for global init
|
||||
//-------------------------------------------------------------------
|
||||
bool s3fs_init_global_ssl(void)
|
||||
bool s3fs_init_global_ssl()
|
||||
{
|
||||
ERR_load_crypto_strings();
|
||||
ERR_load_BIO_strings();
|
||||
@ -64,7 +64,7 @@ bool s3fs_init_global_ssl(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_global_ssl(void)
|
||||
bool s3fs_destroy_global_ssl()
|
||||
{
|
||||
EVP_cleanup();
|
||||
ERR_free_strings();
|
||||
@ -93,7 +93,7 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long s3fs_crypt_get_threadid(void)
|
||||
static unsigned long s3fs_crypt_get_threadid()
|
||||
{
|
||||
// For FreeBSD etc, some system's pthread_t is structure pointer.
|
||||
// Then we use cast like C style(not C++) instead of ifdef.
|
||||
@ -131,7 +131,7 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
|
||||
}
|
||||
}
|
||||
|
||||
bool s3fs_init_crypt_mutex(void)
|
||||
bool s3fs_init_crypt_mutex()
|
||||
{
|
||||
if(s3fs_crypt_mutex){
|
||||
S3FS_PRN_DBG("s3fs_crypt_mutex is not NULL, destroy it.");
|
||||
@ -158,7 +158,7 @@ bool s3fs_init_crypt_mutex(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool s3fs_destroy_crypt_mutex(void)
|
||||
bool s3fs_destroy_crypt_mutex()
|
||||
{
|
||||
if(!s3fs_crypt_mutex){
|
||||
return true;
|
||||
@ -214,7 +214,7 @@ bool s3fs_HMAC256(const void* key, size_t keylen, const unsigned char* data, siz
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for MD5
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_md5_digest_length(void)
|
||||
size_t get_md5_digest_length()
|
||||
{
|
||||
return MD5_DIGEST_LENGTH;
|
||||
}
|
||||
@ -273,7 +273,7 @@ unsigned char* s3fs_md5hexsum(int fd, off_t start, ssize_t size)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility Function for SHA256
|
||||
//-------------------------------------------------------------------
|
||||
size_t get_sha256_digest_length(void)
|
||||
size_t get_sha256_digest_length()
|
||||
{
|
||||
return SHA256_DIGEST_LENGTH;
|
||||
}
|
||||
|
||||
75
src/psemaphore.h
Normal file
75
src/psemaphore.h
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* s3fs - FUSE-based file system backed by Amazon S3
|
||||
*
|
||||
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef S3FS_SEMAPHORE_H_
|
||||
#define S3FS_SEMAPHORE_H_
|
||||
|
||||
// portability wrapper for sem_t since macOS does not implement it
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <dispatch/dispatch.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value), sem(dispatch_semaphore_create(value)) {}
|
||||
~Semaphore() {
|
||||
// macOS cannot destroy a semaphore with posts less than the initializer
|
||||
for(int i = 0; i < get_value(); ++i){
|
||||
post();
|
||||
}
|
||||
dispatch_release(sem);
|
||||
}
|
||||
void wait() { dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); }
|
||||
void post() { dispatch_semaphore_signal(sem); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
dispatch_semaphore_t sem;
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
#include <errno.h>
|
||||
#include <semaphore.h>
|
||||
|
||||
class Semaphore
|
||||
{
|
||||
public:
|
||||
explicit Semaphore(int value) : value(value) { sem_init(&mutex, 0, value); }
|
||||
~Semaphore() { sem_destroy(&mutex); }
|
||||
void wait()
|
||||
{
|
||||
int r;
|
||||
do {
|
||||
r = sem_wait(&mutex);
|
||||
} while (r == -1 && errno == EINTR);
|
||||
}
|
||||
void post() { sem_post(&mutex); }
|
||||
int get_value() const { return value; }
|
||||
private:
|
||||
const int value;
|
||||
sem_t mutex;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif // S3FS_SEMAPHORE_H_
|
||||
699
src/s3fs.cpp
699
src/s3fs.cpp
File diff suppressed because it is too large
Load Diff
47
src/s3fs.h
47
src/s3fs.h
@ -33,25 +33,32 @@ static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
|
||||
} \
|
||||
}
|
||||
|
||||
// [NOTE]
|
||||
// s3fs use many small allocated chunk in heap area for stats
|
||||
// cache and parsing xml, etc. The OS may decide that giving
|
||||
// this little memory back to the kernel will cause too much
|
||||
// overhead and delay the operation.
|
||||
// Address of gratitude, this workaround quotes a document of
|
||||
// libxml2.( http://xmlsoft.org/xmlmem.html )
|
||||
//
|
||||
// s3fs use many small allocated chunk in heap area for
|
||||
// stats cache and parsing xml, etc. The OS may decide
|
||||
// that giving this little memory back to the kernel
|
||||
// will cause too much overhead and delay the operation.
|
||||
// So s3fs calls malloc_trim function to really get the
|
||||
// memory back. Following macros is prepared for that
|
||||
// your system does not have it.
|
||||
//
|
||||
// Address of gratitude, this workaround quotes a document
|
||||
// of libxml2.
|
||||
// http://xmlsoft.org/xmlmem.html
|
||||
// When valgrind is used to test memory leak of s3fs, a large
|
||||
// amount of chunk may be reported. You can check the memory
|
||||
// release accurately by defining the S3FS_MALLOC_TRIM flag
|
||||
// and building it. Also, when executing s3fs, you can define
|
||||
// the MMAP_THRESHOLD environment variable and check more
|
||||
// accurate memory leak.( see, man 3 free )
|
||||
//
|
||||
#ifdef S3FS_MALLOC_TRIM
|
||||
#ifdef HAVE_MALLOC_TRIM
|
||||
|
||||
#include <malloc.h>
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
#else // S3FS_MALLOC_TRIM
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#endif // S3FS_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str)
|
||||
#define S3FS_MALLOCTRIM(pad) malloc_trim(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) \
|
||||
{ \
|
||||
xmlFreeDoc(doc); \
|
||||
@ -73,18 +80,6 @@ static const int64_t FIVE_GB = 5LL * 1024LL * 1024LL * 1024LL;
|
||||
S3FS_MALLOCTRIM(0); \
|
||||
}
|
||||
|
||||
#else // HAVE_MALLOC_TRIM
|
||||
|
||||
#define DISPWARN_MALLOCTRIM(str) \
|
||||
fprintf(stderr, "Warning: %s without malloc_trim is possibility of the use memory increase.\n", program_name.c_str())
|
||||
#define S3FS_MALLOCTRIM(pad)
|
||||
#define S3FS_XMLFREEDOC(doc) xmlFreeDoc(doc)
|
||||
#define S3FS_XMLFREE(ptr) xmlFree(ptr)
|
||||
#define S3FS_XMLXPATHFREECONTEXT(ctx) xmlXPathFreeContext(ctx)
|
||||
#define S3FS_XMLXPATHFREEOBJECT(obj) xmlXPathFreeObject(obj)
|
||||
|
||||
#endif // HAVE_MALLOC_TRIM
|
||||
|
||||
#endif // S3FS_S3_H_
|
||||
|
||||
/*
|
||||
|
||||
@ -48,7 +48,7 @@ using namespace std;
|
||||
//-------------------------------------------------------------------
|
||||
// Global variables
|
||||
//-------------------------------------------------------------------
|
||||
std::string mount_prefix = "";
|
||||
std::string mount_prefix;
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Utility
|
||||
@ -150,10 +150,10 @@ bool S3ObjList::insert_normalized(const char* name, const char* normalized, bool
|
||||
s3obj_t::iterator iter;
|
||||
if(objects.end() != (iter = objects.find(name))){
|
||||
// found name --> over write
|
||||
(*iter).second.orgname.erase();
|
||||
(*iter).second.etag.erase();
|
||||
(*iter).second.normalname = normalized;
|
||||
(*iter).second.is_dir = is_dir;
|
||||
iter->second.orgname.erase();
|
||||
iter->second.etag.erase();
|
||||
iter->second.normalname = normalized;
|
||||
iter->second.is_dir = is_dir;
|
||||
}else{
|
||||
// not found --> add new object
|
||||
s3obj_entry newobject;
|
||||
@ -259,7 +259,7 @@ bool S3ObjList::GetNameList(s3obj_list_t& list, bool OnlyNormalized, bool CutSla
|
||||
}
|
||||
string name = (*iter).first;
|
||||
if(CutSlash && 1 < name.length() && '/' == name[name.length() - 1]){
|
||||
// only "/" string is skio this.
|
||||
// only "/" string is skipped this.
|
||||
name = name.substr(0, name.length() - 1);
|
||||
}
|
||||
list.push_back(name);
|
||||
@ -283,7 +283,7 @@ bool S3ObjList::MakeHierarchizedList(s3obj_list_t& list, bool haveSlash)
|
||||
h_map[strtmp] = true;
|
||||
|
||||
// check hierarchized directory
|
||||
for(string::size_type pos = strtmp.find_last_of("/"); string::npos != pos; pos = strtmp.find_last_of("/")){
|
||||
for(string::size_type pos = strtmp.find_last_of('/'); string::npos != pos; pos = strtmp.find_last_of('/')){
|
||||
strtmp = strtmp.substr(0, pos);
|
||||
if(0 == strtmp.length() || "/" == strtmp){
|
||||
break;
|
||||
@ -419,7 +419,6 @@ void free_mvnodes(MVNODE *head)
|
||||
free(my_head->new_path);
|
||||
free(my_head);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
@ -586,7 +585,7 @@ string mydirname(const char* path)
|
||||
return mydirname(string(path));
|
||||
}
|
||||
|
||||
string mydirname(string path)
|
||||
string mydirname(const string& path)
|
||||
{
|
||||
return string(dirname((char*)path.c_str()));
|
||||
}
|
||||
@ -601,7 +600,7 @@ string mybasename(const char* path)
|
||||
return mybasename(string(path));
|
||||
}
|
||||
|
||||
string mybasename(string path)
|
||||
string mybasename(const string& path)
|
||||
{
|
||||
return string(basename((char*)path.c_str()));
|
||||
}
|
||||
@ -609,9 +608,9 @@ string mybasename(string path)
|
||||
// mkdir --parents
|
||||
int mkdirp(const string& path, mode_t mode)
|
||||
{
|
||||
string base;
|
||||
string component;
|
||||
stringstream ss(path);
|
||||
string base;
|
||||
string component;
|
||||
istringstream ss(path);
|
||||
while (getline(ss, component, '/')) {
|
||||
base += "/" + component;
|
||||
|
||||
@ -632,10 +631,10 @@ int mkdirp(const string& path, mode_t mode)
|
||||
// get existed directory path
|
||||
string get_exist_directory_path(const string& path)
|
||||
{
|
||||
string existed("/"); // "/" is existed.
|
||||
string base;
|
||||
string component;
|
||||
stringstream ss(path);
|
||||
string existed("/"); // "/" is existed.
|
||||
string base;
|
||||
string component;
|
||||
istringstream ss(path);
|
||||
while (getline(ss, component, '/')) {
|
||||
if(base != "/"){
|
||||
base += "/";
|
||||
@ -661,7 +660,7 @@ bool check_exist_dir_permission(const char* dirpath)
|
||||
struct stat st;
|
||||
if(0 != stat(dirpath, &st)){
|
||||
if(ENOENT == errno){
|
||||
// dir does not exitst
|
||||
// dir does not exist
|
||||
return true;
|
||||
}
|
||||
if(EACCES == errno){
|
||||
@ -748,15 +747,29 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
|
||||
//-------------------------------------------------------------------
|
||||
// Utility functions for convert
|
||||
//-------------------------------------------------------------------
|
||||
time_t get_mtime(const char *s)
|
||||
time_t get_mtime(const char *str)
|
||||
{
|
||||
return static_cast<time_t>(s3fs_strtoofft(s));
|
||||
// [NOTE]
|
||||
// In rclone, there are cases where ns is set to x-amz-meta-mtime
|
||||
// with floating point number. s3fs uses x-amz-meta-mtime by
|
||||
// truncating the floating point or less (in seconds or less) to
|
||||
// correspond to this.
|
||||
//
|
||||
string strmtime;
|
||||
if(str && '\0' != *str){
|
||||
strmtime = str;
|
||||
string::size_type pos = strmtime.find('.', 0);
|
||||
if(string::npos != pos){
|
||||
strmtime = strmtime.substr(0, pos);
|
||||
}
|
||||
}
|
||||
return static_cast<time_t>(s3fs_strtoofft(strmtime.c_str()));
|
||||
}
|
||||
|
||||
time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
static time_t get_time(headers_t& meta, bool overcheck, const char *header)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-mtime"))){
|
||||
if(meta.end() == (iter = meta.find(header))){
|
||||
if(overcheck){
|
||||
return get_lastmodified(meta);
|
||||
}
|
||||
@ -765,6 +778,16 @@ time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
return get_mtime((*iter).second.c_str());
|
||||
}
|
||||
|
||||
time_t get_mtime(headers_t& meta, bool overcheck)
|
||||
{
|
||||
return get_time(meta, overcheck, "x-amz-meta-mtime");
|
||||
}
|
||||
|
||||
time_t get_ctime(headers_t& meta, bool overcheck)
|
||||
{
|
||||
return get_time(meta, overcheck, "x-amz-meta-ctime");
|
||||
}
|
||||
|
||||
off_t get_size(const char *s)
|
||||
{
|
||||
return s3fs_strtoofft(s);
|
||||
@ -792,11 +815,13 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-mode"))){
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}else{
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
|
||||
mode = get_mode((*iter).second.c_str());
|
||||
isS3sync = true;
|
||||
}
|
||||
// If another tool creates an object without permissions, default to owner
|
||||
// read-write and group readable.
|
||||
mode = path[strlen(path) - 1] == '/' ? 0750 : 0640;
|
||||
}
|
||||
// Checking the bitmask, if the last 3 bits are all zero then process as a regular
|
||||
// file type (S_IFDIR or S_IFREG), otherwise return mode unmodified so that S_IFIFO,
|
||||
@ -810,18 +835,19 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
|
||||
if(meta.end() != (iter = meta.find("Content-Type"))){
|
||||
string strConType = (*iter).second;
|
||||
// Leave just the mime type, remove any optional parameters (eg charset)
|
||||
string::size_type pos = strConType.find(";");
|
||||
string::size_type pos = strConType.find(';');
|
||||
if(string::npos != pos){
|
||||
strConType = strConType.substr(0, pos);
|
||||
}
|
||||
if(strConType == "application/x-directory"){
|
||||
if(strConType == "application/x-directory"
|
||||
|| strConType == "httpd/unix-directory"){ // Nextcloud uses this MIME type for directory objects when mounting bucket as external Storage
|
||||
mode |= S_IFDIR;
|
||||
}else if(path && 0 < strlen(path) && '/' == path[strlen(path) - 1]){
|
||||
if(strConType == "binary/octet-stream" || strConType == "application/octet-stream"){
|
||||
mode |= S_IFDIR;
|
||||
}else{
|
||||
if(complement_stat){
|
||||
// If complement lack stat mode, when the object has '/' charactor at end of name
|
||||
// If complement lack stat mode, when the object has '/' character at end of name
|
||||
// and content type is text/plain and the object's size is 0 or 1, it should be
|
||||
// directory.
|
||||
off_t size = get_size(meta);
|
||||
@ -866,12 +892,13 @@ uid_t get_uid(const char *s)
|
||||
uid_t get_uid(headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-uid"))){
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return 0;
|
||||
}
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-uid"))){
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
|
||||
return get_uid((*iter).second.c_str());
|
||||
}else{
|
||||
return geteuid();
|
||||
}
|
||||
return get_uid((*iter).second.c_str());
|
||||
}
|
||||
|
||||
gid_t get_gid(const char *s)
|
||||
@ -882,12 +909,13 @@ gid_t get_gid(const char *s)
|
||||
gid_t get_gid(headers_t& meta)
|
||||
{
|
||||
headers_t::const_iterator iter;
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-gid"))){
|
||||
if(meta.end() == (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return 0;
|
||||
}
|
||||
if(meta.end() != (iter = meta.find("x-amz-meta-gid"))){
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
|
||||
return get_gid((*iter).second.c_str());
|
||||
}else{
|
||||
return getegid();
|
||||
}
|
||||
return get_gid((*iter).second.c_str());
|
||||
}
|
||||
|
||||
blkcnt_t get_blocks(off_t size)
|
||||
@ -964,13 +992,13 @@ bool is_need_check_obj_detail(headers_t& meta)
|
||||
//-------------------------------------------------------------------
|
||||
// Help
|
||||
//-------------------------------------------------------------------
|
||||
void show_usage (void)
|
||||
void show_usage ()
|
||||
{
|
||||
printf("Usage: %s BUCKET:[PATH] MOUNTPOINT [OPTION]...\n",
|
||||
program_name.c_str());
|
||||
}
|
||||
|
||||
void show_help (void)
|
||||
void show_help ()
|
||||
{
|
||||
show_usage();
|
||||
printf(
|
||||
@ -985,13 +1013,14 @@ void show_help (void)
|
||||
" umounting\n"
|
||||
" umount mountpoint\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs -u bucket\n"
|
||||
"\n"
|
||||
" General forms for s3fs and FUSE/mount options:\n"
|
||||
" -o opt[,opt...]\n"
|
||||
" -o opt [-o opt] ...\n"
|
||||
"\n"
|
||||
" utility mode (remove interrupted multipart uploading objects)\n"
|
||||
" s3fs --incomplete-mpu-list(-u) bucket\n"
|
||||
" s3fs --incomplete-mpu-abort[=all | =<date format>] bucket\n"
|
||||
"\n"
|
||||
"s3fs Options:\n"
|
||||
"\n"
|
||||
" Most s3fs options are given in the form where \"opt\" is:\n"
|
||||
@ -1005,8 +1034,9 @@ void show_help (void)
|
||||
" default_acl (default=\"private\")\n"
|
||||
" - the default canned acl to apply to all written s3 objects,\n"
|
||||
" e.g., private, public-read. empty string means do not send\n"
|
||||
" header. see http://aws.amazon.com/documentation/s3/ for the\n"
|
||||
" full list of canned acls\n"
|
||||
" header. see\n"
|
||||
" https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n"
|
||||
" for the full list of canned acls\n"
|
||||
"\n"
|
||||
" retries (default=\"5\")\n"
|
||||
" - number of times to retry a failed s3 transaction\n"
|
||||
@ -1024,7 +1054,7 @@ void show_help (void)
|
||||
"\n"
|
||||
" storage_class (default=\"standard\")\n"
|
||||
" - store object with specified storage class. Possible values:\n"
|
||||
" standard, standard_ia, and reduced_redundancy.\n"
|
||||
" standard, standard_ia, onezone_ia and reduced_redundancy.\n"
|
||||
"\n"
|
||||
" use_sse (default is disable)\n"
|
||||
" - Specify three type Amazon's Server-Site Encryption: SSE-S3,\n"
|
||||
@ -1104,6 +1134,11 @@ void show_help (void)
|
||||
" If you specify this option for set \"Content-Encoding\" HTTP \n"
|
||||
" header, please take care for RFC 2616.\n"
|
||||
"\n"
|
||||
" profile (default=\"default\")\n"
|
||||
" - Choose a profile from ${HOME}/.aws/credentials to authenticate\n"
|
||||
" against S3. Note that this format matches the AWS CLI format and\n"
|
||||
" differs from the s3fs passwd format.\n"
|
||||
"\n"
|
||||
" connect_timeout (default=\"300\" seconds)\n"
|
||||
" - time to wait for connection before giving up\n"
|
||||
"\n"
|
||||
@ -1162,7 +1197,7 @@ void show_help (void)
|
||||
" space is smaller than this value, s3fs do not use diskspace\n"
|
||||
" as possible in exchange for the performance.\n"
|
||||
"\n"
|
||||
" singlepart_copy_limit (default=\"5120\")\n"
|
||||
" singlepart_copy_limit (default=\"512\")\n"
|
||||
" - maximum size, in MB, of a single-part copy before trying \n"
|
||||
" multipart copy.\n"
|
||||
"\n"
|
||||
@ -1183,7 +1218,7 @@ void show_help (void)
|
||||
" error from the S3 server.\n"
|
||||
"\n"
|
||||
" sigv2 (default is signature version 4)\n"
|
||||
" - sets signing AWS requests by sing Signature Version 2\n"
|
||||
" - sets signing AWS requests by using Signature Version 2\n"
|
||||
"\n"
|
||||
" mp_umask (default is \"0000\")\n"
|
||||
" - sets umask for the mount point directory.\n"
|
||||
@ -1196,7 +1231,8 @@ void show_help (void)
|
||||
" nomultipart (disable multipart uploads)\n"
|
||||
"\n"
|
||||
" enable_content_md5 (default is disable)\n"
|
||||
" - ensure data integrity during writes with MD5 hash.\n"
|
||||
" Allow S3 server to check data integrity of uploads via the\n"
|
||||
" Content-MD5 header. This can add CPU overhead to transfers.\n"
|
||||
"\n"
|
||||
" ecs\n"
|
||||
" - This option instructs s3fs to query the ECS container credential\n"
|
||||
@ -1302,6 +1338,14 @@ void show_help (void)
|
||||
" Please use this option when the directory in the bucket is\n"
|
||||
" only \"dir/\" object.\n"
|
||||
"\n"
|
||||
" use_wtf8 - support arbitrary file system encoding.\n"
|
||||
" S3 requires all object names to be valid utf-8. But some\n"
|
||||
" clients, notably Windows NFS clients, use their own encoding.\n"
|
||||
" This option re-encodes invalid utf-8 object names into valid\n"
|
||||
" utf-8 by mapping offending codes into a 'private' codepage of the\n"
|
||||
" Unicode set.\n"
|
||||
" Useful on clients not using utf-8 as their file system encoding.\n"
|
||||
"\n"
|
||||
"FUSE/mount Options:\n"
|
||||
"\n"
|
||||
" Most of the generic mount options described in 'man mount' are\n"
|
||||
@ -1313,6 +1357,22 @@ void show_help (void)
|
||||
" There are many FUSE specific mount options that can be specified.\n"
|
||||
" e.g. allow_other See the FUSE's README for the full set.\n"
|
||||
"\n"
|
||||
"Utility mode Options:\n"
|
||||
"\n"
|
||||
" -u, --incomplete-mpu-list\n"
|
||||
" Lists multipart incomplete objects uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" --incomplete-mpu-abort(=all or =<date format>)\n"
|
||||
" Delete the multipart incomplete object uploaded to the specified\n"
|
||||
" bucket.\n"
|
||||
" If \"all\" is specified for this option, all multipart incomplete\n"
|
||||
" objects will be deleted. If you specify no argument as an option,\n"
|
||||
" objects older than 24 hours(24H) will be deleted(This is the\n"
|
||||
" default value). You can specify an optional date format. It can\n"
|
||||
" be specified as year, month, day, hour, minute, second, and it is\n"
|
||||
" expressed as \"Y\", \"M\", \"D\", \"h\", \"m\", \"s\" respectively.\n"
|
||||
" For example, \"1Y6M10D12h30m30s\".\n"
|
||||
"\n"
|
||||
"Miscellaneous Options:\n"
|
||||
"\n"
|
||||
" -h, --help Output this help.\n"
|
||||
@ -1326,19 +1386,17 @@ void show_help (void)
|
||||
"\n"
|
||||
"s3fs home page: <https://github.com/s3fs-fuse/s3fs-fuse>\n"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
void show_version(void)
|
||||
void show_version()
|
||||
{
|
||||
printf(
|
||||
"Amazon Simple Storage Service File System V%s(commit:%s) with %s\n"
|
||||
"Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>\n"
|
||||
"License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>\n"
|
||||
"License GPL2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>\n"
|
||||
"This is free software: you are free to change and redistribute it.\n"
|
||||
"There is NO WARRANTY, to the extent permitted by law.\n",
|
||||
VERSION, COMMIT_HASH_VAL, s3fs_crypt_lib_name());
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -109,9 +109,9 @@ std::string get_username(uid_t uid);
|
||||
int is_uid_include_group(uid_t uid, gid_t gid);
|
||||
|
||||
std::string mydirname(const char* path);
|
||||
std::string mydirname(std::string path);
|
||||
std::string mydirname(const std::string& path);
|
||||
std::string mybasename(const char* path);
|
||||
std::string mybasename(std::string path);
|
||||
std::string mybasename(const std::string& path);
|
||||
int mkdirp(const std::string& path, mode_t mode);
|
||||
std::string get_exist_directory_path(const std::string& path);
|
||||
bool check_exist_dir_permission(const char* dirpath);
|
||||
@ -119,6 +119,7 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own);
|
||||
|
||||
time_t get_mtime(const char *s);
|
||||
time_t get_mtime(headers_t& meta, bool overcheck = true);
|
||||
time_t get_ctime(headers_t& meta, bool overcheck = true);
|
||||
off_t get_size(const char *s);
|
||||
off_t get_size(headers_t& meta);
|
||||
mode_t get_mode(const char *s);
|
||||
|
||||
@ -22,6 +22,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <syslog.h>
|
||||
#include <time.h>
|
||||
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
@ -33,7 +34,7 @@
|
||||
using namespace std;
|
||||
|
||||
template <class T> std::string str(T value) {
|
||||
std::stringstream s;
|
||||
std::ostringstream s;
|
||||
s << value;
|
||||
return s.str();
|
||||
}
|
||||
@ -75,7 +76,7 @@ off_t s3fs_strtoofft(const char* str, bool is_base_16)
|
||||
}
|
||||
// check like isalnum and set data
|
||||
result *= (is_base_16 ? 16 : 10);
|
||||
if('0' <= *str || '9' < *str){
|
||||
if('0' <= *str && '9' >= *str){
|
||||
result += static_cast<off_t>(*str - '0');
|
||||
}else if(is_base_16){
|
||||
if('A' <= *str && *str <= 'F'){
|
||||
@ -120,8 +121,7 @@ string trim_right(const string &s, const string &t /* = SPACES */)
|
||||
|
||||
string trim(const string &s, const string &t /* = SPACES */)
|
||||
{
|
||||
string d(s);
|
||||
return trim_left(trim_right(d, t), t);
|
||||
return trim_left(trim_right(s, t), t);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -210,15 +210,15 @@ bool takeout_str_dquart(string& str)
|
||||
size_t pos;
|
||||
|
||||
// '"' for start
|
||||
if(string::npos != (pos = str.find_first_of("\""))){
|
||||
if(string::npos != (pos = str.find_first_of('\"'))){
|
||||
str = str.substr(pos + 1);
|
||||
|
||||
// '"' for end
|
||||
if(string::npos == (pos = str.find_last_of("\""))){
|
||||
if(string::npos == (pos = str.find_last_of('\"'))){
|
||||
return false;
|
||||
}
|
||||
str = str.substr(0, pos);
|
||||
if(string::npos != str.find_first_of("\"")){
|
||||
if(string::npos != str.find_first_of('\"')){
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -284,6 +284,75 @@ string get_date_iso8601(time_t tm)
|
||||
return buf;
|
||||
}
|
||||
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime)
|
||||
{
|
||||
if(!pdate){
|
||||
return false;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
char* prest = strptime(pdate, "%Y-%m-%dT%T", &tm);
|
||||
if(prest == pdate){
|
||||
// wrong format
|
||||
return false;
|
||||
}
|
||||
unixtime = mktime(&tm);
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// Convert to unixtime from string which formatted by following:
|
||||
// "12Y12M12D12h12m12s", "86400s", "9h30m", etc
|
||||
//
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime)
|
||||
{
|
||||
if(!argv){
|
||||
return false;
|
||||
}
|
||||
unixtime = 0;
|
||||
const char* ptmp;
|
||||
int last_unit_type = 0; // unit flag.
|
||||
bool is_last_number;
|
||||
time_t tmptime;
|
||||
for(ptmp = argv, is_last_number = true, tmptime = 0; ptmp && *ptmp; ++ptmp){
|
||||
if('0' <= *ptmp && *ptmp <= '9'){
|
||||
tmptime *= 10;
|
||||
tmptime += static_cast<time_t>(*ptmp - '0');
|
||||
is_last_number = true;
|
||||
}else if(is_last_number){
|
||||
if('Y' == *ptmp && 1 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 365)); // average 365 day / year
|
||||
last_unit_type = 1;
|
||||
}else if('M' == *ptmp && 2 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24 * 30)); // average 30 day / month
|
||||
last_unit_type = 2;
|
||||
}else if('D' == *ptmp && 3 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60 * 24));
|
||||
last_unit_type = 3;
|
||||
}else if('h' == *ptmp && 4 > last_unit_type){
|
||||
unixtime += (tmptime * (60 * 60));
|
||||
last_unit_type = 4;
|
||||
}else if('m' == *ptmp && 5 > last_unit_type){
|
||||
unixtime += (tmptime * 60);
|
||||
last_unit_type = 5;
|
||||
}else if('s' == *ptmp && 6 > last_unit_type){
|
||||
unixtime += tmptime;
|
||||
last_unit_type = 6;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
tmptime = 0;
|
||||
is_last_number = false;
|
||||
}else{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if(is_last_number){
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string s3fs_hex(const unsigned char* input, size_t length)
|
||||
{
|
||||
std::string hex;
|
||||
@ -300,7 +369,7 @@ char* s3fs_base64(const unsigned char* input, size_t length)
|
||||
static const char* base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
char* result;
|
||||
|
||||
if(!input || 0 >= length){
|
||||
if(!input || 0 == length){
|
||||
return NULL;
|
||||
}
|
||||
if(NULL == (result = reinterpret_cast<char*>(malloc((((length / 3) + 1) * 4 + 1) * sizeof(char))))){
|
||||
@ -382,6 +451,132 @@ unsigned char* s3fs_decode64(const char* input, size_t* plength)
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* detect and rewrite invalid utf8. We take invalid bytes
|
||||
* and encode them into a private region of the unicode
|
||||
* space. This is sometimes known as wtf8, wobbly transformation format.
|
||||
* it is necessary because S3 validates the utf8 used for identifiers for
|
||||
* correctness, while some clients may provide invalid utf, notably
|
||||
* windows using cp1252.
|
||||
*/
|
||||
|
||||
// Base location for transform. The range 0xE000 - 0xF8ff
|
||||
// is a private range, se use the start of this range.
|
||||
static unsigned int escape_base = 0xe000;
|
||||
|
||||
// encode bytes into wobbly utf8.
|
||||
// 'result' can be null. returns true if transform was needed.
|
||||
bool s3fs_wtf8_encode(const char *s, string *result)
|
||||
{
|
||||
bool invalid = false;
|
||||
|
||||
// Pass valid utf8 code through
|
||||
for (; *s; s++) {
|
||||
const unsigned char c = *s;
|
||||
|
||||
// single byte encoding
|
||||
if (c <= 0x7f) {
|
||||
if (result)
|
||||
*result += c;
|
||||
continue;
|
||||
}
|
||||
|
||||
// otherwise, it must be one of the valid start bytes
|
||||
if ( c >= 0xc2 && c <= 0xf5 ) {
|
||||
|
||||
// two byte encoding
|
||||
// don't need bounds check, string is zero terminated
|
||||
if ((c & 0xe0) == 0xc0 && (s[1] & 0xc0) == 0x80) {
|
||||
// all two byte encodings starting higher than c1 are valid
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// three byte encoding
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
|
||||
if (code >= 0x800 && ! (code >= 0xd800 && code <= 0xd8ff)) {
|
||||
// not overlong and not a surrogate pair
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
// four byte encoding
|
||||
if ((c & 0xf8) == 0xf0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80 && (s[3] & 0xc0) == 0x80) {
|
||||
const unsigned code = ((c & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f);
|
||||
if (code >= 0x10000 && code <= 0x10ffff) {
|
||||
// not overlong and in defined unicode space
|
||||
if (result) {
|
||||
*result += c;
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
*result += *(++s);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// printf("invalid %02x at %d\n", c, i);
|
||||
// Invalid utf8 code. Convert it to a private two byte area of unicode
|
||||
// e.g. the e000 - f8ff area. This will be a three byte encoding
|
||||
invalid = true;
|
||||
if (result) {
|
||||
unsigned escape = escape_base + c;
|
||||
*result += 0xe0 | ((escape >> 12) & 0x0f);
|
||||
*result += 0x80 | ((escape >> 06) & 0x3f);
|
||||
*result += 0x80 | ((escape >> 00) & 0x3f);
|
||||
}
|
||||
}
|
||||
return invalid;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_encode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_encode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
// The reverse operation, turn encoded bytes back into their original values
|
||||
// The code assumes that we map to a three-byte code point.
|
||||
bool s3fs_wtf8_decode(const char *s, string *result)
|
||||
{
|
||||
bool encoded = false;
|
||||
for (; *s; s++) {
|
||||
unsigned char c = *s;
|
||||
// look for a three byte tuple matching our encoding code
|
||||
if ((c & 0xf0) == 0xe0 && (s[1] & 0xc0) == 0x80 && (s[2] & 0xc0) == 0x80) {
|
||||
unsigned code = (c & 0x0f) << 12;
|
||||
code |= (s[1] & 0x3f) << 6;
|
||||
code |= (s[2] & 0x3f) << 0;
|
||||
if (code >= escape_base && code <= escape_base + 0xff) {
|
||||
// convert back
|
||||
encoded = true;
|
||||
if (result)
|
||||
*result += code - escape_base;
|
||||
s+=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (result)
|
||||
*result += c;
|
||||
}
|
||||
return encoded;
|
||||
}
|
||||
|
||||
string s3fs_wtf8_decode(const string &s)
|
||||
{
|
||||
string result;
|
||||
s3fs_wtf8_decode(s.c_str(), &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* tab-width: 4
|
||||
|
||||
@ -35,6 +35,7 @@ static inline int STR2NCMP(const char *str1, const char *str2) { return strncmp(
|
||||
|
||||
template <class T> std::string str(T value);
|
||||
|
||||
// Convert string to off_t. Does not signal invalid input.
|
||||
off_t s3fs_strtoofft(const char* str, bool is_base_16 = false);
|
||||
|
||||
std::string trim_left(const std::string &s, const std::string &t = SPACES);
|
||||
@ -45,6 +46,8 @@ std::string get_date_rfc850(void);
|
||||
void get_date_sigv3(std::string& date, std::string& date8601);
|
||||
std::string get_date_string(time_t tm);
|
||||
std::string get_date_iso8601(time_t tm);
|
||||
bool get_unixtime_from_iso8601(const char* pdate, time_t& unixtime);
|
||||
bool convert_unixtime_from_option_arg(const char* argv, time_t& unixtime);
|
||||
std::string urlEncode(const std::string &s);
|
||||
std::string urlEncode2(const std::string &s);
|
||||
std::string urlDecode(const std::string& s);
|
||||
@ -55,6 +58,11 @@ std::string s3fs_hex(const unsigned char* input, size_t length);
|
||||
char* s3fs_base64(const unsigned char* input, size_t length);
|
||||
unsigned char* s3fs_decode64(const char* input, size_t* plength);
|
||||
|
||||
bool s3fs_wtf8_encode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_encode(const std::string &s);
|
||||
bool s3fs_wtf8_decode(const char *s, std::string *result);
|
||||
std::string s3fs_wtf8_decode(const std::string &s);
|
||||
|
||||
#endif // S3FS_STRING_UTIL_H_
|
||||
|
||||
/*
|
||||
|
||||
@ -75,9 +75,47 @@ void test_base64()
|
||||
// TODO: invalid input
|
||||
}
|
||||
|
||||
void test_strtoofft()
|
||||
{
|
||||
ASSERT_EQUALS(s3fs_strtoofft("0"), static_cast<off_t>(0L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("9"), static_cast<off_t>(9L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("A"), static_cast<off_t>(0L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("A", /*is_base_16=*/ true), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("F", /*is_base_16=*/ true), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("a", /*is_base_16=*/ true), static_cast<off_t>(10L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("f", /*is_base_16=*/ true), static_cast<off_t>(15L));
|
||||
ASSERT_EQUALS(s3fs_strtoofft("deadbeef", /*is_base_16=*/ true), static_cast<off_t>(3735928559L));
|
||||
}
|
||||
|
||||
void test_wtf8_encoding()
|
||||
{
|
||||
std::string ascii("normal string");
|
||||
std::string utf8("Hyld\xc3\xbdpi \xc3\xbej\xc3\xb3\xc3\xb0""f\xc3\xa9lagsins vex \xc3\xbar k\xc3\xa6rkomnu b\xc3\xb6li \xc3\xad \xc3\xa1st");
|
||||
std::string cp1252("Hyld\xfdpi \xfej\xf3\xf0""f\xe9lagsins vex \xfar k\xe6rkomnu b\xf6li \xed \xe1st");
|
||||
std::string broken = utf8;
|
||||
broken[14] = 0x97;
|
||||
std::string mixed = ascii + utf8 + cp1252;
|
||||
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(ascii), ascii);
|
||||
ASSERT_EQUALS(s3fs_wtf8_encode(utf8), utf8);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(utf8), utf8);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(cp1252), cp1252);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(cp1252)), cp1252);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(broken), broken);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(broken)), broken);
|
||||
|
||||
ASSERT_NEQUALS(s3fs_wtf8_encode(mixed), mixed);
|
||||
ASSERT_EQUALS(s3fs_wtf8_decode(s3fs_wtf8_encode(mixed)), mixed);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
test_trim();
|
||||
test_base64();
|
||||
test_strtoofft();
|
||||
test_wtf8_encoding();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -20,11 +20,50 @@
|
||||
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
template <typename T> void assert_equals(const T &x, const T &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
template <> void assert_equals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x != y) {
|
||||
std::cerr << x << " != " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Generic inequality assertion: if the operands compare equal (via the
// type's operator==), report the offending value pair and the call site,
// then abort the test run with exit status 1.
template <typename T> void assert_nequals(const T &lhs, const T &rhs, const char *file, int line)
{
    if (!(lhs == rhs)) {
        return;
    }
    std::cerr << lhs << " == " << rhs << " at " << file << ":" << line << std::endl;
    std::exit(1);
}
|
||||
|
||||
template <> void assert_nequals(const std::string &x, const std::string &y, const char *file, int line)
|
||||
{
|
||||
if (x == y) {
|
||||
std::cerr << x << " == " << y << " at " << file << ":" << line << std::endl;
|
||||
for (unsigned i=0; i<x.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)x[i]);
|
||||
std::cerr << std::endl;
|
||||
for (unsigned i=0; i<y.length(); i++)
|
||||
fprintf(stderr, "%02x ", (unsigned char)y[i]);
|
||||
std::cerr << std::endl;
|
||||
std::exit(1);
|
||||
}
|
||||
}
|
||||
@ -43,5 +82,8 @@ void assert_strequals(const char *x, const char *y, const char *file, int line)
|
||||
#define ASSERT_EQUALS(x, y) \
|
||||
assert_equals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_NEQUALS(x, y) \
|
||||
assert_nequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
#define ASSERT_STREQUALS(x, y) \
|
||||
assert_strequals((x), (y), __FILE__, __LINE__)
|
||||
|
||||
@ -50,7 +50,7 @@ export S3_URL
|
||||
export TEST_SCRIPT_DIR=`pwd`
|
||||
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
|
||||
|
||||
S3PROXY_VERSION="1.6.0"
|
||||
S3PROXY_VERSION="1.6.1"
|
||||
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
|
||||
|
||||
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
|
||||
@ -146,14 +146,14 @@ function start_s3fs {
|
||||
# If VALGRIND is set, pass it as options to valgrind.
|
||||
# start valgrind-listener in another shell.
|
||||
# eg: VALGRIND="--tool=memcheck --leak-check=full" ./small-integration-test.sh
|
||||
# Start valgind-listener (default port is 1500)
|
||||
# Start valgrind-listener (default port is 1500)
|
||||
if [ -n "${VALGRIND}" ]; then
|
||||
VALGRIND_EXEC="valgrind ${VALGRIND} --log-socket=127.0.1.1"
|
||||
fi
|
||||
|
||||
# Common s3fs options:
|
||||
#
|
||||
# TODO: Allow all these options to be overriden with env variables
|
||||
# TODO: Allow all these options to be overridden with env variables
|
||||
#
|
||||
# use_path_request_style
|
||||
# The test env doesn't have virtual hosts
|
||||
@ -181,6 +181,7 @@ function start_s3fs {
|
||||
-o url=${S3_URL} \
|
||||
-o no_check_certificate \
|
||||
-o ssl_verify_hostname=0 \
|
||||
-o use_xattr=1 \
|
||||
-o createbucket \
|
||||
${AUTH_OPT} \
|
||||
-o dbglevel=${DBGLEVEL:=info} \
|
||||
|
||||
@ -108,7 +108,7 @@ function test_mv_file {
|
||||
rm_test_file $ALT_TEST_TEXT_FILE
|
||||
}
|
||||
|
||||
function test_mv_directory {
|
||||
function test_mv_empty_directory {
|
||||
describe "Testing mv directory function ..."
|
||||
if [ -e $TEST_DIR ]; then
|
||||
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
|
||||
@ -118,7 +118,6 @@ function test_mv_directory {
|
||||
mk_test_dir
|
||||
|
||||
mv ${TEST_DIR} ${TEST_DIR}_rename
|
||||
|
||||
if [ ! -d "${TEST_DIR}_rename" ]; then
|
||||
echo "Directory ${TEST_DIR} was not renamed"
|
||||
return 1
|
||||
@ -131,6 +130,30 @@ function test_mv_directory {
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify that renaming a NON-empty directory moves the directory together
# with its contents, and that the renamed tree can be removed afterwards.
# Returns 1 (test failure) on any unexpected state.
function test_mv_nonempty_directory {
    describe "Testing mv directory function ..."
    # The test directory must not pre-exist, or the rename below proves nothing.
    if [ -e "${TEST_DIR}" ]; then
       echo "Unexpected, this file/directory exists: ${TEST_DIR}"
       return 1
    fi

    mk_test_dir

    # Make the directory non-empty so the rename has to carry an entry along.
    touch "${TEST_DIR}/file"

    mv "${TEST_DIR}" "${TEST_DIR}_rename"
    if [ ! -d "${TEST_DIR}_rename" ]; then
       echo "Directory ${TEST_DIR} was not renamed"
       return 1
    fi

    rm -r "${TEST_DIR}_rename"
    if [ -e "${TEST_DIR}_rename" ]; then
       echo "Could not remove the test directory, it still exists: ${TEST_DIR}_rename"
       return 1
    fi
}
|
||||
|
||||
function test_redirects {
|
||||
describe "Testing redirects ..."
|
||||
|
||||
@ -400,13 +423,8 @@ function test_mtime_file {
|
||||
|
||||
#copy the test file with preserve mode
|
||||
cp -p $TEST_TEXT_FILE $ALT_TEST_TEXT_FILE
|
||||
if [ `uname` = "Darwin" ]; then
|
||||
testmtime=`stat -f "%m" $TEST_TEXT_FILE`
|
||||
altmtime=`stat -f "%m" $ALT_TEST_TEXT_FILE`
|
||||
else
|
||||
testmtime=`stat -c %Y $TEST_TEXT_FILE`
|
||||
altmtime=`stat -c %Y $ALT_TEST_TEXT_FILE`
|
||||
fi
|
||||
testmtime=`get_mtime $TEST_TEXT_FILE`
|
||||
altmtime=`get_mtime $ALT_TEST_TEXT_FILE`
|
||||
if [ "$testmtime" -ne "$altmtime" ]
|
||||
then
|
||||
echo "File times do not match: $testmtime != $altmtime"
|
||||
@ -414,6 +432,61 @@ function test_mtime_file {
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify ctime/mtime semantics on the mount:
#   - chmod, chown and setfattr must update ctime but leave mtime unchanged
#   - appending data must update both ctime and mtime
# Fix: the original recorded the initial timestamps into swapped variables
# (mtime=`get_ctime ...`, ctime=`get_mtime ...`); it only worked because a
# freshly created file has identical ctime and mtime.
function test_update_time() {
    describe "Testing update time function ..."

    # create the test file and record its initial timestamps
    mk_test_file
    ctime=`get_ctime $TEST_TEXT_FILE`
    mtime=`get_mtime $TEST_TEXT_FILE`

    # chmod: ctime must advance, mtime must not change
    sleep 2
    chmod +x $TEST_TEXT_FILE

    ctime2=`get_ctime $TEST_TEXT_FILE`
    mtime2=`get_mtime $TEST_TEXT_FILE`
    if [ $ctime -eq $ctime2 -o $mtime -ne $mtime2 ]; then
       echo "Expected updated ctime: $ctime != $ctime2 and same mtime: $mtime == $mtime2"
       return 1
    fi

    # chown: ctime must advance, mtime must not change
    sleep 2
    chown $UID:$UID $TEST_TEXT_FILE

    ctime3=`get_ctime $TEST_TEXT_FILE`
    mtime3=`get_mtime $TEST_TEXT_FILE`
    if [ $ctime2 -eq $ctime3 -o $mtime2 -ne $mtime3 ]; then
       echo "Expected updated ctime: $ctime2 != $ctime3 and same mtime: $mtime2 == $mtime3"
       return 1
    fi

    # setfattr: ctime must advance, mtime must not change
    # (skipped when the setfattr utility is unavailable, e.g. on macOS)
    if command -v setfattr >/dev/null 2>&1; then
        sleep 2
        setfattr -n key -v value $TEST_TEXT_FILE

        ctime4=`get_ctime $TEST_TEXT_FILE`
        mtime4=`get_mtime $TEST_TEXT_FILE`
        if [ $ctime3 -eq $ctime4 -o $mtime3 -ne $mtime4 ]; then
           echo "Expected updated ctime: $ctime3 != $ctime4 and same mtime: $mtime3 == $mtime4"
           return 1
        fi
    else
        echo "Skipping extended attribute test"
        # Re-read so the final append check compares against current values.
        ctime4=`get_ctime $TEST_TEXT_FILE`
        mtime4=`get_mtime $TEST_TEXT_FILE`
    fi

    # append: both ctime and mtime must advance
    sleep 2
    echo foo >> $TEST_TEXT_FILE

    ctime5=`get_ctime $TEST_TEXT_FILE`
    mtime5=`get_mtime $TEST_TEXT_FILE`
    if [ $ctime4 -eq $ctime5 -o $mtime4 -eq $mtime5 ]; then
       echo "Expected updated ctime: $ctime4 != $ctime5 and updated mtime: $mtime4 != $mtime5"
       return 1
    fi
}
|
||||
|
||||
function test_rm_rf_dir {
|
||||
describe "Test that rm -rf will remove directory with contents"
|
||||
# Create a dir with some files and directories
|
||||
@ -437,29 +510,57 @@ function test_write_after_seek_ahead {
|
||||
rm testfile
|
||||
}
|
||||
|
||||
# Verify that a ranged overwrite (seek past offset 0 with conv=notrunc)
# modifies only the targeted 1KB region and leaves the rest of the file intact.
# Fix: the cmp exit status was previously discarded because `rm -f` ran
# afterwards and became the function's return status; fail explicitly now.
function test_overwrite_existing_file_range {
    describe "Test overwrite range succeeds"
    # Seed the file with seq output, then zero 1KB at offset 1KB in place.
    dd if=<(seq 1000) of=${TEST_TEXT_FILE}
    dd if=/dev/zero of=${TEST_TEXT_FILE} seek=1 count=1 bs=1024 conv=notrunc
    # Expected layout: first 1KB of seq output, 1KB of zeros, remainder of seq.
    cmp ${TEST_TEXT_FILE} <(
        seq 1000 | head -c 1024
        dd if=/dev/zero count=1 bs=1024
        seq 1000 | tail -c +2049
    ) || return 1
    rm -f ${TEST_TEXT_FILE}
}
|
||||
|
||||
# Hammer the mount with two concurrent background workers that race to read,
# delete and recreate small files in the shared test directory, to flush out
# cache/consistency problems under contention.
function test_concurrency {
    # Seed ten files named 1..10 in the current directory.
    for i in `seq 10`; do echo foo > $i; done
    for process in `seq 2`; do
        for i in `seq 100`; do
            # Pick a random entry from the current listing (1-based sed line).
            file=$(ls | sed -n "$(($RANDOM % 10 + 1)){p;q}")
            # Reads/removes may legitimately fail when the other worker wins
            # the race for the same file; ignore those errors.
            cat $file >/dev/null || true
            rm -f $file
            # NOTE(review): this recreates $i (1..100), not $file — so files
            # named 11..100 also appear; confirm this is intended.
            echo foo > $i || true
        done &
    done
    wait
}
|
||||
|
||||
|
||||
# Register every test case with the harness, in execution order.
# Fix: reconcile stale registrations left over from renames —
# test_mv_directory no longer exists (it was split into
# test_mv_empty_directory / test_mv_nonempty_directory), and
# test_rename_before_close is registered once, uncommented.
function add_all_tests {
    add_tests test_append_file
    add_tests test_truncate_file
    add_tests test_truncate_empty_file
    add_tests test_mv_file
    add_tests test_mv_empty_directory
    add_tests test_mv_nonempty_directory
    add_tests test_redirects
    add_tests test_mkdir_rmdir
    add_tests test_chmod
    add_tests test_chown
    add_tests test_list
    add_tests test_remove_nonempty_directory
    add_tests test_rename_before_close
    add_tests test_multipart_upload
    add_tests test_multipart_copy
    add_tests test_special_characters
    add_tests test_symlink
    add_tests test_extended_attributes
    add_tests test_mtime_file
    add_tests test_update_time
    add_tests test_rm_rf_dir
    add_tests test_write_after_seek_ahead
    add_tests test_overwrite_existing_file_range
    add_tests test_concurrency
}
|
||||
|
||||
init_suite
|
||||
|
||||
@ -7,12 +7,12 @@
|
||||
###
|
||||
### UsageFunction <program name>
|
||||
###
|
||||
UsageFuntion()
|
||||
UsageFunction()
|
||||
{
|
||||
echo "Usage: $1 [-h] [-y] [-all] <base directory>"
|
||||
echo " -h print usage"
|
||||
echo " -y no confirm"
|
||||
echo " -all force all directoris"
|
||||
echo " -all force all directories"
|
||||
echo " There is no -all option is only to merge for other S3 client."
|
||||
echo " If -all is specified, this shell script merge all directory"
|
||||
echo " for s3fs old version."
|
||||
@ -28,7 +28,7 @@ DIRPARAM=""
|
||||
|
||||
while [ "$1" != "" ]; do
|
||||
if [ "X$1" = "X-help" -o "X$1" = "X-h" -o "X$1" = "X-H" ]; then
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 0
|
||||
elif [ "X$1" = "X-y" -o "X$1" = "X-Y" ]; then
|
||||
AUTOYES="yes"
|
||||
@ -38,7 +38,7 @@ while [ "$1" != "" ]; do
|
||||
if [ "X$DIRPARAM" != "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
DIRPARAM=$1
|
||||
@ -48,7 +48,7 @@ done
|
||||
if [ "X$DIRPARAM" = "X" ]; then
|
||||
echo "*** Input error."
|
||||
echo ""
|
||||
UsageFuntion $OWNNAME
|
||||
UsageFunction $OWNNAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -62,7 +62,7 @@ fi
|
||||
echo "#############################################################################"
|
||||
echo "[CAUTION]"
|
||||
echo "This program merges a directory made in s3fs which is older than version 1.64."
|
||||
echo "And made in other S3 client appilication."
|
||||
echo "And made in other S3 client application."
|
||||
echo "This program may be have bugs which are not fixed yet."
|
||||
echo "Please execute this program by responsibility of your own."
|
||||
echo "#############################################################################"
|
||||
@ -104,7 +104,7 @@ for DIR in $DIRLIST; do
|
||||
if [ "$ALLYES" = "no" ]; then
|
||||
### Skip "d---------" directories.
|
||||
### Other clients make directory object "dir/" which don't have
|
||||
### "x-amz-meta-mode" attribyte.
|
||||
### "x-amz-meta-mode" attribute.
|
||||
### Then these directories is "d---------", it is target directory.
|
||||
DIRPERMIT=`ls -ld --time-style=+'%Y%m%d%H%M' $DIR | awk '{print $1}'`
|
||||
if [ "$DIRPERMIT" != "d---------" ]; then
|
||||
@ -112,7 +112,7 @@ for DIR in $DIRLIST; do
|
||||
fi
|
||||
fi
|
||||
|
||||
### Comfirm
|
||||
### Confirm
|
||||
ANSWER=""
|
||||
if [ "$AUTOYES" = "yes" ]; then
|
||||
ANSWER="y"
|
||||
|
||||
@ -169,3 +169,19 @@ function run_suite {
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Print the status-change time (ctime) of the given path as epoch seconds,
# handling the differing stat flag syntax of macOS/BSD vs GNU coreutils.
function get_ctime() {
    case `uname` in
        Darwin)
            stat -f "%c" "$1"
            ;;
        *)
            stat -c %Z "$1"
            ;;
    esac
}
|
||||
|
||||
# Print the modification time (mtime) of the given path as epoch seconds,
# handling the differing stat flag syntax of macOS/BSD vs GNU coreutils.
function get_mtime() {
    case `uname` in
        Darwin)
            stat -f "%m" "$1"
            ;;
        *)
            stat -c %Y "$1"
            ;;
    esac
}
|
||||
|
||||
Reference in New Issue
Block a user