83 Commits
v1.86 ... v1.87

Author SHA1 Message Date
194262c0ef Update ChangeLog and configure.ac for 1.87
Fixes #1335.
2020-08-10 11:52:14 +09:00
e2fbcb4d30 Merge pull request #1348 from gaul/readme/ibm
Move IBM information to Non Amazon S3 wiki page
2020-08-03 00:55:11 +09:00
0c1bc0f803 Add portability wrapper for stat(1)
Fixes #1344.
2020-08-02 23:14:58 +09:00
83361e7905 Add some code backticks to README 2020-08-02 23:02:11 +09:00
19abd9ffaf Move IBM information to Non Amazon S3 wiki page
This gives consistency with other providers:

https://github.com/s3fs-fuse/s3fs-fuse/wiki/Non-Amazon-S3
2020-08-01 20:22:30 +09:00
cbd925c56f Moved the SIGUSR2 handler to S3fsSignals class 2020-07-28 14:54:35 +09:00
63bbb47378 Merge pull request #1341 from gaul/stat-cache/default
Change default stat_cache_expire
2020-07-26 23:54:14 +09:00
0fbd0eac80 Change default stat_cache_expire
Previously s3fs cached file metadata forever, which confused users
creating objects with another client.
2020-07-26 23:04:43 +09:00
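For context, the new default corresponds to a 15-minute (900-second) expiry; below is a minimal sketch of the time-based eviction this implies, using hypothetical names rather than s3fs's actual ones:
```cpp
#include <ctime>

// Hypothetical cache entry; the real stat_cache_entry in s3fs differs.
struct Entry {
    time_t cached_at;   // when the entry was stored
};

// With the new default of 900 seconds, entries older than 15 minutes
// are treated as expired instead of being served forever.
static bool is_expired(const Entry& ent, time_t expire_time = 15 * 60) {
    return (time(NULL) - ent.cached_at) > expire_time;
}
```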
e5231fa3c7 Merge pull request #1340 from gaul/test/external-directory
Test creating a directory with external program
2020-07-26 22:06:40 +09:00
ad1961417d Added SIGUSR1 option for cache file integrity test 2020-07-26 21:04:11 +09:00
4154e539ea Test creating a directory with external program
References #1264.
2020-07-25 21:22:53 +09:00
e0a38adaf6 Merge pull request #1333 from ggtakec/similar_processing
Put similar processing together into method GetCacheFileStatTopDir
2020-07-12 20:29:45 +09:00
c3e711da58 Merge pull request #1327 from pxeger/master
`s3fs#bucketname ... fuse` -> `bucketname ... fuse.s3fs` (#1321)
2020-07-12 19:14:27 +09:00
700e288718 Put similar processing together into method GetCacheFileStatTopDir 2020-07-12 08:12:02 +00:00
e72a64785b Merge pull request #1331 from gaul/travis/simplify
Simplify conditional statements
2020-07-12 12:52:21 +09:00
5ace2b70fc Simplify conditional statements 2020-07-10 19:34:46 +09:00
62c8be85d6 Interim fix for Travis macOS Build 2020-07-10 09:47:18 +09:00
3f6b5ae6a5 Merge pull request #1329 from gaul/cppcheck-2.1
Fix NULL pointer dereference
2020-07-09 23:00:36 +09:00
dc365b65a0 Fix NULL pointer deference
Found via cppcheck 2.1.
2020-07-09 21:40:23 +09:00
9c88ec2128 Merge pull request #1326 from gaul/test/python2
Use Python 2 for write_multiple_offsets
2020-07-09 18:45:41 +09:00
3dd9832f61 s3fs#bucketname ... fuse -> bucketname ... fuse.s3fs (#1321) 2020-07-07 16:06:00 +01:00
4d1f5c899f Use Python 2 for write_multiple_offsets
This aligns with ut_test.py.  Using an older Python also allows
compatibility with the older macOS 10.12 Travis CI.  References #1323.
2020-07-07 21:12:52 +09:00
1f796d432d Fixed an upload error when mix-uploading a sparse file and truncating a file 2020-06-27 22:44:19 +09:00
35006e318f Fixed a bug in ParallelMixMultipartUpload 2020-06-24 12:48:55 +09:00
7d0c66e08a Add support for glacier storage class.
Just a copy of what was done in PR #271.
2020-06-23 11:23:21 +09:00
9dc4148743 Merge pull request #1312 from ggtakec/fix_bug_cache
Fixed a bug when serializing from the cache file
2020-06-19 22:54:39 +09:00
f324d8e04f Fixed a bug when serializing from the cache file 2020-06-19 12:57:27 +00:00
f16ee96d7e Merge pull request #1306 from gaul/http/500
Retry with exponential backoff during 500 error
2020-06-06 15:30:22 +09:00
0d849b38c2 Merge pull request #1305 from gaul/alibaba/multipart
Ignore case when comparing ETags
2020-06-06 15:05:39 +09:00
8ed020610f Merge pull request #1296 from gaul/test/oss
Import ossfs tests
2020-06-06 14:40:11 +09:00
d8766b2051 Retry with exponential backoff during 500 error
Amazon suggests retrying on both 500 and 503:

https://aws.amazon.com/premiumsupport/knowledge-center/http-5xx-errors-s3/

Fixes #1251.
2020-06-05 21:01:30 +09:00
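The backoff visible in the corresponding diff grows as `sleep(4 << retry_count)` seconds (4, 8, 16, ...); here is a standalone sketch of that retry loop, where `perform_request` and `max_tries` are illustrative stand-ins, not s3fs APIs:
```cpp
#include <unistd.h>

static long perform_request() { return 503; }  // stub: returns an HTTP status code

// Retry on HTTP 500/503 with exponential backoff, mirroring the
// sleep(4 << retry_count) schedule added in this change.
static bool request_with_backoff(int max_tries) {
    for (int retry_count = 0; retry_count < max_tries; ++retry_count) {
        long code = perform_request();
        if (code != 500 && code != 503) {
            return code < 400;       // success or a non-retryable error
        }
        sleep(4 << retry_count);     // 4s, 8s, 16s, ... between tries
    }
    return false;                    // gave up after max_tries
}
```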
9db70bab63 Ignore case when comparing ETags
This allows multipart upload to work with Alibaba OSS.
References #1297.
2020-06-05 18:17:52 +09:00
8a7548a9d4 Import ossfs tests
This turns up a failure in test_rename_file when calling seek(0) on a
renamed file.
2020-06-01 09:08:25 +09:00
0cb057dadd Merge pull request #1303 from gaul/rename/use_cache
Relink cache stats file atomically via rename
2020-06-01 00:10:33 +09:00
0f5db0d1bf Merge pull request #1302 from gaul/rename/nocopy
Fix renames of open files with nocopyapi option
2020-05-31 23:46:46 +09:00
94e67c9c58 Merge pull request #1301 from gaul/pthread-result
Check results from pthread mutex calls
2020-05-31 23:11:24 +09:00
274321524c Relink cache stats file atomically via rename
The new file may already exist, so link may fail.  Further, link/unlink
is not atomic.  Addresses an error when renaming an open file with
use_cache.  References #1296.
2020-05-31 23:09:58 +09:00
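The technique here is the standard atomic-replace idiom: rename(2) atomically replaces an existing destination, whereas a link/unlink pair can fail on an existing target and leaves a window with no valid name. A minimal sketch with illustrative paths:
```cpp
#include <cstdio>    // std::rename

// Write the new stats to a temporary file, then atomically swap it in.
// rename() replaces an existing destination in one step, unlike
// link()+unlink(). Paths are illustrative, not s3fs's real layout.
static bool relink_stat_file(const char* tmp_path, const char* stat_path) {
    return 0 == std::rename(tmp_path, stat_path);  // errno set on failure
}
```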
40f7007263 Check results from pthread mutex calls
Also remove some unnecessary exception handling.
2020-05-30 16:37:55 +09:00
66597ec5f2 Fix renames of open files with nocopyapi option
References #1296.
2020-05-30 15:45:43 +09:00
75e72385cc Added a parameter to the curldbg option to output body data 2020-05-25 08:49:01 +09:00
eb58460175 Merge pull request #1294 from gaul/test/profile
Allow overriding test credentials with a profile
2020-05-24 16:14:31 +09:00
0852521a7e Merge pull request #1293 from gaul/s3proxy
Update to S3Proxy 1.7.1
2020-05-24 15:22:00 +09:00
56ed6bb97f Merge pull request #1292 from gaul/retries
Do not allow zero retries
2020-05-24 15:03:03 +09:00
73098220bf Allow overriding test credentials with a profile 2020-05-24 12:05:16 +09:00
ca7756fa77 Update to S3Proxy 1.7.1
Release notes:

https://github.com/gaul/s3proxy/releases/tag/s3proxy-1.7.1
2020-05-23 10:06:44 +09:00
8b15db6dcb Do not allow zero retries
Retries actually means tries; e.g., if the user sets zero, s3fs
never tries an operation at all.
2020-05-23 10:05:23 +09:00
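In other words, the option counts total attempts, not extra attempts after the first; a sketch of why a value of zero must be rejected (names hypothetical):
```cpp
// "retries" is really the total number of tries: with retries == 0 the
// loop body never runs and the operation is never attempted, which is
// why the option now requires at least one.
static bool do_operation_with_tries(int retries) {
    for (int attempt = 0; attempt < retries; ++attempt) {
        bool ok = true;  // stand-in for the real S3 request
        if (ok) {
            return true;
        }
    }
    return false;        // with retries == 0 we fall through immediately
}
```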
0b60aa81eb Merge pull request #1285 from ggtakec/wrapped_s3fs_strtoofft
Do not abort the process on an exception thrown from s3fs_strtoofft
2020-05-22 22:36:34 +09:00
da70cb92a8 Provide AWS CLI with correct test credentials
This allows tests to pass against real S3 implementations like
Backblaze.  References #272.
2020-05-22 19:27:18 +09:00
746a027e98 Expand on random write limitation 2020-05-05 08:12:04 +09:00
80c11b6c12 Do not abort the process on an exception thrown from s3fs_strtoofft 2020-05-03 13:46:05 +00:00
b76226a06d Merge pull request #1286 from gaul/gcs
Support Google Cloud Storage headers
2020-05-03 22:41:02 +09:00
8945e98d8b Support Google Cloud Storage headers
This allows s3fs to interpret objects created by gsutil.
2020-05-03 18:33:13 +09:00
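gsutil stores user metadata under `x-goog-meta-*` headers rather than `x-amz-meta-*`; a hedged sketch of the kind of prefix handling this implies (the actual s3fs change may differ in detail):
```cpp
#include <string>

// Accept Google's metadata prefix alongside Amazon's so that objects
// written by gsutil are interpreted correctly. This mirrors the idea
// of the change, not its exact implementation.
static bool is_user_meta_header(const std::string& key) {
    return 0 == key.compare(0, 11, "x-amz-meta-") ||
           0 == key.compare(0, 12, "x-goog-meta-");
}
```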
97c249d5b9 Do not abort the process on an exception thrown from s3fs_strtoofft 2020-05-03 08:08:28 +00:00
6e134a23f9 Merge pull request #1280 from ggtakec/add_test_fdcache
Add test for cache file stat content
2020-05-03 16:33:50 +09:00
a4803543a1 Merge pull request #1282 from gaul/mime
Warn about missing MIME types instead of exiting
2020-05-03 15:15:05 +09:00
2cc88b933f Warn about missing MIME types instead of exiting
s3fs uses the MIME types file to set Content-Type for uploaded
objects.  Most distribution packages should install this via
recommended (not required) dependencies.  Users compiling from source
may not have this installed and s3fs should not prevent launching
since most users do not care about Content-Type.  Instead, warn about
the absence of MIME types.  Fixes #1270.
2020-04-29 20:03:50 +09:00
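For reference, the Content-Type selection this commit relaxes is an extension lookup in the table built from mime.types; a minimal sketch, assuming an application/octet-stream fallback for illustration (s3fs's exact default may differ):
```cpp
#include <map>
#include <string>

// Look up a Content-Type by file extension. The map would be populated
// from mime.types; the fallback value is an assumption for this sketch.
static std::string lookup_content_type(
        const std::map<std::string, std::string>& mime_types,
        const std::string& name) {
    std::string::size_type dot = name.find_last_of('.');
    if (dot == std::string::npos) {
        return "application/octet-stream";
    }
    std::map<std::string, std::string>::const_iterator it =
        mime_types.find(name.substr(dot + 1));
    return it != mime_types.end() ? it->second : "application/octet-stream";
}
```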
ce1221c867 Add test for cache file stat content 2020-04-22 15:53:00 +00:00
005a684600 Fix typos 2020-04-22 21:49:11 +09:00
3af17c3019 Add test for out-of-order writes
References #1220.  References #1277.
2020-04-22 21:48:55 +09:00
f26a0aa71d Fixed insufficient upload size for mix multipart upload 2020-04-22 09:31:22 +09:00
2b4619842d Merge pull request #1278 from gaul/http-416
Add handler for HTTP 416
2020-04-21 22:38:54 +09:00
cf529e0af7 Add handler for HTTP 416
This prevents retries when the server indicates an unsatisfiable MPU
copy part request.  References #1220.
2020-04-21 19:45:10 +09:00
4da02d023b Improved strictness of cache file stats(file) 2020-04-21 19:45:03 +09:00
fe0677651e Merge pull request #1271 from ggtakec/fix_stat_file
Fixed the truncation bug of stat file for cache file
2020-04-19 16:27:11 +09:00
811ea0cb85 Fixed the truncation bug of stat file for cache file 2020-04-19 07:08:49 +00:00
a5f84535f3 Add install instructions for Arch Linux 2020-04-18 19:27:52 +09:00
84bf460f99 Remove deprecated sudo configuration
Addresses warnings of the form:

jobs.include: deprecated key sudo (The key `sudo` has no effect
anymore.)
2020-04-15 10:09:52 +09:00
538fbed302 Merge pull request #1266 from gaul/test/cache-eviction
Test cache eviction
2020-04-14 22:30:04 +09:00
feafb44bae Clean up macOS FUSE loading 2020-04-12 23:18:27 +09:00
a44fc1103d Avoid setting bogus group id
Addresses a symptom on newer macOS in Travis.
2020-04-12 22:37:22 +09:00
48a872e285 Address cppcheck 1.90 warning 2020-04-12 22:20:44 +09:00
c44a60f3f5 Fixed a bug in stats cache compression 2020-04-12 18:33:00 +09:00
f373df9682 Test cache eviction 2020-04-11 19:00:38 +09:00
9e01d5b8d1 Merge pull request #1254 from ggtakec/modify_mimetypes
Added mime option for strict checking of mime types file
2020-04-11 14:48:47 +09:00
7fbda230f5 Added mime option for strict checking of mime types file 2020-03-30 14:41:18 +00:00
56141557dc Avoid unneeded string copy
Found by clang-tidy 10.
2020-03-28 08:49:49 +09:00
fe2b269b6e Merge pull request #1253 from juliogonzalez/fix-1217
Generic compilation instructions: explain that /etc/mime.types is needed
2020-03-19 21:06:32 +09:00
eb6fe69af2 Generic compilation instructions: explain that /etc/mime.types is needed 2020-03-18 23:41:52 +01:00
6489c5d394 Merge pull request #1247 from ggtakec/fix_travis_timeout
Avoid TravisCI timeouts in test execution
2020-03-15 16:48:25 +09:00
854a8a8356 Avoid TravisCI timeouts in test execution 2020-02-24 09:56:18 +00:00
d34475d6a1 Add random writes and appends to README 2020-02-24 18:21:13 +09:00
b72f4b43a4 Use correct Content-Type when completing multipart upload 2020-02-10 16:58:28 +09:00
34e797d6f5 Add Twitter link 2020-02-07 09:01:50 +09:00
28 changed files with 2343 additions and 653 deletions

View File

@ -19,18 +19,19 @@
#
language: cpp
dist: xenial
os: linux
matrix:
jobs:
include:
- os: linux
sudo: required
dist: trusty
cache: apt
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- sudo pip install --upgrade awscli
- sudo -H pip install --upgrade awscli
script:
- ./autogen.sh
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
@ -38,7 +39,7 @@ matrix:
- make cppcheck
- make check -C src
- modprobe fuse
- make check -C test
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
- os: osx
@ -58,39 +59,69 @@ matrix:
rm -rf "$TAPS/caskroom/homebrew-cask";
fi;
if [ ! -f $HOME/.osx_cache/cached ]; then
brew tap homebrew/homebrew-cask;
echo "==> [Not found cache] brew tap homebrew/homebrew-cask";
echo "[NOTE]";
echo "If brew is executed without HOMEBREW_NO_AUTO_UPDATE=1,";
echo "python3 cannot be installed, so this is added as a temporary workaround.";
echo "If it is xcode 9.4 or higher, clear this patch.";
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask;
else
echo "==> [Found cache] HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask";
HOMEBREW_NO_AUTO_UPDATE=1 brew tap homebrew/homebrew-cask;
fi
- HOMEBREW_NO_AUTO_UPDATE=1 brew cask install osxfuse
- S3FS_BREW_PACKAGES='awscli cppcheck truncate';
- S3FS_BREW_PACKAGES='cppcheck python3';
for s3fs_brew_pkg in ${S3FS_BREW_PACKAGES}; do
brew list | grep -q ${s3fs_brew_pkg};
if [ $? -eq 0 ]; then
brew outdated | grep -q ${s3fs_brew_pkg} && HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg};
if brew list | grep -q ${s3fs_brew_pkg}; then
if brew outdated | grep -q ${s3fs_brew_pkg}; then
echo "==> Try to upgrade ${s3fs_brew_pkg}";
HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade ${s3fs_brew_pkg};
fi
else
echo "==> Try to install ${s3fs_brew_pkg}";
HOMEBREW_NO_AUTO_UPDATE=1 brew install ${s3fs_brew_pkg};
fi;
done
- if pip3 --version; then
echo "==> Try to install awscli by pip3";
sudo -H pip3 install awscli;
else
echo "==> Try to install awscli by pip";
curl https://bootstrap.pypa.io/get-pip.py | sudo python;
sudo -H pip install awscli --ignore-installed matplotlib;
fi
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then
sudo chmod +s /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs;
elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then
sudo chmod +s /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse;
else
exit 1;
fi
- if [ ! -f /usr/local/bin/truncate ]; then
echo "==> Make symbolic link truncate to gtruncate";
sudo ln -s /usr/local/opt/coreutils/bin/gtruncate /usr/local/bin/truncate;
fi
- if [ ! -f /usr/local/bin/stdbuf ]; then
echo "==> Make symbolic link stdbuf to gstdbuf";
sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf;
fi
- sudo ln -s /usr/local/opt/coreutils/bin/gstdbuf /usr/local/bin/stdbuf
script:
- ./autogen.sh
- PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig ./configure CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
- make
- make cppcheck
- make check -C src
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ; elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ; fi
- make check -C test
- if [ -f /Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs ]; then
/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs;
elif [ -f /Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse ]; then
/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse;
else
exit 1;
fi
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
- os: linux-ppc64le
sudo: required
dist: trusty
cache: apt
before_install:
@ -98,7 +129,7 @@ matrix:
- sudo apt-get update -qq
- sudo apt-get install -qq attr cppcheck libfuse-dev openjdk-7-jdk
- sudo update-alternatives --set java /usr/lib/jvm/java-7-openjdk-ppc64el/jre/bin/java
- sudo pip install --upgrade awscli
- sudo -H pip install --upgrade awscli
script:
- ./autogen.sh
- ./configure CPPFLAGS='-I/usr/local/opt/openssl/include' CXXFLAGS='-std=c++03 -DS3FS_PTHREAD_ERRORCHECK=1'
@ -106,7 +137,7 @@ matrix:
- make cppcheck
- make check -C src
- modprobe fuse
- make check -C test
- travis_wait 30 make check -C test
- test/filter-suite-log.sh test/test-suite.log
#

View File

@ -15,6 +15,11 @@ Keep in mind using the pre-built packages when available.
* libcurl
* libxml2
* openssl
* mime.types (the providing package depends on the OS)
* s3fs tries to detect `/etc/mime.types` as the default regardless of the OS
* Otherwise, s3fs tries to detect `/etc/apache2/mime.types` if the OS is macOS
* s3fs exits with an error if these files do not exist
* Alternatively, you can set the mime.types file path with the `mime` option instead of detecting these default files
* pkg-config (or your OS equivalent)
2. Then compile from master via the following commands:

View File

@ -1,6 +1,28 @@
ChangeLog for S3FS
------------------
Version 1.87 -- 10 Aug, 2020 (major changes only)
#1244 - Use correct Content-Type when completing multipart upload
#1265 - Fixed a bug in stats cache compression
#1271 - Fixed the truncation bug of stat file for cache file
#1274 - Improved strictness of cache file stats(file)
#1277 - Fixed insufficient upload size for mix multipart upload
#1282 - Warn about missing MIME types instead of exiting
#1285 - Do not abort the process on an exception thrown from s3fs_strtoofft
#1286 - Support Google Cloud Storage headers
#1295 - Added a parameter to the curldbg option to output body data
#1302 - Fix renames of open files with nocopyapi option
#1303 - Relink cache stats file atomically via rename
#1305 - Ignore case when comparing ETags
#1306 - Retry with exponential backoff during 500 error
#1312 - Fixed a bug when serializing from the cache file
#1313 - Fixed a bug in ParallelMixMultipartUpload
#1316 - Add support for glacier storage class
#1319 - Fixed an upload error when mix-uploading a sparse file and truncating a file
#1334 - Added SIGUSR1 option for cache file integrity test
#1341 - Change default stat_cache_expire
Version 1.86 -- 04 Feb, 2020 (major changes only)
#965 - enable various optimizations when using modern curl
#1002 - allow SSE-C keys to have NUL bytes

View File

@ -4,11 +4,13 @@ s3fs allows Linux and macOS to mount an S3 bucket via FUSE.
s3fs preserves the native object format for files, allowing use of other
tools like [AWS CLI](https://github.com/aws/aws-cli).
[![Build Status](https://travis-ci.org/s3fs-fuse/s3fs-fuse.svg?branch=master)](https://travis-ci.org/s3fs-fuse/s3fs-fuse)
[![Twitter Follow](https://img.shields.io/twitter/follow/s3fsfuse.svg?style=social&label=Follow)](https://twitter.com/s3fsfuse)
## Features
* large subset of POSIX including reading/writing files, directories, symlinks, mode, uid/gid, and extended attributes
* compatible with Amazon S3, Google Cloud Storage, and other S3-based object stores
* allows random writes and appends
* large files via multi-part upload
* renames via server-side copy
* optional server-side encryption
@ -29,6 +31,12 @@ Many systems provide pre-built packages:
sudo yum install s3fs-fuse
```
* Arch Linux:
```
sudo pacman -S s3fs-fuse
```
* Debian 9 and Ubuntu 16.04 or newer:
```
@ -77,8 +85,8 @@ stored in `${HOME}/.aws/credentials`. Alternatively, s3fs supports a custom pas
The default location for the s3fs password file can be created:
* using a .passwd-s3fs file in the users home directory (i.e. ${HOME}/.passwd-s3fs)
* using the system-wide /etc/passwd-s3fs file
* using a `.passwd-s3fs` file in the user's home directory (i.e. `${HOME}/.passwd-s3fs`)
* using the system-wide `/etc/passwd-s3fs` file
Enter your credentials in a file `${HOME}/.passwd-s3fs` and set
owner-only permissions:
@ -103,7 +111,7 @@ s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o dbgleve
You can also mount on boot by entering the following line to `/etc/fstab`:
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other 0 0
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other 0 0
```
or
@ -121,17 +129,9 @@ s3fs mybucket /path/to/mountpoint -o passwd_file=${HOME}/.passwd-s3fs -o url=htt
or(fstab)
```
s3fs#mybucket /path/to/mountpoint fuse _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
mybucket /path/to/mountpoint fuse.s3fs _netdev,allow_other,use_path_request_style,url=https://url.to.s3/ 0 0
```
To use IBM IAM Authentication, use the `-o ibm_iam_auth` option, and specify the Service Instance ID and API Key in your credentials file:
```
echo SERVICEINSTANCEID:APIKEY > /path/to/passwd
```
The Service Instance ID is only required when using the `-o create_bucket` option.
Note: You may also want to create the global credential file first
```
@ -145,7 +145,7 @@ Note2: You may also need to make sure the `netfs` service is started on boot
Generally S3 cannot offer the same performance or semantics as a local file system. More specifically:
* random writes or appends to files require rewriting the entire file
* random writes or appends to files require rewriting the entire object, optimized with multi-part upload copy
* metadata operations such as listing directories have poor performance due to network latency
* [eventual consistency](https://en.wikipedia.org/wiki/Eventual_consistency) can temporarily yield stale data ([Amazon S3 Data Consistency Model](https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel))
* no atomic renames of files or directories

View File

@ -20,7 +20,7 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(s3fs, 1.86)
AC_INIT(s3fs, 1.87)
AC_CONFIG_HEADER([config.h])
AC_CANONICAL_SYSTEM

View File

@ -54,7 +54,7 @@ print version
FUSE foreground option - do not run as daemon.
.TP
\fB\-s\fR
FUSE singlethreaded option (disables multi-threaded operation)
FUSE single-threaded option (disables multi-threaded operation)
.SS "mount options"
.TP
All s3fs options must be given in the form where "opt" is:
@ -83,7 +83,7 @@ delete local file cache when s3fs starts and exits.
\fB\-o\fR storage_class (default="standard")
store object with specified storage class.
this option replaces the old option use_rrs.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, and intelligent_tiering.
Possible values: standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering, and glacier.
.TP
\fB\-o\fR use_rrs (default is disable)
use Amazon's Reduced Redundancy Storage.
@ -158,10 +158,10 @@ specify the maximum number of keys returned by S3 list object API. The default i
\fB\-o\fR max_stat_cache_size (default="100,000" entries (about 40MB))
maximum number of entries in the stat cache and symbolic link cache.
.TP
\fB\-o\fR stat_cache_expire (default is no expire)
\fB\-o\fR stat_cache_expire (default is 900)
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time indicates the time elapsed since the entry was cached.
.TP
\fB\-o\fR stat_cache_interval_expire (default is no expire)
\fB\-o\fR stat_cache_interval_expire (default is 900)
specify expire time (seconds) for entries in the stat cache and symbolic link cache. This expire time is measured from the last access time of those cache entries.
This option is exclusive with stat_cache_expire, and is left for compatibility with older versions.
.TP
@ -172,17 +172,16 @@ It increases ListBucket requests and degrades performance.
You can specify this option for performance; s3fs memorizes in the stat cache that the object (file or directory) does not exist.
.TP
\fB\-o\fR no_check_certificate (by default this option is disabled)
do not check ssl certificate.
server certificate won't be checked against the available certificate authorities.
.TP
\fB\-o\fR ssl_verify_hostname (default="2")
When 0, do not verify the SSL certificate against the hostname.
.TP
\fB\-o\fR nodnscache - disable dns cache.
s3fs is always using dns cache, this option make dns cache disable.
\fB\-o\fR nodnscache - disable DNS cache.
s3fs always uses the DNS cache; this option disables it.
.TP
\fB\-o\fR nosscache - disable ssl session cache.
s3fs is always using ssl session cache, this option make ssl session cache disable.
\fB\-o\fR nosscache - disable SSL session cache.
s3fs always uses the SSL session cache; this option disables it.
.TP
\fB\-o\fR multireq_max (default="20")
maximum number of parallel requests for listing objects.
@ -209,7 +208,7 @@ multipart copy.
\fB\-o\fR host (default="https://s3.amazonaws.com")
Set a non-Amazon host, e.g., https://example.com.
.TP
\fB\-o\fR sevicepath (default="/")
\fB\-o\fR servicepath (default="/")
Set a service path when the non-Amazon host requires a prefix.
.TP
\fB\-o\fR url (default="https://s3.amazonaws.com")
@ -311,12 +310,12 @@ s3fs may not be able to recognize the object correctly if an object created by s
Please use this option when directories in the bucket are represented only by "dir/" objects.
.TP
\fB\-o\fR use_wtf8 - support arbitrary file system encoding.
S3 requires all object names to be valid utf-8. But some
S3 requires all object names to be valid UTF-8. But some
clients, notably Windows NFS clients, use their own encoding.
This option re-encodes invalid utf-8 object names into valid
utf-8 by mapping offending codes into a 'private' codepage of the
This option re-encodes invalid UTF-8 object names into valid
UTF-8 by mapping offending codes into a 'private' codepage of the
Unicode set.
Useful on clients not using utf-8 as their file system encoding.
Useful on clients not using UTF-8 as their file system encoding.
.TP
\fB\-o\fR use_session_token - indicate that session token should be provided.
If credentials are provided by environment variables this switch
@ -326,6 +325,11 @@ Otherwise an error is returned.
\fB\-o\fR requester_pays (default is disable)
This option instructs s3fs to enable requests involving Requester Pays buckets (It includes the 'x-amz-request-payer=requester' entry in the request header).
.TP
\fB\-o\fR mime (default is "/etc/mime.types")
Specify the path of the mime.types file.
If this option is not specified, the existence of "/etc/mime.types" is checked, and that file is loaded as mime information.
If this file does not exist on macOS, then "/etc/apache2/mime.types" is checked as well.
.TP
\fB\-o\fR dbglevel (default="crit")
Set the debug message level. Set the value to crit (critical), err (error), warn (warning), or info (information); the default debug level is critical.
If s3fs is run with the "-d" option, the debug level is set to information.
@ -333,6 +337,15 @@ When s3fs catches the SIGUSR2 signal, the debug level is bumped up.
.TP
\fB\-o\fR curldbg - put curl debug message
Put the debug message from libcurl when this option is specified.
Specify "normal" or "body" for the parameter.
If the parameter is omitted, it is the same as "normal".
If "body" is specified, the body data of some API communications is output in addition to the "normal" debug messages.
.TP
\fB\-o\fR set_check_cache_sigusr1 (default is stdout)
If the cache is enabled, you can check the integrity of the cache file and the cache file's stats info file.
When this option is specified, sending the SIGUSR1 signal to the s3fs process checks the cache status at that time.
This option can take a file path as a parameter to output the check result to that file.
The file path parameter can be omitted; if omitted, the result is output to stdout or syslog.
.SS "utility mode options"
.TP
\fB\-u\fR or \fB\-\-incomplete\-mpu\-list\fR
@ -354,11 +367,11 @@ There are many FUSE specific mount options that can be specified. e.g. allow_oth
.TP
The maximum size of objects that s3fs can handle depends on Amazon S3. For example, up to 5 GB when using single PUT API. And up to 5 TB is supported when Multipart Upload API is used.
.TP
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses md5 checksums to minimize downloads from S3.
If enabled via the "use_cache" option, s3fs automatically maintains a local cache of files in the folder specified by use_cache. Whenever s3fs needs to read or write a file on S3, it first downloads the entire file locally to the folder specified by use_cache and operates on it. When fuse_release() is called, s3fs will re-upload the file to S3 if it has been changed. s3fs uses MD5 checksums to minimize downloads from S3.
.TP
The folder specified by use_cache is just a local cache. It can be deleted at any time. s3fs rebuilds it on demand.
.TP
Local file caching works by calculating and comparing md5 checksums (ETag HTTP header).
Local file caching works by calculating and comparing MD5 checksums (ETag HTTP header).
.TP
s3fs leverages /etc/mime.types to "guess" the "correct" content-type based on file name extension. This means that you can copy a website to S3 and serve it up directly from S3 with correct content-types!
.SH SEE ALSO

View File

@ -32,7 +32,8 @@ s3fs_SOURCES = \
s3fs_util.cpp \
fdcache.cpp \
common_auth.cpp \
addhead.cpp
addhead.cpp \
sighandlers.cpp
if USE_SSL_OPENSSL
s3fs_SOURCES += openssl_auth.cpp
endif

View File

@ -160,7 +160,7 @@ pthread_mutex_t StatCache::stat_cache_lock;
//-------------------------------------------------------------------
// Constructor/Destructor
//-------------------------------------------------------------------
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(0), CacheSize(100000), IsCacheNoObject(false)
StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), ExpireTime(15 * 60), CacheSize(100000), IsCacheNoObject(false)
{
if(this == StatCache::getStatCacheData()){
stat_cache.clear();
@ -169,7 +169,11 @@ StatCache::StatCache() : IsExpireTime(false), IsExpireIntervalType(false), Expir
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
pthread_mutex_init(&StatCache::stat_cache_lock, &attr);
int res;
if(0 != (res = pthread_mutex_init(&StatCache::stat_cache_lock, &attr))){
S3FS_PRN_CRIT("failed to init stat_cache_lock: %d", res);
abort();
}
}else{
abort();
}
@ -179,7 +183,11 @@ StatCache::~StatCache()
{
if(this == StatCache::getStatCacheData()){
Clear();
pthread_mutex_destroy(&StatCache::stat_cache_lock);
int res = pthread_mutex_destroy(&StatCache::stat_cache_lock);
if(res != 0){
S3FS_PRN_CRIT("failed to destroy stat_cache_lock: %d", res);
abort();
}
}else{
abort();
}
@ -543,20 +551,23 @@ bool StatCache::TruncateCache()
// 3) erase from the old cache in order
size_t erase_count= stat_cache.size() - CacheSize + 1;
statiterlist_t erase_iters;
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end(); ++iter){
for(stat_cache_t::iterator iter = stat_cache.begin(); iter != stat_cache.end() && 0 < erase_count; ++iter){
// check no truncate
stat_cache_entry* ent = iter->second;
if(ent && 0L < ent->notruncate){
// skip for no truncate entry
// skip for no truncate entry and keep extra counts for this entity.
if(0 < erase_count){
--erase_count; // decrement
}
}else{
// iter is not have notruncate flag
erase_iters.push_back(iter);
}
// iter is not have notruncate flag
erase_iters.push_back(iter);
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
if(erase_count < erase_iters.size()){
erase_iters.pop_back();
sort(erase_iters.begin(), erase_iters.end(), sort_statiterlist());
while(erase_count < erase_iters.size()){
erase_iters.pop_back();
}
}
}
for(statiterlist_t::iterator iiter = erase_iters.begin(); iiter != erase_iters.end(); ++iiter){
@ -607,7 +618,7 @@ bool StatCache::DelStat(const char* key, bool lock_already_held)
bool StatCache::GetSymlink(const string& key, string& value)
{
bool is_delete_cache = false;
string strpath = key;
const string& strpath = key;
AutoLock lock(&StatCache::stat_cache_lock);

View File

@ -97,6 +97,15 @@ enum s3fs_log_level{
} \
}while(0)
#define S3FS_LOW_CURLDBG(fmt, ...) \
do{ \
if(foreground){ \
fprintf(stdout, "[CURL DBG] " fmt "%s\n", __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_CRIT), "%s" fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
#define S3FS_LOW_LOGPRN_EXIT(fmt, ...) \
do{ \
if(foreground){ \
@ -117,6 +126,16 @@ enum s3fs_log_level{
} \
}while(0)
// Special macro for checking cache files
#define S3FS_LOW_CACHE(fp, fmt, ...) \
do{ \
if(foreground){ \
fprintf(fp, fmt "%s\n", __VA_ARGS__); \
}else{ \
syslog(S3FS_LOG_LEVEL_TO_SYSLOG(S3FS_LOG_INFO), "%s: " fmt "%s", instance_name.c_str(), __VA_ARGS__); \
} \
}while(0)
// [NOTE]
// small trick for VA_ARGS
//
@ -130,7 +149,8 @@ enum s3fs_log_level{
#define S3FS_PRN_INFO1(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 1, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO2(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 2, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_INFO3(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_INFO, 3, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_LOGPRN2(S3FS_LOG_CRIT, 0, fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CURL(fmt, ...) S3FS_LOW_CURLDBG(fmt, ##__VA_ARGS__, "")
#define S3FS_PRN_CACHE(fp, ...) S3FS_LOW_CACHE(fp, ##__VA_ARGS__, "")
//
// Typedef

View File

@ -127,7 +127,7 @@ static string get_bucket_host()
return url_to_host(host);
}
// compare ETag ignoring quotes
// compare ETag ignoring quotes and case
static bool etag_equals(std::string s1, std::string s2) {
if(s1.length() > 1 && s1[0] == '\"' && s1[s1.length() - 1] == '\"'){
s1 = s1.substr(1, s1.size() - 2);
@ -135,7 +135,7 @@ static bool etag_equals(std::string s1, std::string s2) {
if(s2.length() > 1 && s2[0] == '\"' && s2[s2.length() - 1] == '\"'){
s2 = s2.substr(1, s2.size() - 2);
}
return s1 == s2;
return 0 == strcasecmp(s1.c_str(), s2.c_str());
}
#if 0 // noused
@ -154,6 +154,29 @@ static string tolower_header_name(const char* head)
}
#endif
static const char* getCurlDebugHead(curl_infotype type)
{
const char* unknown = "";
const char* dataIn = "BODY <";
const char* dataOut = "BODY >";
const char* headIn = "<";
const char* headOut = ">";
switch(type){
case CURLINFO_DATA_IN:
return dataIn;
case CURLINFO_DATA_OUT:
return dataOut;
case CURLINFO_HEADER_IN:
return headIn;
case CURLINFO_HEADER_OUT:
return headOut;
default:
break;
}
return unknown;
}
//-------------------------------------------------------------------
// Class BodyData
//-------------------------------------------------------------------
@ -343,6 +366,20 @@ static const long S3FSCURL_RESPONSECODE_NOTSET = -1;
static const long S3FSCURL_RESPONSECODE_FATAL_ERROR = -2;
static const int S3FSCURL_PERFORM_RESULT_NOTSET = 1;
// [NOTE] about default mime.types file
// If no mime.types file is specified in the mime option, s3fs
// will look for /etc/mime.types on all operating systems and
// load mime information.
// However, in the case of macOS, when this file does not exist,
// it tries to detect the /etc/apache2/mime.types file.
// The reason for this is that apache2 is preinstalled on macOS,
// and the mime.types file is expected to exist in this path.
// If the mime.types file is not found, s3fs will exit with an
// error.
//
static const char* DEFAULT_MIME_FILE = "/etc/mime.types";
static const char* SPECIAL_DARWIN_MIME_FILE = "/etc/apache2/mime.types";
// [NOTICE]
// This symbol is for libcurl under 7.23.0
#ifndef CURLSHE_NOT_BUILT_IN
@ -369,6 +406,7 @@ std::string S3fsCurl::ssekmsid;
sse_type_t S3fsCurl::ssetype = SSE_DISABLE;
bool S3fsCurl::is_content_md5 = false;
bool S3fsCurl::is_verbose = false;
bool S3fsCurl::is_dump_body = false;
string S3fsCurl::AWSAccessKeyId;
string S3fsCurl::AWSSecretAccessKey;
string S3fsCurl::AWSAccessToken;
@ -397,7 +435,7 @@ bool S3fsCurl::requester_pays = false; // default
//-------------------------------------------------------------------
// Class methods for S3fsCurl
//-------------------------------------------------------------------
bool S3fsCurl::InitS3fsCurl(const char* MimeFile)
bool S3fsCurl::InitS3fsCurl()
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
@ -413,9 +451,6 @@ bool S3fsCurl::InitS3fsCurl(const char* MimeFile)
if(0 != pthread_mutex_init(&S3fsCurl::curl_share_lock[SHARE_MUTEX_SSL_SESSION], &attr)){
return false;
}
if(!S3fsCurl::InitMimeType(MimeFile)){
return false;
}
if(!S3fsCurl::InitGlobalCurl()){
return false;
}
@ -561,11 +596,18 @@ void S3fsCurl::LockCurlShare(CURL* handle, curl_lock_data nLockData, curl_lock_a
if(!hCurlShare){
return;
}
int res;
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
if(CURL_LOCK_DATA_DNS == nLockData){
pthread_mutex_lock(&lockmutex[SHARE_MUTEX_DNS]);
if(0 != (res = pthread_mutex_lock(&lockmutex[SHARE_MUTEX_DNS]))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
pthread_mutex_lock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
if(0 != (res = pthread_mutex_lock(&lockmutex[SHARE_MUTEX_SSL_SESSION]))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}
}
@ -574,11 +616,18 @@ void S3fsCurl::UnlockCurlShare(CURL* handle, curl_lock_data nLockData, void* use
if(!hCurlShare){
return;
}
int res;
pthread_mutex_t* lockmutex = static_cast<pthread_mutex_t*>(useptr);
if(CURL_LOCK_DATA_DNS == nLockData){
pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_DNS]);
if(0 != (res = pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_DNS]))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}else if(CURL_LOCK_DATA_SSL_SESSION == nLockData){
pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_SSL_SESSION]);
if(0 != (res = pthread_mutex_unlock(&lockmutex[SHARE_MUTEX_SSL_SESSION]))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}
}
@ -618,15 +667,39 @@ int S3fsCurl::CurlProgress(void *clientp, double dltotal, double dlnow, double u
return 0;
}
bool S3fsCurl::InitMimeType(const char* MimeFile)
bool S3fsCurl::InitMimeType(const std::string& strFile)
{
if(!MimeFile){
MimeFile = "/etc/mime.types"; // default
string MimeFile;
if(!strFile.empty()){
MimeFile = strFile;
}else{
// search default mime.types
string errPaths = DEFAULT_MIME_FILE;
struct stat st;
if(0 == stat(DEFAULT_MIME_FILE, &st)){
MimeFile = DEFAULT_MIME_FILE;
}else if(compare_sysname("Darwin")){
// for macos, search another default file.
if(0 == stat(SPECIAL_DARWIN_MIME_FILE, &st)){
MimeFile = SPECIAL_DARWIN_MIME_FILE;
}else{
errPaths += " and ";
errPaths += SPECIAL_DARWIN_MIME_FILE;
}
}
if(MimeFile.empty()){
S3FS_PRN_WARN("Could not find mime.types files, you have to create file(%s) or specify mime option for existing mime.types file.", errPaths.c_str());
return false;
}
}
S3FS_PRN_DBG("Try to load mime types from %s file.", MimeFile.c_str());
string line;
ifstream MT(MimeFile);
ifstream MT(MimeFile.c_str());
if(MT.good()){
S3FS_PRN_DBG("The old mime types are cleared to load new mime types.");
S3fsCurl::mimeTypes.clear();
while(getline(MT, line)){
if(line[0]=='#'){
continue;
@ -647,6 +720,10 @@ bool S3fsCurl::InitMimeType(const char* MimeFile)
S3fsCurl::mimeTypes[ext] = mimeType;
}
}
S3FS_PRN_INIT_INFO("Loaded mime information from %s", MimeFile.c_str());
}else{
S3FS_PRN_WARN("Could not load mime types from %s, please check the existence and permissions of this file.", MimeFile.c_str());
return false;
}
return true;
}
@ -1201,6 +1278,13 @@ bool S3fsCurl::SetVerbose(bool flag)
return old;
}
bool S3fsCurl::SetDumpBody(bool flag)
{
bool old = S3fsCurl::is_dump_body;
S3fsCurl::is_dump_body = flag;
return old;
}
bool S3fsCurl::SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey)
{
if((!S3fsCurl::is_ibm_iam_auth && (!AccessKeyId || '\0' == AccessKeyId[0])) || !SecretAccessKey || '\0' == SecretAccessKey[0]){
@ -1336,13 +1420,17 @@ S3fsCurl* S3fsCurl::UploadMultipartPostRetryCallback(S3fsCurl* s3fscurl)
string upload_id;
string part_num_str;
int part_num;
off_t tmp_part_num = 0;
if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){
return NULL;
}
if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){
return NULL;
}
part_num = s3fs_strtoofft(part_num_str.c_str(), /*base=*/ 10);
if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){
return NULL;
}
part_num = static_cast<off_t>(tmp_part_num);
if(s3fscurl->retry_count >= S3fsCurl::retries){
S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num);
@ -1380,13 +1468,17 @@ S3fsCurl* S3fsCurl::CopyMultipartPostRetryCallback(S3fsCurl* s3fscurl)
string upload_id;
string part_num_str;
int part_num;
off_t tmp_part_num = 0;
if(!get_keyword_value(s3fscurl->url, "uploadId", upload_id)){
return NULL;
}
if(!get_keyword_value(s3fscurl->url, "partNumber", part_num_str)){
return NULL;
}
part_num = s3fs_strtoofft(part_num_str.c_str(), /*base=*/ 10);
if(!try_strtoofft(part_num_str.c_str(), tmp_part_num, /*base=*/ 10)){
return NULL;
}
part_num = static_cast<off_t>(tmp_part_num);
if(s3fscurl->retry_count >= S3fsCurl::retries){
S3FS_PRN_ERR("Over retry count(%d) limit(%s:%d).", s3fscurl->retry_count, s3fscurl->path.c_str(), part_num);
@ -1518,7 +1610,7 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
return 0;
}
int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const PageList& pagelist)
int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages)
{
int result;
string upload_id;
@ -1529,16 +1621,9 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
S3FS_PRN_INFO3("[tpath=%s][fd=%d]", SAFESTRPTR(tpath), fd);
// get upload mixed page list
fdpage_list_t fdplist;
if(!pagelist.GetMultipartSizeList(fdplist, S3fsCurl::multipart_size)){
return -1;
}
// duplicate fd
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
S3FS_PRN_ERR("Could not duplicate file descriptor(errno=%d)", errno);
PageList::FreeList(fdplist);
if(-1 != fd2){
close(fd2);
}
@ -1546,13 +1631,11 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
}
if(-1 == fstat(fd2, &st)){
S3FS_PRN_ERR("Invalid file descriptor(errno=%d)", errno);
PageList::FreeList(fdplist);
close(fd2);
return -errno;
}
if(0 != (result = s3fscurl.PreMultipartPostRequest(tpath, meta, upload_id, true))){
PageList::FreeList(fdplist);
close(fd2);
return result;
}
@ -1570,7 +1653,7 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
curlmulti.SetSuccessCallback(S3fsCurl::MixMultipartPostCallback);
curlmulti.SetRetryCallback(S3fsCurl::MixMultipartPostRetryCallback);
for(fdpage_list_t::const_iterator iter = fdplist.begin(); iter != fdplist.end(); ++iter){
for(fdpage_list_t::const_iterator iter = mixuppages.begin(); iter != mixuppages.end(); ++iter){
// s3fscurl sub object
S3fsCurl* s3fscurl_para = new S3fsCurl(true);
@ -1588,7 +1671,6 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
// initiate upload part for parallel
if(0 != (result = s3fscurl_para->UploadMultipartPostSetup(tpath, list.size(), upload_id))){
S3FS_PRN_ERR("failed uploading part setup(%d)", result);
PageList::FreeList(fdplist);
close(fd2);
delete s3fscurl_para;
return result;
@ -1619,13 +1701,11 @@ int S3fsCurl::ParallelMixMultipartUploadRequest(const char* tpath, headers_t& me
// set into parallel object
if(!curlmulti.SetS3fsCurlObject(s3fscurl_para)){
S3FS_PRN_ERR("Could not make curl object into multi curl(%s).", tpath);
PageList::FreeList(fdplist);
close(fd2);
delete s3fscurl_para;
return -1;
}
}
PageList::FreeList(fdplist);
// Multi request
if(0 != (result = curlmulti.Request())){
@ -1882,7 +1962,11 @@ bool S3fsCurl::SetIAMCredentials(const char* response)
S3fsCurl::AWSAccessToken = keyval[string(S3fsCurl::IAM_token_field)];
if(S3fsCurl::is_ibm_iam_auth){
S3fsCurl::AWSAccessTokenExpire = s3fs_strtoofft(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), /*base=*/ 10);
off_t tmp_expire = 0;
if(!try_strtoofft(keyval[string(S3fsCurl::IAM_expiry_field)].c_str(), tmp_expire, /*base=*/ 10)){
return false;
}
S3fsCurl::AWSAccessTokenExpire = static_cast<time_t>(tmp_expire);
}else{
S3fsCurl::AWSAccessKeyId = keyval[string(IAMCRED_ACCESSKEYID)];
S3fsCurl::AWSSecretAccessKey = keyval[string(IAMCRED_SECRETACCESSKEY)];
@ -1953,6 +2037,21 @@ bool S3fsCurl::AddUserAgent(CURL* hCurl)
}
int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr)
{
return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_END);
}
int S3fsCurl::CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr)
{
return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_IN);
}
int S3fsCurl::CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr)
{
return S3fsCurl::RawCurlDebugFunc(hcurl, type, data, size, userptr, CURLINFO_DATA_OUT);
}
int S3fsCurl::RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype)
{
if(!hcurl){
// something wrong...
@ -1968,8 +2067,17 @@ int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t
size--;
data++;
}
if(foreground && 0 < size && '\n' == data[size - 1]){
size--;
}
S3FS_PRN_CURL("* %*s%.*s", indent, "", (int)size, data);
break;
case CURLINFO_DATA_IN:
case CURLINFO_DATA_OUT:
if(type != datatype || !S3fsCurl::is_dump_body){
// not put
break;
}
case CURLINFO_HEADER_IN:
case CURLINFO_HEADER_OUT:
size_t remaining;
@ -1991,13 +2099,11 @@ int S3fsCurl::CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t
eol++;
}
size_t length = eol - p;
S3FS_PRN_CURL("%c %.*s", CURLINFO_HEADER_IN == type ? '<' : '>', (int)length - newline, p);
S3FS_PRN_CURL("%s %.*s", getCurlDebugHead(type), (int)length - newline, p);
remaining -= length;
p = eol;
} while (p != NULL && remaining > 0);
break;
case CURLINFO_DATA_IN:
case CURLINFO_DATA_OUT:
case CURLINFO_SSL_DATA_IN:
case CURLINFO_SSL_DATA_OUT:
// not put
@ -2067,9 +2173,7 @@ bool S3fsCurl::ResetHandle()
}
if(S3fsCurl::is_verbose){
curl_easy_setopt(hCurl, CURLOPT_VERBOSE, true);
if(!foreground){
curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc);
}
curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugFunc);
}
if(!cipher_suites.empty()) {
curl_easy_setopt(hCurl, CURLOPT_SSL_CIPHER_LIST, cipher_suites.c_str());
@ -2443,14 +2547,20 @@ int S3fsCurl::RequestPerform(bool dontAddAuthHeaders /*=false*/)
result = -ENOENT;
break;
case 416:
S3FS_PRN_INFO3("HTTP response code 416 was returned, returning EIO");
result = -EIO;
break;
case 501:
S3FS_PRN_INFO3("HTTP response code 501 was returned, returning ENOTSUP");
S3FS_PRN_DBG("Body Text: %s", bodydata.str());
result = -ENOTSUP;
break;
case 500:
case 503:
S3FS_PRN_INFO3("HTTP response code 503 was returned, slowing down");
S3FS_PRN_INFO3("HTTP response code %ld was returned, slowing down", responseCode);
S3FS_PRN_DBG("Body Text: %s", bodydata.str());
sleep(4 << retry_count);
break;
@ -3158,6 +3268,8 @@ int S3fsCurl::PutHeadRequest(const char* tpath, headers_t& meta, bool is_copy)
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA");
} else if(INTELLIGENT_TIERING == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING");
} else if(GLACIER == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "GLACIER");
}
// SSE
if(!is_copy){
@ -3289,6 +3401,8 @@ int S3fsCurl::PutRequest(const char* tpath, headers_t& meta, int fd)
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA");
} else if(INTELLIGENT_TIERING == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING");
} else if(GLACIER == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "GLACIER");
}
// SSE
string ssevalue;
@ -3473,6 +3587,9 @@ int S3fsCurl::ListBucketRequest(const char* tpath, const char* query)
curl_easy_setopt(hCurl, CURLOPT_URL, url.c_str());
curl_easy_setopt(hCurl, CURLOPT_WRITEDATA, (void*)&bodydata);
curl_easy_setopt(hCurl, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
if(S3fsCurl::is_verbose){
curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyInFunc); // replace debug function
}
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
return RequestPerform();
@ -3549,6 +3666,8 @@ int S3fsCurl::PreMultipartPostRequest(const char* tpath, headers_t& meta, string
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "ONEZONE_IA");
} else if(INTELLIGENT_TIERING == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "INTELLIGENT_TIERING");
} else if(GLACIER == GetStorageClass()) {
requestHeaders = curl_slist_sort_insert(requestHeaders, "x-amz-storage-class", "GLACIER");
}
// SSE
if(!is_copy){
@ -3636,7 +3755,7 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const string& uplo
requestHeaders = NULL;
bodydata.Clear();
responseHeaders.clear();
string contype = S3fsCurl::LookupMimeType(string(tpath));
string contype = "application/xml";
requestHeaders = curl_slist_sort_insert(requestHeaders, "Accept", NULL);
requestHeaders = curl_slist_sort_insert(requestHeaders, "Content-Type", contype.c_str());
@ -3652,6 +3771,9 @@ int S3fsCurl::CompleteMultipartPostRequest(const char* tpath, const string& uplo
curl_easy_setopt(hCurl, CURLOPT_POSTFIELDSIZE, static_cast<curl_off_t>(postdata_remaining));
curl_easy_setopt(hCurl, CURLOPT_READDATA, (void*)this);
curl_easy_setopt(hCurl, CURLOPT_READFUNCTION, S3fsCurl::ReadCallback);
if(S3fsCurl::is_verbose){
curl_easy_setopt(hCurl, CURLOPT_DEBUGFUNCTION, S3fsCurl::CurlDebugBodyOutFunc); // replace debug function
}
S3fsCurl::AddUserAgent(hCurl); // put User-Agent
// request
@ -4275,9 +4397,9 @@ int S3fsMultiCurl::MultiPerform()
for(s3fscurllist_t::iterator iter = clist_req.begin(); iter != clist_req.end(); ++iter) {
pthread_t thread;
S3fsCurl* s3fscurl = *iter;
s3fscurl->sem = &sem;
s3fscurl->completed_tids_lock = &completed_tids_lock;
s3fscurl->completed_tids = &completed_tids;
if(!s3fscurl){
continue;
}
sem.wait();
@ -4299,6 +4421,9 @@ int S3fsMultiCurl::MultiPerform()
}
completed_tids.clear();
}
s3fscurl->sem = &sem;
s3fscurl->completed_tids_lock = &completed_tids_lock;
s3fscurl->completed_tids = &completed_tids;
isMultiHead |= s3fscurl->GetOp() == "HEAD";
@ -4468,7 +4593,10 @@ void* S3fsMultiCurl::RequestPerformWrapper(void* arg)
{
S3fsCurl* s3fscurl= static_cast<S3fsCurl*>(arg);
void* result = NULL;
if(s3fscurl && s3fscurl->fpLazySetup){
if(!s3fscurl){
return (void*)(intptr_t)(-EIO);
}
if(s3fscurl->fpLazySetup){
if(!s3fscurl->fpLazySetup(s3fscurl)){
S3FS_PRN_ERR("Failed to lazy setup, then respond EIO.");
result = (void*)(intptr_t)(-EIO);

View File

@ -186,7 +186,8 @@ private:
//----------------------------------------------
// class S3fsCurl
//----------------------------------------------
class PageList;
#include "fdcache.h" // for fdpage_list_t
class S3fsCurl;
// Prototype function for lazy setup options for curl handle
@ -202,7 +203,8 @@ enum storage_class_t {
STANDARD_IA,
ONEZONE_IA,
REDUCED_REDUNDANCY,
INTELLIGENT_TIERING
INTELLIGENT_TIERING,
GLACIER
};
enum acl_t {
@ -279,6 +281,7 @@ class S3fsCurl
static sse_type_t ssetype;
static bool is_content_md5;
static bool is_verbose;
static bool is_dump_body;
static std::string AWSAccessKeyId;
static std::string AWSSecretAccessKey;
static std::string AWSAccessToken;
@ -355,7 +358,6 @@ class S3fsCurl
static bool DestroyCryptMutex(void);
static int CurlProgress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow);
static bool InitMimeType(const char* MimeFile = NULL);
static bool LocateBundle(void);
static size_t HeaderCallback(void *data, size_t blockSize, size_t numBlocks, void *userPtr);
static size_t WriteMemoryCallback(void *ptr, size_t blockSize, size_t numBlocks, void *data);
@ -387,6 +389,9 @@ class S3fsCurl
static bool AddUserAgent(CURL* hCurl);
static int CurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int CurlDebugBodyInFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int CurlDebugBodyOutFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr);
static int RawCurlDebugFunc(CURL* hcurl, curl_infotype type, char* data, size_t size, void* userptr, curl_infotype datatype);
// methods
bool ResetHandle(void);
@ -408,10 +413,11 @@ class S3fsCurl
public:
// class methods
static bool InitS3fsCurl(const char* MimeFile = NULL);
static bool InitS3fsCurl(void);
static bool InitMimeType(const std::string& strFile);
static bool DestroyS3fsCurl(void);
static int ParallelMultipartUploadRequest(const char* tpath, headers_t& meta, int fd);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const PageList& pagelist);
static int ParallelMixMultipartUploadRequest(const char* tpath, headers_t& meta, int fd, const fdpage_list_t& mixuppages);
static int ParallelGetObjectRequest(const char* tpath, int fd, off_t start, ssize_t size);
static bool CheckIAMCredentialUpdate(void);
@ -448,6 +454,8 @@ class S3fsCurl
static bool SetContentMd5(bool flag);
static bool SetVerbose(bool flag);
static bool GetVerbose(void) { return S3fsCurl::is_verbose; }
static bool SetDumpBody(bool flag);
static bool IsDumpBody(void) { return S3fsCurl::is_dump_body; }
static bool SetAccessKey(const char* AccessKeyId, const char* SecretAccessKey);
static bool SetAccessKeyWithSessionToken(const char* AccessKeyId, const char* SecretAccessKey, const char * SessionToken);
static bool IsSetAccessKeyID(void){

File diff suppressed because it is too large

View File

@ -21,7 +21,6 @@
#define FD_CACHE_H_
#include <sys/statvfs.h>
#include "curl.h"
//------------------------------------------------
// CacheFileStat
@ -35,7 +34,10 @@ class CacheFileStat
private:
static bool MakeCacheFileStatPath(const char* path, std::string& sfile_path, bool is_create_dir = true);
bool RawOpen(bool readonly);
public:
static std::string GetCacheFileStatTopDir(void);
static bool DeleteCacheFileStat(const char* path);
static bool CheckCacheFileStatTopDir(void);
static bool DeleteCacheFileStatDirectory(void);
@ -45,6 +47,7 @@ class CacheFileStat
~CacheFileStat();
bool Open(void);
bool ReadOnlyOpen(void);
bool Release(void);
bool SetPath(const char* tpath, bool is_open = true);
int GetFd(void) const { return fd; }
@ -91,10 +94,13 @@ class PageList
};
private:
static bool GetSparseFilePages(int fd, size_t file_size, fdpage_list_t& sparse_list);
static bool CheckZeroAreaInFile(int fd, off_t start, size_t bytes);
static bool CheckAreaInSparseFile(const struct fdpage& checkpage, const fdpage_list_t& sparse_list, int fd, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
void Clear(void);
bool Compress(bool force_modified = false);
bool Compress();
bool Parse(off_t new_pos);
bool RawGetUnloadPageList(fdpage_list_t& dlpages, off_t offset, off_t size);
public:
static void FreeList(fdpage_list_t& list);
@ -112,14 +118,14 @@ class PageList
bool FindUnloadedPage(off_t start, off_t& resstart, off_t& ressize) const;
off_t GetTotalUnloadedPageSize(off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
int GetUnloadedPages(fdpage_list_t& unloaded_list, off_t start = 0, off_t size = 0) const; // size=0 is checking to end of list
bool GetLoadPageListForMultipartUpload(fdpage_list_t& dlpages);
bool GetMultipartSizeList(fdpage_list_t& mplist, off_t partsize) const;
bool GetPageListsForMultipartUpload(fdpage_list_t& dlpages, fdpage_list_t& mixuppages, off_t max_partsize);
bool IsModified(void) const;
bool ClearAllModified(void);
bool Serialize(CacheFileStat& file, bool is_output);
void Dump(void);
bool Serialize(CacheFileStat& file, bool is_output, ino_t inode);
void Dump(void) const;
bool CompareSparseFile(int fd, size_t file_size, fdpage_list_t& err_area_list, fdpage_list_t& warn_area_list);
};
//------------------------------------------------
@ -136,6 +142,7 @@ class FdEntity
std::string path; // object path
int fd; // file descriptor(tmp file or cache file)
FILE* pfile; // file pointer(tmp file or cache file)
ino_t inode; // inode number for cache file
headers_t orgmeta; // original headers at opening
off_t size_orgmeta; // original file size in original headers
@ -151,8 +158,10 @@ class FdEntity
private:
static int FillFile(int fd, unsigned char byte, off_t size, off_t start);
static ino_t GetInode(int fd);
void Clear(void);
ino_t GetInode(void);
int OpenMirrorFile(void);
bool SetAllStatus(bool is_loaded); // [NOTE] not locking
//bool SetAllStatusLoaded(void) { return SetAllStatus(true); }
@ -186,7 +195,7 @@ class FdEntity
bool SetGId(gid_t gid);
bool SetContentType(const char* path);
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false); // size=0 means loading to end
int Load(off_t start = 0, off_t size = 0, bool lock_already_held = false, bool is_modified_flag = false); // size=0 means loading to end
int NoCacheLoadAndPost(off_t start = 0, off_t size = 0); // size=0 means loading to end
int NoCachePreMultipartPost(void);
int NoCacheMultipartPost(int tgfd, off_t start, off_t size);
@ -216,12 +225,14 @@ class FdManager
static std::string cache_dir;
static bool check_cache_dir_exist;
static off_t free_disk_space; // limit free disk space
static std::string check_cache_output;
fdent_map_t fent;
private:
static off_t GetFreeDiskSpace(const char* path);
void CleanupCacheDirInternal(const std::string &path = "");
bool RawCheckAllCache(FILE* fp, const char* cache_stat_top_dir, const char* sub_path, int& total_file_cnt, int& err_file_cnt, int& err_dir_cnt);
public:
FdManager();
@ -233,8 +244,10 @@ class FdManager
static bool DeleteCacheDirectory(void);
static int DeleteCacheFile(const char* path);
static bool SetCacheDir(const char* dir);
static bool IsCacheDir(void) { return (0 < FdManager::cache_dir.size()); }
static bool IsCacheDir(void) { return !FdManager::cache_dir.empty(); }
static const char* GetCacheDir(void) { return FdManager::cache_dir.c_str(); }
static bool SetCacheCheckOutput(const char* path);
static const char* GetCacheCheckOutput(void) { return FdManager::check_cache_output.c_str(); }
static bool MakeCachePath(const char* path, std::string& cache_path, bool is_create_dir = true, bool is_mirror_path = false);
static bool CheckCacheTopDir(void);
static bool MakeRandomTempPath(const char* path, std::string& tmppath);
@ -255,6 +268,8 @@ class FdManager
bool Close(FdEntity* ent);
bool ChangeEntityToTempPath(FdEntity* ent, const char* path);
void CleanupCacheDir();
bool CheckAllCache(void);
};
#endif // FD_CACHE_H_


@ -86,10 +86,17 @@ static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
static void s3fs_crypt_mutex_lock(int mode, int pos, const char* file, int line)
{
if(s3fs_crypt_mutex){
int res;
if(mode & CRYPTO_LOCK){
pthread_mutex_lock(&s3fs_crypt_mutex[pos]);
if(0 != (res = pthread_mutex_lock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}else{
pthread_mutex_unlock(&s3fs_crypt_mutex[pos]);
if(0 != (res = pthread_mutex_unlock(&s3fs_crypt_mutex[pos]))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}
}
}
@ -111,7 +118,11 @@ static struct CRYPTO_dynlock_value* s3fs_dyn_crypt_mutex(const char* file, int l
#if S3FS_PTHREAD_ERRORCHECK
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
pthread_mutex_init(&(dyndata->dyn_mutex), &attr);
int res;
if(0 != (res = pthread_mutex_init(&(dyndata->dyn_mutex), &attr))){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
return NULL;
}
return dyndata;
}
@ -119,10 +130,17 @@ static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyn
static void s3fs_dyn_crypt_mutex_lock(int mode, struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
int res;
if(mode & CRYPTO_LOCK){
pthread_mutex_lock(&(dyndata->dyn_mutex));
if(0 != (res = pthread_mutex_lock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}else{
pthread_mutex_unlock(&(dyndata->dyn_mutex));
if(0 != (res = pthread_mutex_unlock(&(dyndata->dyn_mutex)))){
S3FS_PRN_CRIT("pthread_mutex_unlock returned: %d", res);
abort();
}
}
}
}
@ -131,7 +149,11 @@ static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, c
static void s3fs_destroy_dyn_crypt_mutex(struct CRYPTO_dynlock_value* dyndata, const char* file, int line)
{
if(dyndata){
pthread_mutex_destroy(&(dyndata->dyn_mutex));
int res = pthread_mutex_destroy(&(dyndata->dyn_mutex));
if(res != 0){
S3FS_PRN_CRIT("failed to destroy dyn_mutex");
abort();
}
delete dyndata;
}
}
@ -152,7 +174,11 @@ bool s3fs_init_crypt_mutex()
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
int res = pthread_mutex_init(&s3fs_crypt_mutex[cnt], &attr);
if(res != 0){
S3FS_PRN_CRIT("pthread_mutex_init returned: %d", res);
return false;
}
}
// static lock
CRYPTO_set_locking_callback(s3fs_crypt_mutex_lock);
@ -178,7 +204,11 @@ bool s3fs_destroy_crypt_mutex()
CRYPTO_set_locking_callback(NULL);
for(int cnt = 0; cnt < CRYPTO_num_locks(); cnt++){
pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
int res = pthread_mutex_destroy(&s3fs_crypt_mutex[cnt]);
if(res != 0){
S3FS_PRN_CRIT("failed to destroy s3fs_crypt_mutex[%d]", cnt);
abort();
}
}
CRYPTO_cleanup_all_ex_data();
delete[] s3fs_crypt_mutex;


@ -52,6 +52,7 @@
#include "fdcache.h"
#include "s3fs_auth.h"
#include "addhead.h"
#include "sighandlers.h"
using namespace std;
@ -110,8 +111,6 @@ std::string bucket;
std::string endpoint = "us-east-1";
std::string cipher_suites;
std::string instance_name;
s3fs_log_level debug_level = S3FS_LOG_CRIT;
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", "  ", "    ", "      "};
std::string aws_profile = "default";
//-------------------------------------------------------------------
@ -124,6 +123,7 @@ static mode_t mp_umask = 0; // umask for mount point
static bool is_mp_umask = false;// default does not set.
static std::string mountpoint;
static std::string passwd_file;
static std::string mimetype_file;
static utility_incomp_type utility_mode = NO_UTILITY_MODE;
static bool noxmlns = false;
static bool nocopyapi = false;
@ -158,10 +158,6 @@ static const std::string aws_secretkey = "AWSSecretKey";
//-------------------------------------------------------------------
// Static functions : prototype
//-------------------------------------------------------------------
static void s3fs_usr2_handler(int sig);
static bool set_s3fs_usr2_handler();
static s3fs_log_level set_s3fs_log_level(s3fs_log_level level);
static s3fs_log_level bumpup_s3fs_log_level();
static bool is_special_name_folder_object(const char* path);
static int chk_dir_object_type(const char* path, string& newpath, string& nowpath, string& nowcache, headers_t* pmeta = NULL, dirtype* pDirType = NULL);
static int remove_old_type_dir(const string& path, dirtype type);
@ -267,50 +263,6 @@ static int s3fs_removexattr(const char* path, const char* name);
//-------------------------------------------------------------------
// Functions
//-------------------------------------------------------------------
static void s3fs_usr2_handler(int sig)
{
if(SIGUSR2 == sig){
bumpup_s3fs_log_level();
}
}
static bool set_s3fs_usr2_handler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = s3fs_usr2_handler;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR2, &sa, NULL)){
return false;
}
return true;
}
static s3fs_log_level set_s3fs_log_level(s3fs_log_level level)
{
if(level == debug_level){
return debug_level;
}
s3fs_log_level old = debug_level;
debug_level = level;
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
static s3fs_log_level bumpup_s3fs_log_level()
{
s3fs_log_level old = debug_level;
debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN :
S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
S3FS_LOG_CRIT );
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
static bool is_special_name_folder_object(const char* path)
{
if(!support_compat_dir){
@ -1332,8 +1284,11 @@ static int rename_object(const char* from, const char* to)
FdManager::get()->Rename(from, to);
// Remove file
result = s3fs_unlink(from);
StatCache::getStatCacheData()->DelStat(to);
FdManager::DeleteCacheFile(to);
return result;
}
@ -1372,6 +1327,8 @@ static int rename_object_nocopy(const char* from, const char* to)
FdManager::get()->Close(ent);
return result;
}
FdManager::get()->Rename(from, to);
FdManager::get()->Close(ent);
// Remove file
@ -1379,7 +1336,7 @@ static int rename_object_nocopy(const char* from, const char* to)
// Stats
StatCache::getStatCacheData()->DelStat(to);
StatCache::getStatCacheData()->DelStat(from);
FdManager::DeleteCacheFile(to);
return result;
}
@ -1409,9 +1366,14 @@ static int rename_large_object(const char* from, const char* to)
return result;
}
s3fscurl.DestroyCurlHandle();
StatCache::getStatCacheData()->DelStat(to);
return s3fs_unlink(from);
// Remove file
result = s3fs_unlink(from);
StatCache::getStatCacheData()->DelStat(to);
FdManager::DeleteCacheFile(to);
return result;
}
static int clone_directory_object(const char* from, const char* to)
@ -3538,6 +3500,11 @@ static void* s3fs_init(struct fuse_conn_info* conn)
conn->want |= FUSE_CAP_BIG_WRITES;
}
// Signal object
if(!S3fsSignals::Initialize()){
S3FS_PRN_ERR("Failed to initialize signal object, but continue...");
}
return NULL;
}
@ -3545,6 +3512,11 @@ static void s3fs_destroy(void*)
{
S3FS_PRN_INFO("destroy");
// Signal object
if(!S3fsSignals::Destroy()){
S3FS_PRN_WARN("Failed to clean up signal object.");
}
// cache(remove at last)
if(is_remove_cache && (!CacheFileStat::DeleteCacheFileStatDirectory() || !FdManager::DeleteCacheDirectory())){
S3FS_PRN_WARN("Could not remove cache directory.");
@ -4535,7 +4507,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 1; // continue for fuse option
}
if(0 == STR2NCMP(arg, "umask=")){
s3fs_umask = s3fs_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
s3fs_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
s3fs_umask &= (S_IRWXU | S_IRWXG | S_IRWXO);
is_s3fs_umask = true;
return 1; // continue for fuse option
@ -4545,7 +4517,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 1; // continue for fuse option
}
if(0 == STR2NCMP(arg, "mp_umask=")){
mp_umask = s3fs_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
mp_umask = cvt_strtoofft(strchr(arg, '=') + sizeof(char), /*base=*/ 8);
mp_umask &= (S_IRWXU | S_IRWXG | S_IRWXO);
is_mp_umask = true;
return 0;
@ -4561,7 +4533,12 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "retries=")){
S3fsCurl::SetRetries(static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))));
off_t retries = cvt_strtoofft(strchr(arg, '=') + sizeof(char));
if(retries == 0){
S3FS_PRN_EXIT("retries must be greater than zero");
return -1;
}
S3fsCurl::SetRetries(static_cast<int>(retries));
return 0;
}
if(0 == STR2NCMP(arg, "use_cache=")){
@ -4577,7 +4554,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "multireq_max=")){
int maxreq = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
int maxreq = static_cast<int>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
S3fsCurl::SetMaxMultiRequest(maxreq);
return 0;
}
@ -4594,7 +4571,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
off_t rrs = 1;
// for an old format.
if(0 == STR2NCMP(arg, "use_rrs=")){
rrs = s3fs_strtoofft(strchr(arg, '=') + sizeof(char));
rrs = cvt_strtoofft(strchr(arg, '=') + sizeof(char));
}
if(0 == rrs){
S3fsCurl::SetStorageClass(STANDARD);
@ -4618,6 +4595,8 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
S3fsCurl::SetStorageClass(REDUCED_REDUNDANCY);
}else if(0 == strcmp(storage_class, "intelligent_tiering")){
S3fsCurl::SetStorageClass(INTELLIGENT_TIERING);
}else if(0 == strcmp(storage_class, "glacier")){
S3fsCurl::SetStorageClass(GLACIER);
}else{
S3FS_PRN_EXIT("unknown value for storage_class: %s", storage_class);
return -1;
@ -4734,7 +4713,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "ssl_verify_hostname=")){
long sslvh = static_cast<long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
long sslvh = static_cast<long>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
if(-1 == S3fsCurl::SetSslVerifyHostname(sslvh)){
S3FS_PRN_EXIT("poorly formed argument to option: ssl_verify_hostname.");
return -1;
@ -4803,7 +4782,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "public_bucket=")){
off_t pubbucket = s3fs_strtoofft(strchr(arg, '=') + sizeof(char));
off_t pubbucket = cvt_strtoofft(strchr(arg, '=') + sizeof(char));
if(1 == pubbucket){
S3fsCurl::SetPublicBucket(true);
// [NOTE]
@ -4831,17 +4810,17 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "connect_timeout=")){
long contimeout = static_cast<long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
long contimeout = static_cast<long>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
S3fsCurl::SetConnectTimeout(contimeout);
return 0;
}
if(0 == STR2NCMP(arg, "readwrite_timeout=")){
time_t rwtimeout = static_cast<time_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
time_t rwtimeout = static_cast<time_t>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
S3fsCurl::SetReadwriteTimeout(rwtimeout);
return 0;
}
if(0 == STR2NCMP(arg, "list_object_max_keys=")){
int max_keys = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
int max_keys = static_cast<int>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
if(max_keys < 1000){
S3FS_PRN_EXIT("argument should be over 1000: list_object_max_keys");
return -1;
@ -4850,19 +4829,19 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "max_stat_cache_size=")){
unsigned long cache_size = static_cast<unsigned long>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
unsigned long cache_size = static_cast<unsigned long>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
StatCache::getStatCacheData()->SetCacheSize(cache_size);
return 0;
}
if(0 == STR2NCMP(arg, "stat_cache_expire=")){
time_t expr_time = static_cast<time_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
time_t expr_time = static_cast<time_t>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
StatCache::getStatCacheData()->SetExpireTime(expr_time);
return 0;
}
// [NOTE]
// This option is for compatibility old version.
if(0 == STR2NCMP(arg, "stat_cache_interval_expire=")){
time_t expr_time = static_cast<time_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
time_t expr_time = static_cast<time_t>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
StatCache::getStatCacheData()->SetExpireTime(expr_time, true);
return 0;
}
@ -4879,7 +4858,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "parallel_count=") || 0 == STR2NCMP(arg, "parallel_upload=")){
int maxpara = static_cast<int>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
int maxpara = static_cast<int>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
if(0 >= maxpara){
S3FS_PRN_EXIT("argument should be over 1: parallel_count");
return -1;
@ -4892,7 +4871,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "multipart_size=")){
off_t size = static_cast<off_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char)));
off_t size = static_cast<off_t>(cvt_strtoofft(strchr(arg, '=') + sizeof(char)));
if(!S3fsCurl::SetMultipartSize(size)){
S3FS_PRN_EXIT("multipart_size option must be at least 5 MB.");
return -1;
@ -4900,7 +4879,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "ensure_diskfree=")){
off_t dfsize = s3fs_strtoofft(strchr(arg, '=') + sizeof(char)) * 1024 * 1024;
off_t dfsize = cvt_strtoofft(strchr(arg, '=') + sizeof(char)) * 1024 * 1024;
if(dfsize < S3fsCurl::GetMultipartSize()){
S3FS_PRN_WARN("specified size to ensure disk free space is smaller than multipart size, so set multipart size to it.");
dfsize = S3fsCurl::GetMultipartSize();
@ -4909,7 +4888,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
return 0;
}
if(0 == STR2NCMP(arg, "singlepart_copy_limit=")){
singlepart_copy_limit = static_cast<int64_t>(s3fs_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024;
singlepart_copy_limit = static_cast<int64_t>(cvt_strtoofft(strchr(arg, '=') + sizeof(char))) * 1024;
return 0;
}
if(0 == STR2NCMP(arg, "ahbe_conf=")){
@ -5021,21 +5000,25 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
instance_name = "[" + instance_name + "]";
return 0;
}
if(0 == STR2NCMP(arg, "mime=")){
mimetype_file = strchr(arg, '=') + sizeof(char);
return 0;
}
//
// debug option for s3fs
//
if(0 == STR2NCMP(arg, "dbglevel=")){
const char* strlevel = strchr(arg, '=') + sizeof(char);
if(0 == strcasecmp(strlevel, "silent") || 0 == strcasecmp(strlevel, "critical") || 0 == strcasecmp(strlevel, "crit")){
set_s3fs_log_level(S3FS_LOG_CRIT);
S3fsSignals::SetLogLevel(S3FS_LOG_CRIT);
}else if(0 == strcasecmp(strlevel, "error") || 0 == strcasecmp(strlevel, "err")){
set_s3fs_log_level(S3FS_LOG_ERR);
S3fsSignals::SetLogLevel(S3FS_LOG_ERR);
}else if(0 == strcasecmp(strlevel, "wan") || 0 == strcasecmp(strlevel, "warn") || 0 == strcasecmp(strlevel, "warning")){
set_s3fs_log_level(S3FS_LOG_WARN);
S3fsSignals::SetLogLevel(S3FS_LOG_WARN);
}else if(0 == strcasecmp(strlevel, "inf") || 0 == strcasecmp(strlevel, "info") || 0 == strcasecmp(strlevel, "information")){
set_s3fs_log_level(S3FS_LOG_INFO);
S3fsSignals::SetLogLevel(S3FS_LOG_INFO);
}else if(0 == strcasecmp(strlevel, "dbg") || 0 == strcasecmp(strlevel, "debug")){
set_s3fs_log_level(S3FS_LOG_DBG);
S3fsSignals::SetLogLevel(S3FS_LOG_DBG);
}else{
S3FS_PRN_EXIT("option dbglevel has unknown parameter(%s).", strlevel);
return -1;
@ -5049,7 +5032,7 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
//
if(0 == strcmp(arg, "-d") || 0 == strcmp(arg, "--debug")){
if(!IS_S3FS_LOG_INFO() && !IS_S3FS_LOG_DBG()){
set_s3fs_log_level(S3FS_LOG_INFO);
S3fsSignals::SetLogLevel(S3FS_LOG_INFO);
return 0;
}
if(0 == strcmp(arg, "--debug")){
@ -5061,12 +5044,41 @@ static int my_fuse_opt_proc(void* data, const char* arg, int key, struct fuse_ar
// "f2" is not used no more.
// (set S3FS_LOG_DBG)
if(0 == strcmp(arg, "f2")){
set_s3fs_log_level(S3FS_LOG_DBG);
S3fsSignals::SetLogLevel(S3FS_LOG_DBG);
return 0;
}
if(0 == strcmp(arg, "curldbg")){
S3fsCurl::SetVerbose(true);
return 0;
}else if(0 == STR2NCMP(arg, "curldbg=")){
const char* strlevel = strchr(arg, '=') + sizeof(char);
if(0 == strcasecmp(strlevel, "normal")){
S3fsCurl::SetVerbose(true);
}else if(0 == strcasecmp(strlevel, "body")){
S3fsCurl::SetVerbose(true);
S3fsCurl::SetDumpBody(true);
}else{
S3FS_PRN_EXIT("option curldbg has unknown parameter(%s).", strlevel);
return -1;
}
return 0;
}
//
// Check cache file, using SIGUSR1
//
if(0 == strcmp(arg, "set_check_cache_sigusr1")){
if(!S3fsSignals::SetUsr1Handler(NULL)){
S3FS_PRN_EXIT("could not set sigusr1 for checking cache.");
return -1;
}
return 0;
}else if(0 == STR2NCMP(arg, "set_check_cache_sigusr1=")){
const char* strfilepath = strchr(arg, '=') + sizeof(char);
if(!S3fsSignals::SetUsr1Handler(strfilepath)){
S3FS_PRN_EXIT("could not set sigusr1 for checking cache and output file(%s).", strfilepath);
return -1;
}
return 0;
}
if(0 == STR2NCMP(arg, "accessKeyId=")){
@ -5122,7 +5134,7 @@ int main(int argc, char* argv[])
// init syslog(default CRIT)
openlog("s3fs", LOG_PID | LOG_ODELAY | LOG_NOWAIT, LOG_USER);
set_s3fs_log_level(debug_level);
S3fsSignals::SetLogLevel(debug_level);
// init xml2
xmlInitParser();
@ -5199,8 +5211,23 @@ int main(int argc, char* argv[])
exit(EXIT_FAILURE);
}
// init curl
if(!S3fsCurl::InitS3fsCurl("/etc/mime.types")){
// init curl (without mime types)
//
// [NOTE]
// The curl initialization here does not load mime types.
// The mime types file parameter is a dynamic value that depends on
// the user's environment and is parsed by the my_fuse_opt_proc
// function, which runs after this curl initialization. Because
// curl methods are used inside my_fuse_opt_proc, curl itself must
// be initialized first. Fortunately, mime types are only needed
// for PUT/POST processing, which does not happen until option
// parsing is complete. Therefore, the mime types are loaded just
// after my_fuse_opt_proc has been called.
//
if(!S3fsCurl::InitS3fsCurl()){
S3FS_PRN_EXIT("Could not initiate curl library.");
s3fs_destroy_global_ssl();
exit(EXIT_FAILURE);
@ -5219,6 +5246,11 @@ int main(int argc, char* argv[])
exit(EXIT_FAILURE);
}
// init mime types for curl
if(!S3fsCurl::InitMimeType(mimetype_file)){
S3FS_PRN_WARN("Missing MIME types prevents setting Content-Type on uploaded objects.");
}
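To make the two-phase initialization described in the [NOTE] above concrete, the startup order now looks roughly like this (a condensed sketch assembled from the calls in this diff, not the literal main()):
if(!S3fsCurl::InitS3fsCurl()){                 // curl only; mime types are not loaded yet
    S3FS_PRN_EXIT("Could not initiate curl library.");
    exit(EXIT_FAILURE);
}
// ... fuse option parsing runs my_fuse_opt_proc() here; it may call curl
// methods and may set mimetype_file via the "mime=" option ...
if(!S3fsCurl::InitMimeType(mimetype_file)){    // mime types are loaded only now
    S3FS_PRN_WARN("Missing MIME types prevents setting Content-Type on uploaded objects.");
}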
// [NOTE]
// exclusive option check here.
//
@ -5429,14 +5461,6 @@ int main(int argc, char* argv[])
s3fs_oper.removexattr = s3fs_removexattr;
}
// set signal handler for debugging
if(!set_s3fs_usr2_handler()){
S3FS_PRN_EXIT("could not set signal handler for SIGUSR2.");
S3fsCurl::DestroyS3fsCurl();
s3fs_destroy_global_ssl();
exit(EXIT_FAILURE);
}
// now passing things off to fuse, fuse will finish evaluating the command line args
fuse_res = fuse_main(custom_args.argc, custom_args.argv, &s3fs_oper, NULL);
fuse_opt_free_args(&custom_args);


@ -30,6 +30,7 @@
#include <syslog.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <dirent.h>
#include <libxml/xpath.h>
#include <libxml/xpathInternals.h>
@ -458,7 +459,11 @@ bool AutoLock::isLockAcquired() const
AutoLock::~AutoLock()
{
if (is_lock_acquired) {
pthread_mutex_unlock(auto_mutex);
int res = pthread_mutex_unlock(auto_mutex);
if(res != 0){
S3FS_PRN_CRIT("pthread_mutex_lock returned: %d", res);
abort();
}
}
}
@ -744,6 +749,36 @@ bool delete_files_in_dir(const char* dir, bool is_remove_own)
return true;
}
//-------------------------------------------------------------------
// Utility for system information
//-------------------------------------------------------------------
bool compare_sysname(const char* target)
{
// [NOTE]
// The buffer size of sysname member in struct utsname is
// OS dependent, but 512 bytes is sufficient for now.
//
static char* psysname = NULL;
static char sysname[512];
if(!psysname){
struct utsname sysinfo;
if(0 != uname(&sysinfo)){
S3FS_PRN_ERR("could not initialize system name to internal buffer(errno:%d), thus use \"Linux\".", errno);
strcpy(sysname, "Linux");
}else{
S3FS_PRN_INFO("system name is %s", sysinfo.sysname);
sysname[sizeof(sysname) - 1] = '\0';
strncpy(sysname, sysinfo.sysname, sizeof(sysname) - 1);
}
psysname = &sysname[0];
}
if(!target || 0 != strcmp(psysname, target)){
return false;
}
return true;
}
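A caller would use this helper for OS-specific branching, for example (a hypothetical usage sketch, not part of this diff):
// Hypothetical: take a macOS-only code path based on the kernel name.
if(compare_sysname("Darwin")){
    // macOS-specific handling goes here
}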
//-------------------------------------------------------------------
// Utility functions for convert
//-------------------------------------------------------------------
@ -763,16 +798,13 @@ time_t get_mtime(const char *str)
strmtime = strmtime.substr(0, pos);
}
}
return static_cast<time_t>(s3fs_strtoofft(strmtime.c_str()));
return static_cast<time_t>(cvt_strtoofft(strmtime.c_str()));
}
static time_t get_time(headers_t& meta, bool overcheck, const char *header)
static time_t get_time(headers_t& meta, const char *header)
{
headers_t::const_iterator iter;
if(meta.end() == (iter = meta.find(header))){
if(overcheck){
return get_lastmodified(meta);
}
return 0;
}
return get_mtime((*iter).second.c_str());
@ -780,17 +812,35 @@ static time_t get_time(headers_t& meta, bool overcheck, const char *header)
time_t get_mtime(headers_t& meta, bool overcheck)
{
return get_time(meta, overcheck, "x-amz-meta-mtime");
time_t t = get_time(meta, "x-amz-meta-mtime");
if(t != 0){
return t;
}
t = get_time(meta, "x-amz-meta-goog-reserved-file-mtime");
if(t != 0){
return t;
}
if(overcheck){
return get_lastmodified(meta);
}
return 0;
}
time_t get_ctime(headers_t& meta, bool overcheck)
{
return get_time(meta, overcheck, "x-amz-meta-ctime");
time_t t = get_time(meta, "x-amz-meta-ctime");
if(t != 0){
return t;
}
if(overcheck){
return get_lastmodified(meta);
}
return 0;
}
off_t get_size(const char *s)
{
return s3fs_strtoofft(s);
return cvt_strtoofft(s);
}
off_t get_size(headers_t& meta)
@ -802,9 +852,9 @@ off_t get_size(headers_t& meta)
return get_size((*iter).second.c_str());
}
mode_t get_mode(const char *s)
mode_t get_mode(const char *s, int base)
{
return static_cast<mode_t>(s3fs_strtoofft(s));
return static_cast<mode_t>(cvt_strtoofft(s, base));
}
mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
@ -818,6 +868,8 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
}else if(meta.end() != (iter = meta.find("x-amz-meta-permissions"))){ // for s3sync
mode = get_mode((*iter).second.c_str());
isS3sync = true;
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-mode"))){ // for GCS
mode = get_mode((*iter).second.c_str(), 8);
}else{
// If another tool creates an object without permissions, default to owner
// read-write and group readable.
@ -886,7 +938,7 @@ mode_t get_mode(headers_t& meta, const char* path, bool checkdir, bool forcedir)
uid_t get_uid(const char *s)
{
return static_cast<uid_t>(s3fs_strtoofft(s));
return static_cast<uid_t>(cvt_strtoofft(s));
}
uid_t get_uid(headers_t& meta)
@ -896,6 +948,8 @@ uid_t get_uid(headers_t& meta)
return get_uid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-owner"))){ // for s3sync
return get_uid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-uid"))){ // for GCS
return get_uid((*iter).second.c_str());
}else{
return geteuid();
}
@ -903,7 +957,7 @@ uid_t get_uid(headers_t& meta)
gid_t get_gid(const char *s)
{
return static_cast<gid_t>(s3fs_strtoofft(s));
return static_cast<gid_t>(cvt_strtoofft(s));
}
gid_t get_gid(headers_t& meta)
@ -913,6 +967,8 @@ gid_t get_gid(headers_t& meta)
return get_gid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-group"))){ // for s3sync
return get_gid((*iter).second.c_str());
}else if(meta.end() != (iter = meta.find("x-amz-meta-goog-reserved-posix-gid"))){ // for GCS
return get_gid((*iter).second.c_str());
}else{
return getegid();
}
@ -1097,7 +1153,7 @@ void show_help ()
"\n"
" storage_class (default=\"standard\")\n"
" - store object with specified storage class. Possible values:\n"
" standard, standard_ia, onezone_ia, reduced_redundancy and intelligent_tiering.\n"
" standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering and glacier.\n"
"\n"
" use_rrs (default is disable)\n"
" - use Amazon's Reduced Redundancy Storage.\n"
@ -1202,12 +1258,12 @@ void show_help ()
" - maximum number of entries in the stat cache, and this maximum is\n"
" also treated as the number of symbolic link cache.\n"
"\n"
" stat_cache_expire (default is no expire)\n"
" stat_cache_expire (default is 900))\n"
" - specify expire time (seconds) for entries in the stat cache.\n"
" This expire time indicates the time since stat cached. and this\n"
" is also set to the expire time of the symbolic link cache.\n"
"\n"
" stat_cache_interval_expire (default is no expire)\n"
" stat_cache_interval_expire (default is 900)\n"
" - specify expire time (seconds) for entries in the stat cache(and\n"
" symbolic link cache).\n"
" This expire time is based on the time from the last access time\n"
@ -1231,11 +1287,11 @@ void show_help ()
" ssl_verify_hostname (default=\"2\")\n"
" - When 0, do not verify the SSL certificate against the hostname.\n"
"\n"
" nodnscache (disable dns cache)\n"
" - s3fs is always using dns cache, this option make dns cache disable.\n"
" nodnscache (disable DNS cache)\n"
" - s3fs is always using DNS cache, this option make DNS cache disable.\n"
"\n"
" nosscache (disable ssl session cache)\n"
" - s3fs is always using ssl session cache, this option make ssl \n"
" nosscache (disable SSL session cache)\n"
" - s3fs is always using SSL session cache, this option make SSL \n"
" session cache disable.\n"
"\n"
" multireq_max (default=\"20\")\n"
@ -1411,23 +1467,30 @@ void show_help ()
" only \"dir/\" object.\n"
"\n"
" use_wtf8 - support arbitrary file system encoding.\n"
" S3 requires all object names to be valid utf-8. But some\n"
" S3 requires all object names to be valid UTF-8. But some\n"
" clients, notably Windows NFS clients, use their own encoding.\n"
" This option re-encodes invalid utf-8 object names into valid\n"
" utf-8 by mapping offending codes into a 'private' codepage of the\n"
" This option re-encodes invalid UTF-8 object names into valid\n"
" UTF-8 by mapping offending codes into a 'private' codepage of the\n"
" Unicode set.\n"
" Useful on clients not using utf-8 as their file system encoding.\n"
" Useful on clients not using UTF-8 as their file system encoding.\n"
"\n"
" use_session_token - indicate that session token should be provided.\n"
" If credentials are provided by environment variables this switch\n"
" forces presence check of AWSSESSIONTOKEN variable.\n"
" Otherwise an error is returned."
" Otherwise an error is returned.\n"
"\n"
" requester_pays (default is disable)\n"
" This option instructs s3fs to enable requests involving\n"
" Requester Pays buckets.\n"
" It includes the 'x-amz-request-payer=requester' entry in the\n"
" request header."
" request header.\n"
"\n"
" mime (default is \"/etc/mime.types\")\n"
" Specify the path of the mime.types file.\n"
" If this option is not specified, the existence of \"/etc/mime.types\"\n"
" is checked, and that file is loaded as mime information.\n"
" If this file does not exist on macOS, then \"/etc/apache2/mime.types\"\n"
" is checked as well.\n"
"\n"
" dbglevel (default=\"crit\")\n"
" Set the debug message level. set value as crit (critical), err\n"
@ -1438,6 +1501,19 @@ void show_help ()
"\n"
" curldbg - put curl debug message\n"
" Put the debug message from libcurl when this option is specified.\n"
" Specify \"normal\" or \"body\" for the parameter.\n"
" If the parameter is omitted, it is the same as \"normal\".\n"
" If \"body\" is specified, some API communication body data will be\n"
" output in addition to the debug message output as \"normal\".\n"
"\n"
" set_check_cache_sigusr1 (default is stdout)\n"
" If the cache is enabled, you can check the integrity of the\n"
" cache file and the cache file's stats info file.\n"
" This option is specified and when sending the SIGUSR1 signal\n"
" to the s3fs process checks the cache status at that time.\n"
" This option can take a file path as parameter to output the\n"
" check result to that file. The file path parameter can be omitted.\n"
" If omitted, the result will be output to stdout or syslog.\n"
"\n"
"FUSE/mount Options:\n"
"\n"
@ -1473,7 +1549,7 @@ void show_help ()
" -d --debug Turn on DEBUG messages to syslog. Specifying -d\n"
" twice turns on FUSE debug messages to STDOUT.\n"
" -f FUSE foreground option - do not run as daemon.\n"
" -s FUSE singlethreaded option\n"
" -s FUSE single-threaded option\n"
" disable multi-threaded operation\n"
"\n"
"\n"


@ -123,12 +123,14 @@ std::string get_exist_directory_path(const std::string& path);
bool check_exist_dir_permission(const char* dirpath);
bool delete_files_in_dir(const char* dir, bool is_remove_own);
bool compare_sysname(const char* target);
time_t get_mtime(const char *s);
time_t get_mtime(headers_t& meta, bool overcheck = true);
time_t get_ctime(headers_t& meta, bool overcheck = true);
off_t get_size(const char *s);
off_t get_size(headers_t& meta);
mode_t get_mode(const char *s);
mode_t get_mode(const char *s, int base = 0);
mode_t get_mode(headers_t& meta, const char* path = NULL, bool checkdir = false, bool forcedir = false);
uid_t get_uid(const char *s);
uid_t get_uid(headers_t& meta);

src/sighandlers.cpp (new file, 286 lines)

@ -0,0 +1,286 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <syslog.h>
#include <pthread.h>
#include <curl/curl.h>
#include <csignal>
#include <algorithm>
#include <map>
#include <string>
#include <list>
#include <vector>
#include "common.h"
#include "sighandlers.h"
#include "curl.h"
#include "fdcache.h"
#include "psemaphore.h"
using namespace std;
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------
s3fs_log_level debug_level = S3FS_LOG_CRIT;
const char* s3fs_log_nest[S3FS_LOG_NEST_MAX] = {"", "  ", "    ", "      "};
//-------------------------------------------------------------------
// Class S3fsSignals
//-------------------------------------------------------------------
S3fsSignals* S3fsSignals::pSingleton = NULL;
bool S3fsSignals::enableUsr1 = false;
//-------------------------------------------------------------------
// Class methods
//-------------------------------------------------------------------
bool S3fsSignals::Initialize()
{
if(!S3fsSignals::pSingleton){
S3fsSignals::pSingleton = new S3fsSignals;
}
return true;
}
bool S3fsSignals::Destroy()
{
if(S3fsSignals::pSingleton){
delete S3fsSignals::pSingleton;
}
return true;
}
void S3fsSignals::HandlerUSR1(int sig)
{
if(SIGUSR1 != sig){
S3FS_PRN_ERR("The handler for SIGUSR1 received signal(%d)", sig);
return;
}
S3fsSignals* pSigobj = S3fsSignals::get();
if(!pSigobj){
S3FS_PRN_ERR("S3fsSignals object is not initialized.");
return;
}
if(!pSigobj->WakeupUsr1Thread()){
S3FS_PRN_ERR("Failed to wakeup the thread for SIGUSR1.");
return;
}
}
bool S3fsSignals::SetUsr1Handler(const char* path)
{
// set output file
if(!FdManager::SetCacheCheckOutput(path)){
S3FS_PRN_ERR("Could not set output file(%s) for checking cache.", path ? path : "null(stdout)");
return false;
}
S3fsSignals::enableUsr1 = true;
return true;
}
void* S3fsSignals::CheckCacheWorker(void* arg)
{
Semaphore* pSem = static_cast<Semaphore*>(arg);
if(!pSem){
pthread_exit(NULL);
}
if(!S3fsSignals::enableUsr1){
pthread_exit(NULL);
}
// wait and loop
while(S3fsSignals::enableUsr1){
// wait
pSem->wait();
if(!S3fsSignals::enableUsr1){
break; // asap
}
// check all cache
if(!FdManager::get()->CheckAllCache()){
S3FS_PRN_ERR("Processing failed due to some problem.");
}
// do not allow request queuing
for(int value = pSem->get_value(); 0 < value; value = pSem->get_value()){
pSem->wait();
}
}
return NULL;
}
void S3fsSignals::HandlerUSR2(int sig)
{
if(SIGUSR2 == sig){
S3fsSignals::BumpupLogLevel();
}else{
S3FS_PRN_ERR("The handler for SIGUSR2 received signal(%d)", sig);
}
}
bool S3fsSignals::InitUsr2Handler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR2;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR2, &sa, NULL)){
return false;
}
return true;
}
s3fs_log_level S3fsSignals::SetLogLevel(s3fs_log_level level)
{
if(level == debug_level){
return debug_level;
}
s3fs_log_level old = debug_level;
debug_level = level;
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
s3fs_log_level S3fsSignals::BumpupLogLevel()
{
s3fs_log_level old = debug_level;
debug_level = ( S3FS_LOG_CRIT == debug_level ? S3FS_LOG_ERR :
S3FS_LOG_ERR == debug_level ? S3FS_LOG_WARN :
S3FS_LOG_WARN == debug_level ? S3FS_LOG_INFO :
S3FS_LOG_INFO == debug_level ? S3FS_LOG_DBG :
S3FS_LOG_CRIT );
setlogmask(LOG_UPTO(S3FS_LOG_LEVEL_TO_SYSLOG(debug_level)));
S3FS_PRN_CRIT("change debug level from %sto %s", S3FS_LOG_LEVEL_STRING(old), S3FS_LOG_LEVEL_STRING(debug_level));
return old;
}
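Starting from the default level, each SIGUSR2 therefore walks the ladder CRIT -> ERR -> WARN -> INFO -> DBG and then wraps around to CRIT. For illustration (hypothetical direct calls; normally this runs via the signal handler):
S3fsSignals::BumpupLogLevel();   // CRIT -> ERR
S3fsSignals::BumpupLogLevel();   // ERR  -> WARN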
//-------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------
S3fsSignals::S3fsSignals() : pThreadUsr1(NULL), pSemUsr1(NULL)
{
if(S3fsSignals::enableUsr1){
if(!InitUsr1Handler()){
S3FS_PRN_ERR("failed creating thread for SIGUSR1 handler, but continue...");
}
}
if(!S3fsSignals::InitUsr2Handler()){
S3FS_PRN_ERR("failed to initialize SIGUSR2 handler for bumping log level, but continue...");
}
}
S3fsSignals::~S3fsSignals()
{
if(S3fsSignals::enableUsr1){
if(!DestroyUsr1Handler()){
S3FS_PRN_ERR("failed stopping thread for SIGUSR1 handler, but continue...");
}
}
}
bool S3fsSignals::InitUsr1Handler()
{
if(pThreadUsr1 || pSemUsr1){
S3FS_PRN_ERR("Already run thread for SIGUSR1");
return false;
}
// create thread
int result;
pSemUsr1 = new Semaphore(0);
pThreadUsr1 = new pthread_t;
if(0 != (result = pthread_create(pThreadUsr1, NULL, S3fsSignals::CheckCacheWorker, static_cast<void*>(pSemUsr1)))){
S3FS_PRN_ERR("Could not create thread for SIGUSR1 by %d", result);
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
return false;
}
// set handler
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_handler = S3fsSignals::HandlerUSR1;
sa.sa_flags = SA_RESTART;
if(0 != sigaction(SIGUSR1, &sa, NULL)){
S3FS_PRN_ERR("Could not set signal handler for SIGUSR1");
DestroyUsr1Handler();
return false;
}
return true;
}
bool S3fsSignals::DestroyUsr1Handler()
{
if(!pThreadUsr1 || !pSemUsr1){
return false;
}
// for thread exit
S3fsSignals::enableUsr1 = false;
// wakeup thread
pSemUsr1->post();
// wait for thread exiting
void* retval = NULL;
int result;
if(0 != (result = pthread_join(*pThreadUsr1, &retval))){
S3FS_PRN_ERR("Could not stop thread for SIGUSR1 by %d", result);
return false;
}
delete pSemUsr1;
delete pThreadUsr1;
pSemUsr1 = NULL;
pThreadUsr1 = NULL;
return true;
}
bool S3fsSignals::WakeupUsr1Thread()
{
if(!pThreadUsr1 || !pSemUsr1){
S3FS_PRN_ERR("The thread for SIGUSR1 is not setup.");
return false;
}
pSemUsr1->post();
return true;
}
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/

src/sighandlers.h (new file, 73 lines)

@ -0,0 +1,73 @@
/*
* s3fs - FUSE-based file system backed by Amazon S3
*
* Copyright(C) 2007 Randy Rizun <rrizun@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef S3FS_SIGHANDLERS_H_
#define S3FS_SIGHANDLERS_H_
#include "psemaphore.h"
//----------------------------------------------
// class S3fsSignals
//----------------------------------------------
class S3fsSignals
{
private:
static S3fsSignals* pSingleton;
static bool enableUsr1;
pthread_t* pThreadUsr1;
Semaphore* pSemUsr1;
protected:
static S3fsSignals* get(void) { return pSingleton; }
static void HandlerUSR1(int sig);
static void* CheckCacheWorker(void* arg);
static void HandlerUSR2(int sig);
static bool InitUsr2Handler(void);
S3fsSignals();
~S3fsSignals();
bool InitUsr1Handler(void);
bool DestroyUsr1Handler(void);
bool WakeupUsr1Thread(void);
public:
static bool Initialize(void);
static bool Destroy(void);
static bool SetUsr1Handler(const char* path);
static s3fs_log_level SetLogLevel(s3fs_log_level level);
static s3fs_log_level BumpupLogLevel(void);
};
#endif // S3FS_SIGHANDLERS_H_
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
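Taken together, the public interface of this class is driven from three places in s3fs.cpp, roughly as follows (a condensed sketch of the call sites shown earlier in this diff; the output path is an example value):
// During option parsing: enable the SIGUSR1 cache check (NULL means stdout).
S3fsSignals::SetUsr1Handler("/tmp/check_cache_result.txt");
// In s3fs_init(): create the singleton, which installs the SIGUSR1/SIGUSR2 handlers.
if(!S3fsSignals::Initialize()){
    S3FS_PRN_ERR("Failed to initialize signal object, but continue...");
}
// In s3fs_destroy(): stop the SIGUSR1 worker thread and delete the singleton.
if(!S3fsSignals::Destroy()){
    S3FS_PRN_WARN("Failed to clean up signal object.");
}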


@ -68,6 +68,41 @@ off_t s3fs_strtoofft(const char* str, int base)
return result;
}
// wrapped s3fs_strtoofft()
//
// This function catches the s3fs_strtoofft() exception and returns a boolean value.
//
bool try_strtoofft(const char* str, off_t& value, int base)
{
if(str){
try{
value = s3fs_strtoofft(str, base);
}catch(std::exception &e){
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t.", str);
return false;
}
}else{
S3FS_PRN_WARN("parameter string is null.");
return false;
}
return true;
}
// wrapped try_strtoofft -> s3fs_strtoofft()
//
// This function returns 0 if a value that cannot be converted is specified.
// Call it only where a return value of 0 can be treated as the error case
// and the operation can still continue.
//
off_t cvt_strtoofft(const char* str, int base)
{
off_t result = 0;
if(!try_strtoofft(str, result, base)){
S3FS_PRN_WARN("something error is occurred in convert string(%s) to off_t, thus return 0 as default.", (str ? str : "null"));
return 0;
}
return result;
}
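The three conversion entry points differ only in how they report failure; a small sketch with hypothetical values, assuming the declarations in string_util.h:
// s3fs_strtoofft(): throws std::invalid_argument / std::out_of_range on bad input.
// try_strtoofft():  catches that exception and reports success via its return value.
// cvt_strtoofft():  never throws; logs a warning and falls back to 0.
off_t value = 0;
bool ok = try_strtoofft("1024", value);        // ok == true,  value == 1024
ok = try_strtoofft("not-a-number", value);     // ok == false, value left unchanged
off_t size = cvt_strtoofft("not-a-number");    // logs a warning, size == 0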
string lower(string s)
{
// change each character of the string to lower case


@ -37,6 +37,8 @@ template <class T> std::string str(T value);
// Convert string to off_t. Throws std::invalid_argument and std::out_of_range on bad input.
off_t s3fs_strtoofft(const char* str, int base = 0);
bool try_strtoofft(const char* str, off_t& value, int base = 0);
off_t cvt_strtoofft(const char* str, int base = 0);
std::string trim_left(const std::string &s, const std::string &t = SPACES);
std::string trim_right(const std::string &s, const std::string &t = SPACES);


@ -20,11 +20,21 @@
#include <limits>
#include <stdint.h>
#include <strings.h>
#include <string>
#include <map>
#include "common.h"
#include "string_util.h"
#include "test_util.h"
//-------------------------------------------------------------------
// Global variables for test_string_util
//-------------------------------------------------------------------
bool foreground = false;
s3fs_log_level debug_level = S3FS_LOG_CRIT;
std::string instance_name;
void test_trim()
{
ASSERT_EQUALS(std::string("1234"), trim(" 1234 "));


@ -8,9 +8,10 @@
# environment variables:
#
# S3FS_CREDENTIALS_FILE=keyfile s3fs format key file
# S3FS_PROFILE=name s3fs profile to use (overrides key file)
# TEST_BUCKET_1=bucketname Name of bucket to use
# S3PROXY_BINARY="" Specify empty string to skip S3Proxy start
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
# S3_URL="https://s3.amazonaws.com" Specify Amazon AWS as the S3 provider
#
# Example of running against Amazon S3 using a bucket named "bucket":
#
@ -52,7 +53,7 @@ export S3_URL
export TEST_SCRIPT_DIR=`pwd`
export TEST_BUCKET_MOUNT_POINT_1=${TEST_BUCKET_1}
S3PROXY_VERSION="1.7.0"
S3PROXY_VERSION="1.7.1"
S3PROXY_BINARY=${S3PROXY_BINARY-"s3proxy-${S3PROXY_VERSION}"}
if [ ! -f "$S3FS_CREDENTIALS_FILE" ]
@ -62,6 +63,11 @@ then
fi
chmod 600 "$S3FS_CREDENTIALS_FILE"
if [ -z "${S3FS_PROFILE}" ]; then
export AWS_ACCESS_KEY_ID=$(cut -d: -f1 ${S3FS_CREDENTIALS_FILE})
export AWS_SECRET_ACCESS_KEY=$(cut -d: -f2 ${S3FS_CREDENTIALS_FILE})
fi
if [ ! -d $TEST_BUCKET_MOUNT_POINT_1 ]
then
mkdir -p $TEST_BUCKET_MOUNT_POINT_1
@ -140,6 +146,8 @@ function start_s3fs {
# Public bucket if PUBLIC is set
if [ -n "${PUBLIC}" ]; then
AUTH_OPT="-o public_bucket=1"
elif [ -n "${S3FS_PROFILE}" ]; then
AUTH_OPT="-o profile=${S3FS_PROFILE}"
else
AUTH_OPT="-o passwd_file=${S3FS_CREDENTIALS_FILE}"
fi
@ -199,6 +207,7 @@ function start_s3fs {
-o retries=3 \
-f \
"${@}" | stdbuf -oL -eL sed $SED_BUFFER_FLAG "s/^/s3fs: /" &
S3FS_PID=$!
)
if [ `uname` = "Darwin" ]; then


@ -33,6 +33,19 @@ function test_truncate_file {
rm_test_file
}
function test_truncate_upload {
describe "Testing truncate file for uploading ..."
# A file of this size exercises both the multipart and the mix upload
# paths when uploading; we test these cases here.
rm_test_file ${BIG_FILE}
truncate ${BIG_FILE} -s ${BIG_FILE_LENGTH}
rm_test_file ${BIG_FILE}
}
function test_truncate_empty_file {
describe "Testing truncate empty file ..."
# Write an empty test file
@ -180,7 +193,7 @@ function test_redirects {
}
function test_mkdir_rmdir {
describe "Testing creation/removal of a directory"
describe "Testing creation/removal of a directory ..."
if [ -e $TEST_DIR ]; then
echo "Unexpected, this file/directory exists: ${TEST_DIR}"
@ -197,20 +210,12 @@ function test_chmod {
# create the test file again
mk_test_file
if [ `uname` = "Darwin" ]; then
ORIGINAL_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
else
ORIGINAL_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
fi
ORIGINAL_PERMISSIONS=$(get_permissions $TEST_TEXT_FILE)
chmod 777 $TEST_TEXT_FILE;
# if they're the same, we have a problem.
if [ `uname` = "Darwin" ]; then
CHANGED_PERMISSIONS=$(stat -f "%p" $TEST_TEXT_FILE)
else
CHANGED_PERMISSIONS=$(stat --format=%a $TEST_TEXT_FILE)
fi
CHANGED_PERMISSIONS=$(get_permissions $TEST_TEXT_FILE)
if [ $CHANGED_PERMISSIONS == $ORIGINAL_PERMISSIONS ]
then
echo "Could not modify $TEST_TEXT_FILE permissions"
@ -264,7 +269,7 @@ function test_chown {
}
function test_list {
describe "Testing list"
describe "Testing list ..."
mk_test_file
mk_test_dir
@ -279,7 +284,7 @@ function test_list {
}
function test_remove_nonempty_directory {
describe "Testing removing a non-empty directory"
describe "Testing removing a non-empty directory ..."
mk_test_dir
touch "${TEST_DIR}/file"
(
@ -290,8 +295,19 @@ function test_remove_nonempty_directory {
rm_test_dir
}
function test_external_directory_creation {
describe "Test external directory creation ..."
OBJECT_NAME="$(basename $PWD)/directory/${TEST_TEXT_FILE}"
echo "data" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
ls | grep directory
get_permissions directory | grep ^750$
ls directory
cmp <(echo "data") directory/${TEST_TEXT_FILE}
rm -f directory/${TEST_TEXT_FILE}
}
function test_external_modification {
describe "Test external modification to an object"
describe "Test external modification to an object ..."
echo "old" > ${TEST_TEXT_FILE}
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 2
@ -301,7 +317,7 @@ function test_external_modification {
}
function test_read_external_object() {
describe "create objects via aws CLI and read via s3fs"
describe "create objects via aws CLI and read via s3fs ..."
OBJECT_NAME="$(basename $PWD)/${TEST_TEXT_FILE}"
sleep 3
echo "test" | aws_cli s3 cp - "s3://${TEST_BUCKET_1}/${OBJECT_NAME}"
@ -540,8 +556,8 @@ function test_update_time() {
# create the test
mk_test_file
mtime=`get_ctime $TEST_TEXT_FILE`
ctime=`get_mtime $TEST_TEXT_FILE`
ctime=`get_ctime $TEST_TEXT_FILE`
mtime=`get_mtime $TEST_TEXT_FILE`
sleep 2
chmod +x $TEST_TEXT_FILE
@ -554,7 +570,7 @@ function test_update_time() {
fi
sleep 2
chown $UID:$UID $TEST_TEXT_FILE;
chown $UID $TEST_TEXT_FILE
ctime3=`get_ctime $TEST_TEXT_FILE`
mtime3=`get_mtime $TEST_TEXT_FILE`
@ -587,7 +603,7 @@ function test_update_time() {
}
function test_rm_rf_dir {
describe "Test that rm -rf will remove directory with contents"
describe "Test that rm -rf will remove directory with contents ..."
# Create a dir with some files and directories
mkdir dir1
mkdir dir1/dir2
@ -604,7 +620,7 @@ function test_rm_rf_dir {
}
function test_copy_file {
describe "Test simple copy"
describe "Test simple copy ..."
dd if=/dev/urandom of=/tmp/simple_file bs=1024 count=1
cp /tmp/simple_file copied_simple_file
@ -615,13 +631,13 @@ function test_copy_file {
}
function test_write_after_seek_ahead {
describe "Test writes succeed after a seek ahead"
describe "Test writes succeed after a seek ahead ..."
dd if=/dev/zero of=testfile seek=1 count=1 bs=1024
rm_test_file testfile
}
function test_overwrite_existing_file_range {
describe "Test overwrite range succeeds"
describe "Test overwrite range succeeds ..."
dd if=<(seq 1000) of=${TEST_TEXT_FILE}
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=1 count=1 bs=1024 conv=notrunc
cmp ${TEST_TEXT_FILE} <(
@ -633,7 +649,7 @@ function test_overwrite_existing_file_range {
}
function test_concurrency {
describe "Test concurrent updates to a directory"
describe "Test concurrent updates to a directory ..."
for i in `seq 5`; do echo foo > $i; done
for process in `seq 10`; do
for i in `seq 5`; do
@ -648,7 +664,7 @@ function test_concurrency {
}
function test_concurrent_writes {
describe "Test concurrent updates to a file"
describe "Test concurrent updates to a file ..."
dd if=/dev/urandom of=${TEST_TEXT_FILE} bs=$BIG_FILE_LENGTH count=1
for process in `seq 10`; do
dd if=/dev/zero of=${TEST_TEXT_FILE} seek=$(($RANDOM % $BIG_FILE_LENGTH)) count=1 bs=1024 conv=notrunc &
@ -658,7 +674,7 @@ function test_concurrent_writes {
}
function test_open_second_fd {
describe "read from an open fd"
describe "read from an open fd ..."
rm_test_file second_fd_file
RESULT=$( (echo foo ; wc -c < second_fd_file >&2) 2>& 1>second_fd_file)
if [ "$RESULT" -ne 4 ]; then
@ -669,13 +685,19 @@ function test_open_second_fd {
}
function test_write_multiple_offsets {
describe "test writing to multiple offsets"
../../write_multiple_offsets.py ${TEST_TEXT_FILE}
describe "test writing to multiple offsets ..."
../../write_multiple_offsets.py ${TEST_TEXT_FILE} 1024 1 $((16 * 1024 * 1024)) 1 $((18 * 1024 * 1024)) 1
rm_test_file ${TEST_TEXT_FILE}
}
function test_write_multiple_offsets_backwards {
describe "test writing to multiple offsets ..."
../../write_multiple_offsets.py ${TEST_TEXT_FILE} $((20 * 1024 * 1024 + 1)) 1 $((10 * 1024 * 1024)) 1
rm_test_file ${TEST_TEXT_FILE}
}
function test_clean_up_cache() {
describe "Test clean up cache"
describe "Test clean up cache ..."
dir="many_files"
count=25
@ -701,36 +723,22 @@ function test_clean_up_cache() {
}
function test_content_type() {
describe "Test Content-Type detection"
describe "Test Content-Type detection ..."
DIR_NAME="$(basename $PWD)"
touch "test.txt"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.txt" | grep "ContentType")
if [ `uname` = "Darwin" ]; then
if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then
echo "Unexpected Content-Type(MacOS): $CONTENT_TYPE"
return 1;
fi
else
if ! echo $CONTENT_TYPE | grep -q "text/plain"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
if ! echo $CONTENT_TYPE | grep -q "text/plain"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
touch "test.jpg"
CONTENT_TYPE=$(aws_cli s3api head-object --bucket "${TEST_BUCKET_1}" --key "${DIR_NAME}/test.jpg" | grep "ContentType")
if [ `uname` = "Darwin" ]; then
if ! echo $CONTENT_TYPE | grep -q "application/octet-stream"; then
echo "Unexpected Content-Type(MacOS): $CONTENT_TYPE"
return 1;
fi
else
if ! echo $CONTENT_TYPE | grep -q "image/jpeg"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
if ! echo $CONTENT_TYPE | grep -q "image/jpeg"; then
echo "Unexpected Content-Type: $CONTENT_TYPE"
return 1;
fi
touch "test.bin"
@ -748,12 +756,189 @@ function test_content_type() {
fi
}
# create more files than -o max_stat_cache_size
function test_truncate_cache() {
describe "Test make cache files over max cache file size ..."
for dir in $(seq 2); do
mkdir $dir
for file in $(seq 75); do
touch $dir/$file
done
ls $dir
done
}
function test_cache_file_stat() {
describe "Test cache file stat ..."
dd if=/dev/urandom of="${BIG_FILE}" bs=${BIG_FILE_LENGTH} count=1
#
# get "testrun-xxx" directory name
#
CACHE_TESTRUN_DIR=$(ls -1 ${CACHE_DIR}/${TEST_BUCKET_1}/ 2>/dev/null | grep testrun 2>/dev/null)
#
# get cache file inode number
#
CACHE_FILE_INODE=$(ls -i ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null | awk '{print $1}')
if [ -z ${CACHE_FILE_INODE} ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# get lines from cache stat file
#
CACHE_FILE_STAT_LINE_1=$(sed -n 1p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
CACHE_FILE_STAT_LINE_2=$(sed -n 2p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
if [ -z ${CACHE_FILE_STAT_LINE_1} ] || [ -z ${CACHE_FILE_STAT_LINE_2} ]; then
echo "could not get first or second line from cache file stat: ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# compare
#
if [ "${CACHE_FILE_STAT_LINE_1}" != "${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}" ]; then
echo "first line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_1}\" != \"${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}\""
return 1;
fi
if [ "${CACHE_FILE_STAT_LINE_2}" != "0:${BIG_FILE_LENGTH}:1:0" ]; then
echo "last line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_2}\" != \"0:${BIG_FILE_LENGTH}:1:0\""
return 1;
fi
#
# remove cache files directly
#
rm -f ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}
rm -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}
#
# write a byte into the middle(not the boundary) of the file
#
CHECK_UPLOAD_OFFSET=$((10 * 1024 * 1024 + 17))
dd if=/dev/urandom of="${BIG_FILE}" bs=1 count=1 seek=${CHECK_UPLOAD_OFFSET} conv=notrunc
#
# get cache file inode number
#
CACHE_FILE_INODE=$(ls -i ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null | awk '{print $1}')
if [ -z ${CACHE_FILE_INODE} ]; then
echo "Not found cache file or failed to get inode: ${CACHE_DIR}/${TEST_BUCKET_1}/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# get lines from cache stat file
#
CACHE_FILE_STAT_LINE_1=$(sed -n 1p ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE})
CACHE_FILE_STAT_LINE_E=$(tail -1 ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE} 2>/dev/null)
if [ -z ${CACHE_FILE_STAT_LINE_1} ] || [ -z ${CACHE_FILE_STAT_LINE_E} ]; then
echo "could not get first or end line from cache file stat: ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${CACHE_TESTRUN_DIR}/${BIG_FILE}"
return 1;
fi
#
# check the first line, and the cache file size via the last line
#
# We should check all stat lines, but some values differ depending on
# the processing system etc., so the cache file size is calculated
# from the last line and compared instead.
#
CACHE_LAST_OFFSET=$(echo ${CACHE_FILE_STAT_LINE_E} | cut -d ":" -f1)
CACHE_LAST_SIZE=$(echo ${CACHE_FILE_STAT_LINE_E} | cut -d ":" -f2)
CACHE_TOTAL_SIZE=$((${CACHE_LAST_OFFSET} + ${CACHE_LAST_SIZE}))
if [ "${CACHE_FILE_STAT_LINE_1}" != "${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}" ]; then
echo "first line(cache file stat) is different: \"${CACHE_FILE_STAT_LINE_1}\" != \"${CACHE_FILE_INODE}:${BIG_FILE_LENGTH}\""
return 1;
fi
if [ ${BIG_FILE_LENGTH} -ne ${CACHE_TOTAL_SIZE} ]; then
echo "the file size indicated by the cache stat file is different: \"${BIG_FILE_LENGTH}\" != \"${CACHE_TOTAL_SIZE}\""
return 1;
fi
rm_test_file "${BIG_FILE}"
}
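For reference, after the first dd above the cache stats file checked by this test contains exactly the two lines compared here: a header of the form "inode:filesize" followed by one "offset:size:loaded:modified" entry per page. With placeholder values:
<inode-of-cache-file>:<BIG_FILE_LENGTH>
0:<BIG_FILE_LENGTH>:1:0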
function test_upload_sparsefile {
describe "Testing upload sparse file ..."
rm_test_file ${BIG_FILE}
rm -f /tmp/${BIG_FILE}
#
# Make all HOLE file
#
truncate ${BIG_FILE} -s ${BIG_FILE_LENGTH}
#
# Write some bytes at about the middle of the file
# (deliberately not aligned to block boundaries)
#
WRITE_POS=$((${BIG_FILE_LENGTH} / 2 - 128))
echo -n "0123456789ABCDEF" | dd of="/tmp/${BIG_FILE}" bs=1 count=16 seek=${WRITE_POS} conv=notrunc
#
# copy(upload) the file
#
cp /tmp/${BIG_FILE} ${BIG_FILE}
#
# check
#
cmp /tmp/${BIG_FILE} ${BIG_FILE}
rm_test_file ${BIG_FILE}
rm -f /tmp/${BIG_FILE}
}
function test_mix_upload_entities() {
describe "Testing upload sparse files ..."
#
# Make test file
#
dd if=/dev/urandom of=${BIG_FILE} bs=$BIG_FILE_LENGTH count=1
#
# If the cache option is enabled, delete the cache of uploaded files.
#
if [ -f ${CACHE_DIR}/${TEST_BUCKET_1}/${BIG_FILE} ]; then
rm -f ${CACHE_DIR}/${TEST_BUCKET_1}/${BIG_FILE}
fi
if [ -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${BIG_FILE} ]; then
rm -f ${CACHE_DIR}/.${TEST_BUCKET_1}.stat/${BIG_FILE}
fi
#
# Do a partial write to the file.
#
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=0 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=8192 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=1073152 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=26214400 conv=notrunc
echo -n "0123456789ABCDEF" | dd of=${BIG_FILE} bs=1 count=16 seek=26222592 conv=notrunc
rm_test_file "${BIG_FILE}"
}
function test_ut_ossfs {
describe "Testing ossfs python ut..."
export TEST_BUCKET_MOUNT_POINT=$TEST_BUCKET_MOUNT_POINT_1
../../ut_test.py
}
function add_all_tests {
if `ps -ef | grep -v grep | grep s3fs | grep -q ensure_diskfree` && ! `uname | grep -q Darwin`; then
if ! ps u $S3FS_PID | grep -q ensure_diskfree && ! uname | grep -q Darwin; then
add_tests test_clean_up_cache
fi
add_tests test_append_file
add_tests test_truncate_file
add_tests test_append_file
add_tests test_truncate_file
add_tests test_truncate_upload
add_tests test_truncate_empty_file
add_tests test_mv_file
add_tests test_mv_empty_directory
@ -764,6 +949,10 @@ function add_all_tests {
add_tests test_chown
add_tests test_list
add_tests test_remove_nonempty_directory
if ! ps u $S3FS_PID | grep -q notsup_compat_dir; then
# TODO: investigate why notsup_compat_dir fails
add_tests test_external_directory_creation
fi
add_tests test_external_modification
add_tests test_read_external_object
add_tests test_rename_before_close
@ -783,7 +972,15 @@ function add_all_tests {
add_tests test_concurrent_writes
add_tests test_open_second_fd
add_tests test_write_multiple_offsets
add_tests test_write_multiple_offsets_backwards
add_tests test_content_type
add_tests test_truncate_cache
add_tests test_upload_sparsefile
add_tests test_mix_upload_entities
add_tests test_ut_ossfs
if ps u $S3FS_PID | grep -q use_cache; then
add_tests test_cache_file_stat
fi
}
init_suite


@ -39,6 +39,7 @@ FLAGS=(
"use_cache=${CACHE_DIR} -o ensure_diskfree=${ENSURE_DISKFREE_SIZE}"
enable_content_md5
enable_noobj_cache
max_stat_cache_size=100
nocopyapi
nomultipart
notsup_compat_dir


@ -238,7 +238,7 @@ function get_ctime() {
if [ `uname` = "Darwin" ]; then
stat -f "%c" "$1"
else
stat -c %Z "$1"
stat -c "%Z" "$1"
fi
}
@ -246,9 +246,18 @@ function get_mtime() {
if [ `uname` = "Darwin" ]; then
stat -f "%m" "$1"
else
stat -c %Y "$1"
stat -c "%Y" "$1"
fi
}
function get_permissions() {
if [ `uname` = "Darwin" ]; then
stat -f "%p" "$1"
else
stat -c "%a" "$1"
fi
}
function check_content_type() {
INFO_STR=`aws_cli s3api head-object --bucket ${TEST_BUCKET_1} --key $1`
if [[ "${INFO_STR}" != *"$2"* ]]
@ -264,5 +273,9 @@ function get_disk_avail_size() {
}
function aws_cli() {
AWS_ACCESS_KEY_ID=local-identity AWS_SECRET_ACCESS_KEY=local-credential aws $* --endpoint-url "${S3_URL}" --no-verify-ssl
FLAGS=""
if [ -n "${S3FS_PROFILE}" ]; then
FLAGS="--profile ${S3FS_PROFILE}"
fi
aws $* --endpoint-url "${S3_URL}" --no-verify-ssl $FLAGS
}

test/ut_test.py (new executable file, 81 lines)

@ -0,0 +1,81 @@
#!/usr/bin/env python2
import os
import unittest
import ConfigParser
import random
import sys
import time
class OssfsUnitTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def random_string(self, len):
char_set = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
list = []
for i in range(0, len):
list.append(random.choice(char_set))
return "".join(list)
def test_read_file(self):
filename = "%s" % (self.random_string(10))
print filename
f = open(filename, 'w')
data = self.random_string(1000)
f.write(data)
f.close()
f = open(filename, 'r')
data = f.read(100)
self.assertEqual(len(data), 100)
data = f.read(100)
self.assertEqual(len(data), 100)
f.close()
def test_rename_file(self):
filename1 = "%s" % (self.random_string(10))
filename2 = "%s" % (self.random_string(10))
print filename1, filename2
f = open(filename1, 'w+')
data1 = self.random_string(1000)
f.write(data1)
os.rename(filename1, filename2)
f.seek(0, 0)
data2 = f.read()
f.close()
self.assertEqual(len(data1), len(data2))
self.assertEqual(data1, data2)
def test_rename_file2(self):
filename1 = "%s" % (self.random_string(10))
filename2 = "%s" % (self.random_string(10))
print filename1, filename2
f = open(filename1, 'w')
data1 = self.random_string(1000)
f.write(data1)
f.close()
os.rename(filename1, filename2)
f = open(filename2, 'r')
f.seek(0, 0)
data2 = f.read()
f.close()
self.assertEqual(len(data1), len(data2))
self.assertEqual(data1, data2)
if __name__ == '__main__':
unittest.main()


@ -1,18 +1,18 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
import os
import sys
if len(sys.argv) < 4 or len(sys.argv) % 2 != 0:
sys.exit("Usage: %s OUTFILE OFFSET_1 SIZE_1 [OFFSET_N SIZE_N]...")
filename = sys.argv[1]
data = bytes('a', 'utf-8')
fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
try:
os.pwrite(fd, data, 1024)
os.pwrite(fd, data, 16 * 1024 * 1024)
os.pwrite(fd, data, 18 * 1024 * 1024)
for i in range(2, len(sys.argv), 2):
data = "a" * int(sys.argv[i+1])
os.lseek(fd, int(sys.argv[i]), os.SEEK_SET)
os.write(fd, data)
finally:
os.close(fd)
stat = os.lstat(filename)
assert stat.st_size == 18 * 1024 * 1024 + 1